Posted to commits@geode.apache.org by kl...@apache.org on 2016/10/31 20:55:41 UTC

[01/50] [abbrv] incubator-geode git commit: GEODE-999: remove accidentally checked in file. [Forced Update!]

Repository: incubator-geode
Updated Branches:
  refs/heads/feature/GEODE-1930 86a3fb5a6 -> 24f496df4 (forced update)


GEODE-999: remove accidentally checked in file.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/582694d3
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/582694d3
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/582694d3

Branch: refs/heads/feature/GEODE-1930
Commit: 582694d3d67511095531abc43f0646c22b038c7f
Parents: 313bbab
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Thu Oct 13 09:01:34 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Thu Oct 13 09:01:34 2016 -0700

----------------------------------------------------------------------
 .../test/gemfire-jstewartgeode999-files.tgz       | Bin 877528 -> 0 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/582694d3/artifacts-jstewartgeode999/test/gemfire-jstewartgeode999-files.tgz
----------------------------------------------------------------------
diff --git a/artifacts-jstewartgeode999/test/gemfire-jstewartgeode999-files.tgz b/artifacts-jstewartgeode999/test/gemfire-jstewartgeode999-files.tgz
deleted file mode 100644
index a15d245..0000000
Binary files a/artifacts-jstewartgeode999/test/gemfire-jstewartgeode999-files.tgz and /dev/null differ


[08/50] [abbrv] incubator-geode git commit: GEODE-1952 Add Apache license to new files; add Gemfile.lock to rat.gradle

Posted by kl...@apache.org.
GEODE-1952 Add Apache license to new files; add Gemfile.lock to rat.gradle


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/e9669d61
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/e9669d61
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/e9669d61

Branch: refs/heads/feature/GEODE-1930
Commit: e9669d6123725e12cbd2b0c5a145fc36b615c292
Parents: 131e99e
Author: Joey McAllister <jm...@pivotal.io>
Authored: Thu Oct 13 16:22:56 2016 -0700
Committer: Karen Miller <km...@pivotal.io>
Committed: Fri Oct 14 14:51:03 2016 -0700

----------------------------------------------------------------------
 geode-book/.gitignore                              |  4 ++--
 geode-book/Gemfile                                 | 17 +++++++++++++++++
 geode-book/Gemfile.lock                            |  2 +-
 geode-book/config.yml                              | 17 +++++++++++++++++
 geode-book/master_middleman/source/index.html.erb  | 16 ++++++++++++++++
 .../master_middleman/source/javascripts/book.js    | 15 +++++++++++++++
 .../source/javascripts/waypoints/context.js        | 15 +++++++++++++++
 .../source/javascripts/waypoints/group.js          | 15 +++++++++++++++
 .../javascripts/waypoints/noframeworkAdapter.js    | 15 +++++++++++++++
 .../source/javascripts/waypoints/sticky.js         | 15 +++++++++++++++
 .../source/javascripts/waypoints/waypoint.js       | 15 +++++++++++++++
 .../source/layouts/_book-footer.erb                | 16 ++++++++++++++++
 .../master_middleman/source/layouts/_title.erb     | 17 ++++++++++++++++-
 .../source/stylesheets/book-styles.css.scss        | 15 +++++++++++++++
 .../stylesheets/partials/_book-base-values.scss    | 14 ++++++++++++++
 .../source/stylesheets/partials/_book-vars.scss    | 16 +++++++++++++++-
 .../source/subnavs/geode-subnav.erb                | 16 ++++++++++++++++
 geode-book/redirects.rb                            | 15 +++++++++++++++
 gradle/rat.gradle                                  |  4 +++-
 19 files changed, 253 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e9669d61/geode-book/.gitignore
----------------------------------------------------------------------
diff --git a/geode-book/.gitignore b/geode-book/.gitignore
index 0cae826..30545a4 100644
--- a/geode-book/.gitignore
+++ b/geode-book/.gitignore
@@ -1,2 +1,2 @@
-output
-final_app
+output/
+final_app/

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e9669d61/geode-book/Gemfile
----------------------------------------------------------------------
diff --git a/geode-book/Gemfile b/geode-book/Gemfile
index f66d333..b61bbdc 100644
--- a/geode-book/Gemfile
+++ b/geode-book/Gemfile
@@ -1,3 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
 source "https://rubygems.org"
 
 gem 'bookbindery'

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e9669d61/geode-book/Gemfile.lock
----------------------------------------------------------------------
diff --git a/geode-book/Gemfile.lock b/geode-book/Gemfile.lock
index 3c483c0..1fb5a4c 100644
--- a/geode-book/Gemfile.lock
+++ b/geode-book/Gemfile.lock
@@ -200,4 +200,4 @@ DEPENDENCIES
   libv8 (= 3.16.14.7)
 
 BUNDLED WITH
-   1.11.2
+   1.13.1

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e9669d61/geode-book/config.yml
----------------------------------------------------------------------
diff --git a/geode-book/config.yml b/geode-book/config.yml
index 74003f0..b2f999a 100644
--- a/geode-book/config.yml
+++ b/geode-book/config.yml
@@ -1,3 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
 book_repo: apache/incubator-geode/geode-book
 public_host: localhost
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e9669d61/geode-book/master_middleman/source/index.html.erb
----------------------------------------------------------------------
diff --git a/geode-book/master_middleman/source/index.html.erb b/geode-book/master_middleman/source/index.html.erb
index 39ee634..3d273d3 100644
--- a/geode-book/master_middleman/source/index.html.erb
+++ b/geode-book/master_middleman/source/index.html.erb
@@ -1,4 +1,20 @@
 <html>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+express or implied. See the License for the specific language governing
+permissions and limitations under the License.
+-->
 <head>
 
 <script type="text/javascript">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e9669d61/geode-book/master_middleman/source/javascripts/book.js
----------------------------------------------------------------------
diff --git a/geode-book/master_middleman/source/javascripts/book.js b/geode-book/master_middleman/source/javascripts/book.js
index 90879c4..16edd95 100644
--- a/geode-book/master_middleman/source/javascripts/book.js
+++ b/geode-book/master_middleman/source/javascripts/book.js
@@ -1,3 +1,18 @@
+//Licensed to the Apache Software Foundation (ASF) under one or more
+//contributor license agreements.  See the NOTICE file distributed with
+//this work for additional information regarding copyright ownership.
+//The ASF licenses this file to You under the Apache License, Version 2.0
+//(the "License"); you may not use this file except in compliance with
+//the License.  You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+//express or implied. See the License for the specific language governing
+//permissions and limitations under the License.
+
 // Declare your book-specific javascript overrides in this file.
 //= require 'waypoints/waypoint'
 //= require 'waypoints/context'

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e9669d61/geode-book/master_middleman/source/javascripts/waypoints/context.js
----------------------------------------------------------------------
diff --git a/geode-book/master_middleman/source/javascripts/waypoints/context.js b/geode-book/master_middleman/source/javascripts/waypoints/context.js
index 5e3551b..d005cb3 100644
--- a/geode-book/master_middleman/source/javascripts/waypoints/context.js
+++ b/geode-book/master_middleman/source/javascripts/waypoints/context.js
@@ -1,3 +1,18 @@
+//Licensed to the Apache Software Foundation (ASF) under one or more
+//contributor license agreements.  See the NOTICE file distributed with
+//this work for additional information regarding copyright ownership.
+//The ASF licenses this file to You under the Apache License, Version 2.0
+//(the "License"); you may not use this file except in compliance with
+//the License.  You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+//express or implied. See the License for the specific language governing
+//permissions and limitations under the License.
+
 (function() {
   'use strict'
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e9669d61/geode-book/master_middleman/source/javascripts/waypoints/group.js
----------------------------------------------------------------------
diff --git a/geode-book/master_middleman/source/javascripts/waypoints/group.js b/geode-book/master_middleman/source/javascripts/waypoints/group.js
index 57c3038..3f14d7c 100644
--- a/geode-book/master_middleman/source/javascripts/waypoints/group.js
+++ b/geode-book/master_middleman/source/javascripts/waypoints/group.js
@@ -1,3 +1,18 @@
+//Licensed to the Apache Software Foundation (ASF) under one or more
+//contributor license agreements.  See the NOTICE file distributed with
+//this work for additional information regarding copyright ownership.
+//The ASF licenses this file to You under the Apache License, Version 2.0
+//(the "License"); you may not use this file except in compliance with
+//the License.  You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+//express or implied. See the License for the specific language governing
+//permissions and limitations under the License.
+
 (function() {
   'use strict'
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e9669d61/geode-book/master_middleman/source/javascripts/waypoints/noframeworkAdapter.js
----------------------------------------------------------------------
diff --git a/geode-book/master_middleman/source/javascripts/waypoints/noframeworkAdapter.js b/geode-book/master_middleman/source/javascripts/waypoints/noframeworkAdapter.js
index 99abcb5..4c55ca9 100644
--- a/geode-book/master_middleman/source/javascripts/waypoints/noframeworkAdapter.js
+++ b/geode-book/master_middleman/source/javascripts/waypoints/noframeworkAdapter.js
@@ -1,3 +1,18 @@
+//Licensed to the Apache Software Foundation (ASF) under one or more
+//contributor license agreements.  See the NOTICE file distributed with
+//this work for additional information regarding copyright ownership.
+//The ASF licenses this file to You under the Apache License, Version 2.0
+//(the "License"); you may not use this file except in compliance with
+//the License.  You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+//express or implied. See the License for the specific language governing
+//permissions and limitations under the License.
+
 (function() {
   'use strict'
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e9669d61/geode-book/master_middleman/source/javascripts/waypoints/sticky.js
----------------------------------------------------------------------
diff --git a/geode-book/master_middleman/source/javascripts/waypoints/sticky.js b/geode-book/master_middleman/source/javascripts/waypoints/sticky.js
index 569fcdb..371cac0 100644
--- a/geode-book/master_middleman/source/javascripts/waypoints/sticky.js
+++ b/geode-book/master_middleman/source/javascripts/waypoints/sticky.js
@@ -1,3 +1,18 @@
+//Licensed to the Apache Software Foundation (ASF) under one or more
+//contributor license agreements.  See the NOTICE file distributed with
+//this work for additional information regarding copyright ownership.
+//The ASF licenses this file to You under the Apache License, Version 2.0
+//(the "License"); you may not use this file except in compliance with
+//the License.  You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+//express or implied. See the License for the specific language governing
+//permissions and limitations under the License.
+
 (function() {
   'use strict'
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e9669d61/geode-book/master_middleman/source/javascripts/waypoints/waypoint.js
----------------------------------------------------------------------
diff --git a/geode-book/master_middleman/source/javascripts/waypoints/waypoint.js b/geode-book/master_middleman/source/javascripts/waypoints/waypoint.js
index 7f76f1d..0196b04 100644
--- a/geode-book/master_middleman/source/javascripts/waypoints/waypoint.js
+++ b/geode-book/master_middleman/source/javascripts/waypoints/waypoint.js
@@ -1,3 +1,18 @@
+//Licensed to the Apache Software Foundation (ASF) under one or more
+//contributor license agreements.  See the NOTICE file distributed with
+//this work for additional information regarding copyright ownership.
+//The ASF licenses this file to You under the Apache License, Version 2.0
+//(the "License"); you may not use this file except in compliance with
+//the License.  You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+//express or implied. See the License for the specific language governing
+//permissions and limitations under the License.
+
 (function() {
   'use strict'
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e9669d61/geode-book/master_middleman/source/layouts/_book-footer.erb
----------------------------------------------------------------------
diff --git a/geode-book/master_middleman/source/layouts/_book-footer.erb b/geode-book/master_middleman/source/layouts/_book-footer.erb
index 55bf514..c3b93f6 100644
--- a/geode-book/master_middleman/source/layouts/_book-footer.erb
+++ b/geode-book/master_middleman/source/layouts/_book-footer.erb
@@ -1,3 +1,19 @@
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
 <div class="copyright">
   <a href='/'>Apache Geode Documentation</a>
   &copy; <%= Time.now.year %> <a href='http://www.apache.org/'>The Apache Software Foundation</a>.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e9669d61/geode-book/master_middleman/source/layouts/_title.erb
----------------------------------------------------------------------
diff --git a/geode-book/master_middleman/source/layouts/_title.erb b/geode-book/master_middleman/source/layouts/_title.erb
index ea744d9..e9a0956 100644
--- a/geode-book/master_middleman/source/layouts/_title.erb
+++ b/geode-book/master_middleman/source/layouts/_title.erb
@@ -1,6 +1,21 @@
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
 <% if current_page.data.title %>
   <h1 class="title-container" <%= current_page.data.dita ? 'style="display: none;"' : '' %>>
     <%= current_page.data.title %>
   </h1>
 <% end %>
-

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e9669d61/geode-book/master_middleman/source/stylesheets/book-styles.css.scss
----------------------------------------------------------------------
diff --git a/geode-book/master_middleman/source/stylesheets/book-styles.css.scss b/geode-book/master_middleman/source/stylesheets/book-styles.css.scss
index 1236d8e..a7ab274 100644
--- a/geode-book/master_middleman/source/stylesheets/book-styles.css.scss
+++ b/geode-book/master_middleman/source/stylesheets/book-styles.css.scss
@@ -1,3 +1,18 @@
+//Licensed to the Apache Software Foundation (ASF) under one or more
+//contributor license agreements.  See the NOTICE file distributed with
+//this work for additional information regarding copyright ownership.
+//The ASF licenses this file to You under the Apache License, Version 2.0
+//(the "License"); you may not use this file except in compliance with
+//the License.  You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+//express or implied. See the License for the specific language governing
+//permissions and limitations under the License.
+
 * {
   box-sizing: border-box;
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e9669d61/geode-book/master_middleman/source/stylesheets/partials/_book-base-values.scss
----------------------------------------------------------------------
diff --git a/geode-book/master_middleman/source/stylesheets/partials/_book-base-values.scss b/geode-book/master_middleman/source/stylesheets/partials/_book-base-values.scss
index e69de29..93562f4 100644
--- a/geode-book/master_middleman/source/stylesheets/partials/_book-base-values.scss
+++ b/geode-book/master_middleman/source/stylesheets/partials/_book-base-values.scss
@@ -0,0 +1,14 @@
+//Licensed to the Apache Software Foundation (ASF) under one or more
+//contributor license agreements.  See the NOTICE file distributed with
+//this work for additional information regarding copyright ownership.
+//The ASF licenses this file to You under the Apache License, Version 2.0
+//(the "License"); you may not use this file except in compliance with
+//the License.  You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+//express or implied. See the License for the specific language governing
+//permissions and limitations under the License.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e9669d61/geode-book/master_middleman/source/stylesheets/partials/_book-vars.scss
----------------------------------------------------------------------
diff --git a/geode-book/master_middleman/source/stylesheets/partials/_book-vars.scss b/geode-book/master_middleman/source/stylesheets/partials/_book-vars.scss
index 4245d57..e2c5e3f 100644
--- a/geode-book/master_middleman/source/stylesheets/partials/_book-vars.scss
+++ b/geode-book/master_middleman/source/stylesheets/partials/_book-vars.scss
@@ -1,3 +1,18 @@
+//Licensed to the Apache Software Foundation (ASF) under one or more
+//contributor license agreements.  See the NOTICE file distributed with
+//this work for additional information regarding copyright ownership.
+//The ASF licenses this file to You under the Apache License, Version 2.0
+//(the "License"); you may not use this file except in compliance with
+//the License.  You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+//express or implied. See the License for the specific language governing
+//permissions and limitations under the License.
+
 $navy: #243640;
 $blue1: #2185c5;
 $blue2: #a7cae1;
@@ -16,4 +31,3 @@ $color-border-tip: $blue2;
 
 $color-bg-header: $navy;
 $color-bg-dark: $bluegray1;
-

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e9669d61/geode-book/master_middleman/source/subnavs/geode-subnav.erb
----------------------------------------------------------------------
diff --git a/geode-book/master_middleman/source/subnavs/geode-subnav.erb b/geode-book/master_middleman/source/subnavs/geode-subnav.erb
index 12ff6b2..53e9118 100644
--- a/geode-book/master_middleman/source/subnavs/geode-subnav.erb
+++ b/geode-book/master_middleman/source/subnavs/geode-subnav.erb
@@ -1,3 +1,19 @@
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
 <div id="sub-nav" class="js-sidenav nav-container" role="navigation">
     <a class="sidenav-title" data-behavior="SubMenuMobile">
         Doc Index

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e9669d61/geode-book/redirects.rb
----------------------------------------------------------------------
diff --git a/geode-book/redirects.rb b/geode-book/redirects.rb
index d71841a..e913407 100644
--- a/geode-book/redirects.rb
+++ b/geode-book/redirects.rb
@@ -1,3 +1,18 @@
+#Licensed to the Apache Software Foundation (ASF) under one or more
+#contributor license agreements.  See the NOTICE file distributed with
+#this work for additional information regarding copyright ownership.
+#The ASF licenses this file to You under the Apache License, Version 2.0
+#(the "License"); you may not use this file except in compliance with
+#the License.  You may obtain a copy of the License at
+#
+#http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+#express or implied. See the License for the specific language governing
+#permissions and limitations under the License.
+
 r301 %r{/releases/latest/javadoc/(.*)}, 'http://geode.incubator.apache.org/releases/latest/javadoc/$1'
 rewrite '/', '/docs/about_geode.html'
 rewrite '/index.html', '/docs/about_geode.html'

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e9669d61/gradle/rat.gradle
----------------------------------------------------------------------
diff --git a/gradle/rat.gradle b/gradle/rat.gradle
index 7e20c56..0b01d81 100644
--- a/gradle/rat.gradle
+++ b/gradle/rat.gradle
@@ -51,7 +51,7 @@ rat {
     // IDE
     'etc/eclipseFormatterProfile.xml',
     'etc/intellijIdeaCodeStyle.xml',
-    'etc/eclipseOrganizeImports.importorder', 
+    'etc/eclipseOrganizeImports.importorder',
     '**/.project',
     '**/.classpath',
     '**/.settings/**',
@@ -88,6 +88,7 @@ rat {
     '**/*.pdf',
     '**/*.png',
     '**/*.ser',
+    '**/*.svg',
     '**/*.truststore',
     '**/*.xls',
     '**/publickeyfile',
@@ -97,6 +98,7 @@ rat {
     'geode-spark-connector/project/plugins.sbt',
     'geode-spark-connector/project/build.properties',
     '**/log4j*.xml',
+    'geode-book/Gemfile.lock',
 
     // modules
     'extensions/**/log4j.properties',


[48/50] [abbrv] incubator-geode git commit: Convert from ManagementTestCase to ManagementTestRule

Posted by kl...@apache.org.
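
For readers following this conversion series: the change replaces ManagementTestBase/
ManagementTestCase inheritance (anonymous SerializableRunnable/SerializableCallable
invocations, WaitCriterion polling, JUnit 3-style asserts) with a JUnit 4
ManagementTestRule, named lambda VM invocations, and AssertJ assertions. The sketch
below is only an illustrative outline of that target shape, modeled on the APIs
visible in the diff that follows; the class name, region name, and the imports for
ManagementTestRule and the @Manager/@Member annotations are assumed here, not taken
from the commit.

    import static org.assertj.core.api.Assertions.assertThat;

    import java.io.Serializable;

    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.experimental.categories.Category;

    import org.apache.geode.cache.Cache;
    import org.apache.geode.cache.Region;
    import org.apache.geode.cache.RegionShortcut;
    import org.apache.geode.test.dunit.VM;
    import org.apache.geode.test.junit.categories.DistributedTest;

    // ManagementTestRule, @Manager and @Member belong to the new test
    // infrastructure introduced by this series; their imports are omitted here.
    @Category(DistributedTest.class)
    @SuppressWarnings({ "serial", "unused" })
    public class ExampleManagementDUnitTest implements Serializable {

      @Manager   // VM running the JMX manager, injected by the rule
      private VM managerVM;

      @Member    // managed member VMs, injected by the rule
      private VM[] memberVMs;

      @Rule      // starts the manager and members before each test
      public ManagementTestRule managementTestRule =
          ManagementTestRule.builder().start(true).build();

      @Test
      public void membersCanCreateAndReadARegion() {
        for (VM memberVM : this.memberVMs) {
          // A named lambda replaces an anonymous SerializableRunnable subclass.
          memberVM.invoke("create and verify a region", () -> {
            Cache cache = this.managementTestRule.getCache();
            Region region = cache.createRegionFactory(RegionShortcut.REPLICATE)
                .create("exampleRegion");
            region.put("A", "B");
            assertThat(region.get("A")).isEqualTo("B");
          });
        }
      }
    }

The rule's builder (start(true)) takes over the member/manager startup that
ManagementTestBase previously handled, which is why the initManagement(false) calls
disappear from the test bodies in the diff below.
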
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/management/DiskManagementDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/DiskManagementDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/DiskManagementDUnitTest.java
index 8d7d6d3..7480bf6 100644
--- a/geode-core/src/test/java/org/apache/geode/management/DiskManagementDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/DiskManagementDUnitTest.java
@@ -16,24 +16,25 @@
  */
 package org.apache.geode.management;
 
-import org.junit.experimental.categories.Category;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
-import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
-import org.apache.geode.test.junit.categories.DistributedTest;
+import static java.util.concurrent.TimeUnit.*;
+import static org.assertj.core.api.Assertions.*;
 
 import java.io.File;
-import java.util.Arrays;
-import java.util.List;
+import java.io.Serializable;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeoutException;
 
 import javax.management.ObjectName;
 
-import org.apache.geode.LogWriter;
+import com.jayway.awaitility.Awaitility;
+import com.jayway.awaitility.core.ConditionFactory;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.DataPolicy;
 import org.apache.geode.cache.DiskStore;
@@ -42,700 +43,366 @@ import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionFactory;
 import org.apache.geode.cache.Scope;
 import org.apache.geode.distributed.DistributedMember;
-import org.apache.geode.internal.cache.DiskRegion;
 import org.apache.geode.internal.cache.DiskRegionStats;
 import org.apache.geode.internal.cache.DistributedRegion;
 import org.apache.geode.internal.cache.GemFireCacheImpl;
-import org.apache.geode.internal.cache.LocalRegion;
 import org.apache.geode.internal.cache.persistence.PersistentMemberID;
 import org.apache.geode.internal.cache.persistence.PersistentMemberManager;
-import org.apache.geode.management.internal.MBeanJMXAdapter;
+import org.apache.geode.internal.process.ProcessUtils;
+import org.apache.geode.management.internal.SystemManagementService;
 import org.apache.geode.test.dunit.AsyncInvocation;
-import org.apache.geode.test.dunit.SerializableCallable;
-import org.apache.geode.test.dunit.SerializableRunnable;
 import org.apache.geode.test.dunit.VM;
-import org.apache.geode.test.dunit.Wait;
-import org.apache.geode.test.dunit.WaitCriterion;
+import org.apache.geode.test.junit.categories.DistributedTest;
+import org.apache.geode.test.junit.rules.serializable.SerializableTemporaryFolder;
 
 /**
  * Test cases to cover all test cases which pertains to disk from Management
  * layer
- * 
- * 
  */
 @Category(DistributedTest.class)
-public class DiskManagementDUnitTest extends ManagementTestBase {
-
-  /**
-   * 
-   */
-  private static final long serialVersionUID = 1L;
+@SuppressWarnings({ "serial", "unused" })
+public class DiskManagementDUnitTest implements Serializable {
 
-  // This must be bigger than the dunit ack-wait-threshold for the revoke
-  // tests. The command line is setting the ack-wait-threshold to be
-  // 60 seconds.
-  private static final int MAX_WAIT = 70 * 1000;
-
-  boolean testFailed = false;
-
-  String failureCause = "";
-  static final String REGION_NAME = "region";
+  private static final String REGION_NAME = DiskManagementDUnitTest.class.getSimpleName() + "_region";
 
   private File diskDir;
 
-  protected static LogWriter logWriter;
+  @Manager
+  private VM managerVM;
 
-  public DiskManagementDUnitTest() throws Exception {
-    super();
-  
-    diskDir = new File("diskDir-" + getName()).getAbsoluteFile();
-    org.apache.geode.internal.FileUtil.delete(diskDir);
-    diskDir.mkdir();
-    diskDir.deleteOnExit();
-  }
+  @Member
+  private VM[] memberVMs;
 
-  @Override
-  protected final void postSetUpManagementTestBase() throws Exception {
-    failureCause = "";
-    testFailed = false;
-  }
+  @Rule
+  public ManagementTestRule managementTestRule = ManagementTestRule.builder().start(true).build();
+
+  @Rule
+  public SerializableTemporaryFolder temporaryFolder = new SerializableTemporaryFolder();
 
-  @Override
-  protected final void postTearDownManagementTestBase() throws Exception {
-    org.apache.geode.internal.FileUtil.delete(diskDir);
+  @Before
+  public void before() throws Exception {
+    this.diskDir = this.temporaryFolder.newFolder("diskDir");
   }
 
   /**
-   * Tests Disk Compaction from a MemberMbean which is at cache level. All the
+   * Tests Disk Compaction from a MemberMXBean which is at cache level. All the
    * disks which belong to the cache should be compacted.
-   * 
-   * @throws Exception
    */
-
   @Test
-  public void testDiskCompact() throws Throwable {
-    initManagement(false);
-    for (VM vm : getManagedNodeList()) {
-      createPersistentRegion(vm);
-      makeDiskCompactable(vm);
-    }
-    
-    for (VM vm : getManagedNodeList()) {
-      compactAllDiskStores(vm);
+  public void testDiskCompact() throws Exception {
+    for (VM memberVM : this.memberVMs) {
+      createPersistentRegion(memberVM);
+      makeDiskCompactable(memberVM);
     }
 
+    for (VM memberVM : this.memberVMs) {
+      compactAllDiskStores(memberVM);
+    }
   }
 
   /**
-   * Tests Disk Compaction from a MemberMbean which is at cache level. All the
+   * Tests Disk Compaction from a MemberMXBean which is at cache level. All the
    * disks which belong to the cache should be compacted.
-   * 
-   * @throws Exception
    */
-
   @Test
-  public void testDiskCompactRemote() throws Throwable {
-
-    initManagement(false);
-    for (VM vm : getManagedNodeList()) {
-      createPersistentRegion(vm);
-      makeDiskCompactable(vm);
+  public void testDiskCompactRemote() throws Exception {
+    for (VM memberVM : this.memberVMs) {
+      createPersistentRegion(memberVM);
+      makeDiskCompactable(memberVM);
     }
-    compactDiskStoresRemote(managingNode);
 
+    compactDiskStoresRemote(this.managerVM, this.memberVMs.length);
   }
 
   /**
    * Tests various operations defined on DiskStore Mbean
-   * 
-   * @throws Exception
    */
-
   @Test
-  public void testDiskOps() throws Throwable {
-
-    initManagement(false);
-    for (VM vm : getManagedNodeList()) {
-      createPersistentRegion(vm);
-      makeDiskCompactable(vm);
-      invokeFlush(vm);
-      invokeForceRoll(vm);
-      invokeForceCompaction(vm);
+  public void testDiskOps() throws Exception {
+    for (VM memberVM : this.memberVMs) {
+      createPersistentRegion(memberVM);
+      makeDiskCompactable(memberVM);
+      invokeFlush(memberVM);
+      invokeForceRoll(memberVM);
+      invokeForceCompaction(memberVM);
     }
-
   }
 
   @Test
-  public void testDiskBackupAllMembers() throws Throwable {
-    initManagement(false);
-    for (VM vm : getManagedNodeList()) {
-      createPersistentRegion(vm);
-      makeDiskCompactable(vm);
-
+  public void testDiskBackupAllMembers() throws Exception {
+    for (VM memberVM : this.memberVMs) {
+      createPersistentRegion(memberVM);
+      makeDiskCompactable(memberVM);
     }
-    backupAllMembers(managingNode);
+
+    backupAllMembers(this.managerVM, this.memberVMs.length);
   }
 
   /**
-   * Checks the test case of missing disks and revoking them through MemberMbean
+   * Checks the test case of missing disks and revoking them through MemberMXBean
    * interfaces
-   * 
-   * @throws Throwable
    */
-  @SuppressWarnings("serial")
   @Test
-  public void testMissingMembers() throws Throwable {
-
-    initManagement(false);
-    VM vm0 = getManagedNodeList().get(0);
-    VM vm1 = getManagedNodeList().get(1);
-    VM vm2 = getManagedNodeList().get(2);
-    
-    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("Creating region in VM0");
-    createPersistentRegion(vm0);
-    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("Creating region in VM1");
-    createPersistentRegion(vm1);
-
-    putAnEntry(vm0);
-
- 
-    managingNode.invoke(new SerializableRunnable("Check for waiting regions") {
-
-      public void run() {
-        Cache cache = getCache();
-        ManagementService service = getManagementService();
-        DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
-        PersistentMemberDetails[] missingDiskStores = bean
-            .listMissingDiskStores();
-
-        assertNull(missingDiskStores);
-      }
-    });
+  public void testMissingMembers() throws Exception {
+    VM memberVM1 = this.memberVMs[0];
+    VM memberVM2 = this.memberVMs[1];
 
-    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("closing region in vm0");
-    closeRegion(vm0);
-
-    updateTheEntry(vm1);
-
-    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("closing region in vm1");
-    closeRegion(vm1);
-    AsyncInvocation future = createPersistentRegionAsync(vm0);
-    waitForBlockedInitialization(vm0);
-    assertTrue(future.isAlive());
-
-    managingNode.invoke(new SerializableRunnable("Revoke the member") {
-
-      public void run() {
-        Cache cache = getCache();
-        GemFireCacheImpl cacheImpl = (GemFireCacheImpl) cache;
-        ManagementService service = getManagementService();
-        DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
-        PersistentMemberDetails[] missingDiskStores = bean
-        .listMissingDiskStores();
-        org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("waiting members=" + missingDiskStores);
-        assertNotNull(missingDiskStores);
-        assertEquals(1, missingDiskStores.length);
-
-        for (PersistentMemberDetails id : missingDiskStores) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("Missing DiskStoreID is =" + id.getDiskStoreId());
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("Missing Host is =" + id.getHost());
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("Missing Directory is =" + id.getDirectory());
-
-          try {
-            bean.revokeMissingDiskStores(id.getDiskStoreId());
-          } catch (Exception e) {
-            fail("revokeMissingDiskStores failed with exception " + e);
-          }
-        }
-      }
+    createPersistentRegion(memberVM1);
+    createPersistentRegion(memberVM2);
+
+    putAnEntry(memberVM1);
+
+    this.managerVM.invoke("checkForMissingDiskStores", () -> {
+      ManagementService service = this.managementTestRule.getManagementService();
+      DistributedSystemMXBean distributedSystemMXBean = service.getDistributedSystemMXBean();
+      PersistentMemberDetails[] missingDiskStores = distributedSystemMXBean.listMissingDiskStores();
+
+      assertThat(missingDiskStores).isNull();
     });
 
-    future.join(MAX_WAIT);
-    if (future.isAlive()) {
-      fail("Region not created within" + MAX_WAIT);
-    }
-    if (future.exceptionOccurred()) {
-      throw new Exception(future.getException());
-    }
-    checkForRecoveryStat(vm0, true);
-    // Check to make sure we recovered the old
-    // value of the entry.
-    SerializableRunnable checkForEntry = new SerializableRunnable(
-        "check for the entry") {
-
-      public void run() {
-        Cache cache = getCache();
-        Region region = cache.getRegion(REGION_NAME);
-        assertEquals("B", region.get("A"));
-      }
-    };
-    vm0.invoke(checkForEntry);
+    closeRegion(memberVM1);
 
-  }
+    updateTheEntry(memberVM2, "C");
 
-  protected void checkNavigation(final VM vm,
-      final DistributedMember diskMember, final String diskStoreName) {
-    SerializableRunnable checkNavigation = new SerializableRunnable(
-        "Check Navigation") {
-      public void run() {
+    closeRegion(memberVM2);
 
-        final ManagementService service = getManagementService();
+    AsyncInvocation creatingPersistentRegionAsync = createPersistentRegionAsync(memberVM1);
 
-        DistributedSystemMXBean disMBean = service.getDistributedSystemMXBean();
-        try {
-          ObjectName expected = MBeanJMXAdapter.getDiskStoreMBeanName(diskMember.getId(), diskStoreName);
-          ObjectName actual = disMBean.fetchDiskStoreObjectName(diskMember.getId(), diskStoreName);
-          assertEquals(expected, actual);
-        } catch (Exception e) {
-          fail("Disk Store Navigation Failed " + e);
-        }
+    memberVM1.invoke(() ->
+      await().until(() -> {
+        GemFireCacheImpl cache = (GemFireCacheImpl) this.managementTestRule.getCache();
+        PersistentMemberManager persistentMemberManager = cache.getPersistentMemberManager();
+        Map<String, Set<PersistentMemberID>> regions = persistentMemberManager.getWaitingRegions();
+        return !regions.isEmpty();
+      })
+    );
 
+    assertThat(creatingPersistentRegionAsync.isAlive()).isTrue();
 
-      }
-    };
-    vm.invoke(checkNavigation);
-  }
+    this.managerVM.invoke("revokeMissingDiskStore", () -> {
+      ManagementService service = this.managementTestRule.getManagementService();
+      DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
+      PersistentMemberDetails[] missingDiskStores = bean.listMissingDiskStores();
 
-  /**
-   * get Distributed member for a given vm
-   */
-  @SuppressWarnings("serial")
-  protected static DistributedMember getMember() throws Exception {
-    GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-    return cache.getDistributedSystem().getDistributedMember();
+      assertThat(missingDiskStores).isNotNull().hasSize(1);
+
+      assertThat(bean.revokeMissingDiskStores(missingDiskStores[0].getDiskStoreId())).isTrue();
+    });
+
+    await(creatingPersistentRegionAsync);
+
+    verifyRecoveryStats(memberVM1, true);
+
+    // Check to make sure we recovered the old value of the entry.
+    memberVM1.invoke("check for the entry", () -> {
+      Cache cache = this.managementTestRule.getCache();
+      Region region = cache.getRegion(REGION_NAME);
+      assertThat(region.get("A")).isEqualTo("B");
+    });
   }
 
   /**
    * Invokes flush on the given disk store by MBean interface
-   * 
-   * @param vm
-   *          reference to VM
    */
-  @SuppressWarnings("serial")
-  public void invokeFlush(final VM vm) {
-    SerializableRunnable invokeFlush = new SerializableRunnable(
-        "Invoke Flush On Disk") {
-      public void run() {
-        Cache cache = getCache();
-        DiskStoreFactory dsf = cache.createDiskStoreFactory();
-        String name = "testFlush_" + vm.getPid();
-        DiskStore ds = dsf.create(name);
-
-        ManagementService service = getManagementService();
-        DiskStoreMXBean bean = service.getLocalDiskStoreMBean(name);
-        assertNotNull(bean);
-        bean.flush();
-      }
-    };
-    vm.invoke(invokeFlush);
+  private void invokeFlush(final VM memberVM) {
+    memberVM.invoke("invokeFlush", () -> {
+      Cache cache = this.managementTestRule.getCache();
+      DiskStoreFactory diskStoreFactory = cache.createDiskStoreFactory();
+      String name = "testFlush_" + ProcessUtils.identifyPid();
+      DiskStore diskStore = diskStoreFactory.create(name);
+
+      ManagementService service = this.managementTestRule.getManagementService();
+      DiskStoreMXBean diskStoreMXBean = service.getLocalDiskStoreMBean(name);
+      assertThat(diskStoreMXBean).isNotNull();
+      assertThat(diskStoreMXBean.getName()).isEqualTo(diskStore.getName());
+
+      diskStoreMXBean.flush();
+    });
   }
 
   /**
    * Invokes force roll on disk store by MBean interface
-   * 
-   * @param vm
-   *          reference to VM
    */
-  @SuppressWarnings("serial")
-  public void invokeForceRoll(final VM vm) {
-    SerializableRunnable invokeForceRoll = new SerializableRunnable(
-        "Invoke Force Roll") {
-      public void run() {
-        Cache cache = getCache();
-        DiskStoreFactory dsf = cache.createDiskStoreFactory();
-        String name = "testForceRoll_" + vm.getPid();
-        DiskStore ds = dsf.create(name);
-        ManagementService service = getManagementService();
-        DiskStoreMXBean bean = service.getLocalDiskStoreMBean(name);
-        assertNotNull(bean);
-        bean.forceRoll();
-      }
-    };
-    vm.invoke(invokeForceRoll);
+  private void invokeForceRoll(final VM memberVM) {
+    memberVM.invoke("invokeForceRoll", () -> {
+      Cache cache = this.managementTestRule.getCache();
+      DiskStoreFactory diskStoreFactory = cache.createDiskStoreFactory();
+      String name = "testForceRoll_" + ProcessUtils.identifyPid();
+      DiskStore diskStore = diskStoreFactory.create(name);
+
+      ManagementService service = this.managementTestRule.getManagementService();
+      DiskStoreMXBean diskStoreMXBean = service.getLocalDiskStoreMBean(name);
+      assertThat(diskStoreMXBean).isNotNull();
+      assertThat(diskStoreMXBean.getName()).isEqualTo(diskStore.getName());
+
+      diskStoreMXBean.forceRoll();
+    });
   }
 
   /**
    * Invokes force compaction on disk store by MBean interface
-   * 
-   * @param vm
-   *          reference to VM
    */
-  @SuppressWarnings("serial")
-  public void invokeForceCompaction(final VM vm) {
-    SerializableRunnable invokeForceCompaction = new SerializableRunnable(
-        "Invoke Force Compaction") {
-      public void run() {
-        Cache cache = getCache();
-        DiskStoreFactory dsf = cache.createDiskStoreFactory();
-        dsf.setAllowForceCompaction(true);
-        String name = "testForceCompaction_" + vm.getPid();
-        DiskStore ds = dsf.create(name);
-        ManagementService service = getManagementService();
-        DiskStoreMXBean bean = service.getLocalDiskStoreMBean(name);
-        assertNotNull(bean);
-        assertEquals(false, bean.forceCompaction());
-      }
-    };
-    vm.invoke(invokeForceCompaction);
+  private void invokeForceCompaction(final VM memberVM) {
+    memberVM.invoke("invokeForceCompaction", () -> {
+      Cache cache = this.managementTestRule.getCache();
+      DiskStoreFactory dsf = cache.createDiskStoreFactory();
+      dsf.setAllowForceCompaction(true);
+      String name = "testForceCompaction_" + ProcessUtils.identifyPid();
+      DiskStore diskStore = dsf.create(name);
+
+      ManagementService service = this.managementTestRule.getManagementService();
+      DiskStoreMXBean diskStoreMXBean = service.getLocalDiskStoreMBean(name);
+      assertThat(diskStoreMXBean).isNotNull();
+      assertThat(diskStoreMXBean.getName()).isEqualTo(diskStore.getName());
+
+      assertThat(diskStoreMXBean.forceCompaction()).isFalse();
+    });
   }
 
   /**
    * Makes the disk compactable by adding and deleting some entries
-   * 
-   * @throws Exception
    */
-  @SuppressWarnings("serial")
-  public void makeDiskCompactable(VM vm1) throws Exception {
-    vm1.invoke(new SerializableRunnable("Make The Disk Compactable") {
-
-      public void run() {
-        Cache cache = getCache();
-        Region region = cache.getRegion(REGION_NAME);
-        DiskRegion dr = ((LocalRegion) region).getDiskRegion();
-        org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("putting key1");
-        region.put("key1", "value1");
-        org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("putting key2");
-        region.put("key2", "value2");
-        org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("removing key2");
-        region.remove("key2");
-        // now that it is compactable the following forceCompaction should
-        // go ahead and do a roll and compact it.
-      }
+  private void makeDiskCompactable(final VM memberVM) throws Exception {
+    memberVM.invoke("makeDiskCompactable", () -> {
+      Cache cache = this.managementTestRule.getCache();
+      Region region = cache.getRegion(REGION_NAME);
+      region.put("key1", "value1");
+      region.put("key2", "value2");
+      region.remove("key2");
+      // now that it is compactable the following forceCompaction should
+      // go ahead and do a roll and compact it.
     });
-
   }
 
-
-
- 
-
   /**
    * Compacts all DiskStores belonging to a member
-   * 
-   * @param vm1
-   *          reference to VM
-   * @throws Exception
    */
-  @SuppressWarnings("serial")
-  public void compactAllDiskStores(VM vm1) throws Exception {
-
-    vm1.invoke(new SerializableCallable("Compact All Disk Stores") {
-
-      public Object call() throws Exception {
-        ManagementService service = getManagementService();
-        MemberMXBean memberBean = service.getMemberMXBean();
-        String[] compactedDiskStores = memberBean.compactAllDiskStores();
-
-        assertTrue(compactedDiskStores.length > 0);
-        for (int i = 0; i < compactedDiskStores.length; i++) {
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
-              "<ExpectedString> Compacted Store " + i + " "
-                  + compactedDiskStores[i] + "</ExpectedString> ");
-        }
-
-        return null;
-      }
+  private void compactAllDiskStores(final VM memberVM) throws Exception {
+    memberVM.invoke("compactAllDiskStores", () -> {
+      ManagementService service = this.managementTestRule.getManagementService();
+      MemberMXBean memberMXBean = service.getMemberMXBean();
+      String[] compactedDiskStores = memberMXBean.compactAllDiskStores();
+      assertThat(compactedDiskStores).hasSize(1);
     });
-
   }
 
   /**
    * Takes a back up of all the disk store in a given directory
    */
-  @SuppressWarnings("serial")
-  public void backupAllMembers(final VM managingVM) throws Exception {
-
-    managingVM.invoke(new SerializableCallable("Backup All Disk Stores") {
+  private void backupAllMembers(final VM managerVM, final int memberCount) {
+    managerVM.invoke("backupAllMembers", () -> {
+      ManagementService service = this.managementTestRule.getManagementService();
+      DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
+      File backupDir = this.temporaryFolder.newFolder("backupDir");
 
-      public Object call() throws Exception {
-        ManagementService service = getManagementService();
-        DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
-        DiskBackupStatus status = bean.backupAllMembers(getBackupDir("test_backupAllMembers")
-            .getAbsolutePath(), null);
+      DiskBackupStatus status = bean.backupAllMembers(backupDir.getAbsolutePath(), null);
 
-        return null;
-      }
+      assertThat(status.getBackedUpDiskStores().keySet().size()).isEqualTo(memberCount);
+      assertThat(status.getOfflineDiskStores()).isEqualTo(null); // TODO: fix GEODE-1946
     });
-
   }
 
   /**
-   * Compact a disk store from Managing node
+   * Compact a disk store from managerVM VM
    */
-  @SuppressWarnings("serial")
-  public void compactDiskStoresRemote(VM managingVM) throws Exception {
-    {
-
-      managingVM.invoke(new SerializableCallable("Compact All Disk Stores Remote") {
-
-        public Object call() throws Exception {
-          GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-          Set<DistributedMember> otherMemberSet = cache
-              .getDistributionManager().getOtherNormalDistributionManagerIds();
-
-          for (DistributedMember member : otherMemberSet) {
-            MemberMXBean bean = MBeanUtil.getMemberMbeanProxy(member);
-            String[] allDisks = bean.listDiskStores(true);
-            assertNotNull(allDisks);
-            List<String> listString = Arrays.asList(allDisks);
-            org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
-                "<ExpectedString> Remote All Disk Stores Are  "
-                    + listString.toString() + "</ExpectedString> ");
-            String[] compactedDiskStores = bean.compactAllDiskStores();
-            assertTrue(compactedDiskStores.length > 0);
-            for (int i = 0; i < compactedDiskStores.length; i++) {
-              org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
-                  "<ExpectedString> Remote Compacted Store " + i + " "
-                      + compactedDiskStores[i] + "</ExpectedString> ");
-            }
-
-          }
-          return null;
-        }
-      });
+  private void compactDiskStoresRemote(final VM managerVM, final int memberCount) {
+    managerVM.invoke("compactDiskStoresRemote", () -> {
+      Set<DistributedMember> otherMemberSet = this.managementTestRule.getOtherNormalMembers();// ((GemFireCacheImpl)cache).getDistributionManager().getOtherNormalDistributionManagerIds();
+      assertThat(otherMemberSet.size()).isEqualTo(memberCount);
 
-    }
+      SystemManagementService service = this.managementTestRule.getSystemManagementService();
 
-  }
+      for (DistributedMember member : otherMemberSet) {
+        MemberMXBean memberMXBean = awaitMemberMXBeanProxy(member);
 
-  /**
-   * Checks if a file with the given extension is present
-   * 
-   * @param fileExtension
-   *          file extension
-   * @throws Exception
-   */
-  protected void checkIfContainsFileWithExt(String fileExtension)
-      throws Exception {
-    File[] files = diskDir.listFiles();
-    for (int j = 0; j < files.length; j++) {
-      if (files[j].getAbsolutePath().endsWith(fileExtension)) {
-        fail("file \"" + files[j].getAbsolutePath() + "\" still exists");
-      }
-    }
+        String[] allDisks = memberMXBean.listDiskStores(true);
+        assertThat(allDisks).isNotNull().hasSize(1);
 
-  }
-
-  /**
-   * Update Entry
-   * 
-   * @param vm1
-   *          reference to VM
-   */
-  protected void updateTheEntry(VM vm1) {
-    updateTheEntry(vm1, "C");
-  }
-
-  /**
-   * Update an Entry
-   * @param vm1
-   *          reference to VM
-   * @param value
-   *          Value which is updated
-   */
-  @SuppressWarnings("serial")
-  protected void updateTheEntry(VM vm1, final String value) {
-    vm1.invoke(new SerializableRunnable("change the entry") {
-
-      public void run() {
-        Cache cache = getCache();
-        Region region = cache.getRegion(REGION_NAME);
-        region.put("A", value);
+        String[] compactedDiskStores = memberMXBean.compactAllDiskStores();
+        assertThat(compactedDiskStores).hasSize(1);
       }
     });
   }
 
-  /**
-   * Put an entry to region
-   * 
-   * @param vm0
-   *          reference to VM
-   */
-  @SuppressWarnings("serial")
-  protected void putAnEntry(VM vm0) {
-    vm0.invoke(new SerializableRunnable("Put an entry") {
-
-      public void run() {
-        Cache cache = getCache();
-        Region region = cache.getRegion(REGION_NAME);
-        region.put("A", "B");
-      }
+  private void updateTheEntry(final VM memberVM, final String value) {
+    memberVM.invoke("updateTheEntry", () -> {
+      Cache cache = this.managementTestRule.getCache();
+      Region region = cache.getRegion(REGION_NAME);
+      region.put("A", value);
     });
   }
 
-  /**
-   * Close the given region REGION_NAME
-   * 
-   * @param vm
-   *          reference to VM
-   */
-  @SuppressWarnings("serial")
-  protected void closeRegion(final VM vm) {
-    SerializableRunnable closeRegion = new SerializableRunnable(
-        "Close persistent region") {
-      public void run() {
-        Cache cache = getCache();
-        Region region = cache.getRegion(REGION_NAME);
-        region.close();
-      }
-    };
-    vm.invoke(closeRegion);
+  private void putAnEntry(final VM memberVM) {
+    memberVM.invoke("putAnEntry", () -> {
+      Cache cache = managementTestRule.getCache();
+      Region region = cache.getRegion(REGION_NAME);
+      region.put("A", "B");
+    });
   }
 
-  /**
-   * Waiting to blocked waiting for another persistent member to come online
-   * 
-   * @param vm
-   *          reference to VM
-   */
-  @SuppressWarnings("serial")
-  private void waitForBlockedInitialization(VM vm) {
-    vm.invoke(new SerializableRunnable() {
-
-      public void run() {
-        Wait.waitForCriterion(new WaitCriterion() {
-
-          public String description() {
-            return "Waiting to blocked waiting for another persistent member to come online";
-          }
-
-          public boolean done() {
-            Cache cache = getCache();
-            GemFireCacheImpl cacheImpl = (GemFireCacheImpl) cache;
-            PersistentMemberManager mm = cacheImpl.getPersistentMemberManager();
-            Map<String, Set<PersistentMemberID>> regions = mm
-                .getWaitingRegions();
-            boolean done = !regions.isEmpty();
-            return done;
-          }
-
-        }, MAX_WAIT, 100, true);
-
-      }
-
+  private void closeRegion(final VM memberVM) {
+    memberVM.invoke("closeRegion", () -> {
+      Cache cache = this.managementTestRule.getCache();
+      Region region = cache.getRegion(REGION_NAME);
+      region.close();
     });
   }
 
-  /**
-   * Creates a persistent region
-   * 
-   * @param vm
-   *          reference to VM
-   * @throws Throwable
-   */
-  protected void createPersistentRegion(VM vm) throws Throwable {
-    AsyncInvocation future = createPersistentRegionAsync(vm);
-    future.join(MAX_WAIT);
-    if (future.isAlive()) {
-      fail("Region not created within" + MAX_WAIT);
-    }
-    if (future.exceptionOccurred()) {
-      throw new RuntimeException(future.getException());
-    }
+  private void createPersistentRegion(final VM memberVM) throws InterruptedException, ExecutionException, TimeoutException {
+    await(createPersistentRegionAsync(memberVM));
   }
 
-  /**
-   * Creates a persistent region in async manner
-   * 
-   * @param vm
-   *          reference to VM
-   * @return reference to AsyncInvocation
-   */
-  @SuppressWarnings("serial")
-  protected AsyncInvocation createPersistentRegionAsync(final VM vm) {
-    SerializableRunnable createRegion = new SerializableRunnable(
-        "Create persistent region") {
-      public void run() {
-        Cache cache = getCache();
-        DiskStoreFactory dsf = cache.createDiskStoreFactory();
-        File dir = getDiskDirForVM(vm);
-        dir.mkdirs();
-        dsf.setDiskDirs(new File[] { dir });
-        dsf.setMaxOplogSize(1);
-        dsf.setAllowForceCompaction(true);
-        dsf.setAutoCompact(false);
-        DiskStore ds = dsf.create(REGION_NAME);
-        RegionFactory rf = cache.createRegionFactory();
-        rf.setDiskStoreName(ds.getName());
-        rf.setDiskSynchronous(true);
-        rf.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
-        rf.setScope(Scope.DISTRIBUTED_ACK);
-        rf.create(REGION_NAME);
-      }
-    };
-    return vm.invokeAsync(createRegion);
+  private AsyncInvocation createPersistentRegionAsync(final VM memberVM) {
+    return memberVM.invokeAsync("createPersistentRegionAsync", () -> {
+      File dir = new File(diskDir, String.valueOf(ProcessUtils.identifyPid()));
+
+      Cache cache = this.managementTestRule.getCache();
+
+      DiskStoreFactory diskStoreFactory = cache.createDiskStoreFactory();
+      diskStoreFactory.setDiskDirs(new File[] { dir });
+      diskStoreFactory.setMaxOplogSize(1);
+      diskStoreFactory.setAllowForceCompaction(true);
+      diskStoreFactory.setAutoCompact(false);
+      DiskStore diskStore = diskStoreFactory.create(REGION_NAME);
+
+      RegionFactory regionFactory = cache.createRegionFactory();
+      regionFactory.setDiskStoreName(diskStore.getName());
+      regionFactory.setDiskSynchronous(true);
+      regionFactory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
+      regionFactory.setScope(Scope.DISTRIBUTED_ACK);
+      regionFactory.create(REGION_NAME);
+    });
   }
 
-  /**
-   * Validates a persistent region
-   * 
-   * @param vm
-   *          reference to VM
-   */
-  @SuppressWarnings("serial")
-  protected void validatePersistentRegion(final VM vm) {
-    SerializableRunnable validateDisk = new SerializableRunnable(
-        "Validate persistent region") {
-      public void run() {
-        Cache cache = getCache();
-        ManagementService service = getManagementService();
-        DiskStoreMXBean bean = service.getLocalDiskStoreMBean(REGION_NAME);
-        assertNotNull(bean);
+  private void verifyRecoveryStats(final VM memberVM, final boolean localRecovery) {
+    memberVM.invoke("verifyRecoveryStats", () -> {
+      Cache cache = this.managementTestRule.getCache();
+      Region region = cache.getRegion(REGION_NAME);
+      DistributedRegion distributedRegion = (DistributedRegion) region;
+      DiskRegionStats stats = distributedRegion.getDiskRegion().getStats();
+
+      if (localRecovery) {
+        assertThat(stats.getLocalInitializations()).isEqualTo(1);
+        assertThat(stats.getRemoteInitializations()).isEqualTo(0);
+      } else {
+        assertThat(stats.getLocalInitializations()).isEqualTo(0);
+        assertThat(stats.getRemoteInitializations()).isEqualTo(1);
       }
-    };
-    vm.invoke(validateDisk);
+    });
   }
 
-  /**
-   * Appends vm id to disk dir
-   * 
-   * @param vm
-   *          reference to VM
-   * @return
-   */
-  protected File getDiskDirForVM(final VM vm) {
-    File dir = new File(diskDir, String.valueOf(vm.getPid()));
-    return dir;
+  private MemberMXBean awaitMemberMXBeanProxy(final DistributedMember member) {
+    SystemManagementService service = this.managementTestRule.getSystemManagementService();
+    ObjectName objectName = service.getMemberMBeanName(member);
+    await().until(() -> assertThat(service.getMBeanProxy(objectName, MemberMXBean.class)).isNotNull());
+    return service.getMBeanProxy(objectName, MemberMXBean.class);
   }
 
-  /**
-   * Checks recovery status
-   * 
-   * @param vm
-   *          reference to VM
-   * @param localRecovery
-   *          local recovery on or not
-   */
-  @SuppressWarnings("serial")
-  private void checkForRecoveryStat(VM vm, final boolean localRecovery) {
-    vm.invoke(new SerializableRunnable("check disk region stat") {
-
-      public void run() {
-        Cache cache = getCache();
-        Region region = cache.getRegion(REGION_NAME);
-        DistributedRegion distributedRegion = (DistributedRegion) region;
-        DiskRegionStats stats = distributedRegion.getDiskRegion().getStats();
-        if (localRecovery) {
-          assertEquals(1, stats.getLocalInitializations());
-          assertEquals(0, stats.getRemoteInitializations());
-        } else {
-          assertEquals(0, stats.getLocalInitializations());
-          assertEquals(1, stats.getRemoteInitializations());
-        }
-
-      }
-    });
+  private void await(final AsyncInvocation createPersistentRegionAsync) throws InterruptedException, ExecutionException, TimeoutException {
+    createPersistentRegionAsync.await(2, MINUTES);
   }
 
-  /**
-   * 
-   * @return back up directory
-   */
-  protected static File getBackupDir(String name) throws Exception {
-    File backUpDir = new File("BackupDir-" + name).getAbsoluteFile();
-    org.apache.geode.internal.FileUtil.delete(backUpDir);
-    backUpDir.mkdir();
-    backUpDir.deleteOnExit();
-    return backUpDir;
+  private ConditionFactory await() {
+    return Awaitility.await().atMost(2, MINUTES);
   }
 }
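
For readers who have not used the Awaitility idiom this refactor leans on, here is a minimal, self-contained sketch of how the await().until(...) helper replaces the old Wait.waitForCriterion loops. It is not part of the commit; the counter and the background thread are stand-ins for a member eventually coming online, and only the helper shape mirrors the test above.

import static java.util.concurrent.TimeUnit.MINUTES;
import static org.assertj.core.api.Assertions.assertThat;

import java.util.concurrent.atomic.AtomicInteger;

import com.jayway.awaitility.Awaitility;
import com.jayway.awaitility.core.ConditionFactory;

public class AwaitilitySketch {

  // Same helper shape as in the refactored test: poll until the assertion passes,
  // giving up after two minutes.
  private static ConditionFactory await() {
    return Awaitility.await().atMost(2, MINUTES);
  }

  public static void main(String[] args) {
    AtomicInteger memberCount = new AtomicInteger(0);

    // Stand-in for "another member eventually comes online".
    new Thread(() -> {
      try {
        Thread.sleep(200);
      } catch (InterruptedException ignored) {
      }
      memberCount.set(1);
    }).start();

    // Retries the assertion until it stops throwing, instead of hand-rolling a WaitCriterion.
    await().until(() -> assertThat(memberCount.get()).isEqualTo(1));
  }
}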

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/management/DistributedSystemDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/DistributedSystemDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/DistributedSystemDUnitTest.java
index b343dad..743fcaa 100644
--- a/geode-core/src/test/java/org/apache/geode/management/DistributedSystemDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/DistributedSystemDUnitTest.java
@@ -16,23 +16,19 @@
  */
 package org.apache.geode.management;
 
-import org.junit.experimental.categories.Category;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
-import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
-import org.apache.geode.test.junit.categories.DistributedTest;
+import static java.util.concurrent.TimeUnit.*;
+import static org.apache.geode.test.dunit.Host.*;
+import static org.apache.geode.test.dunit.Invoke.*;
+import static org.assertj.core.api.Assertions.*;
 
+import java.io.Serializable;
+import java.lang.management.ManagementFactory;
 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import javax.management.InstanceNotFoundException;
 import javax.management.ListenerNotFoundException;
 import javax.management.MBeanServer;
 import javax.management.Notification;
@@ -41,11 +37,16 @@ import javax.management.NotificationFilter;
 import javax.management.NotificationListener;
 import javax.management.ObjectName;
 
+import com.jayway.awaitility.Awaitility;
+import com.jayway.awaitility.core.ConditionFactory;
 import org.apache.logging.log4j.Logger;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
-import org.apache.geode.cache.Cache;
 import org.apache.geode.distributed.DistributedMember;
-import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.internal.admin.Alert;
 import org.apache.geode.internal.cache.GemFireCacheImpl;
 import org.apache.geode.internal.logging.LogService;
@@ -59,843 +60,482 @@ import org.apache.geode.management.internal.SystemManagementService;
 import org.apache.geode.management.internal.beans.MemberMBean;
 import org.apache.geode.management.internal.beans.SequenceNumber;
 import org.apache.geode.test.dunit.IgnoredException;
-import org.apache.geode.test.dunit.LogWriterUtils;
-import org.apache.geode.test.dunit.Host;
-import org.apache.geode.test.dunit.SerializableCallable;
-import org.apache.geode.test.dunit.SerializableRunnable;
 import org.apache.geode.test.dunit.VM;
-import org.apache.geode.test.dunit.Wait;
-import org.apache.geode.test.dunit.WaitCriterion;
+import org.apache.geode.test.junit.categories.DistributedTest;
 
 /**
- * Distributed System tests
- * 
- * a) For all the notifications 
- * 
- *  i) gemfire.distributedsystem.member.joined
- * 
- *  ii) gemfire.distributedsystem.member.left
- * 
- *  iii) gemfire.distributedsystem.member.suspect
- * 
- *  iv ) All notifications emitted by member mbeans
- * 
- *  vi) Alerts
- * 
+ * Distributed System management tests
+ * <p>
+ * a) For all the notifications
+ * i) gemfire.distributedsystem.member.joined
+ * ii) gemfire.distributedsystem.member.left
+ * iii) gemfire.distributedsystem.member.suspect
+ * iv) All notifications emitted by member MBeans
+ * v) Alerts
+ * <p>
  * b) Concurrently modify proxy list by removing member and accessing the
  * distributed system MBean
- * 
+ * <p>
  * c) Aggregate Operations like shutDownAll
- * 
+ * <p>
  * d) Member level operations like fetchJVMMetrics()
- * 
+ * <p>
  * e ) Statistics
- * 
- * 
- * 
  */
 @Category(DistributedTest.class)
-public class DistributedSystemDUnitTest extends ManagementTestBase {
+@SuppressWarnings({ "serial", "unused" })
+public class DistributedSystemDUnitTest implements Serializable {
 
   private static final Logger logger = LogService.getLogger();
-  
-  private static final long serialVersionUID = 1L;
-
- 
-  private static final int MAX_WAIT = 10 * 1000;
-  
-  private static MBeanServer mbeanServer = MBeanJMXAdapter.mbeanServer;
-  
-  static List<Notification> notifList = new ArrayList<>();
-  
-  static Map<ObjectName , NotificationListener> notificationListenerMap = new HashMap<ObjectName , NotificationListener>();
-  
-  static final String WARNING_LEVEL_MESSAGE = "Warninglevel Alert Message";
-  
-  static final String SEVERE_LEVEL_MESSAGE =  "Severelevel Alert Message";
-
-  
-  public DistributedSystemDUnitTest() {
-    super();
+
+  private static final String WARNING_LEVEL_MESSAGE = "Warning Level Alert Message";
+  private static final String SEVERE_LEVEL_MESSAGE = "Severe Level Alert Message";
+
+  private static List<Notification> notifications;
+  private static Map<ObjectName, NotificationListener> notificationListenerMap;
+
+  @Manager
+  private VM managerVM;
+
+  @Member
+  private VM[] memberVMs;
+
+  @Rule
+  public ManagementTestRule managementTestRule = ManagementTestRule.builder().build();
+
+  @Before
+  public void before() throws Exception {
+    notifications = new ArrayList<>();
+    notificationListenerMap = new HashMap<>();
+
+    invokeInEveryVM(() -> notifications = new ArrayList<>());
+    invokeInEveryVM(() -> notificationListenerMap = new HashMap<>());
+  }
+
+  @After
+  public void after() throws Exception {
+    resetAlertCounts(this.managerVM);
   }
 
   /**
    * Tests aggregate attributes exposed by the DistributedSystemMXBean
-   * 
-   * @throws Exception
    */
   @Test
   public void testDistributedSystemAggregate() throws Exception {
-    VM managingNode = getManagingNode();
-    createManagementCache(managingNode);
-    startManagingNode(managingNode);
-    addNotificationListener(managingNode);
+    this.managementTestRule.createManager(this.managerVM);
+    addNotificationListener(this.managerVM);
 
-    for (VM vm : getManagedNodeList()) {
-      createCache(vm);
+    for (VM memberVM : this.memberVMs) {
+      this.managementTestRule.createMember(memberVM);
     }
-    
-    checkAggregate(managingNode);
-    for (VM vm : getManagedNodeList()) {
-      closeCache(vm);
-    }
-
-    closeCache(managingNode);
 
+    verifyDistributedSystemMXBean(this.managerVM);
   }
-   
+
   /**
    * Tests alert notifications when the managed members are created before the manager
-   * 
-   * @throws Exception
    */
   @Test
   public void testAlertManagedNodeFirst() throws Exception {
-
-    for (VM vm : getManagedNodeList()) {
-      createCache(vm);
-      warnLevelAlert(vm);
-      severeLevelAlert(vm);
+    for (VM memberVM : this.memberVMs) {
+      this.managementTestRule.createMember(memberVM);
+      generateWarningAlert(memberVM);
+      generateSevereAlert(memberVM);
     }
 
-    VM managingNode = getManagingNode();
+    this.managementTestRule.createManager(this.managerVM);
+    addAlertListener(this.managerVM);
+    verifyAlertCount(this.managerVM, 0, 0);
 
-    createManagementCache(managingNode);
-    startManagingNode(managingNode);
-    addAlertListener(managingNode);
-    checkAlertCount(managingNode, 0, 0);
-
-    final DistributedMember managingMember = getMember(managingNode);
+    DistributedMember managerDistributedMember = this.managementTestRule.getDistributedMember(this.managerVM);
 
     // Before we start we need to ensure that the initial (implicit) SEVERE alert has propagated everywhere.
-    for (VM vm : getManagedNodeList()) {
-      ensureLoggerState(vm, managingMember, Alert.SEVERE);
+    for (VM memberVM : this.memberVMs) {
+      verifyAlertAppender(memberVM, managerDistributedMember, Alert.SEVERE);
     }
 
-    setAlertLevel(managingNode, AlertDetails.getAlertLevelAsString(Alert.WARNING));
+    setAlertLevel(this.managerVM, AlertDetails.getAlertLevelAsString(Alert.WARNING));
 
-    for (VM vm : getManagedNodeList()) {
-      ensureLoggerState(vm, managingMember, Alert.WARNING);
-      warnLevelAlert(vm);
-      severeLevelAlert(vm);
+    for (VM memberVM : this.memberVMs) {
+      verifyAlertAppender(memberVM, managerDistributedMember, Alert.WARNING);
+      generateWarningAlert(memberVM);
+      generateSevereAlert(memberVM);
     }
 
-    checkAlertCount(managingNode, 3, 3);
-    resetAlertCounts(managingNode);
-
-    setAlertLevel(managingNode, AlertDetails.getAlertLevelAsString(Alert.SEVERE));
+    verifyAlertCount(this.managerVM, 3, 3);
+    resetAlertCounts(this.managerVM);
 
-    for (VM vm : getManagedNodeList()) {
-      ensureLoggerState(vm, managingMember, Alert.SEVERE);
-      warnLevelAlert(vm);
-      severeLevelAlert(vm);
-    }
+    setAlertLevel(this.managerVM, AlertDetails.getAlertLevelAsString(Alert.SEVERE));
 
-    checkAlertCount(managingNode, 3, 0);
-    resetAlertCounts(managingNode);
-    
-    for (VM vm : getManagedNodeList()) {
-      closeCache(vm);
+    for (VM memberVM : this.memberVMs) {
+      verifyAlertAppender(memberVM, managerDistributedMember, Alert.SEVERE);
+      generateWarningAlert(memberVM);
+      generateSevereAlert(memberVM);
     }
 
-    closeCache(managingNode);
+    verifyAlertCount(this.managerVM, 3, 0);
   }
-  
-  @SuppressWarnings("serial")
-  public void ensureLoggerState(VM vm1, final DistributedMember member,
-      final int alertLevel) throws Exception {
-    {
-      vm1.invoke(new SerializableCallable("Ensure Logger State") {
-
-        public Object call() throws Exception {
-          
-          Wait.waitForCriterion(new WaitCriterion() {
-            public String description() {
-              return "Waiting for all alert Listener to register with managed node";
-            }
-
-            public boolean done() {
-
-              if (AlertAppender.getInstance().hasAlertListener(member, alertLevel)) {
-                return true;
-              }
-              return false;
-            }
-
-          }, MAX_WAIT, 500, true);
-
-          return null;
-        }
-      });
 
-    }
-  }
-  
   /**
    * Tests the shutDownAllMembers operation exposed by the DistributedSystemMXBean
-   * 
-   * @throws Exception
    */
   @Test
   public void testShutdownAll() throws Exception {
-    final Host host = Host.getHost(0);
-    VM managedNode1 = host.getVM(0);
-    VM managedNode2 = host.getVM(1);
-    VM managedNode3 = host.getVM(2);
-
-    VM managingNode = host.getVM(3);
-
-    // Managing Node is created first
-    createManagementCache(managingNode);
-    startManagingNode(managingNode);
-    
-    createCache(managedNode1);
-    createCache(managedNode2);
-    createCache(managedNode3);
-    shutDownAll(managingNode);
-    closeCache(managingNode);
+    VM memberVM1 = getHost(0).getVM(0);
+    VM memberVM2 = getHost(0).getVM(1);
+    VM memberVM3 = getHost(0).getVM(2);
+
+    VM managerVM = getHost(0).getVM(3);
+
+    // the manager VM is created first
+    this.managementTestRule.createManager(managerVM);
+
+    this.managementTestRule.createMember(memberVM1);
+    this.managementTestRule.createMember(memberVM2);
+    this.managementTestRule.createMember(memberVM3);
+
+    shutDownAll(managerVM);
   }
-  
+
   @Test
-  public void testNavigationAPIS() throws Exception{
-    
-    final Host host = Host.getHost(0); 
-    
-    createManagementCache(managingNode);
-    startManagingNode(managingNode);
-    
-    for(VM vm : managedNodeList){
-      createCache(vm);
+  public void testNavigationAPIS() throws Exception {
+    this.managementTestRule.createManager(this.managerVM);
+
+    for (VM memberVM : this.memberVMs) {
+      this.managementTestRule.createMember(memberVM);
     }
-    
-    checkNavigationAPIs(managingNode);    
+
+    verifyFetchMemberObjectName(this.managerVM, this.memberVMs.length + 1);
   }
-  
+
   @Test
   public void testNotificationHub() throws Exception {
-    this.initManagement(false);
+    this.managementTestRule.createMembers();
+    this.managementTestRule.createManagers();
 
     class NotificationHubTestListener implements NotificationListener {
+
       @Override
       public synchronized void handleNotification(Notification notification, Object handback) {
         logger.info("Notification received {}", notification);
-        notifList.add(notification);
+        notifications.add(notification);
       }
     }
 
-    managingNode
-        .invoke(new SerializableRunnable("Add Listener to MemberMXBean") {
-
-          public void run() {
-            Cache cache = getCache();
-            ManagementService service = getManagementService();
-            final DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
-            
-            Wait.waitForCriterion(new WaitCriterion() {
-              public String description() {
-                return "Waiting for all members to send their initial Data";
-              }
-
-              public boolean done() {
-                if (bean.listMemberObjectNames().length == 5) {// including locator 
-                  return true;
-                } else {
-                  return false;
-                }
-              }
-            }, MAX_WAIT, 500, true);
-            for (ObjectName objectName : bean.listMemberObjectNames()) {
-              NotificationHubTestListener listener = new NotificationHubTestListener();
-              try {
-                mbeanServer.addNotificationListener(objectName, listener, null,
-                    null);
-                notificationListenerMap.put(objectName, listener);
-              } catch (InstanceNotFoundException e) {
-                LogWriterUtils.getLogWriter().error(e);
-              }
-            }
-          }
-        });
+    this.managerVM.invoke("addListenerToMemberMXBean", () -> {
+      ManagementService service = this.managementTestRule.getManagementService();
+      final DistributedSystemMXBean distributedSystemMXBean = service.getDistributedSystemMXBean();
 
-    // Check in all VMS
+      await().until(() -> assertThat(distributedSystemMXBean.listMemberObjectNames()).hasSize(5));
 
-    for (VM vm : managedNodeList) {
-      vm.invoke(new SerializableRunnable("Check Hub Listener num count") {
-
-        public void run() {
-          Cache cache = getCache();
-          SystemManagementService service = (SystemManagementService) getManagementService();
-          NotificationHub hub = service.getNotificationHub();
-          Map<ObjectName, NotificationHubListener> listenerObjectMap = hub
-              .getListenerObjectMap();
-          assertEquals(1, listenerObjectMap.keySet().size());
-          ObjectName memberMBeanName = MBeanJMXAdapter.getMemberMBeanName(cache
-              .getDistributedSystem().getDistributedMember());
+      for (ObjectName objectName : distributedSystemMXBean.listMemberObjectNames()) {
+        NotificationHubTestListener listener = new NotificationHubTestListener();
+        ManagementFactory.getPlatformMBeanServer().addNotificationListener(objectName, listener, null, null);
+        notificationListenerMap.put(objectName, listener);
+      }
+    });
 
-          NotificationHubListener listener = listenerObjectMap
-              .get(memberMBeanName);
+    // Check in all VMS
 
-          /*
-           * Counter of listener should be 2 . One for default Listener which is
-           * added for each member mbean by distributed system mbean One for the
-           * added listener in test
-           */
-          assertEquals(2, listener.getNumCounter());
+    for (VM memberVM : this.memberVMs) {
+      memberVM.invoke("checkNotificationHubListenerCount", () -> {
+        SystemManagementService service = this.managementTestRule.getSystemManagementService();
+        NotificationHub notificationHub = service.getNotificationHub();
+        Map<ObjectName, NotificationHubListener> listenerMap = notificationHub.getListenerObjectMap();
+        assertThat(listenerMap.keySet()).hasSize(1);
 
-          // Raise some notifications
+        ObjectName memberMBeanName = MBeanJMXAdapter.getMemberMBeanName(this.managementTestRule.getDistributedMember());
+        NotificationHubListener listener = listenerMap.get(memberMBeanName);
 
-          NotificationBroadcasterSupport memberLevelNotifEmitter = (MemberMBean) service
-              .getMemberMXBean();
+        /*
+         * The listener counter should be 2: one for the default listener that
+         * the distributed system MBean adds for each member MBean, and one for
+         * the listener added by this test.
+         */
+        assertThat(listener.getNumCounter()).isEqualTo(2);
 
-          String memberSource = MBeanJMXAdapter.getMemberNameOrId(cache
-              .getDistributedSystem().getDistributedMember());
+        // Raise some notifications
 
-          // Only a dummy notification , no actual region is creates
-          Notification notification = new Notification(
-              JMXNotificationType.REGION_CREATED, memberSource, SequenceNumber
-                  .next(), System.currentTimeMillis(),
-                  ManagementConstants.REGION_CREATED_PREFIX + "/test");
-          memberLevelNotifEmitter.sendNotification(notification);
+        NotificationBroadcasterSupport notifier = (MemberMBean) service.getMemberMXBean();
+        String memberSource = MBeanJMXAdapter.getMemberNameOrId(this.managementTestRule.getDistributedMember());
 
-        }
+        // Only a dummy notification; no actual region is created
+        Notification notification = new Notification(JMXNotificationType.REGION_CREATED, memberSource, SequenceNumber.next(), System.currentTimeMillis(), ManagementConstants.REGION_CREATED_PREFIX + "/test");
+        notifier.sendNotification(notification);
       });
     }
 
-    managingNode.invoke(new SerializableRunnable(
-        "Check notifications && Remove Listeners") {
-
-      public void run() {
-
-        Wait.waitForCriterion(new WaitCriterion() {
-          public String description() {
-            return "Waiting for all Notifications to reach the Managing Node";
-          }
-
-          public boolean done() {
-            if (notifList.size() == 3) {
-              return true;
-            } else {
-              return false;
-            }
-          }
-        }, MAX_WAIT, 500, true);
-
-        notifList.clear();
-
-        Iterator<ObjectName> it = notificationListenerMap.keySet().iterator();
-        while (it.hasNext()) {
-          ObjectName objectName = it.next();
-          NotificationListener listener = notificationListenerMap
-              .get(objectName);
-          try {
-            mbeanServer.removeNotificationListener(objectName, listener);
-          } catch (ListenerNotFoundException e) {
-            LogWriterUtils.getLogWriter().error(e);
-          } catch (InstanceNotFoundException e) {
-            LogWriterUtils.getLogWriter().error(e);
-          }
-        }
+    this.managerVM.invoke("checkNotificationsAndRemoveListeners", () -> {
+      await().until(() -> assertThat(notifications).hasSize(3));
 
+      notifications.clear();
+
+      for (ObjectName objectName : notificationListenerMap.keySet()) {
+        NotificationListener listener = notificationListenerMap.get(objectName);
+        ManagementFactory.getPlatformMBeanServer().removeNotificationListener(objectName, listener);
       }
     });
 
     // Check in all VMS again
 
-    for (VM vm : managedNodeList) {
-      vm.invoke(new SerializableRunnable("Check Hub Listener num count Again") {
-
-        public void run() {
-          Cache cache = getCache();
-          SystemManagementService service = (SystemManagementService) getManagementService();
-          NotificationHub hub = service.getNotificationHub();
-          Map<ObjectName, NotificationHubListener> listenerObjectMap = hub
-              .getListenerObjectMap();
-
-          assertEquals(1, listenerObjectMap.keySet().size());
-
-          ObjectName memberMBeanName = MBeanJMXAdapter.getMemberMBeanName(cache
-              .getDistributedSystem().getDistributedMember());
-
-          NotificationHubListener listener = listenerObjectMap
-              .get(memberMBeanName);
-
-          /*
-           * Counter of listener should be 1 for the default Listener which is
-           * added for each member mbean by distributed system mbean.
-           */
-          assertEquals(1, listener.getNumCounter());
-
-        }
+    for (VM memberVM : this.memberVMs) {
+      memberVM.invoke("checkNotificationHubListenerCountAgain", () -> {
+        SystemManagementService service = this.managementTestRule.getSystemManagementService();
+        NotificationHub hub = service.getNotificationHub();
+        Map<ObjectName, NotificationHubListener> listenerObjectMap = hub.getListenerObjectMap();
+        assertThat(listenerObjectMap.keySet().size()).isEqualTo(1);
+
+        ObjectName memberMBeanName = MBeanJMXAdapter.getMemberMBeanName(this.managementTestRule.getDistributedMember());
+        NotificationHubListener listener = listenerObjectMap.get(memberMBeanName);
+
+        /*
+         * The listener counter should be 1: only the default listener that the
+         * distributed system MBean adds for each member MBean remains.
+         */
+        assertThat(listener.getNumCounter()).isEqualTo(1);
       });
     }
 
-    managingNode
-    .invoke(new SerializableRunnable("Remove Listener from MemberMXBean") {
-
-      public void run() {
-        Cache cache = getCache();
-        ManagementService service = getManagementService();
-        final DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
-        
-        Wait.waitForCriterion(new WaitCriterion() {
-          public String description() {
-            return "Waiting for all members to send their initial Data";
-          }
-
-          public boolean done() {
-            if (bean.listMemberObjectNames().length == 5) {// including locator 
-              return true;
-            } else {
-              return false;
-            }
-
-          }
-
-        }, MAX_WAIT, 500, true);
-        for (ObjectName objectName : bean.listMemberObjectNames()) {
-          NotificationHubTestListener listener = new NotificationHubTestListener();
-          try {
-            mbeanServer.removeNotificationListener(objectName, listener);
-          } catch (InstanceNotFoundException e) {
-            LogWriterUtils.getLogWriter().error(e);
-          } catch (ListenerNotFoundException e) {
-            // TODO: apparently there is never a notification listener on any these mbeans at this point 
-            // fix this test so it doesn't hit these unexpected exceptions -- getLogWriter().error(e);
-          }
+    this.managerVM.invoke("removeListenerFromMemberMXBean", () -> {
+      ManagementService service = this.managementTestRule.getManagementService();
+      DistributedSystemMXBean distributedSystemMXBean = service.getDistributedSystemMXBean();
+
+      await().until(() -> assertThat(distributedSystemMXBean.listMemberObjectNames()).hasSize(5));
+
+      for (ObjectName objectName : distributedSystemMXBean.listMemberObjectNames()) {
+        NotificationHubTestListener listener = new NotificationHubTestListener();
+        try {
+          ManagementFactory.getPlatformMBeanServer().removeNotificationListener(objectName, listener); // this new listener instance was never registered, so removal throws
+        } catch (ListenerNotFoundException e) {
+          // TODO: [old] apparently there is never a notification listener on any of these mbeans at this point [fix this]
+          // fix this test so it doesn't hit these unexpected exceptions -- getLogWriter().error(e);
         }
       }
     });
-    
-    for (VM vm : managedNodeList) {
-      vm.invoke(new SerializableRunnable("Check Hub Listeners clean up") {
-
-        public void run() {
-          Cache cache = getCache();
-          SystemManagementService service = (SystemManagementService) getManagementService();
-          NotificationHub hub = service.getNotificationHub();
-          hub.cleanUpListeners();
-          assertEquals(0, hub.getListenerObjectMap().size());
-
-          Iterator<ObjectName> it = notificationListenerMap.keySet().iterator();
-          while (it.hasNext()) {
-            ObjectName objectName = it.next();
-            NotificationListener listener = notificationListenerMap
-                .get(objectName);
-            try {
-              mbeanServer.removeNotificationListener(objectName, listener);
-              fail("Found Listeners inspite of clearing them");
-            } catch (ListenerNotFoundException e) {
-              // Expected Exception Do nothing
-            } catch (InstanceNotFoundException e) {
-              LogWriterUtils.getLogWriter().error(e);
-            }
-          }
+
+    for (VM memberVM : this.memberVMs) {
+      memberVM.invoke("verifyNotificationHubListenersWereRemoved", () -> {
+        SystemManagementService service = this.managementTestRule.getSystemManagementService();
+        NotificationHub notificationHub = service.getNotificationHub();
+        notificationHub.cleanUpListeners();
+        assertThat(notificationHub.getListenerObjectMap()).isEmpty();
+
+        for (ObjectName objectName : notificationListenerMap.keySet()) {
+          NotificationListener listener = notificationListenerMap.get(objectName);
+          assertThatThrownBy(() -> ManagementFactory.getPlatformMBeanServer().removeNotificationListener(objectName, listener)).isExactlyInstanceOf(ListenerNotFoundException.class);
         }
       });
     }
   }
-  
+
   /**
    * Tests warning and severe alert counts as the manager's alert level is changed
-   * 
-   * @throws Exception
    */
   @Test
   public void testAlert() throws Exception {
-    VM managingNode = getManagingNode();
-   
-    createManagementCache(managingNode);
-    startManagingNode(managingNode);
-    addAlertListener(managingNode);
-    resetAlertCounts(managingNode);
-    
-    final DistributedMember managingMember = getMember(managingNode);
-    
-    
-    
-    warnLevelAlert(managingNode);
-    severeLevelAlert(managingNode);
-    checkAlertCount(managingNode, 1, 0);
-    resetAlertCounts(managingNode);
-    
-    for (VM vm : getManagedNodeList()) {
-      
-      createCache(vm);
-      // Default is severe ,So only Severe level alert is expected
-      
-      ensureLoggerState(vm, managingMember, Alert.SEVERE);
-      
-      warnLevelAlert(vm);
-      severeLevelAlert(vm);
-      
-    }
-    checkAlertCount(managingNode, 3, 0);
-    resetAlertCounts(managingNode);
-    setAlertLevel(managingNode, AlertDetails.getAlertLevelAsString(Alert.WARNING));
-
-    
-    for (VM vm : getManagedNodeList()) {
-      // warning and severe alerts both are to be checked
-      ensureLoggerState(vm, managingMember, Alert.WARNING);
-      warnLevelAlert(vm);
-      severeLevelAlert(vm);
-    }
+    this.managementTestRule.createManager(this.managerVM);
+    addAlertListener(this.managerVM);
+    resetAlertCounts(this.managerVM);
+
+    DistributedMember managerDistributedMember = this.managementTestRule.getDistributedMember(this.managerVM);
+
+    generateWarningAlert(this.managerVM);
+    generateSevereAlert(this.managerVM);
+    verifyAlertCount(this.managerVM, 1, 0);
+    resetAlertCounts(this.managerVM);
+
+    for (VM memberVM : this.memberVMs) {
+      this.managementTestRule.createMember(memberVM);
 
-    checkAlertCount(managingNode, 3, 3);
-    
-    resetAlertCounts(managingNode);
-    
-    setAlertLevel(managingNode, AlertDetails.getAlertLevelAsString(Alert.OFF));
-    
-    for (VM vm : getManagedNodeList()) {
-      ensureLoggerState(vm, managingMember, Alert.OFF);
-      warnLevelAlert(vm);
-      severeLevelAlert(vm);
+      verifyAlertAppender(memberVM, managerDistributedMember, Alert.SEVERE);
+
+      generateWarningAlert(memberVM);
+      generateSevereAlert(memberVM);
     }
-    checkAlertCount(managingNode, 0, 0);
-    resetAlertCounts(managingNode);
-    
-    for (VM vm : getManagedNodeList()) {
-      closeCache(vm);
+
+    verifyAlertCount(this.managerVM, 3, 0);
+    resetAlertCounts(this.managerVM);
+    setAlertLevel(this.managerVM, AlertDetails.getAlertLevelAsString(Alert.WARNING));
+
+    for (VM memberVM : this.memberVMs) {
+      verifyAlertAppender(memberVM, managerDistributedMember, Alert.WARNING);
+      generateWarningAlert(memberVM);
+      generateSevereAlert(memberVM);
     }
 
-    closeCache(managingNode);
+    verifyAlertCount(this.managerVM, 3, 3);
 
-  }
-  
-  @SuppressWarnings("serial")
-  public void checkAlertCount(VM vm1, final int expectedSevereAlertCount,
-      final int expectedWarningAlertCount) throws Exception {
-    {
-      vm1.invoke(new SerializableCallable("Check Alert Count") {
-
-        public Object call() throws Exception {
-          final AlertNotifListener nt = AlertNotifListener.getInstance();
-          Wait.waitForCriterion(new WaitCriterion() {
-            public String description() {
-              return "Waiting for all alerts to reach the Managing Node";
-            }
-            public boolean done() {
-              if (expectedSevereAlertCount == nt.getseverAlertCount()
-                  && expectedWarningAlertCount == nt.getWarnigAlertCount()) {
-                return true;
-              } else {
-                return false;
-              }
-
-            }
-
-          }, MAX_WAIT, 500, true);
-
-          return null;
-        }
-      });
+    resetAlertCounts(this.managerVM);
+
+    setAlertLevel(this.managerVM, AlertDetails.getAlertLevelAsString(Alert.OFF));
 
+    for (VM memberVM : this.memberVMs) {
+      verifyAlertAppender(memberVM, managerDistributedMember, Alert.OFF);
+      generateWarningAlert(memberVM);
+      generateSevereAlert(memberVM);
     }
+
+    verifyAlertCount(this.managerVM, 0, 0);
   }
-  
 
+  private void verifyAlertAppender(final VM memberVM, final DistributedMember member, final int alertLevel) {
+    memberVM.invoke("verifyAlertAppender", () -> await().until(() -> assertThat(AlertAppender.getInstance().hasAlertListener(member, alertLevel)).isTrue()));
+  }
 
-  
-  @SuppressWarnings("serial")
-  public void setAlertLevel(VM vm1, final String alertLevel) throws Exception {
-    {
-      vm1.invoke(new SerializableCallable("Set Alert level") {
+  private void verifyAlertCount(final VM managerVM, final int expectedSevereAlertCount, final int expectedWarningAlertCount) {
+    managerVM.invoke("verifyAlertCount", () -> {
+      AlertNotificationListener listener = AlertNotificationListener.getInstance();
 
-        public Object call() throws Exception {
-          GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-          ManagementService service = getManagementService();
-          DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
-          assertNotNull(bean);
-          bean.changeAlertLevel(alertLevel);
+      await().until(() -> assertThat(listener.getSevereAlertCount()).isEqualTo(expectedSevereAlertCount));
+      await().until(() -> assertThat(listener.getWarningAlertCount()).isEqualTo(expectedWarningAlertCount));
+    });
+  }
 
-          return null;
-        }
-      });
+  private void setAlertLevel(final VM managerVM, final String alertLevel) {
+    managerVM.invoke("setAlertLevel", () -> {
+      ManagementService service = this.managementTestRule.getManagementService();
+      DistributedSystemMXBean distributedSystemMXBean = service.getDistributedSystemMXBean();
+      distributedSystemMXBean.changeAlertLevel(alertLevel);
+    });
+  }
 
-    }
+  private void generateWarningAlert(final VM anyVM) {
+    anyVM.invoke("generateWarningAlert", () -> {
+      IgnoredException ignoredException = IgnoredException.addIgnoredException(WARNING_LEVEL_MESSAGE);
+      logger.warn(WARNING_LEVEL_MESSAGE);
+      ignoredException.remove();
+    });
   }
-  
-  @SuppressWarnings("serial")
-  public void warnLevelAlert(VM vm1) throws Exception {
-    {
-      vm1.invoke(new SerializableCallable("Warning level Alerts") {
-
-        public Object call() throws Exception {
-          final IgnoredException warnEx = IgnoredException.addIgnoredException(WARNING_LEVEL_MESSAGE);
-          logger.warn(WARNING_LEVEL_MESSAGE);
-          warnEx.remove();
-          return null;
-        }
-      });
 
-    }
+  private void resetAlertCounts(final VM managerVM) {
+    managerVM.invoke("resetAlertCounts", () -> {
+      AlertNotificationListener listener = AlertNotificationListener.getInstance();
+      listener.resetCount();
+    });
   }
-  
-  
-  @SuppressWarnings("serial")
-  public void resetAlertCounts(VM vm1) throws Exception {
-    {
-      vm1.invoke(new SerializableCallable("Reset Alert Count") {
-
-        public Object call() throws Exception {
-          AlertNotifListener nt =  AlertNotifListener.getInstance();
-          nt.resetCount();
-          return null;
-        }
-      });
 
-    }
+  private void generateSevereAlert(final VM anyVM) {
+    anyVM.invoke("generateSevereAlert", () -> {
+      IgnoredException ignoredException = IgnoredException.addIgnoredException(SEVERE_LEVEL_MESSAGE);
+      logger.fatal(SEVERE_LEVEL_MESSAGE);
+      ignoredException.remove();
+    });
   }
 
-  @SuppressWarnings("serial")
-  public void severeLevelAlert(VM vm1) throws Exception {
-    {
-      vm1.invoke(new SerializableCallable("Severe Level Alert") {
-
-        public Object call() throws Exception {
-          // add expected exception strings         
-          
-          final IgnoredException severeEx = IgnoredException.addIgnoredException(SEVERE_LEVEL_MESSAGE);
-          logger.fatal(SEVERE_LEVEL_MESSAGE);
-          severeEx.remove();
-          return null;
-        }
-      });
+  private void addAlertListener(final VM managerVM) {
+    managerVM.invoke("addAlertListener", () -> {
+      AlertNotificationListener listener = AlertNotificationListener.getInstance();
+      listener.resetCount();
 
-    }
-  }
-  
-  @SuppressWarnings("serial")
-  public void addAlertListener(VM vm1) throws Exception {
-    {
-      vm1.invoke(new SerializableCallable("Add Alert Listener") {
-
-        public Object call() throws Exception {
-          GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-          ManagementService service = getManagementService();
-          DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
-          AlertNotifListener nt =  AlertNotifListener.getInstance();
-          nt.resetCount();
-          
-          NotificationFilter notificationFilter = new NotificationFilter() {
-            @Override
-            public boolean isNotificationEnabled(Notification notification) {
-              return notification.getType().equals(JMXNotificationType.SYSTEM_ALERT);
-            }
-
-          };
-          
-          mbeanServer.addNotificationListener(MBeanJMXAdapter
-              .getDistributedSystemName(), nt, notificationFilter, null);
-
-          return null;
-        }
-      });
+      NotificationFilter notificationFilter = (Notification notification) -> notification.getType().equals(JMXNotificationType.SYSTEM_ALERT);
 
-    }
+      ManagementFactory.getPlatformMBeanServer().addNotificationListener(MBeanJMXAdapter.getDistributedSystemName(), listener, notificationFilter, null);
+    });
   }
-  
+
   /**
    * Check aggregate related functions and attributes
-   * @param vm1
-   * @throws Exception
    */
-  @SuppressWarnings("serial")
-  public void checkAggregate(VM vm1) throws Exception {
-    {
-      vm1.invoke(new SerializableCallable("Chech Aggregate Attributes") {
-
-        public Object call() throws Exception {
-          GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-
-          ManagementService service = getManagementService();
-
-          final DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
-          assertNotNull(service.getDistributedSystemMXBean());
-          
-          Wait.waitForCriterion(new WaitCriterion() {
-            public String description() {
-              return "Waiting All members to intitialize DistributedSystemMBean expect 5 but found " + bean.getMemberCount();
-            }
-            public boolean done() {
-              // including locator
-              if (bean.getMemberCount() == 5) {
-                return true;
-              } else {
-                return false;
-              }
-
-            }
-
-          }, MAX_WAIT, 500, true);
-
-
-
-          final Set<DistributedMember> otherMemberSet = cache
-              .getDistributionManager().getOtherNormalDistributionManagerIds();
-          Iterator<DistributedMember> memberIt = otherMemberSet.iterator();
-          while (memberIt.hasNext()) {
-            DistributedMember member = memberIt.next();
-            LogWriterUtils.getLogWriter().info(
-                "JVM Metrics For Member " + member.getId() + ":"
-                    + bean.showJVMMetrics(member.getId()));
-            LogWriterUtils.getLogWriter().info(
-                "OS Metrics For Member " + member.getId() + ":"
-                    + bean.showOSMetrics(member.getId()));
-          }
-
-          return null;
-        }
-      });
+  private void verifyDistributedSystemMXBean(final VM managerVM) {
+    managerVM.invoke("verifyDistributedSystemMXBean", () -> {
+      ManagementService service = this.managementTestRule.getManagementService();
+      DistributedSystemMXBean distributedSystemMXBean = service.getDistributedSystemMXBean();
 
-    }
+      await().until(() -> assertThat(distributedSystemMXBean.getMemberCount()).isEqualTo(5));
+
+      Set<DistributedMember> otherMemberSet = this.managementTestRule.getOtherNormalMembers();
+      for (DistributedMember member : otherMemberSet) {
+        // TODO: need assertions? JVMMetrics and OSMetrics
+      }
+    });
   }
 
-  @SuppressWarnings("serial")
-  public void addNotificationListener(VM vm1) throws Exception {
-    {
-      vm1.invoke(new SerializableCallable("Add Notification Listener") {
-
-        public Object call() throws Exception {
-          GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-          ManagementService service = getManagementService();
-          DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
-          assertNotNull(bean);
-          TestDistributedSystemNotif nt = new TestDistributedSystemNotif();
-          mbeanServer.addNotificationListener(MBeanJMXAdapter
-              .getDistributedSystemName(), nt, null, null);
-
-          return null;
-        }
-      });
+  private void addNotificationListener(final VM managerVM) {
+    managerVM.invoke("addNotificationListener", () -> {
+      ManagementService service = this.managementTestRule.getManagementService();
+      DistributedSystemMXBean distributedSystemMXBean = service.getDistributedSystemMXBean();
+      assertThat(distributedSystemMXBean).isNotNull();
 
-    }
+      DistributedSystemNotificationListener listener = new DistributedSystemNotificationListener();
+      ManagementFactory.getPlatformMBeanServer().addNotificationListener(MBeanJMXAdapter.getDistributedSystemName(), listener, null, null);
+    });
   }
 
- 
-
-  @SuppressWarnings("serial")
-  public void shutDownAll(VM vm1) throws Exception {
-    {
-      vm1.invoke(new SerializableCallable("Shut Down All") {
-
-        public Object call() throws Exception {
-          GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-          ManagementService service = getManagementService();
-          DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
-          assertNotNull(service.getDistributedSystemMXBean());
-          bean.shutDownAllMembers();
-          Wait.pause(2000);
-          assertEquals(
-              cache.getDistributedSystem().getAllOtherMembers().size(), 1);
-          return null;
-        }
-      });
+  private void shutDownAll(final VM managerVM) {
+    managerVM.invoke("shutDownAll", () -> {
+      ManagementService service = this.managementTestRule.getManagementService();
+      DistributedSystemMXBean distributedSystemMXBean = service.getDistributedSystemMXBean();
+      distributedSystemMXBean.shutDownAllMembers();
 
-    }
+      await().until(() -> assertThat(this.managementTestRule.getOtherNormalMembers()).hasSize(0));
+    });
   }
-  
-
-  
-  @SuppressWarnings("serial")
-  public void checkNavigationAPIs(VM vm1) throws Exception {
-    {
-      vm1.invoke(new SerializableCallable("Check Navigation APIS") {
-
-        public Object call() throws Exception {
-          GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-          ManagementService service = getManagementService();
-          final DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
-          
-          assertNotNull(service.getDistributedSystemMXBean());
-          
-          waitForAllMembers(4);
-          
-          for(int i =0; i< bean.listMemberObjectNames().length ; i++){
-            LogWriterUtils.getLogWriter().info(
-                "ObjectNames Of the Mmeber" + bean.listMemberObjectNames()[i] );
-          }
-
-          
-          ObjectName thisMemberName = MBeanJMXAdapter
-              .getMemberMBeanName(InternalDistributedSystem
-                  .getConnectedInstance().getDistributedMember().getId());
-
-          ObjectName memberName = bean
-              .fetchMemberObjectName(InternalDistributedSystem
-                  .getConnectedInstance().getDistributedMember().getId());
-          assertEquals(thisMemberName, memberName);
-          
-          return null;
-        }
-      });
 
-    }
+  private void verifyFetchMemberObjectName(final VM managerVM, final int memberCount) {
+    managerVM.invoke("verifyFetchMemberObjectName", () -> {
+      ManagementService service = this.managementTestRule.getManagementService();
+      DistributedSystemMXBean distributedSystemMXBean = service.getDistributedSystemMXBean();
+
+      await().until(() -> assertThat(distributedSystemMXBean.listMemberObjectNames()).hasSize(memberCount));
+
+      String memberId = this.managementTestRule.getDistributedMember().getId();
+      ObjectName thisMemberName = MBeanJMXAdapter.getMemberMBeanName(memberId);
+      ObjectName memberName = distributedSystemMXBean.fetchMemberObjectName(memberId);
+      assertThat(memberName).isEqualTo(thisMemberName);
+    });
   }
 
+  private ConditionFactory await() {
+    return Awaitility.await().atMost(2, MINUTES);
+  }
 
-  /**
-   * Notification handler
-   * 
-   * 
-   */
-  private static class TestDistributedSystemNotif implements
-      NotificationListener {
+  private static class DistributedSystemNotificationListener implements NotificationListener {
 
     @Override
-    public void handleNotification(Notification notification, Object handback) {
-      assertNotNull(notification);      
+    public void handleNotification(final Notification notification, final Object handback) {
+      assertThat(notification).isNotNull();
     }
-
   }
-  
-  /**
-   * Notification handler
-   * 
-   * 
-   */
-  private static class AlertNotifListener implements NotificationListener {
-    
-    private static AlertNotifListener listener = new AlertNotifListener();
-    
-    public static AlertNotifListener getInstance(){
+
+  private static class AlertNotificationListener implements NotificationListener {
+
+    private static AlertNotificationListener listener = new AlertNotificationListener();
+
+    private int warningAlertCount = 0;
+
+    private int severeAlertCount = 0;
+
+    static AlertNotificationListener getInstance() { // TODO: get rid of singleton
       return listener;
     }
 
-    private int warnigAlertCount = 0;
+    @Override
+    public synchronized void handleNotification(final Notification notification, final Object handback) {
+      assertThat(notification).isNotNull();
 
-    private int severAlertCount = 0;
+      Map<String, String> notificationUserData = (Map<String, String>) notification.getUserData();
 
-    @Override
-    public synchronized void handleNotification(Notification notification, Object handback) {
-      assertNotNull(notification);
-      logger.info("Notification received {}", notification);
-      Map<String,String> notifUserData = (Map<String,String>)notification.getUserData();
-      if (notifUserData.get(JMXNotificationUserData.ALERT_LEVEL).equalsIgnoreCase("warning")) {
-        assertEquals(WARNING_LEVEL_MESSAGE,notification.getMessage());
-        ++warnigAlertCount;
+      if (notificationUserData.get(JMXNotificationUserData.ALERT_LEVEL).equalsIgnoreCase("warning")) {
+        assertThat(notification.getMessage()).isEqualTo(WARNING_LEVEL_MESSAGE);
+        warningAlertCount++;
       }
-      if (notifUserData.get(JMXNotificationUserData.ALERT_LEVEL).equalsIgnoreCase("severe")) {
-        assertEquals(SEVERE_LEVEL_MESSAGE,notification.getMessage());
-        ++severAlertCount;
+      if (notificationUserData.get(JMXNotificationUserData.ALERT_LEVEL).equalsIgnoreCase("severe")) {
+        assertThat(notification.getMessage()).isEqualTo(SEVERE_LEVEL_MESSAGE);
+        severeAlertCount++;
       }
     }
 
-    public void resetCount() {
-      warnigAlertCount = 0;
-
-      severAlertCount = 0;
+    void resetCount() {
+      warningAlertCount = 0;
+      severeAlertCount = 0;
     }
 
-    public int getWarnigAlertCount() {
-      return warnigAlertCount;
+    int getWarningAlertCount() {
+      return warningAlertCount;
     }
 
-    public int getseverAlertCount() {
-      return severAlertCount;
+    int getSevereAlertCount() {
+      return severeAlertCount;
     }
-
   }
-
 }
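
A side note on the JMX plumbing exercised above: a system-alert listener is plain javax.management wiring, registered on Geode's distributed system MBean with a filter on the SYSTEM_ALERT notification type. The sketch below repeats the registration pattern from addAlertListener() outside the test rule; it is illustrative only, the printed message is a placeholder, and MBeanJMXAdapter is an internal class.

import java.lang.management.ManagementFactory;

import javax.management.Notification;
import javax.management.NotificationFilter;
import javax.management.NotificationListener;
import javax.management.ObjectName;

import org.apache.geode.management.JMXNotificationType;
import org.apache.geode.management.internal.MBeanJMXAdapter;

public class SystemAlertListenerSketch {

  // Registers a listener for Geode system alerts, mirroring addAlertListener() in the test above.
  public static void registerAlertListener() throws Exception {
    ObjectName distributedSystemName = MBeanJMXAdapter.getDistributedSystemName();

    // Deliver only SYSTEM_ALERT notifications to the listener.
    NotificationFilter onlySystemAlerts =
        (Notification notification) -> JMXNotificationType.SYSTEM_ALERT.equals(notification.getType());

    NotificationListener listener =
        (Notification notification, Object handback) ->
            System.out.println("alert received: " + notification.getMessage());

    ManagementFactory.getPlatformMBeanServer()
        .addNotificationListener(distributedSystemName, listener, onlySystemAlerts, null);
  }
}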

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/management/JMXMBeanDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/JMXMBeanDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/JMXMBeanDUnitTest.java
index ffa024f..dd22f7d 100644
--- a/geode-core/src/test/java/org/apache/geode/management/JMXMBeanDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/JMXMBeanDUnitTest.java
@@ -51,7 +51,7 @@ import org.apache.geode.test.junit.categories.FlakyTest;
 import org.apache.geode.test.junit.rules.serializable.SerializableTemporaryFolder;
 import org.apache.geode.util.test.TestUtil;
 
-public class JMXMBeanDUnitTest extends DistributedTestCase {
+public class JMXMBeanDUnitTest extends DistributedTestCase { // TODO: rename and fix on Mac
 
   private Host host;
   private VM locator;

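Stepping back from the individual files in this commit: the refactor replaces ManagementTestBase inheritance with a JUnit rule plus annotated VM fields. A bare-bones skeleton of that scaffolding is sketched below. It is not taken from the commit, and it assumes ManagementTestRule, @Manager and @Member live in the org.apache.geode.management test package alongside these tests, as the diffs suggest.

package org.apache.geode.management; // assumed location, same package as the tests above

import java.io.Serializable;

import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import org.apache.geode.test.dunit.VM;
import org.apache.geode.test.junit.categories.DistributedTest;

@Category(DistributedTest.class)
@SuppressWarnings("serial")
public class ManagementTestRuleSketch implements Serializable {

  // The rule injects DUnit VMs into the annotated fields before each test runs.
  @Manager
  private VM managerVM;

  @Member
  private VM[] memberVMs;

  @Rule
  public ManagementTestRule managementTestRule = ManagementTestRule.builder().build();

  @Test
  public void managerSeesItsMembers() throws Exception {
    this.managementTestRule.createManager(this.managerVM);
    for (VM memberVM : this.memberVMs) {
      this.managementTestRule.createMember(memberVM);
    }
    // MBean assertions would go here, for example against the DistributedSystemMXBean as above.
  }
}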

[23/50] [abbrv] incubator-geode git commit: GEODE-2006: add FlakyTest category to testSelectCommand

Posted by kl...@apache.org.
GEODE-2006: add FlakyTest category to testSelectCommand

Also, remove unused serial id.
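
For context, tagging a test with a category is a one-line JUnit annotation, and the diff below adds exactly that to testSelectCommand. A minimal standalone sketch of the pattern (not taken from this commit; the method name is made up):

import org.junit.Test;
import org.junit.experimental.categories.Category;

import org.apache.geode.test.junit.categories.FlakyTest;

public class FlakyTaggingSketch {

  // Marks a single test method as flaky so build tooling can include or exclude it by category.
  @Category(FlakyTest.class) // usually annotated alongside the tracking JIRA id, e.g. GEODE-2006
  @Test
  public void occasionallyFailingScenario() {
    // test body omitted
  }
}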


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/474ff41e
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/474ff41e
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/474ff41e

Branch: refs/heads/feature/GEODE-1930
Commit: 474ff41e9ff4f357baaea27e6b4da7d4328f2231
Parents: 3d173b1
Author: Kirk Lund <kl...@apache.org>
Authored: Mon Oct 17 11:06:12 2016 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Mon Oct 17 16:30:19 2016 -0700

----------------------------------------------------------------------
 .../internal/cli/commands/GemfireDataCommandsDUnitTest.java        | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/474ff41e/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/GemfireDataCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/GemfireDataCommandsDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/GemfireDataCommandsDUnitTest.java
index 5417ccb..8be6d99 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/GemfireDataCommandsDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/GemfireDataCommandsDUnitTest.java
@@ -91,7 +91,6 @@ import org.apache.geode.test.junit.categories.FlakyTest;
 @SuppressWarnings("serial")
 public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
 
-  private static final long serialVersionUID = 1L;
   private static final String REGION_NAME = "FunctionCommandsReplicatedRegion";
   private static final String REBALANCE_REGION_NAME = "GemfireDataCommandsDUnitTestRegion";
   private static final String REBALANCE_REGION2_NAME = "GemfireDataCommandsDUnitTestRegion2";
@@ -522,6 +521,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
     validateResult(cmdResult, true);
   }
 
+  @Category(FlakyTest.class) // GEODE-2006
   @Test
   public void testSelectCommand() {
     setupForSelect();


[41/50] [abbrv] incubator-geode git commit: GEODE-2020: for rest api get request, use utf-8 as response encoding.

Posted by kl...@apache.org.
GEODE-2020: for rest api get request, use utf-8 as response encoding.

* add more test assertions.
* fix legacy tests
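
The encoding change is easiest to see from the client side: issue a GET with Apache HttpClient and decode the body explicitly as UTF-8, which is the kind of plumbing the new GeodeRestClient helper below wraps. The sketch is not from the commit; the URL is a made-up placeholder, and a real call would target a running REST-enabled server on its actual port and path.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;

public class Utf8GetSketch {

  // Issues a GET and decodes the response body as UTF-8 rather than the platform default charset.
  public static String getBodyAsUtf8(String url) throws Exception {
    try (CloseableHttpClient client = HttpClients.createDefault()) {
      HttpResponse response = client.execute(new HttpGet(url));
      StringBuilder body = new StringBuilder();
      try (BufferedReader reader = new BufferedReader(
          new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8))) {
        String line;
        while ((line = reader.readLine()) != null) {
          body.append(line).append('\n');
        }
      }
      return body.toString();
    }
  }

  public static void main(String[] args) throws Exception {
    // Placeholder URL only; substitute the real host, port, and REST base path.
    System.out.println(getBodyAsUtf8("http://localhost:8080/placeholder/path"));
  }
}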


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/fadd92b0
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/fadd92b0
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/fadd92b0

Branch: refs/heads/feature/GEODE-1930
Commit: fadd92b0556ac6d3a48ffccbf64100fd94689e62
Parents: af55d92
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Thu Oct 20 15:28:50 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Fri Oct 21 10:37:54 2016 -0700

----------------------------------------------------------------------
 .../rest/internal/web/GeodeRestClient.java      | 148 +++++++
 .../web/RestSecurityIntegrationTest.java        | 410 ++++++-------------
 .../web/controllers/CommonCrudController.java   |   6 +-
 .../controllers/FunctionAccessController.java   |   2 +-
 .../web/controllers/PdxBasedCrudController.java |   4 +-
 .../web/controllers/QueryAccessController.java  |   4 +-
 6 files changed, 290 insertions(+), 284 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/fadd92b0/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/GeodeRestClient.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/GeodeRestClient.java b/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/GeodeRestClient.java
new file mode 100644
index 0000000..c83cebb
--- /dev/null
+++ b/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/GeodeRestClient.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.geode.rest.internal.web;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.MalformedURLException;
+import java.nio.charset.StandardCharsets;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpHost;
+import org.apache.http.HttpResponse;
+import org.apache.http.auth.AuthScope;
+import org.apache.http.auth.UsernamePasswordCredentials;
+import org.apache.http.client.AuthCache;
+import org.apache.http.client.ClientProtocolException;
+import org.apache.http.client.CredentialsProvider;
+import org.apache.http.client.methods.HttpDelete;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpHead;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+import org.apache.http.client.methods.HttpRequestBase;
+import org.apache.http.client.protocol.HttpClientContext;
+import org.apache.http.entity.StringEntity;
+import org.apache.http.impl.auth.BasicScheme;
+import org.apache.http.impl.client.BasicAuthCache;
+import org.apache.http.impl.client.BasicCredentialsProvider;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClients;
+import org.json.JSONTokener;
+import org.junit.Assert;
+
+public class GeodeRestClient {
+
+  public final static String PROTOCOL = "http";
+  public final static String HOSTNAME = "localhost";
+  public final static String CONTEXT = "/geode/v1";
+
+  private int restPort = 0;
+  public GeodeRestClient(int restPort){
+    this.restPort = restPort;
+  }
+
+  public HttpResponse doHEAD(String query, String username, String password) throws MalformedURLException {
+    HttpHead httpHead = new HttpHead(CONTEXT + query);
+    return doRequest(httpHead, username, password);
+  }
+
+  public HttpResponse doPost(String query, String username, String password, String body) throws MalformedURLException {
+    HttpPost httpPost = new HttpPost(CONTEXT + query);
+    httpPost.addHeader("content-type", "application/json");
+    httpPost.setEntity(new StringEntity(body, StandardCharsets.UTF_8));
+    return doRequest(httpPost, username, password);
+  }
+
+  public HttpResponse doPut(String query, String username, String password, String body) throws MalformedURLException {
+    HttpPut httpPut = new HttpPut(CONTEXT + query);
+    httpPut.addHeader("content-type", "application/json");
+    httpPut.setEntity(new StringEntity(body, StandardCharsets.UTF_8));
+    return doRequest(httpPut, username, password);
+  }
+
+  public HttpResponse doGet(String uri, String username, String password) throws MalformedURLException {
+    HttpGet getRequest = new HttpGet(CONTEXT + uri);
+    return doRequest(getRequest, username, password);
+  }
+  public HttpResponse doGet(String uri) throws MalformedURLException {
+    return doGet(uri, null, null);
+  }
+
+  public HttpResponse doDelete(String uri, String username, String password) throws MalformedURLException {
+    HttpDelete httpDelete = new HttpDelete(CONTEXT + uri);
+    return doRequest(httpDelete, username, password);
+  }
+
+  public static String getContentType(HttpResponse response){
+    return response.getEntity().getContentType().getValue();
+  }
+
+  /**
+   * Retrieve the status code of the HttpResponse
+   *
+   * @param response The HttpResponse message received from the server
+   *
+   * @return a numeric value
+   */
+  public static int getCode(HttpResponse response) {
+    return response.getStatusLine().getStatusCode();
+  }
+
+  public static JSONTokener getResponseBody(HttpResponse response) throws IOException {
+    HttpEntity entity = response.getEntity();
+    InputStream content = entity.getContent();
+    BufferedReader reader = new BufferedReader(new InputStreamReader(content));
+    String line;
+    StringBuilder str = new StringBuilder();
+    while ((line = reader.readLine()) != null) {
+      str.append(line);
+    }
+    return new JSONTokener(str.toString());
+  }
+
+  private HttpResponse doRequest(HttpRequestBase request, String username, String password) throws MalformedURLException {
+    HttpHost targetHost = new HttpHost(HOSTNAME,restPort, PROTOCOL);
+    CloseableHttpClient httpclient = HttpClients.custom().build();
+    HttpClientContext clientContext = HttpClientContext.create();
+    // if username is null, do not put in authentication
+    if (username != null) {
+      CredentialsProvider credsProvider = new BasicCredentialsProvider();
+      credsProvider.setCredentials(new AuthScope(targetHost.getHostName(), targetHost.getPort()), new UsernamePasswordCredentials(username, password));
+      httpclient = HttpClients.custom().setDefaultCredentialsProvider(credsProvider).build();
+      AuthCache authCache = new BasicAuthCache();
+      BasicScheme basicAuth = new BasicScheme();
+      authCache.put(targetHost, basicAuth);
+      clientContext.setCredentialsProvider(credsProvider);
+      clientContext.setAuthCache(authCache);
+    }
+
+    try {
+      return httpclient.execute(targetHost, request, clientContext);
+    } catch (ClientProtocolException e) {
+      e.printStackTrace();
+      Assert.fail("Rest GET should not have thrown ClientProtocolException!");
+    } catch (IOException e) {
+      e.printStackTrace();
+      Assert.fail("Rest GET Request should not have thrown IOException!");
+    }
+    return null;
+  }
+}
\ No newline at end of file
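
A quick usage sketch of the new GeodeRestClient helper, based only on the methods defined above; the port number and credentials are placeholders (the real tests obtain a random port from AvailablePortHelper):

    import static org.junit.Assert.assertEquals;

    import org.apache.http.HttpResponse;
    import org.springframework.http.MediaType;

    public class GeodeRestClientUsageSketch {

      public static void main(String[] args) throws Exception {
        GeodeRestClient client = new GeodeRestClient(8080); // placeholder port

        HttpResponse response = client.doGet("/queries", "dataReader", "1234567");

        // With the GEODE-2020 change, JSON responses now advertise a UTF-8 charset.
        assertEquals(200, GeodeRestClient.getCode(response));
        assertEquals(MediaType.APPLICATION_JSON_UTF8_VALUE, GeodeRestClient.getContentType(response));
      }
    }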

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/fadd92b0/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityIntegrationTest.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityIntegrationTest.java b/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityIntegrationTest.java
index 6e91894..5f66f3b 100644
--- a/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityIntegrationTest.java
+++ b/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityIntegrationTest.java
@@ -19,42 +19,16 @@ package org.apache.geode.rest.internal.web;
 import static org.apache.geode.distributed.ConfigurationProperties.*;
 import static org.junit.Assert.*;
 
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.net.MalformedURLException;
-import java.nio.charset.StandardCharsets;
 import java.util.Properties;
 
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpHost;
 import org.apache.http.HttpResponse;
-import org.apache.http.auth.AuthScope;
-import org.apache.http.auth.UsernamePasswordCredentials;
-import org.apache.http.client.AuthCache;
-import org.apache.http.client.ClientProtocolException;
-import org.apache.http.client.CredentialsProvider;
-import org.apache.http.client.methods.HttpDelete;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.client.methods.HttpHead;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.client.methods.HttpPut;
-import org.apache.http.client.methods.HttpRequestBase;
-import org.apache.http.client.protocol.HttpClientContext;
-import org.apache.http.entity.StringEntity;
-import org.apache.http.impl.auth.BasicScheme;
-import org.apache.http.impl.client.BasicAuthCache;
-import org.apache.http.impl.client.BasicCredentialsProvider;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClients;
 import org.json.JSONArray;
 import org.json.JSONObject;
-import org.json.JSONTokener;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.springframework.http.MediaType;
 
 import org.apache.geode.cache.RegionShortcut;
 import org.apache.geode.internal.AvailablePortHelper;
@@ -69,10 +43,6 @@ public class RestSecurityIntegrationTest {
 
   protected static final String REGION_NAME = "AuthRegion";
 
-  public final static String PROTOCOL = "http";
-  public final static String HOSTNAME = "localhost";
-  public final static String CONTEXT = "/geode/v1";
-
   private static int restPort = AvailablePortHelper.getRandomAvailableTCPPort();
   static Properties properties = new Properties() {{
     setProperty(SampleSecurityManager.SECURITY_JSON, "org/apache/geode/management/internal/security/clientServer.json");
@@ -84,6 +54,7 @@ public class RestSecurityIntegrationTest {
 
   @ClassRule
   public static ServerStarter serverStarter = new ServerStarter(properties);
+  private final GeodeRestClient restClient = new GeodeRestClient(restPort);
 
   @BeforeClass
   public static void before() throws Exception {
@@ -95,95 +66,99 @@ public class RestSecurityIntegrationTest {
   public void testFunctions() throws Exception {
     String json = "{\"@type\":\"double\",\"@value\":210}";
 
-    HttpResponse response = doGet("/functions", "unknown-user", "1234567");
-    assertEquals(401, getCode(response));
-    response = doGet("/functions", "stranger", "1234567");
-    assertEquals(403, getCode(response));
-    response = doGet("/functions", "dataReader", "1234567");
-    assertTrue(isOK(response));
-
-    response = doPost("/functions/AddFreeItemsToOrder", "unknown-user", "1234567", json);
-    assertEquals(401, getCode(response));
-    response = doPost("/functions/AddFreeItemsToOrder", "dataReader", "1234567", json);
-    assertEquals(403, getCode(response));
-    response = doPost("/functions/AddFreeItemsToOrder?onRegion=" + REGION_NAME, "dataWriter", "1234567", json);
+    HttpResponse response = restClient.doGet("/functions", "unknown-user", "1234567");
+    assertEquals(401, restClient.getCode(response));
+    response = restClient.doGet("/functions", "stranger", "1234567");
+    assertEquals(403, restClient.getCode(response));
+    response = restClient.doGet("/functions", "dataReader", "1234567");
+    assertEquals(200, restClient.getCode(response));
+    response.getEntity();
+    assertEquals(MediaType.APPLICATION_JSON_UTF8_VALUE, restClient.getContentType(response));
+
+    response = restClient.doPost("/functions/AddFreeItemsToOrder", "unknown-user", "1234567", json);
+    assertEquals(401, restClient.getCode(response));
+    response = restClient.doPost("/functions/AddFreeItemsToOrder", "dataReader", "1234567", json);
+    assertEquals(403, restClient.getCode(response));
+    response = restClient.doPost("/functions/AddFreeItemsToOrder?onRegion=" + REGION_NAME, "dataWriter", "1234567", json);
     // because we're only testing the security of the endpoint, not the endpoint functionality, a 500 is acceptable
-    assertEquals(500, getCode(response));
+    assertEquals(500, restClient.getCode(response));
   }
 
   @Test
   public void testQueries() throws Exception {
-    HttpResponse response = doGet("/queries", "unknown-user", "1234567");
-    assertEquals(401, getCode(response));
-    response = doGet("/queries", "stranger", "1234567");
-    assertEquals(403, getCode(response));
-    response = doGet("/queries", "dataReader", "1234567");
-    assertEquals(200, getCode(response));
+    HttpResponse response = restClient.doGet("/queries", "unknown-user", "1234567");
+    assertEquals(401, restClient.getCode(response));
+    response = restClient.doGet("/queries", "stranger", "1234567");
+    assertEquals(403, restClient.getCode(response));
+    response = restClient.doGet("/queries", "dataReader", "1234567");
+    assertEquals(200, restClient.getCode(response));
+    assertEquals(MediaType.APPLICATION_JSON_UTF8_VALUE, restClient.getContentType(response));
   }
 
   @Test
   public void testAdhocQuery() throws Exception {
-    HttpResponse response = doGet("/queries/adhoc?q=", "unknown-user", "1234567");
-    assertEquals(401, getCode(response));
-    response = doGet("/queries/adhoc?q=", "stranger", "1234567");
-    assertEquals(403, getCode(response));
-    response = doGet("/queries/adhoc?q=", "dataReader", "1234567");
+    HttpResponse response = restClient.doGet("/queries/adhoc?q=", "unknown-user", "1234567");
+    assertEquals(401, restClient.getCode(response));
+    response = restClient.doGet("/queries/adhoc?q=", "stranger", "1234567");
+    assertEquals(403, restClient.getCode(response));
+    response = restClient.doGet("/queries/adhoc?q=", "dataReader", "1234567");
     // because we're only testing the security of the endpoint, not the endpoint functionality, a 500 is acceptable
-    assertEquals(500, getCode(response));
+    assertEquals(500, restClient.getCode(response));
   }
 
   @Test
   public void testPostQuery() throws Exception {
-    HttpResponse response = doPost("/queries?id=0&q=", "unknown-user", "1234567", "");
-    assertEquals(401, getCode(response));
-    response = doPost("/queries?id=0&q=", "stranger", "1234567", "");
-    assertEquals(403, getCode(response));
-    response = doPost("/queries?id=0&q=", "dataReader", "1234567", "");
+    HttpResponse response = restClient.doPost("/queries?id=0&q=", "unknown-user", "1234567", "");
+    assertEquals(401, restClient.getCode(response));
+    response = restClient.doPost("/queries?id=0&q=", "stranger", "1234567", "");
+    assertEquals(403, restClient.getCode(response));
+    response = restClient.doPost("/queries?id=0&q=", "dataReader", "1234567", "");
     // because we're only testing the security of the endpoint, not the endpoint functionality, a 500 is acceptable
-    assertEquals(500, getCode(response));
+    assertEquals(500, restClient.getCode(response));
   }
 
   @Test
   public void testPostQuery2() throws Exception {
-    HttpResponse response = doPost("/queries/id", "unknown-user", "1234567", "{\"id\" : \"foo\"}");
-    assertEquals(401, getCode(response));
-    response = doPost("/queries/id", "stranger", "1234567", "{\"id\" : \"foo\"}");
-    assertEquals(403, getCode(response));
-    response = doPost("/queries/id", "dataReader", "1234567", "{\"id\" : \"foo\"}");
+    HttpResponse response = restClient.doPost("/queries/id", "unknown-user", "1234567", "{\"id\" : \"foo\"}");
+    assertEquals(401, restClient.getCode(response));
+    response = restClient.doPost("/queries/id", "stranger", "1234567", "{\"id\" : \"foo\"}");
+    assertEquals(403, restClient.getCode(response));
+    response = restClient.doPost("/queries/id", "dataReader", "1234567", "{\"id\" : \"foo\"}");
     // because we're only testing the security of the endpoint, not the endpoint functionality, a 500 is acceptable
-    assertEquals(500, getCode(response));
+    assertEquals(500, restClient.getCode(response));
   }
 
   @Test
   public void testPutQuery() throws Exception {
-    HttpResponse response = doPut("/queries/id", "unknown-user", "1234567", "{\"id\" : \"foo\"}");
-    assertEquals(401, getCode(response));
-    response = doPut("/queries/id", "stranger", "1234567", "{\"id\" : \"foo\"}");
-    assertEquals(403, getCode(response));
-    response = doPut("/queries/id", "dataReader", "1234567", "{\"id\" : \"foo\"}");
+    HttpResponse response = restClient.doPut("/queries/id", "unknown-user", "1234567", "{\"id\" : \"foo\"}");
+    assertEquals(401, restClient.getCode(response));
+    response = restClient.doPut("/queries/id", "stranger", "1234567", "{\"id\" : \"foo\"}");
+    assertEquals(403, restClient.getCode(response));
+    response = restClient.doPut("/queries/id", "dataReader", "1234567", "{\"id\" : \"foo\"}");
     // We should get a 404 because we're trying to update a query that doesn't exist
-    assertEquals(404, getCode(response));
+    assertEquals(404, restClient.getCode(response));
   }
 
   @Test
   public void testDeleteQuery() throws Exception {
-    HttpResponse response = doDelete("/queries/id", "unknown-user", "1234567");
-    assertEquals(401, getCode(response));
-    response = doDelete("/queries/id", "stranger", "1234567");
-    assertEquals(403, getCode(response));
-    response = doDelete("/queries/id", "dataWriter", "1234567");
+    HttpResponse response = restClient.doDelete("/queries/id", "unknown-user", "1234567");
+    assertEquals(401, restClient.getCode(response));
+    response = restClient.doDelete("/queries/id", "stranger", "1234567");
+    assertEquals(403, restClient.getCode(response));
+    response = restClient.doDelete("/queries/id", "dataWriter", "1234567");
     // We should get a 404 because we're trying to delete a query that doesn't exist
-    assertEquals(404, getCode(response));
+    assertEquals(404, restClient.getCode(response));
   }
 
   @Test
   public void testServers() throws Exception {
-    HttpResponse response = doGet("/servers", "unknown-user", "1234567");
-    assertEquals(401, getCode(response));
-    response = doGet("/servers", "stranger", "1234567");
-    assertEquals(403, getCode(response));
-    response = doGet("/servers", "super-user", "1234567");
-    assertTrue(isOK(response));
+    HttpResponse response = restClient.doGet("/servers", "unknown-user", "1234567");
+    assertEquals(401, restClient.getCode(response));
+    response = restClient.doGet("/servers", "stranger", "1234567");
+    assertEquals(403, restClient.getCode(response));
+    response = restClient.doGet("/servers", "super-user", "1234567");
+    assertEquals(200, restClient.getCode(response));
+    assertEquals(MediaType.APPLICATION_JSON_UTF8_VALUE, restClient.getContentType(response));
   }
 
   /**
@@ -192,27 +167,15 @@ public class RestSecurityIntegrationTest {
    */
   @Test
   public void testPing() throws Exception {
-    HttpResponse response = doHEAD("/ping", "stranger", "1234567");
-    assertTrue(isOK(response));
-    response = doGet("/ping", "stranger", "1234567");
-    assertTrue(isOK(response));
-
-    response = doHEAD("/ping", "super-user", "1234567");
-    assertTrue(isOK(response));
-    response = doGet("/ping", "super-user", "1234567");
-    assertTrue(isOK(response));
-
-    // TODO - invalid username/password should still respond, but doesn't
-    //      response = doHEAD("/ping", "unknown-user", "badpassword");
-    //      assertTrue(isOK(response));
-    //      response = doGet("/ping", "unknown-user", "badpassword");
-    //      assertTrue(isOK(response));
-
-    // TODO - credentials are currently required and shouldn't be for this endpoint
-    //      response = doHEAD("/ping", null, null);
-    //      assertTrue(isOK(response));
-    //      response = doGet("/ping", null, null);
-    //      assertTrue(isOK(response));
+    HttpResponse response = restClient.doHEAD("/ping", "stranger", "1234567");
+    assertEquals(200, restClient.getCode(response));
+    response = restClient.doGet("/ping", "stranger", "1234567");
+    assertEquals(200, restClient.getCode(response));
+
+    response = restClient.doHEAD("/ping", "super-user", "1234567");
+    assertEquals(200, restClient.getCode(response));
+    response = restClient.doGet("/ping", "super-user", "1234567");
+    assertEquals(200, restClient.getCode(response));
   }
 
   /**
@@ -220,11 +183,11 @@ public class RestSecurityIntegrationTest {
    */
   @Test
   public void getRegions() throws Exception {
-    HttpResponse response = doGet("", "dataReader", "1234567");
-    assertEquals("A '200 - OK' was expected", 200, getCode(response));
+    HttpResponse response = restClient.doGet("", "dataReader", "1234567");
+    assertEquals("A '200 - OK' was expected", 200, restClient.getCode(response));
+    assertEquals(MediaType.APPLICATION_JSON_UTF8_VALUE, restClient.getContentType(response));
 
-    assertTrue(isOK(response));
-    JSONObject jsonObject = new JSONObject(getResponseBody(response));
+    JSONObject jsonObject = new JSONObject(restClient.getResponseBody(response));
     JSONArray regions = jsonObject.getJSONArray("regions");
     assertNotNull(regions);
     assertTrue(regions.length() > 0);
@@ -233,12 +196,12 @@ public class RestSecurityIntegrationTest {
     assertEquals("REPLICATE", region.get("type"));
 
     // List regions with an unknown user - 401
-    response = doGet("", "unknown-user", "badpassword");
-    assertEquals(401, getCode(response));
+    response = restClient.doGet("", "unknown-user", "badpassword");
+    assertEquals(401, restClient.getCode(response));
 
     // list regions with insufficient rights - 403
-    response = doGet("", "authRegionReader", "1234567");
-    assertEquals(403, getCode(response));
+    response = restClient.doGet("", "authRegionReader", "1234567");
+    assertEquals(403, restClient.getCode(response));
   }
 
   /**
@@ -247,16 +210,17 @@ public class RestSecurityIntegrationTest {
   @Test
   public void getRegion() throws Exception {
     // Test an unknown user - 401 error
-    HttpResponse response = doGet("/" + REGION_NAME, "unknown-user", "1234567");
-    assertEquals(401, getCode(response));
+    HttpResponse response = restClient.doGet("/" + REGION_NAME, "unknown-user", "1234567");
+    assertEquals(401, restClient.getCode(response));
 
     // Test a user with insufficient rights - 403
-    response = doGet("/" + REGION_NAME, "stranger", "1234567");
-    assertEquals(403, getCode(response));
+    response = restClient.doGet("/" + REGION_NAME, "stranger", "1234567");
+    assertEquals(403, restClient.getCode(response));
 
     // Test an authorized user - 200
-    response = doGet("/" + REGION_NAME, "super-user", "1234567");
-    assertTrue(isOK(response));
+    response = restClient.doGet("/" + REGION_NAME, "super-user", "1234567");
+    assertEquals(200, restClient.getCode(response));
+    assertEquals(MediaType.APPLICATION_JSON_UTF8_VALUE, restClient.getContentType(response));
   }
 
   /**
@@ -265,16 +229,16 @@ public class RestSecurityIntegrationTest {
   @Test
   public void headRegion() throws Exception {
     // Test an unknown user - 401 error
-    HttpResponse response = doHEAD("/" + REGION_NAME, "unknown-user", "1234567");
-    assertEquals(401, getCode(response));
+    HttpResponse response = restClient.doHEAD("/" + REGION_NAME, "unknown-user", "1234567");
+    assertEquals(401, restClient.getCode(response));
 
     // Test a user with insufficient rights - 403
-    response = doHEAD("/" + REGION_NAME, "stranger", "1234567");
-    assertEquals(403, getCode(response));
+    response = restClient.doHEAD("/" + REGION_NAME, "stranger", "1234567");
+    assertEquals(403, restClient.getCode(response));
 
     // Test an authorized user - 200
-    response = doHEAD("/" + REGION_NAME, "super-user", "1234567");
-    assertTrue(isOK(response));
+    response = restClient.doHEAD("/" + REGION_NAME, "super-user", "1234567");
+    assertEquals(200, restClient.getCode(response));
   }
 
   /**
@@ -283,12 +247,12 @@ public class RestSecurityIntegrationTest {
   @Test
   public void deleteRegion() throws Exception {
     // Test an unknown user - 401 error
-    HttpResponse response = doDelete("/" + REGION_NAME, "unknown-user", "1234567");
-    assertEquals(401, getCode(response));
+    HttpResponse response = restClient.doDelete("/" + REGION_NAME, "unknown-user", "1234567");
+    assertEquals(401, restClient.getCode(response));
 
     // Test a user with insufficient rights - 403
-    response = doDelete("/" + REGION_NAME, "dataReader", "1234567");
-    assertEquals(403, getCode(response));
+    response = restClient.doDelete("/" + REGION_NAME, "dataReader", "1234567");
+    assertEquals(403, restClient.getCode(response));
   }
 
   /**
@@ -297,11 +261,12 @@ public class RestSecurityIntegrationTest {
   @Test
   public void getRegionKeys() throws Exception {
     // Test an authorized user
-    HttpResponse response = doGet("/" + REGION_NAME + "/keys", "super-user", "1234567");
-    assertTrue(isOK(response));
+    HttpResponse response = restClient.doGet("/" + REGION_NAME + "/keys", "super-user", "1234567");
+    assertEquals(200, restClient.getCode(response));
+    assertEquals(MediaType.APPLICATION_JSON_UTF8_VALUE, restClient.getContentType(response));
     // Test an unauthorized user
-    response = doGet("/" + REGION_NAME + "/keys", "dataWriter", "1234567");
-    assertEquals(403, getCode(response));
+    response = restClient.doGet("/" + REGION_NAME + "/keys", "dataWriter", "1234567");
+    assertEquals(403, restClient.getCode(response));
   }
 
   /**
@@ -310,11 +275,13 @@ public class RestSecurityIntegrationTest {
   @Test
   public void getRegionKey() throws Exception {
     // Test an authorized user
-    HttpResponse response = doGet("/" + REGION_NAME + "/key1", "key1User", "1234567");
-    assertTrue(isOK(response));
+    HttpResponse response = restClient.doGet("/" + REGION_NAME + "/key1", "key1User", "1234567");
+    assertEquals(200, restClient.getCode(response));
+    assertEquals(MediaType.APPLICATION_JSON_UTF8_VALUE, restClient.getContentType(response));
+
     // Test an unauthorized user
-    response = doGet("/" + REGION_NAME + "/key1", "dataWriter", "1234567");
-    assertEquals(403, getCode(response));
+    response = restClient.doGet("/" + REGION_NAME + "/key1", "dataWriter", "1234567");
+    assertEquals(403, restClient.getCode(response));
   }
 
   /**
@@ -323,16 +290,16 @@ public class RestSecurityIntegrationTest {
   @Test
   public void deleteRegionKey() throws Exception {
     // Test an unknown user - 401 error
-    HttpResponse response = doDelete("/" + REGION_NAME + "/key1", "unknown-user", "1234567");
-    assertEquals(401, getCode(response));
+    HttpResponse response = restClient.doDelete("/" + REGION_NAME + "/key1", "unknown-user", "1234567");
+    assertEquals(401, restClient.getCode(response));
 
     // Test a user with insufficient rights - 403
-    response = doDelete("/" + REGION_NAME + "/key1", "dataReader", "1234567");
-    assertEquals(403, getCode(response));
+    response = restClient.doDelete("/" + REGION_NAME + "/key1", "dataReader", "1234567");
+    assertEquals(403, restClient.getCode(response));
 
     // Test an authorized user - 200
-    response = doDelete("/" + REGION_NAME + "/key1", "key1User", "1234567");
-    assertTrue(isOK(response));
+    response = restClient.doDelete("/" + REGION_NAME + "/key1", "key1User", "1234567");
+    assertEquals(200, restClient.getCode(response));
   }
 
   /**
@@ -341,17 +308,16 @@ public class RestSecurityIntegrationTest {
   @Test
   public void postRegionKey() throws Exception {
     // Test an unknown user - 401 error
-    HttpResponse response = doPost("/" + REGION_NAME + "?key9", "unknown", "1234567", "{ \"key9\" : \"foo\" }");
-    assertEquals(401, getCode(response));
+    HttpResponse response = restClient.doPost("/" + REGION_NAME + "?key9", "unknown", "1234567", "{ \"key9\" : \"foo\" }");
+    assertEquals(401, restClient.getCode(response));
 
     // Test a user with insufficient rights - 403
-    response = doPost("/" + REGION_NAME + "?key9", "dataReader", "1234567", "{ \"key9\" : \"foo\" }");
-    assertEquals(403, getCode(response));
+    response = restClient.doPost("/" + REGION_NAME + "?key9", "dataReader", "1234567", "{ \"key9\" : \"foo\" }");
+    assertEquals(403, restClient.getCode(response));
 
     // Test an authorized user - 200
-    response = doPost("/" + REGION_NAME + "?key9", "dataWriter", "1234567", "{ \"key9\" : \"foo\" }");
-    assertEquals(201, getCode(response));
-    assertTrue(isOK(response));
+    response = restClient.doPost("/" + REGION_NAME + "?key9", "dataWriter", "1234567", "{ \"key9\" : \"foo\" }");
+    assertEquals(201, restClient.getCode(response));
   }
 
   /**
@@ -363,135 +329,27 @@ public class RestSecurityIntegrationTest {
     String json = "{\"@type\":\"com.gemstone.gemfire.web.rest.domain.Order\",\"purchaseOrderNo\":1121,\"customerId\":1012,\"description\":\"Order for  XYZ Corp\",\"orderDate\":\"02/10/2014\",\"deliveryDate\":\"02/20/2014\",\"contact\":\"Jelly Bean\",\"email\":\"jelly.bean@example.com\",\"phone\":\"01-2048096\",\"items\":[{\"itemNo\":1,\"description\":\"Product-100\",\"quantity\":12,\"unitPrice\":5,\"totalPrice\":60}],\"totalPrice\":225}";
     String casJSON = "{\"@old\":{\"@type\":\"com.gemstone.gemfire.web.rest.domain.Order\",\"purchaseOrderNo\":1121,\"customerId\":1012,\"description\":\"Order for  XYZ Corp\",\"orderDate\":\"02/10/2014\",\"deliveryDate\":\"02/20/2014\",\"contact\":\"Jelly Bean\",\"email\":\"jelly.bean@example.com\",\"phone\":\"01-2048096\",\"items\":[{\"itemNo\":1,\"description\":\"Product-100\",\"quantity\":12,\"unitPrice\":5,\"totalPrice\":60}],\"totalPrice\":225},\"@new \":{\"@type\":\"com.gemstone.gemfire.web.rest.domain.Order\",\"purchaseOrderNo\":1121,\"customerId\":1013,\"description\":\"Order for  New Corp\",\"orderDate\":\"02/10/2014\",\"deliveryDate\":\"02/25/2014\",\"contact\":\"Vanilla Bean\",\"email\":\"vanillabean@example.com\",\"phone\":\"01-2048096\",\"items\":[{\"itemNo\":12345,\"description\":\"part 123\",\"quantity\":12,\"unitPrice\":29.99,\"totalPrice\":149.95}],\"totalPrice\":149.95}}";
     // Test an unknown user - 401 error
-    HttpResponse response = doPut("/" + REGION_NAME + "/key1?op=PUT", "unknown-user", "1234567", "{ \"key9\" : \"foo\" }");
-    assertEquals(401, getCode(response));
-
-    response = doPut("/" + REGION_NAME + "/key1?op=CAS", "unknown-user", "1234567", "{ \"key9\" : \"foo\" }");
-    assertEquals(401, getCode(response));
-    response = doPut("/" + REGION_NAME + "/key1?op=REPLACE", "unknown-user", "1234567", "{ \"@old\" : \"value1\", \"@new\" : \"CASvalue\" }");
-    assertEquals(401, getCode(response));
-
-    response = doPut("/" + REGION_NAME + "/key1?op=PUT", "dataReader", "1234567", "{ \"key1\" : \"foo\" }");
-    assertEquals(403, getCode(response));
-
-    response = doPut("/" + REGION_NAME + "/key1?op=REPLACE", "dataReader", "1234567", "{ \"key1\" : \"foo\" }");
-    assertEquals(403, getCode(response));
-
-    response = doPut("/" + REGION_NAME + "/key1?op=CAS", "dataReader", "1234567", casJSON);
-    assertEquals(403, getCode(response));
-
-    response = doPut("/" + REGION_NAME + "/key1?op=PUT", "key1User", "1234567", "{ \"key1\" : \"foo\" }");
-    assertEquals(200, getCode(response));
-    assertTrue(isOK(response));
-
-    response = doPut("/" + REGION_NAME + "/key1?op=REPLACE", "key1User", "1234567", json);
-    assertEquals(200, getCode(response));
-    assertTrue(isOK(response));
-  }
-
-  protected HttpResponse doHEAD(String query, String username, String password) throws MalformedURLException {
-    HttpHead httpHead = new HttpHead(CONTEXT + query);
-    return doRequest(httpHead, username, password);
-  }
-
-
-  protected HttpResponse doPost(String query, String username, String password, String body) throws MalformedURLException {
-    HttpPost httpPost = new HttpPost(CONTEXT + query);
-    httpPost.addHeader("content-type", "application/json");
-    httpPost.setEntity(new StringEntity(body, StandardCharsets.UTF_8));
-    return doRequest(httpPost, username, password);
-  }
-
+    HttpResponse response = restClient.doPut("/" + REGION_NAME + "/key1?op=PUT", "unknown-user", "1234567", "{ \"key9\" : \"foo\" }");
+    assertEquals(401, restClient.getCode(response));
 
-  protected HttpResponse doPut(String query, String username, String password, String body) throws MalformedURLException {
-    HttpPut httpPut = new HttpPut(CONTEXT + query);
-    httpPut.addHeader("content-type", "application/json");
-    httpPut.setEntity(new StringEntity(body, StandardCharsets.UTF_8));
-    return doRequest(httpPut, username, password);
-  }
+    response = restClient.doPut("/" + REGION_NAME + "/key1?op=CAS", "unknown-user", "1234567", "{ \"key9\" : \"foo\" }");
+    assertEquals(401, restClient.getCode(response));
+    response = restClient.doPut("/" + REGION_NAME + "/key1?op=REPLACE", "unknown-user", "1234567", "{ \"@old\" : \"value1\", \"@new\" : \"CASvalue\" }");
+    assertEquals(401, restClient.getCode(response));
 
-  protected HttpResponse doGet(String uri, String username, String password) throws MalformedURLException {
-    HttpGet getRequest = new HttpGet(CONTEXT + uri);
-    return doRequest(getRequest, username, password);
-  }
+    response = restClient.doPut("/" + REGION_NAME + "/key1?op=PUT", "dataReader", "1234567", "{ \"key1\" : \"foo\" }");
+    assertEquals(403, restClient.getCode(response));
 
-  protected HttpResponse doDelete(String uri, String username, String password) throws MalformedURLException {
-    HttpDelete httpDelete = new HttpDelete(CONTEXT + uri);
-    return doRequest(httpDelete, username, password);
-  }
+    response = restClient.doPut("/" + REGION_NAME + "/key1?op=REPLACE", "dataReader", "1234567", "{ \"key1\" : \"foo\" }");
+    assertEquals(403, restClient.getCode(response));
 
-  /**
-   * Check the HTTP status of the response and return if it's within the OK range
-   *
-   * @param response The HttpResponse message received from the server
-   *
-   * @return true if the status code is a 2XX-type code (200-299), otherwise false
-   */
-  protected boolean isOK(HttpResponse response) {
-    int returnCode = response.getStatusLine().getStatusCode();
-    return (returnCode < 300 && returnCode >= 200);
-  }
+    response = restClient.doPut("/" + REGION_NAME + "/key1?op=CAS", "dataReader", "1234567", casJSON);
+    assertEquals(403, restClient.getCode(response));
 
-  /**
-   * Check the HTTP status of the response and return true if a 401
-   *
-   * @param response The HttpResponse message received from the server
-   *
-   * @return true if the status code is 401, otherwise false
-   */
-  protected boolean isUnauthorized(HttpResponse response) {
-    int returnCode = response.getStatusLine().getStatusCode();
-    return returnCode == 401;
-  }
-
-  /**
-   * Retrieve the status code of the HttpResponse
-   *
-   * @param response The HttpResponse message received from the server
-   *
-   * @return a numeric value
-   */
-  protected int getCode(HttpResponse response) {
-    return response.getStatusLine().getStatusCode();
-  }
-
-  protected JSONTokener getResponseBody(HttpResponse response) throws IOException {
-    HttpEntity entity = response.getEntity();
-    InputStream content = entity.getContent();
-    BufferedReader reader = new BufferedReader(new InputStreamReader(content));
-    String line;
-    StringBuilder str = new StringBuilder();
-    while ((line = reader.readLine()) != null) {
-      str.append(line);
-    }
-    return new JSONTokener(str.toString());
-  }
+    response = restClient.doPut("/" + REGION_NAME + "/key1?op=PUT", "key1User", "1234567", "{ \"key1\" : \"foo\" }");
+    assertEquals(200, restClient.getCode(response));
 
-  private HttpResponse doRequest(HttpRequestBase request, String username, String password) throws MalformedURLException {
-    HttpHost targetHost = new HttpHost(HOSTNAME, this.restPort, PROTOCOL);
-    CloseableHttpClient httpclient = HttpClients.custom().build();
-    HttpClientContext clientContext = HttpClientContext.create();
-    // if username is null, do not put in authentication
-    if (username != null) {
-      CredentialsProvider credsProvider = new BasicCredentialsProvider();
-      credsProvider.setCredentials(new AuthScope(targetHost.getHostName(), targetHost.getPort()), new UsernamePasswordCredentials(username, password));
-      httpclient = HttpClients.custom().setDefaultCredentialsProvider(credsProvider).build();
-      AuthCache authCache = new BasicAuthCache();
-      BasicScheme basicAuth = new BasicScheme();
-      authCache.put(targetHost, basicAuth);
-      clientContext.setCredentialsProvider(credsProvider);
-      clientContext.setAuthCache(authCache);
-    }
-
-    try {
-      return httpclient.execute(targetHost, request, clientContext);
-    } catch (ClientProtocolException e) {
-      e.printStackTrace();
-      fail("Rest GET should not have thrown ClientProtocolException!");
-    } catch (IOException e) {
-      e.printStackTrace();
-      fail("Rest GET Request should not have thrown IOException!");
-    }
-    return null;
+    response = restClient.doPut("/" + REGION_NAME + "/key1?op=REPLACE", "key1User", "1234567", json);
+    assertEquals(200, restClient.getCode(response));
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/fadd92b0/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java
----------------------------------------------------------------------
diff --git a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java
index 30c8b3a..935b3ad 100644
--- a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java
+++ b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java
@@ -63,7 +63,7 @@ public abstract class CommonCrudController extends AbstractBaseController {
    *
    * @return JSON document containing result
    */
-  @RequestMapping(method = RequestMethod.GET, produces = { MediaType.APPLICATION_JSON_VALUE, MediaType.APPLICATION_JSON_VALUE })
+  @RequestMapping(method = RequestMethod.GET, produces = { MediaType.APPLICATION_JSON_UTF8_VALUE})
   @ApiOperation(
     value = "list all resources (Regions)",
     notes = "List all available resources (Regions) in the GemFire cluster",
@@ -92,7 +92,7 @@ public abstract class CommonCrudController extends AbstractBaseController {
    * @return JSON document containing result
    */
   @RequestMapping(method = RequestMethod.GET, value = "/{region}/keys",
-                  produces = { MediaType.APPLICATION_JSON_VALUE } )
+                  produces = { MediaType.APPLICATION_JSON_UTF8_VALUE } )
   @ApiOperation(
     value = "list all keys",
     notes = "List all keys in region",
@@ -198,7 +198,7 @@ public abstract class CommonCrudController extends AbstractBaseController {
     return new ResponseEntity<>(HttpStatus.OK);
   }
   
-  @RequestMapping(method = { RequestMethod.GET }, value = "/servers")
+  @RequestMapping(method = { RequestMethod.GET }, value = "/servers", produces = { MediaType.APPLICATION_JSON_UTF8_VALUE } )
   @ApiOperation(
     value = "fetch all REST enabled servers in the DS",
     notes = "Find all gemfire node where developer REST service is up and running!",

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/fadd92b0/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/FunctionAccessController.java
----------------------------------------------------------------------
diff --git a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/FunctionAccessController.java b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/FunctionAccessController.java
index e1ea1ad..831083e 100644
--- a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/FunctionAccessController.java
+++ b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/FunctionAccessController.java
@@ -86,7 +86,7 @@ public class FunctionAccessController extends AbstractBaseController {
    *
    * @return result as a JSON document.
    */
-  @RequestMapping(method = RequestMethod.GET, produces = { MediaType.APPLICATION_JSON_VALUE })
+  @RequestMapping(method = RequestMethod.GET, produces = { MediaType.APPLICATION_JSON_UTF8_VALUE })
   @ApiOperation(
       value = "list all functions",
       notes = "list all functions available in the GemFire cluster",

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/fadd92b0/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/PdxBasedCrudController.java
----------------------------------------------------------------------
diff --git a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/PdxBasedCrudController.java b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/PdxBasedCrudController.java
index ebb8ccc..32de04e 100644
--- a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/PdxBasedCrudController.java
+++ b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/PdxBasedCrudController.java
@@ -134,7 +134,7 @@ public class PdxBasedCrudController extends CommonCrudController {
    * @param limit total number of entries requested
    * @return JSON document
    */
-  @RequestMapping(method = RequestMethod.GET, value = "/{region}", produces = MediaType.APPLICATION_JSON_VALUE)
+  @RequestMapping(method = RequestMethod.GET, value = "/{region}", produces = MediaType.APPLICATION_JSON_UTF8_VALUE)
   @ApiOperation(
     value = "read all data for region",
     notes = "Read all data for region. Use limit param to get fixed or limited number of entries.",
@@ -213,7 +213,7 @@ public class PdxBasedCrudController extends CommonCrudController {
    * @return JSON document
    */
   @RequestMapping(method = RequestMethod.GET, value = "/{region}/{keys}",
-                  produces = MediaType.APPLICATION_JSON_VALUE)
+                  produces = MediaType.APPLICATION_JSON_UTF8_VALUE)
   @ApiOperation(
     value = "read data for specific keys",
     notes = "Read data for specific set of keys in region.",

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/fadd92b0/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/QueryAccessController.java
----------------------------------------------------------------------
diff --git a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/QueryAccessController.java b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/QueryAccessController.java
index d13c99c..e5287b9 100644
--- a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/QueryAccessController.java
+++ b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/QueryAccessController.java
@@ -91,7 +91,7 @@ public class QueryAccessController extends AbstractBaseController {
    * list all parametrized Queries created in a Gemfire data node
    * @return result as a JSON document.
    */
-  @RequestMapping(method = RequestMethod.GET,  produces = { MediaType.APPLICATION_JSON_VALUE })
+  @RequestMapping(method = RequestMethod.GET,  produces = { MediaType.APPLICATION_JSON_UTF8_VALUE })
   @ApiOperation(
     value = "list all parametrized queries",
     notes = "List all parametrized queries by id/name",
@@ -165,7 +165,7 @@ public class QueryAccessController extends AbstractBaseController {
    * @param oql OQL query string to be executed
    * @return query result as a JSON document
    */
-  @RequestMapping(method = RequestMethod.GET, value = "/adhoc", produces = { MediaType.APPLICATION_JSON_VALUE })
+  @RequestMapping(method = RequestMethod.GET, value = "/adhoc", produces = { MediaType.APPLICATION_JSON_UTF8_VALUE })
   @ApiOperation(
     value = "run an adhoc query",
     notes = "Run an unnamed (unidentified), ad-hoc query passed as a URL parameter",



[43/50] [abbrv] incubator-geode git commit: Convert from ManagementTestCase to ManagementTestRule

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/management/bean/stats/DistributedSystemStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/bean/stats/DistributedSystemStatsDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/bean/stats/DistributedSystemStatsDUnitTest.java
index 2330031..446ea2e 100644
--- a/geode-core/src/test/java/org/apache/geode/management/bean/stats/DistributedSystemStatsDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/bean/stats/DistributedSystemStatsDUnitTest.java
@@ -16,98 +16,66 @@
  */
 package org.apache.geode.management.bean.stats;
 
-import org.junit.experimental.categories.Category;
-import org.junit.Test;
-
+import static com.jayway.awaitility.Awaitility.*;
 import static org.junit.Assert.*;
 
-import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
-import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
-import org.apache.geode.test.junit.categories.DistributedTest;
-
+import java.lang.management.ManagementFactory;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 
 import javax.management.ObjectName;
 
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
 import org.apache.geode.distributed.DistributedMember;
-import org.apache.geode.internal.cache.DiskStoreStats;
 import org.apache.geode.internal.cache.GemFireCacheImpl;
 import org.apache.geode.management.DistributedSystemMXBean;
-import org.apache.geode.management.ManagementTestBase;
+import org.apache.geode.management.ManagementService;
+import org.apache.geode.management.ManagementTestRule;
+import org.apache.geode.management.Manager;
+import org.apache.geode.management.Member;
 import org.apache.geode.management.MemberMXBean;
 import org.apache.geode.management.internal.SystemManagementService;
-import org.apache.geode.management.internal.beans.MemberMBean;
-import org.apache.geode.management.internal.beans.MemberMBeanBridge;
-import org.apache.geode.test.dunit.Assert;
-import org.apache.geode.test.dunit.SerializableRunnable;
 import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.junit.categories.DistributedTest;
 
-/**
- */
 @Category(DistributedTest.class)
-public class DistributedSystemStatsDUnitTest extends ManagementTestBase{
-  
-  private static final long serialVersionUID = 1L;
+@SuppressWarnings("serial")
+public class DistributedSystemStatsDUnitTest {
 
-  public DistributedSystemStatsDUnitTest() {
-    super();
-  }
+  @Manager
+  private VM manager;
+
+  @Member
+  private VM[] members;
+
+  @Rule
+  public ManagementTestRule managementTestRule = ManagementTestRule.builder().build();
 
   @Test
   public void testDistributedSystemStats() throws Exception {
-    initManagement(true);
+    this.manager.invoke("verifyMBeans", () -> {
+      GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
+      assertNotNull(cache);
 
-    for(VM vm : managedNodeList){
-      setDiskStats(vm);
-    }
-    verifyDiskStats(managingNode);
-  }
-  
-  @SuppressWarnings("serial")
-  public void setDiskStats(VM vm1) throws Exception {
-    vm1.invoke(new SerializableRunnable("Set Member Stats") {
-      public void run() {
-        MemberMBean bean = (MemberMBean) managementService.getMemberMXBean();
-        MemberMBeanBridge bridge = bean.getBridge();
-        DiskStoreStats diskStoreStats = new DiskStoreStats(basicGetSystem(), "test");
-        bridge.addDiskStoreStats(diskStoreStats);
-        diskStoreStats.startRead();
-        diskStoreStats.startWrite();
-        diskStoreStats.startBackup();
-        diskStoreStats.startRecovery();
-        diskStoreStats.incWrittenBytes(20, true);
-        diskStoreStats.startFlush();
-        diskStoreStats.setQueueSize(10);
-      }
-    });
-  }
+      SystemManagementService service = (SystemManagementService) ManagementService.getManagementService(cache);
+      DistributedSystemMXBean distributedSystemMXBean = service.getDistributedSystemMXBean();
+      assertNotNull(distributedSystemMXBean);
+
+      Set<DistributedMember> otherMemberSet = cache.getDistributionManager().getOtherNormalDistributionManagerIds();
+      assertEquals(3, otherMemberSet.size());
+
+      for (DistributedMember member : otherMemberSet) {
+        ObjectName memberMXBeanName = service.getMemberMBeanName(member);
+        await().atMost(2, TimeUnit.MINUTES).until(() -> assertTrue(ManagementFactory.getPlatformMBeanServer().isRegistered(memberMXBeanName)));
 
-  @SuppressWarnings("serial")
-  public void verifyDiskStats(VM vm1) throws Exception {
-    vm1.invoke(new SerializableRunnable("Set Member Stats") {
-      public void run() {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        
-        SystemManagementService service = (SystemManagementService) getManagementService();
-        DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
-        assertNotNull(bean);
-        Set<DistributedMember> otherMemberSet = cache.getDistributionManager()
-            .getOtherNormalDistributionManagerIds();
-         
-        for (DistributedMember member : otherMemberSet) {
-          ObjectName memberMBeanName;
-          try {
-            memberMBeanName = service.getMemberMBeanName(member);
-            waitForProxy(memberMBeanName, MemberMXBean.class);
-            MemberMXBean memberBean = service.getMBeanProxy(memberMBeanName, MemberMXBean.class);
-            waitForRefresh(2, memberMBeanName);
-          } catch (NullPointerException e) {
-            Assert.fail("FAILED WITH EXCEPION", e);
-          } catch (Exception e) {
-            Assert.fail("FAILED WITH EXCEPION", e);
-          }
-        }
+        MemberMXBean memberMXBean = service.getMBeanProxy(memberMXBeanName, MemberMXBean.class);
+        assertNotNull(memberMXBean);
 
+        final long lastRefreshTime = service.getLastUpdateTime(memberMXBeanName);
+        await().atMost(1, TimeUnit.MINUTES).until(() -> assertTrue(service.getLastUpdateTime(memberMXBeanName) > lastRefreshTime));
       }
     });
   }
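
The converted test above replaces ManagementTestBase's waitForProxy/waitForRefresh helpers with Awaitility polling. The sketch below shows the same await().atMost(...).until(...) shape in isolation; the class, flag, and timing are invented stand-ins for the asynchronous MBean registration being waited on in the real test.

    import static com.jayway.awaitility.Awaitility.await;
    import static org.junit.Assert.assertTrue;

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicBoolean;

    import org.junit.Test;

    public class AwaitilityPatternSketch {

      @Test
      public void waitsForAnAsynchronousCondition() {
        AtomicBoolean registered = new AtomicBoolean(false);

        // Stand-in for work that completes asynchronously, such as MBean registration.
        new Thread(() -> {
          try {
            Thread.sleep(500);
          } catch (InterruptedException ignored) {
          }
          registered.set(true);
        }).start();

        // Poll until the assertion passes, or fail once the timeout elapses.
        await().atMost(30, TimeUnit.SECONDS).until(() -> assertTrue(registered.get()));
      }
    }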

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/management/internal/beans/QueryDataFunctionApplyLimitClauseTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/beans/QueryDataFunctionApplyLimitClauseTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/beans/QueryDataFunctionApplyLimitClauseTest.java
index 7270a2b..1554cd6 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/beans/QueryDataFunctionApplyLimitClauseTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/beans/QueryDataFunctionApplyLimitClauseTest.java
@@ -26,7 +26,6 @@ import org.junit.experimental.categories.Category;
 
 import org.apache.geode.test.junit.categories.UnitTest;
 
-
 @Category(UnitTest.class)
 public class QueryDataFunctionApplyLimitClauseTest {
 
@@ -47,8 +46,7 @@ public class QueryDataFunctionApplyLimitClauseTest {
   public void applyLimitClauseDoesNothingIfLimitClauseSpecified() {
     String limitClause = " LIMIT 50";
     String selectQueryWithLimit = selectQuery + limitClause;
-    assertThat(QueryDataFunction.applyLimitClause(selectQueryWithLimit, limit_10, queryResultSetLimit_100))
-      .isEqualTo(selectQueryWithLimit);
+    assertThat(QueryDataFunction.applyLimitClause(selectQueryWithLimit, limit_10, queryResultSetLimit_100)).isEqualTo(selectQueryWithLimit);
   }
 
   @Test
@@ -64,15 +62,13 @@ public class QueryDataFunctionApplyLimitClauseTest {
   @Test // GEODE-1907
   public void applyLimitClauseAddsQueryResultSetLimitIfMissingSpaceAfterFrom() {
     String selectQueryMissingSpaceAfterFrom = "SELECT * FROM/MyRegion";
-    assertThat(QueryDataFunction.applyLimitClause(selectQueryMissingSpaceAfterFrom, limit_0, queryResultSetLimit_100))
-      .isEqualTo(selectQueryMissingSpaceAfterFrom + " LIMIT " + queryResultSetLimit_100);
+    assertThat(QueryDataFunction.applyLimitClause(selectQueryMissingSpaceAfterFrom, limit_0, queryResultSetLimit_100)).isEqualTo(selectQueryMissingSpaceAfterFrom + " LIMIT " + queryResultSetLimit_100);
   }
 
   @Test
   public void applyLimitClauseDoesNotAddQueryResultSetLimitIfMissingSpaceAfterFromButLimitIsPresent() {
     String selectQueryMissingSpaceAfterFromWithLimit = "SELECT * FROM/MyRegion LIMIT " + limit_10;
-    assertThat(QueryDataFunction.applyLimitClause(selectQueryMissingSpaceAfterFromWithLimit, limit_0, queryResultSetLimit_100))
-      .isEqualTo(selectQueryMissingSpaceAfterFromWithLimit);
+    assertThat(QueryDataFunction.applyLimitClause(selectQueryMissingSpaceAfterFromWithLimit, limit_0, queryResultSetLimit_100)).isEqualTo(selectQueryMissingSpaceAfterFromWithLimit);
   }
 
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/management/internal/pulse/TestClientIdsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/pulse/TestClientIdsDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/pulse/TestClientIdsDUnitTest.java
index 16f8f82..a93485d 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/pulse/TestClientIdsDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/pulse/TestClientIdsDUnitTest.java
@@ -58,67 +58,49 @@ import org.apache.geode.test.junit.categories.DistributedTest;
  * This is for testing client IDs
  */
 @Category(DistributedTest.class)
-public class TestClientIdsDUnitTest extends JUnit4DistributedTestCase {
+@SuppressWarnings("serial")
+public class TestClientIdsDUnitTest extends ManagementTestBase {
 
   private static final String k1 = "k1";
   private static final String k2 = "k2";
-
   private static final String client_k1 = "client-k1";
-
   private static final String client_k2 = "client-k2";
-
-  /** name of the test region */
   private static final String REGION_NAME = "ClientHealthStatsDUnitTest_Region";
 
   private static VM server = null;
-
   private static VM client = null;
-
   private static VM client2 = null;
 
-  private static VM managingNode = null;
-
-  private ManagementTestBase helper;
-
-  @Override
-  public final void preSetUp() throws Exception {
-    this.helper = new ManagementTestBase(){};
-  }
-
   @Override
-  public final void postSetUp() throws Exception {
-    final Host host = Host.getHost(0);
-    managingNode = host.getVM(0);
-    server = host.getVM(1);
-    client = host.getVM(2);
-    client2 = host.getVM(3);
+  public final void postSetUpManagementTestBase() throws Exception {
+    server = Host.getHost(0).getVM(1);
+    client = Host.getHost(0).getVM(2);
+    client2 = Host.getHost(0).getVM(3);
   }
 
   @Override
-  public final void preTearDown() throws Exception {
-    helper.closeCache(managingNode);
-    helper.closeCache(server);
-    helper.closeCache(client);
-    helper.closeCache(client2);
+  public final void postTearDownManagementTestBase() throws Exception {
+    closeCache(server);
+    closeCache(client);
+    closeCache(client2);
 
     disconnectFromDS();
   }
 
   @Test
   public void testClientIds() throws Exception {
-    helper.createManagementCache(managingNode);
-    helper.startManagingNode(managingNode);
+    createManagementCache(managingNode);
+    startManagingNode(managingNode);
     int port = (Integer) createServerCache(server);
-    DistributedMember serverMember = helper.getMember(server);
+    DistributedMember serverMember = getMember(server);
     createClientCache(client, NetworkUtils.getServerHostName(server.getHost()), port);
     createClientCache(client2, NetworkUtils.getServerHostName(server.getHost()), port);
     put(client);
     put(client2);
     verifyClientIds(managingNode, serverMember, port);
-    helper.stopManagingNode(managingNode);
+    stopManagingNode(managingNode);
   }
 
-  @SuppressWarnings("serial")
   private Object createServerCache(VM vm) {
     return vm.invoke(new SerializableCallable("Create Server Cache") {
       public Object call() {
@@ -132,7 +114,6 @@ public class TestClientIdsDUnitTest extends JUnit4DistributedTestCase {
     });
   }
 
-  @SuppressWarnings("serial")
   private void createClientCache(VM vm, final String host, final Integer port1) {
     vm.invoke(new SerializableCallable("Create Client Cache") {
 
@@ -158,7 +139,7 @@ public class TestClientIdsDUnitTest extends JUnit4DistributedTestCase {
   }
 
   private Integer createServerCache(DataPolicy dataPolicy) throws Exception {
-    Cache cache = helper.createCache(false);
+    Cache cache = createCache(false);
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.DISTRIBUTED_ACK);
     factory.setDataPolicy(dataPolicy);
@@ -202,7 +183,6 @@ public class TestClientIdsDUnitTest extends JUnit4DistributedTestCase {
   /**
    * get member id
    */
-  @SuppressWarnings("serial")
   protected static DistributedMember getMember() throws Exception {
     GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
     return cache.getDistributedSystem().getDistributedMember();
@@ -213,7 +193,6 @@ public class TestClientIdsDUnitTest extends JUnit4DistributedTestCase {
    * 
    * @param vm
    */
-  @SuppressWarnings("serial")
   protected void verifyClientIds(final VM vm,
       final DistributedMember serverMember, final int serverPort) {
     SerializableRunnable verifyCacheServerRemote = new SerializableRunnable(
@@ -262,7 +241,6 @@ public class TestClientIdsDUnitTest extends JUnit4DistributedTestCase {
    * 
    * @param vm
    */
-  @SuppressWarnings("serial")
   protected void put(final VM vm) {
     SerializableRunnable put = new SerializableRunnable("put") {
       public void run() {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/management/internal/pulse/TestSubscriptionsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/pulse/TestSubscriptionsDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/pulse/TestSubscriptionsDUnitTest.java
index a94cc93..380b741 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/pulse/TestSubscriptionsDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/pulse/TestSubscriptionsDUnitTest.java
@@ -17,6 +17,9 @@
 package org.apache.geode.management.internal.pulse;
 
 import static org.apache.geode.distributed.ConfigurationProperties.*;
+import static org.apache.geode.test.dunit.Host.*;
+import static org.apache.geode.test.dunit.NetworkUtils.*;
+import static org.apache.geode.test.dunit.Wait.*;
 import static org.junit.Assert.*;
 
 import java.util.Properties;
@@ -34,275 +37,175 @@ import org.apache.geode.cache.Scope;
 import org.apache.geode.cache.client.PoolManager;
 import org.apache.geode.cache.client.internal.PoolImpl;
 import org.apache.geode.cache.server.CacheServer;
-import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.distributed.DistributedSystem;
-import org.apache.geode.internal.AvailablePort;
 import org.apache.geode.internal.cache.GemFireCacheImpl;
 import org.apache.geode.management.DistributedSystemMXBean;
 import org.apache.geode.management.ManagementService;
 import org.apache.geode.management.ManagementTestBase;
-import org.apache.geode.test.dunit.Assert;
-import org.apache.geode.test.dunit.Host;
-import org.apache.geode.test.dunit.LogWriterUtils;
-import org.apache.geode.test.dunit.NetworkUtils;
-import org.apache.geode.test.dunit.SerializableCallable;
-import org.apache.geode.test.dunit.SerializableRunnable;
 import org.apache.geode.test.dunit.VM;
-import org.apache.geode.test.dunit.Wait;
 import org.apache.geode.test.dunit.WaitCriterion;
-import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
 import org.apache.geode.test.junit.categories.DistributedTest;
 
 /**
  * This is for testing subscriptions
  */
 @Category(DistributedTest.class)
-public class TestSubscriptionsDUnitTest extends JUnit4DistributedTestCase {
+@SuppressWarnings("serial")
+public class TestSubscriptionsDUnitTest extends ManagementTestBase {
 
-  private static final String k1 = "k1";
-  private static final String k2 = "k2";
-  private static final String client_k1 = "client-k1";
-
-  private static final String client_k2 = "client-k2";
   private static final String REGION_NAME = TestSubscriptionsDUnitTest.class.getSimpleName() + "_Region";
+
+  private static final String KEY1 = "k1";
+  private static final String KEY2 = "k2";
+  private static final String CLIENT_VALUE1 = "client-k1";
+  private static final String CLIENT_VALUE2 = "client-k2";
+
   private static VM server = null;
   private static VM client = null;
   private static VM client2 = null;
-  private static VM managingNode = null;
-  private ManagementTestBase helper;
-
-  @Override
-  public final void preSetUp() throws Exception {
-    this.helper = new ManagementTestBase(){};
-  }
-
-  @Override
-  public final void postSetUp() throws Exception {
-    final Host host = Host.getHost(0);
-    managingNode = host.getVM(0);
-    server = host.getVM(1);
-    client = host.getVM(2);
-    client2 = host.getVM(3);
-  }
 
   @Override
-  public final void preTearDown() throws Exception {
-    helper.closeCache(managingNode);
-    helper.closeCache(server);
-    helper.closeCache(client);
-    helper.closeCache(client2);
-    disconnectFromDS();
+  public final void postSetUpManagementTestBase() throws Exception {
+    server = getHost(0).getVM(1);
+    client = getHost(0).getVM(2);
+    client2 = getHost(0).getVM(3);
   }
 
   @Test
-  public void testNoOfSubscription() throws Exception {
+  public void testNumSubscriptions() throws Exception {
+    createManagementCache(managingNode);
+    startManagingNode(managingNode);
 
-    helper.createManagementCache(managingNode);
-    helper.startManagingNode(managingNode);
+    int port = createServerCache(server);
+    getMember(server);
+
+    createClientCache(client, getServerHostName(server.getHost()), port);
+    createClientCache(client2, getServerHostName(server.getHost()), port);
 
-    int port = (Integer) createServerCache(server);
-    DistributedMember serverMember = helper.getMember(server);
-    createClientCache(client, NetworkUtils.getServerHostName(server.getHost()), port);
-    createClientCache(client2, NetworkUtils.getServerHostName(server.getHost()), port);
     put(client);
     put(client2);
+
     registerInterest(client);
     registerInterest(client2);
-    verifyClientStats(managingNode, serverMember, port);
-    helper.stopManagingNode(managingNode);
+
+    verifyNumSubscriptions(managingNode);
+
+    stopManagingNode(managingNode);
   }
 
-  @SuppressWarnings("serial")
-  private Object createServerCache(VM vm) {
-    return vm.invoke(new SerializableCallable(
-        "Create Server Cache in TestSubscriptionsDUnitTest") {
-
-      public Object call() {
-        try {
-          return createServerCache();
-        } catch (Exception e) {
-          fail("Error while createServerCache in TestSubscriptionsDUnitTest"
-              + e);
-        }
-        return null;
-      }
+  private int createServerCache(VM vm) {
+    return vm.invoke("Create Server Cache in TestSubscriptionsDUnitTest", () -> {
+      return createServerCache();
     });
   }
 
-  @SuppressWarnings("serial")
-  private void createClientCache(VM vm, final String host, final Integer port1) {
-    vm.invoke(new SerializableCallable(
-        "Create Client Cache in TestSubscriptionsDUnitTest") {
-
-      public Object call() {
-        try {
-          createClientCache(host, port1);
-        } catch (Exception e) {
-          fail("Error while createClientCache in TestSubscriptionsDUnitTest "
-              + e);
-        }
-        return null;
-      }
+  private void createClientCache(VM vm, final String host, final int port1) {
+    vm.invoke("Create Client Cache in TestSubscriptionsDUnitTest", () -> {
+      createClientCache(host, port1);
     });
   }
 
   private Cache createCache(Properties props) throws Exception {
     DistributedSystem ds = getSystem(props);
-    ds.disconnect();
-    ds = getSystem(props);
-    assertNotNull(ds);
-    Cache cache = (GemFireCacheImpl) CacheFactory.create(ds);
-    assertNotNull(cache);
+    Cache cache = CacheFactory.create(ds);
     return cache;
   }
 
-  private Integer createServerCache(DataPolicy dataPolicy) throws Exception {
-    Cache cache = helper.createCache(false);
+  private int createServerCache(DataPolicy dataPolicy) throws Exception {
+    Cache cache = createCache(false);
+
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.DISTRIBUTED_ACK);
     factory.setDataPolicy(dataPolicy);
-    RegionAttributes attrs = factory.create();
-    cache.createRegion(REGION_NAME, attrs);
-    int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
+
+    cache.createRegion(REGION_NAME, factory.create());
+
     CacheServer server1 = cache.addCacheServer();
-    server1.setPort(port);
+    server1.setPort(0);
     server1.setNotifyBySubscription(true);
     server1.start();
-    return new Integer(server1.getPort());
+
+    return server1.getPort();
   }
 
-  public Integer createServerCache() throws Exception {
+  private int createServerCache() throws Exception {
     return createServerCache(DataPolicy.REPLICATE);
   }
 
-  public Cache createClientCache(String host, Integer port1) throws Exception {
-
+  private Cache createClientCache(String host, int port1) throws Exception {
     Properties props = new Properties();
     props.setProperty(MCAST_PORT, "0");
     props.setProperty(LOCATORS, "");
+
     Cache cache = createCache(props);
+
     PoolImpl p = (PoolImpl) PoolManager.createFactory()
-        .addServer(host, port1.intValue()).setSubscriptionEnabled(true)
-        .setThreadLocalConnections(true).setMinConnections(1)
-        .setReadTimeout(20000).setPingInterval(10000).setRetryAttempts(1)
-        .setSubscriptionEnabled(true).setStatisticInterval(1000)
-        .create("TestSubscriptionsDUnitTest");
+                                       .addServer(host, port1)
+                                       .setSubscriptionEnabled(true)
+                                       .setThreadLocalConnections(true)
+                                       .setMinConnections(1)
+                                       .setReadTimeout(20000)
+                                       .setPingInterval(10000)
+                                       .setRetryAttempts(1)
+                                       .setSubscriptionEnabled(true)
+                                       .setStatisticInterval(1000)
+                                       .create("TestSubscriptionsDUnitTest");
 
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.DISTRIBUTED_ACK);
     factory.setPoolName(p.getName());
 
     RegionAttributes attrs = factory.create();
-    Region region = cache.createRegion(REGION_NAME, attrs);
-    return cache;
+    cache.createRegion(REGION_NAME, attrs);
 
+    return cache;
   }
 
-  /**
-   * get member id
-   */
-  @SuppressWarnings("serial")
-  protected static DistributedMember getMember() throws Exception {
-    GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-    return cache.getDistributedSystem().getDistributedMember();
-  }
+  private void verifyNumSubscriptions(final VM vm) {
+    vm.invoke("TestSubscriptionsDUnitTest Verify Cache Server Remote", () -> {
+      final GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
 
-  /**
-   * Verify the Cache Server details
-   * 
-   * @param vm
-   */
-  @SuppressWarnings("serial")
-  protected void verifyClientStats(final VM vm,
-      final DistributedMember serverMember, final int serverPort) {
-    SerializableRunnable verifyCacheServerRemote = new SerializableRunnable(
-        "TestSubscriptionsDUnitTest Verify Cache Server Remote") {
-      public void run() {
-        final GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        try {
-          final WaitCriterion waitCriteria = new WaitCriterion() {
-            @Override
-            public boolean done() {
-              ManagementService service = ManagementService
-                  .getExistingManagementService(cache);
-              final DistributedSystemMXBean dsBean = service
-                  .getDistributedSystemMXBean();
-              if (dsBean != null) {
-                if (dsBean.getNumSubscriptions() > 1) {
-                  return true;
-                }
-              }
-              return false;
-            }
-
-            @Override
-            public String description() {
-              return "TestSubscriptionsDUnitTest wait for getDistributedSystemMXBean to complete and get results";
-            }
-          };
-          Wait.waitForCriterion(waitCriteria, 2 * 60 * 1000, 3000, true);
-          final DistributedSystemMXBean dsBean = ManagementService
-              .getExistingManagementService(cache).getDistributedSystemMXBean();
-          assertNotNull(dsBean);
-          LogWriterUtils.getLogWriter().info(
-              "TestSubscriptionsDUnitTest dsBean.getNumSubscriptions() ="
-                  + dsBean.getNumSubscriptions());
-          assertTrue(dsBean.getNumSubscriptions() == 2 ? true : false);
-        } catch (Exception e) {
-          fail("TestSubscriptionsDUnitTest Error while verifying subscription "
-              + e.getMessage());
+      waitForCriterion(new WaitCriterion() {
+        @Override
+        public boolean done() {
+          ManagementService service = ManagementService.getExistingManagementService(cache);
+          DistributedSystemMXBean distributedSystemMXBean = service.getDistributedSystemMXBean();
+          return distributedSystemMXBean != null && distributedSystemMXBean.getNumSubscriptions() > 1;
+        }
+        @Override
+        public String description() {
+          return "TestSubscriptionsDUnitTest wait for getDistributedSystemMXBean to complete and get results";
         }
+      }, 2 * 60 * 1000, 3000, true);
 
-      }
-    };
-    vm.invoke(verifyCacheServerRemote);
+      DistributedSystemMXBean distributedSystemMXBean = ManagementService.getExistingManagementService(cache).getDistributedSystemMXBean();
+      assertNotNull(distributedSystemMXBean);
+      assertEquals(2, distributedSystemMXBean.getNumSubscriptions());
+    });
   }
 
-  /**
-   * Verify the Cache Server details
-   * 
-   * @param vm
-   */
-  @SuppressWarnings("serial")
-  protected void registerInterest(final VM vm) {
-    SerializableRunnable put = new SerializableRunnable(
-        "TestSubscriptionsDUnitTest registerInterest") {
-      public void run() {
-        try {
-          Cache cache = GemFireCacheImpl.getInstance();
-          Region<Object, Object> r1 = cache.getRegion(Region.SEPARATOR
-              + REGION_NAME);
-          assertNotNull(r1);
-          r1.registerInterest(k1);
-          r1.registerInterest(k2);
-        } catch (Exception ex) {
-          Assert.fail("TestSubscriptionsDUnitTest failed while register Interest", ex);
-        }
-      }
+  private void registerInterest(final VM vm) {
+    vm.invoke("TestSubscriptionsDUnitTest registerInterest", () -> {
+      Cache cache = GemFireCacheImpl.getInstance();
+      Region<Object, Object> region = cache.getRegion(Region.SEPARATOR + REGION_NAME);
+      assertNotNull(region);
 
-    };
-    vm.invoke(put);
+      region.registerInterest(KEY1);
+      region.registerInterest(KEY2);
+    });
   }
 
-  @SuppressWarnings("serial")
-  protected void put(final VM vm) {
-    SerializableRunnable put = new SerializableRunnable("put") {
-      public void run() {
-        try {
-          Cache cache = GemFireCacheImpl.getInstance();
-          Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME);
-          assertNotNull(r1);
-          r1.put(k1, client_k1);
-          assertEquals(r1.getEntry(k1).getValue(), client_k1);
-          r1.put(k2, client_k2);
-          assertEquals(r1.getEntry(k2).getValue(), client_k2);
-        } catch (Exception ex) {
-          Assert.fail("failed while put", ex);
-        }
-      }
+  private void put(final VM vm) {
+    vm.invoke("put", () -> {
+      Cache cache = GemFireCacheImpl.getInstance();
+      Region region = cache.getRegion(Region.SEPARATOR + REGION_NAME);
+      assertNotNull(region);
 
-    };
-    vm.invoke(put);
-  }
+      region.put(KEY1, CLIENT_VALUE1);
+      assertEquals(CLIENT_VALUE1, region.getEntry(KEY1).getValue());
 
+      region.put(KEY2, CLIENT_VALUE2);
+      assertEquals(CLIENT_VALUE2, region.getEntry(KEY2).getValue());
+    });
+  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/test/dunit/AsyncInvocation.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/AsyncInvocation.java b/geode-core/src/test/java/org/apache/geode/test/dunit/AsyncInvocation.java
index 5b65e32..808da37 100644
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/AsyncInvocation.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/AsyncInvocation.java
@@ -332,6 +332,61 @@ public class AsyncInvocation<V> implements Future<V> {
   }
 
   /**
+   * Waits if necessary for at most the given time for the computation
+   * to complete.
+   *
+   * @param  timeout the maximum time to wait
+   * @param  unit the time unit of the timeout argument
+   *
+   * @return this {@code AsyncInvocation}
+   *
+   * @throws AssertionError wrapping any {@code Exception} thrown by this
+   *         {@code AsyncInvocation}.
+   *
+   * @throws CancellationException if the computation was cancelled
+   *
+   * @throws ExecutionException if the computation threw an exception
+   *
+   * @throws InterruptedException if the current thread is interrupted.
+   *
+   * @throws TimeoutException if the wait timed out
+   */
+  public AsyncInvocation<V> await(final long timeout, final TimeUnit unit) throws ExecutionException, InterruptedException, TimeoutException {
+    long millis = unit.toMillis(timeout);
+    join(millis);
+    timeoutIfAlive(millis);
+    checkException();
+    return this;
+  }
+
+  /**
+   * Waits if necessary for at most the default timeout for the computation
+   * to complete.
+   *
+   * @return this {@code AsyncInvocation}
+   *
+   * @throws AssertionError wrapping any {@code Exception} thrown by this
+   *         {@code AsyncInvocation}.
+   *
+   * @throws AssertionError wrapping a {@code TimeoutException} if this
+   *         {@code AsyncInvocation} fails to complete within the default
+   *         timeout of 60 seconds as defined by {@link #DEFAULT_JOIN_MILLIS}.
+   *
+   * @throws CancellationException if the computation was cancelled
+   *
+   * @throws ExecutionException if the computation threw an exception
+   *
+   * @throws InterruptedException if the current thread is interrupted.
+   */
+  public AsyncInvocation<V> await() throws ExecutionException, InterruptedException {
+    try {
+      return await(DEFAULT_JOIN_MILLIS, TimeUnit.MILLISECONDS);
+    } catch (TimeoutException timeoutException) {
+      throw new AssertionError(timeoutException);
+    }
+  }
+
+  /**
    * Waits if necessary for the work to complete, and then returns the result
    * of this {@code AsyncInvocation}.
    *
@@ -371,10 +426,6 @@ public class AsyncInvocation<V> implements Future<V> {
    * @throws AssertionError wrapping any {@code Exception} thrown by this
    *         {@code AsyncInvocation}.
    *
-   * @throws AssertionError wrapping a {@code TimeoutException} if this
-   *         {@code AsyncInvocation} fails to complete within the default
-   *         timeout of 60 seconds as defined by {@link #DEFAULT_JOIN_MILLIS}.
-   *
    * @throws CancellationException if the computation was cancelled
    *
    * @throws ExecutionException if the computation threw an exception

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/test/dunit/Invoke.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/Invoke.java b/geode-core/src/test/java/org/apache/geode/test/dunit/Invoke.java
index 1ceb433..7ed1477 100755
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/Invoke.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/Invoke.java
@@ -24,13 +24,13 @@ import java.util.Map;
  * <code>DistributedTest</code> to invoke a <code>SerializableRunnable</code>
  * or <code>SerializableCallable</code> in a remote test <code>VM</code>.
  * 
- * These methods can be used directly: <code>Invoke.invokeInEveryVM(...)</code>, 
+ * These methods can be used directly: <code>Invoke.invokeInEveryVMAndController(...)</code>,
  * however, they are intended to be referenced through static import:
  *
  * <pre>
  * import static org.apache.geode.test.dunit.Invoke.*;
  *    ...
- *    invokeInEveryVM(...);
+ *    invokeInEveryVMAndController(...);
  * </pre>
  *
  * Extracted from DistributedTestCase.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/test/dunit/VM.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/VM.java b/geode-core/src/test/java/org/apache/geode/test/dunit/VM.java
index ad43a77..4bc9f3e 100644
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/VM.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/VM.java
@@ -26,6 +26,7 @@ import java.util.concurrent.Callable;
 import com.jayway.awaitility.Awaitility;
 import hydra.MethExecutorResult;
 
+import org.apache.geode.internal.process.PidUnavailableException;
 import org.apache.geode.internal.process.ProcessUtils;
 import org.apache.geode.test.dunit.standalone.BounceResult;
 import org.apache.geode.test.dunit.standalone.RemoteDUnitVMIF;
@@ -95,7 +96,11 @@ public class VM implements Serializable {
    * Returns the process id of this {@code VM}.
    */
   public int getPid() {
-    return this.pid;
+//    try {
+      return invoke(() -> ProcessUtils.identifyPid());
+//    } catch (PidUnavailableException e) {
+//      return this.pid;
+//    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/test/dunit/Wait.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/Wait.java b/geode-core/src/test/java/org/apache/geode/test/dunit/Wait.java
index eb0a475..338350b 100755
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/Wait.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/Wait.java
@@ -19,6 +19,8 @@ package org.apache.geode.test.dunit;
 import static org.apache.geode.test.dunit.Jitter.*;
 import static org.junit.Assert.*;
 
+import java.io.Serializable;
+
 import org.apache.logging.log4j.Logger;
 
 import org.apache.geode.internal.cache.LocalRegion;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/test/dunit/cache/internal/JUnit4CacheTestCase.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/cache/internal/JUnit4CacheTestCase.java b/geode-core/src/test/java/org/apache/geode/test/dunit/cache/internal/JUnit4CacheTestCase.java
index 40d0aba..921f2af 100644
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/cache/internal/JUnit4CacheTestCase.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/cache/internal/JUnit4CacheTestCase.java
@@ -238,6 +238,11 @@ public abstract class JUnit4CacheTestCase extends JUnit4DistributedTestCase impl
     return getCache(false, factory);
   }
 
+  public final Cache getCache(final Properties properties) {
+    getSystem(properties);
+    return getCache();
+  }
+
   public final Cache getCache(final boolean client) {
     return getCache(client, null);
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/test/dunit/internal/JUnit4DistributedTestCase.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/internal/JUnit4DistributedTestCase.java b/geode-core/src/test/java/org/apache/geode/test/dunit/internal/JUnit4DistributedTestCase.java
index 86971d4..bf8c7d5 100644
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/internal/JUnit4DistributedTestCase.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/internal/JUnit4DistributedTestCase.java
@@ -114,7 +114,7 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
   }
 
   @Rule
-  public SerializableTestName testName = new SerializableTestName();
+  public SerializableTestName testNameForDistributedTestCase = new SerializableTestName();
 
   @BeforeClass
   public static final void initializeDistributedTestCase() {
@@ -125,7 +125,7 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
     if (this.distributedTestFixture != this) {
       return this.distributedTestFixture.getName();
     }
-    return this.testName.getMethodName();
+    return this.testNameForDistributedTestCase.getMethodName();
   }
 
   public final Class<? extends DistributedTestFixture> getTestClass() {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedDisconnectRule.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedDisconnectRule.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedDisconnectRule.java
index 44787fa..d41b756 100755
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedDisconnectRule.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedDisconnectRule.java
@@ -45,14 +45,14 @@ public class DistributedDisconnectRule extends DistributedExternalResource {
   @Override
   protected void before() throws Throwable {
     if (this.disconnectBefore) {
-      invoker().invokeEverywhere(serializableRunnable());
+      invoker().invokeInEveryVMAndController(serializableRunnable());
     }
   }
 
   @Override
   protected void after() {
     if (this.disconnectAfter) {
-      invoker().invokeEverywhere(serializableRunnable());
+      invoker().invokeInEveryVMAndController(serializableRunnable());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedRestoreSystemProperties.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedRestoreSystemProperties.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedRestoreSystemProperties.java
index 62acec8..94ee903 100755
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedRestoreSystemProperties.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedRestoreSystemProperties.java
@@ -47,7 +47,7 @@ public class DistributedRestoreSystemProperties extends RestoreSystemProperties
   @Override
   protected void before() throws Throwable {
     super.before();
-    this.invoker.remoteInvokeInEveryVMAndLocator(new SerializableRunnable() {
+    this.invoker.invokeInEveryVMAndController(new SerializableRunnable() {
       @Override
       public void run() { 
         originalProperties = getProperties();
@@ -59,7 +59,7 @@ public class DistributedRestoreSystemProperties extends RestoreSystemProperties
   @Override
   protected void after() {
     super.after();
-    this.invoker.remoteInvokeInEveryVMAndLocator(new SerializableRunnable() {
+    this.invoker.invokeInEveryVMAndController(new SerializableRunnable() {
       @Override
       public void run() { 
         setProperties(originalProperties);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedRule.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedRule.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedRule.java
new file mode 100644
index 0000000..5ab479b
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedRule.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.test.dunit.rules;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+import org.junit.Rule;
+
+/**
+ * Annotates a field or method as a type of {@link Rule} that can be invoked
+ * across multiple VMs in a {@code DistributedTest}.
+ *
+ * If there are multiple annotated {@code DistributedRule}s on a class, they
+ * will be applied in order of fields first, then methods. Furthermore, if
+ * there are multiple fields (or methods) they will be applied in an order that
+ * depends on your JVM's implementation of the reflection API, which is
+ * undefined. Rules defined by fields will always be applied before Rules
+ * defined by methods. You can use a {@link org.junit.rules.RuleChain} or
+ * {@link org.apache.geode.test.junit.rules.RuleList} if you want to have
+ * control over the order in which the Rules are applied.
+ *
+ * <p>
+ * For example, here is a test class that makes a unique
+ * {@link org.junit.rules.TemporaryFolder} available to each DUnit VM:
+ * <pre>
+ * {@literal @}Category(DistributedTest.class)
+ * public class EachVMHasItsOwnTemporaryFolder {
+ *
+ *   {@literal @}DistributedRule
+ *   public TemporaryFolder folder = new TemporaryFolder();
+ *
+ *   {@literal @}Rule
+ *   public DistributedTestRule distributedTestRule = DistributedTestRule.builder().build();
+ *
+ *   {@literal @}Test
+ *   public void eachVMHasItsOwnTemporaryFolder() throws Exception {
+ *     Host.getHost(0).getVM(0).invoke(() -> {
+ *       File gemfireProps = folder.newFile({@literal "}gemfire.properties{@literal "});
+ *       File diskDirs = folder.newFolder({@literal "}diskDirs{@literal "});
+ *       ...
+ *     }
+ *   }
+ * }
+ * </pre>
+ *
+ * @see org.apache.geode.test.junit.rules.serializable.SerializableTestRule
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.FIELD, ElementType.METHOD})
+public @interface DistributedRule {
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedRunRules.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedRunRules.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedRunRules.java
new file mode 100644
index 0000000..7490acd
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedRunRules.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.test.dunit.rules;
+
+import java.io.Serializable;
+
+import org.junit.rules.TestRule;
+import org.junit.runner.Description;
+import org.junit.runners.model.Statement;
+
+import org.apache.geode.test.dunit.Host;
+import org.apache.geode.test.dunit.SerializableRunnable;
+
+/**
+ * Runs Rules in specified DUnit VMs.
+ */
+public class DistributedRunRules extends Statement implements Serializable {
+  private final Statement statement;
+  private final WhichVMs whichVMs;
+
+  public DistributedRunRules(final Statement base, final Iterable<TestRule> rules, final Description description, final WhichVMs whichVMs) {
+    this.statement = applyAll(base, rules, description);
+    this.whichVMs = whichVMs;
+  }
+
+  @Override
+  public void evaluate() throws Throwable {
+    if (this.whichVMs.controllerVM()) {
+      this.statement.evaluate();
+    }
+    if (this.whichVMs.everyVM()) {
+      for (int i = 0; i < Host.getHost(0).getVMCount(); i++) {
+        Host.getHost(0).getVM(i).invoke(runnable());
+      }
+    }
+    if (this.whichVMs.locatorVM()) {
+      Host.getHost(0).getLocator().invoke(runnable());
+    }
+  }
+
+  private Statement applyAll(Statement result, final Iterable<TestRule> rules, final Description description) {
+    for (TestRule each : rules) {
+      result = each.apply(result, description);
+    }
+    return result;
+  }
+
+  private SerializableRunnable runnable() {
+    return new SerializableRunnable() {
+      @Override
+      public void run() {
+        try {
+          DistributedRunRules.this.statement.evaluate();
+        } catch (Error | RuntimeException e) {
+          throw e;
+        } catch (Throwable t) {
+          throw new RuntimeException(t);
+        }
+      }
+    };
+  }
+}
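
As a hypothetical illustration (not in the commit), here is a sketch of the
pieces DistributedRunRules ties together: a base Statement, the TestRules to
apply, a Description, and a WhichVMs selection. It only targets the controller
VM so the wrapped statement does not need to be serializable; DistributedTestRule
normally builds this wiring:

    import java.util.Collections;
    import java.util.List;

    import org.junit.rules.TestRule;
    import org.junit.rules.Timeout;
    import org.junit.runner.Description;
    import org.junit.runners.model.Statement;

    import org.apache.geode.test.dunit.rules.DistributedRunRules;
    import org.apache.geode.test.dunit.rules.WhichVMs;

    public class DistributedRunRulesSketch {

      public static void runInControllerOnly() throws Throwable {
        Statement base = new Statement() {
          @Override
          public void evaluate() {
            // the test body would run here
          }
        };

        List<TestRule> rules = Collections.singletonList(Timeout.seconds(60));
        Description description = Description.createTestDescription(
            DistributedRunRulesSketch.class, "runInControllerOnly");

        // Only the controller VM evaluates the wrapped statement; adding
        // addEveryVM() or addLocatorVM() would also ship the statement to the
        // remote VMs, which requires it to be serializable.
        new DistributedRunRules(base, rules, description,
            new WhichVMs().addControllerVM()).evaluate();
      }
    }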

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedStatement.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedStatement.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedStatement.java
new file mode 100644
index 0000000..1c78d00
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedStatement.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.test.dunit.rules;
+
+import java.io.Serializable;
+
+import org.junit.runners.model.Statement;
+
+import org.apache.geode.test.dunit.Host;
+import org.apache.geode.test.dunit.SerializableRunnable;
+import org.apache.geode.test.junit.rules.serializable.SerializableStatement;
+
+/**
+ * Invokes Statement in specified DUnit VMs.
+ */
+public class DistributedStatement extends SerializableStatement {
+  private final SerializableStatement next;
+  private final WhichVMs whichVMs;
+
+  /**
+   * Construct a new {@code DistributedStatement} statement.
+   * @param next the next {@code Statement} in the execution chain
+   * @param whichVMs specifies which VMs should invoke the statement
+   */
+  public DistributedStatement(final SerializableStatement next, final WhichVMs whichVMs) {
+    this.next = next;
+    this.whichVMs = whichVMs;
+  }
+
+  /**
+   * Invoke the {@link Statement} in the specified VMs.
+   */
+  @Override
+  public void evaluate() throws Throwable {
+    if (this.whichVMs.controllerVM()) {
+      this.next.evaluate();
+    }
+    if (this.whichVMs.everyVM()) {
+      for (int i = 0; i < Host.getHost(0).getVMCount(); i++) {
+        Host.getHost(0).getVM(i).invoke(runnable());
+      }
+    }
+    if (this.whichVMs.locatorVM()) {
+      Host.getHost(0).getLocator().invoke(runnable());
+    }
+  }
+
+  private SerializableRunnable runnable() {
+    return new SerializableRunnable() {
+      @Override
+      public void run() {
+        try {
+          next.evaluate();
+        } catch (Error | RuntimeException e) {
+          throw e;
+        } catch (Throwable t) {
+          throw new RuntimeException(t);
+        }
+      }
+    };
+  }
+}
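
And a similarly hypothetical sketch for DistributedStatement, assuming
SerializableStatement can be subclassed anonymously; the wrapped statement is
shipped to the DUnit VMs, so it has to be serializable:

    import org.apache.geode.test.dunit.rules.DistributedStatement;
    import org.apache.geode.test.dunit.rules.WhichVMs;
    import org.apache.geode.test.junit.rules.serializable.SerializableStatement;

    public class DistributedStatementSketch {

      public static void runEverywhere() throws Throwable {
        SerializableStatement body = new SerializableStatement() {
          @Override
          public void evaluate() throws Throwable {
            // per-VM work would run here
          }
        };

        // Evaluates in the controller VM and in every DUnit VM.
        new DistributedStatement(body, new WhichVMs().addControllerVM().addEveryVM())
            .evaluate();
      }
    }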

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedTestRule.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedTestRule.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedTestRule.java
new file mode 100644
index 0000000..9516cb3
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedTestRule.java
@@ -0,0 +1,192 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.test.dunit.rules;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.junit.rules.MethodRule;
+import org.junit.rules.TestRule;
+import org.junit.runner.Description;
+import org.junit.runners.model.FrameworkMethod;
+import org.junit.runners.model.Statement;
+import org.junit.runners.model.TestClass;
+
+import org.apache.geode.test.dunit.standalone.DUnitLauncher;
+
+/**
+ * Launches the DUnit framework for a {@code DistributedTest}.
+ *
+ * <p>Enables use of {@link DistributedRule} annotations on any Rules.
+ *
+ * <pre>
+ * {@literal @}Category(DistributedTest.class)
+ * public class QueryDataDUnitTest {
+ *
+ *   {@literal @}DistributedRule
+ *   public UseJacksonForJsonPathRule useJacksonForJsonPathRule = new UseJacksonForJsonPathRule();
+ *
+ *   {@literal @}Rule
+ *   public DistributedTestRule distributedTestRule = DistributedTestRule.builder().build();
+ *
+ *   ...
+ * }
+ * </pre>
+ * <p>Use the {@code Builder} to specify which {@code VM}s should invoke any
+ * {@code Rule} annotated with {@literal @}DistributedRule. By default,
+ * {@code controllerVM} is {@code true}, {@code everyVM} is {@code true} and
+ * {@code locatorVM} is {@code false}.
+ */
+public class DistributedTestRule implements MethodRule, Serializable {
+
+  public static Builder builder() {
+    return new Builder();
+  }
+
+  private TestClass testClass;
+
+  private final List<?> rules = new ArrayList<>(); // types are TestRule or MethodRule
+
+  private final RemoteInvoker invoker;
+
+  private final WhichVMs whichVMs;
+
+  // TODO: add ability to specify ordering of DistributedRules
+
+  protected DistributedTestRule(final Builder builder) {
+    this(new RemoteInvoker(), builder);
+  }
+
+  protected DistributedTestRule(final RemoteInvoker invoker, final Builder builder) {
+    this.invoker = invoker;
+
+    this.whichVMs = new WhichVMs();
+    if (builder.controllerVM) {
+      this.whichVMs.addControllerVM();
+    }
+    if (builder.everyVM) {
+      this.whichVMs.addEveryVM();
+    }
+    if (builder.locatorVM) {
+      this.whichVMs.addLocatorVM();
+    }
+  }
+
+  @Override
+  public Statement apply(final Statement base, final FrameworkMethod method, final Object target) {
+    this.testClass = new TestClass(target.getClass());
+    Statement statement = base;
+    statement = withRules(method, target, statement);
+    statement = withDUnit(method, target, statement);
+    return statement;
+  }
+
+  protected Statement withDUnit(final FrameworkMethod method, final Object target, final Statement statement) {
+    return new Statement() {
+      @Override
+      public void evaluate() throws Throwable {
+        setUpDUnit();
+        try {
+          statement.evaluate();
+        } finally {
+          tearDownDUnit();
+        }
+      }
+    };
+  }
+
+  protected void setUpDUnit() throws Exception {
+    DUnitLauncher.launchIfNeeded();
+    // TODO: customize based on fields
+  }
+
+  protected void tearDownDUnit() throws Exception {
+  }
+
+  protected Statement withRules(final FrameworkMethod method, final Object target, final Statement statement) {
+    List<TestRule> testRules = this.testRules(target);
+    Statement result = statement;
+//    result = withMethodRules(method, testRules, target, result);
+    result = withTestRules(method, testRules, result);
+
+    return result;
+  }
+
+//  protected Statement withMethodRules(final FrameworkMethod method, final List<TestRule> testRules, final Object target, final Statement result) {
+//    Statement statement = result;
+//    for (MethodRule rule : methodRules(target)) {
+//      if (!testRules.contains(rule)) {
+//        statement = new DistributedStatement(rule.apply((result, method, target), this.whichVMs);
+//      }
+//    }
+//    return statement;
+//  }
+
+  protected Statement withTestRules(final FrameworkMethod method, final List<TestRule> testRules, final Statement statement) {
+    Description description = Description.createTestDescription(this.testClass.getJavaClass(), method.getName(), method.getAnnotations());
+    return testRules.isEmpty() ? statement : new DistributedRunRules(statement, testRules, description, this.whichVMs);
+  }
+
+  protected List<MethodRule> methodRules(final Object target) {
+    List<MethodRule> rules = this.testClass.getAnnotatedMethodValues(target, DistributedRule.class, MethodRule.class);
+    rules.addAll(this.testClass.getAnnotatedFieldValues(target, DistributedRule.class, MethodRule.class));
+    return rules;
+  }
+
+  protected List<TestRule> testRules(final Object target) {
+    List<TestRule> result = this.testClass.getAnnotatedMethodValues(target, DistributedRule.class, TestRule.class);
+    result.addAll(this.testClass.getAnnotatedFieldValues(target, DistributedRule.class, TestRule.class));
+    return result;
+  }
+
+  /**
+   * Builds an instance of {@link DistributedTestRule}.
+   *
+   * <p>By default, {@code controllerVM} is {@code true}, {@code everyVM} is
+   * {@code true} and {@code locatorVM} is {@code false}.
+   */
+  public static class Builder {
+
+    private boolean everyVM = true;
+    private boolean locatorVM = false;
+    private boolean controllerVM = true;
+
+    protected Builder() {
+    }
+
+    public Builder everyVM(final boolean everyVM) {
+      this.everyVM = everyVM;
+      return this;
+    }
+
+    public Builder locatorVM(final boolean locatorVM) {
+      this.locatorVM = locatorVM;
+      return this;
+    }
+
+    public Builder controllerVM(final boolean controllerVM) {
+      this.controllerVM = controllerVM;
+      return this;
+    }
+
+    public DistributedTestRule build() {
+      return new DistributedTestRule(this);
+    }
+  }
+
+}
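
The Builder defaults can be overridden per test; for example, a sketch that
applies annotated rules in the controller VM and the locator VM, but not in the
DUnit VMs:

    import org.junit.Rule;

    import org.apache.geode.test.dunit.rules.DistributedTestRule;

    public class LocatorRulesSketch {

      // Rules annotated with @DistributedRule are applied in the controller VM
      // and the locator VM, but not in the DUnit VMs.
      @Rule
      public DistributedTestRule distributedTestRule =
          DistributedTestRule.builder().everyVM(false).locatorVM(true).build();
    }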

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedUseJacksonForJsonPathRule.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedUseJacksonForJsonPathRule.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedUseJacksonForJsonPathRule.java
new file mode 100644
index 0000000..78841fc
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedUseJacksonForJsonPathRule.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.test.dunit.rules;
+
+import org.apache.geode.test.junit.rules.UseJacksonForJsonPathRule;
+
+public class DistributedUseJacksonForJsonPathRule extends UseJacksonForJsonPathRule {
+
+  private static UseJacksonForJsonPathRule instance = new UseJacksonForJsonPathRule();
+
+  private final RemoteInvoker invoker;
+
+  public DistributedUseJacksonForJsonPathRule() {
+    this(new RemoteInvoker());
+  }
+
+  public DistributedUseJacksonForJsonPathRule(final RemoteInvoker invoker) {
+    this.invoker = invoker;
+  }
+
+  @Override
+  public void before() {
+    this.invoker.invokeInEveryVMAndController(DistributedUseJacksonForJsonPathRule::invokeBefore);
+  }
+
+  @Override
+  public void after() {
+    this.invoker.invokeInEveryVMAndController(DistributedUseJacksonForJsonPathRule::invokeAfter);
+  }
+
+  private static void invokeBefore() {
+    instance.before();
+  }
+  private static void invokeAfter() {
+    instance.after();
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedWrapperRule.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedWrapperRule.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedWrapperRule.java
new file mode 100644
index 0000000..45311e1
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/DistributedWrapperRule.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.test.dunit.rules;
+
+import org.junit.runner.Description;
+import org.junit.runners.model.Statement;
+
+import org.apache.geode.test.junit.rules.UseJacksonForJsonPathRule;
+import org.apache.geode.test.junit.rules.serializable.SerializableExternalResource;
+import org.apache.geode.test.junit.rules.serializable.SerializableStatement;
+import org.apache.geode.test.junit.rules.serializable.SerializableTestRule;
+
+public class DistributedWrapperRule implements SerializableTestRule {
+
+  private static SerializableTestRule instance;
+
+  private final RemoteInvoker invoker;
+  private final WhichVMs whichVMs;
+
+  public DistributedWrapperRule(final SerializableTestRule testRule) {
+    this(testRule, new WhichVMs().addControllerVM().addEveryVM());
+  }
+
+  public DistributedWrapperRule(final SerializableTestRule testRule, final WhichVMs whichVMs) {
+    this(new RemoteInvoker(), testRule, whichVMs);
+  }
+
+  public DistributedWrapperRule(final RemoteInvoker invoker, final SerializableTestRule testRule, final WhichVMs whichVMs) {
+    this.invoker = invoker;
+    instance = testRule;
+    this.whichVMs = whichVMs;
+  }
+
+  @Override
+  public Statement apply(Statement base, Description description) {
+    return new DistributedStatement((SerializableStatement) base, whichVMs);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/test/dunit/rules/RemoteInvoker.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/RemoteInvoker.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/RemoteInvoker.java
index 66378c7..c9f0902 100755
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/RemoteInvoker.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/RemoteInvoker.java
@@ -20,7 +20,8 @@ import static org.apache.geode.test.dunit.Invoke.*;
 
 import java.io.Serializable;
 
-import org.apache.geode.test.dunit.SerializableRunnable;
+import org.apache.geode.test.dunit.Invoke;
+import org.apache.geode.test.dunit.SerializableRunnableIF;
 
 /**
  * Provides remote invocation support to a {@code TestRule}. These methods
@@ -31,18 +32,21 @@ class RemoteInvoker implements Serializable {
 
   private static final long serialVersionUID = -1759722991299584649L;
 
-  public void invokeEverywhere(final SerializableRunnable runnable) {
+  // controller VM
+  // dunit VMs
+  // locator VM
+
+  public void invokeInEveryVMAndController(final SerializableRunnableIF runnable) {
     try {
       runnable.run();
     } catch (Exception e) {
       throw new RuntimeException(e);
     }
-    invokeInEveryVM(runnable);
-    invokeInLocator(runnable);
+    Invoke.invokeInEveryVM(runnable);
   }
 
-  public void remoteInvokeInEveryVMAndLocator(final SerializableRunnable runnable) {
-    invokeInEveryVM(runnable);
+  public void invokeInEveryVMAndLocator(final SerializableRunnableIF runnable) {
+    Invoke.invokeInEveryVM(runnable);
     invokeInLocator(runnable);
   }
 }
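
To show the renamed helpers in context, here is a small hypothetical rule in
the same package (RemoteInvoker is package-private) that clears a made-up
system property in the controller VM and every DUnit VM:

    package org.apache.geode.test.dunit.rules;

    import org.apache.geode.test.junit.rules.serializable.SerializableExternalResource;

    public class ClearExamplePropertyRule extends SerializableExternalResource {

      private final RemoteInvoker invoker = new RemoteInvoker();

      @Override
      protected void before() throws Throwable {
        // Runs locally in the controller VM, then in every DUnit VM.
        this.invoker.invokeInEveryVMAndController(
            () -> System.clearProperty("example.test.property"));
      }

      @Override
      protected void after() {
        // Nothing to restore in this sketch.
      }
    }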

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/test/dunit/rules/WhichVMs.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/WhichVMs.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/WhichVMs.java
new file mode 100644
index 0000000..4ee6020
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/WhichVMs.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.test.dunit.rules;
+
+import java.io.Serializable;
+
+/**
+ * Specifies which DUnit VMs will invoke a Rule.
+ *
+ * TODO: add ability to specify specific VMs
+ *
+ * TODO: add ability to specify order
+ */
+public class WhichVMs implements Serializable {
+  private boolean controllerVM;
+  private boolean everyVM;
+  private boolean locatorVM;
+
+  public WhichVMs() {
+  }
+
+  public WhichVMs addControllerVM() {
+    this.controllerVM = true;
+    return this;
+  }
+  public WhichVMs addEveryVM() {
+    this.everyVM = true;
+    return this;
+  }
+  public WhichVMs addLocatorVM() {
+    this.locatorVM = true;
+    return this;
+  }
+
+  public boolean controllerVM() {
+    return this.controllerVM;
+  }
+  public boolean everyVM() {
+    return this.everyVM;
+  }
+  public boolean locatorVM() {
+    return this.locatorVM;
+  }
+}
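
As a quick illustration of the fluent adders and accessors:

    import org.apache.geode.test.dunit.rules.WhichVMs;

    public class WhichVMsSketch {

      public static void main(String[] args) {
        // Select the controller VM and every DUnit VM, but not the locator VM.
        WhichVMs vms = new WhichVMs().addControllerVM().addEveryVM();

        System.out.println(vms.controllerVM()); // true
        System.out.println(vms.everyVM());      // true
        System.out.println(vms.locatorVM());    // false
      }
    }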

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/test/dunit/rules/tests/DistributedTestRuleTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/tests/DistributedTestRuleTest.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/tests/DistributedTestRuleTest.java
new file mode 100644
index 0000000..8352db2
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/tests/DistributedTestRuleTest.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.test.dunit.rules.tests;
+
+import java.io.Serializable;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExternalResource;
+
+import org.apache.geode.test.dunit.rules.DistributedRule;
+import org.apache.geode.test.dunit.rules.DistributedTestRule;
+import org.apache.geode.test.junit.rules.serializable.SerializableExternalResource;
+
+public class DistributedTestRuleTest {
+
+  @DistributedRule
+  public SimpleRule simpleRule = new SimpleRule();
+
+  @Rule
+  public DistributedTestRule distributedTestRule = DistributedTestRule.builder().build();
+
+  @Test
+  public void test() throws Exception {
+    System.out.println("KIRK:test");
+  }
+
+  private static class SimpleRule extends SerializableExternalResource {
+    @Override
+    protected void before() throws Throwable {
+      System.out.println("KIRK:before");
+    }
+
+    @Override
+    protected void after() {
+      System.out.println("KIRK:after");
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/test/dunit/standalone/DUnitLauncher.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/standalone/DUnitLauncher.java b/geode-core/src/test/java/org/apache/geode/test/dunit/standalone/DUnitLauncher.java
index 6618d2a..a044409 100644
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/standalone/DUnitLauncher.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/standalone/DUnitLauncher.java
@@ -23,6 +23,7 @@ import org.apache.geode.distributed.internal.DistributionConfig;
 import org.apache.geode.distributed.internal.InternalLocator;
 import org.apache.geode.distributed.internal.membership.gms.membership.GMSJoinLeave;
 import org.apache.geode.internal.AvailablePortHelper;
+import org.apache.geode.internal.cache.GemFireCacheImpl;
 import org.apache.geode.internal.logging.LogService;
 import org.apache.geode.test.dunit.DUnitEnv;
 import org.apache.geode.test.dunit.Host;
@@ -74,7 +75,9 @@ public class DUnitLauncher {
   private static final int DEBUGGING_VM_NUM = -1;
   private static final int LOCATOR_VM_NUM = -2;
 
-  static final long STARTUP_TIMEOUT = 30 * 1000;
+  static final long STARTUP_TIMEOUT = 300 * 1000; // TODO: restore to 30
+  private static final String STARTUP_TIMEOUT_MESSAGE = "VMs did not start up within " + (STARTUP_TIMEOUT/1000) + " seconds";
+
   private static final String SUSPECT_FILENAME = "dunit_suspect.log";
   private static File DUNIT_SUSPECT_FILE;
 
@@ -192,7 +195,7 @@ public class DUnitLauncher {
 
     //wait for the VM to start up
     if(!processManager.waitForVMs(STARTUP_TIMEOUT)) {
-      throw new RuntimeException("VMs did not start up with 30 seconds");
+      throw new RuntimeException("VMs did not start up within 30 seconds");
     }
 
     locatorPort = startLocator(registry);
@@ -207,7 +210,7 @@ public class DUnitLauncher {
 
     //wait for the VMS to start up
     if(!processManager.waitForVMs(STARTUP_TIMEOUT)) {
-      throw new RuntimeException("VMs did not start up with 30 seconds");
+      throw new RuntimeException("VMs did not start up within 30 seconds");
     }
 
     //populate the Host class with our stubs. The tests use this host class
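
Note that the hunks above raise STARTUP_TIMEOUT to 300 seconds and introduce STARTUP_TIMEOUT_MESSAGE, while the two RuntimeException messages still hard-code "30 seconds". A self-contained sketch of the pattern the new constant enables, deriving the message from the timeout so the two cannot drift apart (not part of this commit):

public class TimeoutMessageSketch {
  // mirrors the constants added in the diff; values here are only for illustration
  static final long STARTUP_TIMEOUT = 300 * 1000;
  static final String STARTUP_TIMEOUT_MESSAGE =
      "VMs did not start up within " + (STARTUP_TIMEOUT / 1000) + " seconds";

  public static void main(String[] args) {
    // prints "VMs did not start up within 300 seconds"; the text always tracks the constant
    System.out.println(STARTUP_TIMEOUT_MESSAGE);
  }
}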

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-junit/build.gradle
----------------------------------------------------------------------
diff --git a/geode-junit/build.gradle b/geode-junit/build.gradle
index f7e5e46..e47095f 100755
--- a/geode-junit/build.gradle
+++ b/geode-junit/build.gradle
@@ -16,6 +16,7 @@
  */
 
 dependencies {
+  compile 'com.jayway.jsonpath:json-path:' + project.'json-path.version'
   testCompile 'commons-lang:commons-lang:' + project.'commons-lang.version'
   testCompile 'com.google.guava:guava:' + project.'guava.version'
   testCompile 'org.assertj:assertj-core:' + project.'assertj-core.version'

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-junit/src/main/java/org/apache/geode/test/junit/rules/UseJacksonForJsonPathRule.java
----------------------------------------------------------------------
diff --git a/geode-junit/src/main/java/org/apache/geode/test/junit/rules/UseJacksonForJsonPathRule.java b/geode-junit/src/main/java/org/apache/geode/test/junit/rules/UseJacksonForJsonPathRule.java
new file mode 100644
index 0000000..03d5a60
--- /dev/null
+++ b/geode-junit/src/main/java/org/apache/geode/test/junit/rules/UseJacksonForJsonPathRule.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.test.junit.rules;
+
+import java.util.EnumSet;
+import java.util.Set;
+
+import com.jayway.jsonpath.Configuration;
+import com.jayway.jsonpath.Configuration.Defaults;
+import com.jayway.jsonpath.Option;
+import com.jayway.jsonpath.spi.json.JacksonJsonProvider;
+import com.jayway.jsonpath.spi.json.JsonProvider;
+import com.jayway.jsonpath.spi.mapper.JacksonMappingProvider;
+import com.jayway.jsonpath.spi.mapper.MappingProvider;
+
+import org.apache.geode.test.junit.rules.serializable.SerializableExternalResource;
+
+/**
+ * JUnit Rule that configures json-path to use the {@code JacksonJsonProvider}
+ *
+ * <p>UseJacksonForJsonPathRule can be used in tests that need to use json-path-assert:
+ * <pre>
+ * {@literal @}ClassRule
+ * public static UseJacksonForJsonPathRule useJacksonForJsonPathRule = new UseJacksonForJsonPathRule();
+ *
+ * {@literal @}Test
+ * public void hasAssertionsUsingJsonPathMatchers() {
+ *   ...
+ *   assertThat(json, isJson());
+ *   assertThat(json, hasJsonPath("$.result"));
+ * }
+ * </pre>
+ */
+@SuppressWarnings({ "serial", "unused" })
+public class UseJacksonForJsonPathRule extends SerializableExternalResource {
+
+  private boolean hadDefaults;
+  private JsonProvider jsonProvider;
+  private MappingProvider mappingProvider;
+  private Set<Option> options;
+
+  /**
+   * Override to set up your specific external resource.
+   */
+  @Override
+  public void before() {
+    saveDefaults();
+    Configuration.setDefaults(new Defaults() {
+
+      private final JsonProvider jsonProvider = new JacksonJsonProvider();
+      private final MappingProvider mappingProvider = new JacksonMappingProvider();
+
+      @Override
+      public JsonProvider jsonProvider() {
+        return jsonProvider;
+      }
+
+      @Override
+      public MappingProvider mappingProvider() {
+        return mappingProvider;
+      }
+
+      @Override
+      public Set<Option> options() {
+        return EnumSet.noneOf(Option.class);
+      }
+
+    });
+  }
+
+  /**
+   * Override to tear down your specific external resource.
+   */
+  @Override
+  public void after() {
+    restoreDefaults();
+  }
+
+  private void saveDefaults() {
+    try {
+      Configuration defaultConfiguration = Configuration.defaultConfiguration();
+      this.jsonProvider = defaultConfiguration.jsonProvider();
+      this.mappingProvider = defaultConfiguration.mappingProvider();
+      this.options = defaultConfiguration.getOptions();
+      this.hadDefaults = true;
+    } catch (NoClassDefFoundError ignore) {
+      this.hadDefaults = false;
+    }
+  }
+
+  private void restoreDefaults() {
+    if (!this.hadDefaults) {
+      return;
+    }
+    Configuration.setDefaults(new Defaults() {
+
+      @Override
+      public JsonProvider jsonProvider() {
+        return jsonProvider;
+      }
+
+      @Override
+      public MappingProvider mappingProvider() {
+        return mappingProvider;
+      }
+
+      @Override
+      public Set<Option> options() {
+        return options;
+      }
+
+    });
+  }
+}
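
The javadoc above already sketches the intent; an expanded, self-contained test illustrating it. The class name and JSON content are hypothetical, and it assumes json-path-assert (whose version property is added to gradle/dependency-versions.properties later in this commit) and Hamcrest are on the test classpath:

import static com.jayway.jsonpath.matchers.JsonPathMatchers.hasJsonPath;
import static com.jayway.jsonpath.matchers.JsonPathMatchers.isJson;
import static org.hamcrest.MatcherAssert.assertThat;

import org.junit.ClassRule;
import org.junit.Test;

import org.apache.geode.test.junit.rules.UseJacksonForJsonPathRule;

public class UseJacksonForJsonPathRuleExampleTest {

  // switches json-path to the Jackson provider for the whole test class,
  // then restores the previous defaults afterwards
  @ClassRule
  public static UseJacksonForJsonPathRule useJackson = new UseJacksonForJsonPathRule();

  @Test
  public void resultFieldIsPresent() {
    String json = "{\"result\":\"OK\",\"count\":2}";
    assertThat(json, isJson());
    assertThat(json, hasJsonPath("$.result"));
  }
}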

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-junit/src/main/java/org/apache/geode/test/junit/rules/serializable/SerializableExternalResource.java
----------------------------------------------------------------------
diff --git a/geode-junit/src/main/java/org/apache/geode/test/junit/rules/serializable/SerializableExternalResource.java b/geode-junit/src/main/java/org/apache/geode/test/junit/rules/serializable/SerializableExternalResource.java
index ad974b6..f4db5bd 100755
--- a/geode-junit/src/main/java/org/apache/geode/test/junit/rules/serializable/SerializableExternalResource.java
+++ b/geode-junit/src/main/java/org/apache/geode/test/junit/rules/serializable/SerializableExternalResource.java
@@ -16,10 +16,32 @@
  */
 package org.apache.geode.test.junit.rules.serializable;
 
+import java.io.Serializable;
+
 import org.junit.rules.ExternalResource;
+import org.junit.runner.Description;
+import org.junit.runners.model.Statement;
 
 /**
  * Serializable subclass of {@link org.junit.rules.ExternalResource ExternalResource}.
  */
 public abstract class SerializableExternalResource extends ExternalResource implements SerializableTestRule {
+
+  public Statement apply(Statement base, Description description) {
+    return statement(base);
+  }
+
+  private Statement statement(final Statement base) {
+    return new SerializableStatement() {
+      @Override
+      public void evaluate() throws Throwable {
+        before();
+        try {
+          base.evaluate();
+        } finally {
+          after();
+        }
+      }
+    };
+  }
 }
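
The added apply() wraps the test in a SerializableStatement, presumably so the whole rule chain stays serializable when a rule instance has to travel to remote DUnit VMs. A minimal subclass sketch, assuming only the before()/after() hooks inherited from ExternalResource (the class below is illustrative, not Geode source):

import org.apache.geode.test.junit.rules.serializable.SerializableExternalResource;

public class TemporaryValueRule extends SerializableExternalResource {

  private String value;

  @Override
  protected void before() throws Throwable {
    value = "set up before each test";   // acquire the resource
  }

  @Override
  protected void after() {
    value = null;                        // always released, even if the test fails
  }

  public String getValue() {
    return value;
  }
}

// In a test: @Rule public TemporaryValueRule temp = new TemporaryValueRule();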

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-junit/src/main/java/org/apache/geode/test/junit/rules/serializable/SerializableStatement.java
----------------------------------------------------------------------
diff --git a/geode-junit/src/main/java/org/apache/geode/test/junit/rules/serializable/SerializableStatement.java b/geode-junit/src/main/java/org/apache/geode/test/junit/rules/serializable/SerializableStatement.java
new file mode 100644
index 0000000..3f07421
--- /dev/null
+++ b/geode-junit/src/main/java/org/apache/geode/test/junit/rules/serializable/SerializableStatement.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.test.junit.rules.serializable;
+
+import java.io.Serializable;
+
+import org.junit.runners.model.Statement;
+
+/**
+ * Serializable subclass of {@link Statement}.
+ */
+public abstract class SerializableStatement extends Statement implements Serializable {
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/management/LuceneManagementDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/management/LuceneManagementDUnitTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/management/LuceneManagementDUnitTest.java
index d5f6508..53c2b58 100644
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/management/LuceneManagementDUnitTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/management/LuceneManagementDUnitTest.java
@@ -144,12 +144,12 @@ public class LuceneManagementDUnitTest extends ManagementTestBase {
     }
   }
 
-  private static void verifyMBean() {
+  private void verifyMBean() {
     getMBean();
   }
 
-  private static LuceneServiceMXBean getMBean() {
-    ObjectName objectName = MBeanJMXAdapter.getCacheServiceMBeanName(ds.getDistributedMember(), "LuceneService");
+  private LuceneServiceMXBean getMBean() {
+    ObjectName objectName = MBeanJMXAdapter.getCacheServiceMBeanName(getSystem().getDistributedMember(), "LuceneService");
     assertNotNull(getManagementService().getMBeanInstance(objectName, LuceneServiceMXBean.class));
     return getManagementService().getMBeanInstance(objectName, LuceneServiceMXBean.class);
   }
@@ -177,14 +177,14 @@ public class LuceneManagementDUnitTest extends ManagementTestBase {
     createPartitionRegion(vm, regionName);
   }
 
-  private static void createIndexes(String regionName, int numIndexes) {
-    LuceneService luceneService = LuceneServiceProvider.get(cache);
+  private void createIndexes(String regionName, int numIndexes) {
+    LuceneService luceneService = LuceneServiceProvider.get(getCache());
     for (int i=0; i<numIndexes; i++) {
       luceneService.createIndex(INDEX_NAME+"_"+i, regionName, "field"+i);
     }
   }
 
-  private static void verifyAllMBeanIndexMetrics(String regionName, int numRegionIndexes, int numTotalIndexes) {
+  private void verifyAllMBeanIndexMetrics(String regionName, int numRegionIndexes, int numTotalIndexes) {
     LuceneServiceMXBean mbean = getMBean();
     verifyMBeanIndexMetrics(mbean, regionName, numRegionIndexes, numTotalIndexes);
   }
@@ -206,17 +206,17 @@ public class LuceneManagementDUnitTest extends ManagementTestBase {
     }
   }
 
-  private static void putEntries(String regionName, int numEntries) {
+  private void putEntries(String regionName, int numEntries) {
     for (int i=0; i<numEntries; i++) {
-      Region region = cache.getRegion(regionName);
+      Region region = getCache().getRegion(regionName);
       String key = String.valueOf(i);
       Object value = new TestObject(key);
       region.put(key, value);
     }
   }
 
-  private static void queryEntries(String regionName, String indexName) throws LuceneQueryException {
-    LuceneService service = LuceneServiceProvider.get(cache);
+  private void queryEntries(String regionName, String indexName) throws LuceneQueryException {
+    LuceneService service = LuceneServiceProvider.get(getCache());
     LuceneQuery query = service.createLuceneQueryFactory().create(indexName, regionName, "field0:0", null);
     query.findValues();
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/gradle/dependency-versions.properties
----------------------------------------------------------------------
diff --git a/gradle/dependency-versions.properties b/gradle/dependency-versions.properties
index 50af2aa..0098eb1 100644
--- a/gradle/dependency-versions.properties
+++ b/gradle/dependency-versions.properties
@@ -68,7 +68,8 @@ jline.version = 2.12
 jmock.version = 2.8.2
 jna.version = 4.0.0
 jopt-simple.version = 5.0.1
-json-path.version = 1.2.0
+json-path.version = 2.2.0
+json-path-assert.version = 2.2.0
 json4s.version = 3.2.4
 jsr305.version = 3.0.1
 junit.version = 4.12


[13/50] [abbrv] incubator-geode git commit: GEODE-1993: refactor tests to use rules rather than abstract classes

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-cq/src/test/java/org/apache/geode/security/CQClientAuthDunitTest.java
----------------------------------------------------------------------
diff --git a/geode-cq/src/test/java/org/apache/geode/security/CQClientAuthDunitTest.java b/geode-cq/src/test/java/org/apache/geode/security/CQClientAuthDunitTest.java
index 2386af1..cc0a191 100644
--- a/geode-cq/src/test/java/org/apache/geode/security/CQClientAuthDunitTest.java
+++ b/geode-cq/src/test/java/org/apache/geode/security/CQClientAuthDunitTest.java
@@ -43,8 +43,10 @@ import org.apache.geode.test.junit.categories.SecurityTest;
 @Category({ DistributedTest.class, SecurityTest.class })
 public class CQClientAuthDunitTest extends AbstractSecureServerDUnitTest {
 
-  public CQClientAuthDunitTest(){
-    this.postProcessor = SamplePostProcessor.class;
+  public Properties getProperties(){
+    Properties  properties = super.getProperties();
+    properties.setProperty(SECURITY_POST_PROCESSOR, SamplePostProcessor.class.getName());
+    return properties;
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-cq/src/test/java/org/apache/geode/security/CQPDXPostProcessorDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-cq/src/test/java/org/apache/geode/security/CQPDXPostProcessorDUnitTest.java b/geode-cq/src/test/java/org/apache/geode/security/CQPDXPostProcessorDUnitTest.java
index 12f08ec..470b722 100644
--- a/geode-cq/src/test/java/org/apache/geode/security/CQPDXPostProcessorDUnitTest.java
+++ b/geode-cq/src/test/java/org/apache/geode/security/CQPDXPostProcessorDUnitTest.java
@@ -17,11 +17,14 @@
 
 package org.apache.geode.security;
 
+import static org.apache.geode.distributed.ConfigurationProperties.*;
 import static org.junit.Assert.*;
 
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
 import java.util.concurrent.TimeUnit;
 
 import com.jayway.awaitility.Awaitility;
@@ -53,6 +56,7 @@ import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactor
 @Parameterized.UseParametersRunnerFactory(CategoryWithParameterizedRunnerFactory.class)
 public class CQPDXPostProcessorDUnitTest extends AbstractSecureServerDUnitTest {
   private static byte[] BYTES = {1,0};
+  private static int jmxPort = AvailablePortHelper.getRandomAvailableTCPPort();
 
   @Parameterized.Parameters
   public static Collection<Object[]> parameters(){
@@ -60,11 +64,20 @@ public class CQPDXPostProcessorDUnitTest extends AbstractSecureServerDUnitTest {
     return Arrays.asList(params);
   }
 
+  public Properties getProperties(){
+    Properties  properties = super.getProperties();
+    properties.setProperty(SECURITY_POST_PROCESSOR, PDXPostProcessor.class.getName());
+    properties.setProperty("security-pdx", pdxPersistent+"");
+    properties.setProperty(JMX_MANAGER_PORT, jmxPort+"");
+    return properties;
+  }
+
+  public Map<String, String> getData(){
+    return new HashMap();
+  }
+
   public CQPDXPostProcessorDUnitTest(boolean pdxPersistent){
-    this.postProcessor = PDXPostProcessor.class;
     this.pdxPersistent = pdxPersistent;
-    this.jmxPort = AvailablePortHelper.getRandomAvailableTCPPort();
-    values = new HashMap();
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-cq/src/test/java/org/apache/geode/security/CQPostProcessorDunitTest.java
----------------------------------------------------------------------
diff --git a/geode-cq/src/test/java/org/apache/geode/security/CQPostProcessorDunitTest.java b/geode-cq/src/test/java/org/apache/geode/security/CQPostProcessorDunitTest.java
index e2b555a..32632ef 100644
--- a/geode-cq/src/test/java/org/apache/geode/security/CQPostProcessorDunitTest.java
+++ b/geode-cq/src/test/java/org/apache/geode/security/CQPostProcessorDunitTest.java
@@ -17,8 +17,11 @@
 
 package org.apache.geode.security;
 
+import static org.apache.geode.distributed.ConfigurationProperties.SECURITY_POST_PROCESSOR;
 import static org.junit.Assert.*;
 
+import java.util.Properties;
+
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -40,10 +43,13 @@ import org.apache.geode.test.junit.categories.SecurityTest;
 @Category({ DistributedTest.class, SecurityTest.class })
 public class CQPostProcessorDunitTest extends AbstractSecureServerDUnitTest {
 
-  public CQPostProcessorDunitTest(){
-    this.postProcessor = SamplePostProcessor.class;
+  public Properties getProperties(){
+    Properties  properties = super.getProperties();
+    properties.setProperty(SECURITY_POST_PROCESSOR, SamplePostProcessor.class.getName());
+    return properties;
   }
 
+
   @Test
   public void testPostProcess(){
     String query = "select * from /AuthRegion";

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/configuration/LuceneClusterConfigurationDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/configuration/LuceneClusterConfigurationDUnitTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/configuration/LuceneClusterConfigurationDUnitTest.java
index 30532b9..bcc5ab3 100755
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/configuration/LuceneClusterConfigurationDUnitTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/configuration/LuceneClusterConfigurationDUnitTest.java
@@ -47,7 +47,7 @@ import org.apache.geode.management.internal.cli.i18n.CliStrings;
 import org.apache.geode.management.internal.cli.result.CommandResult;
 import org.apache.geode.management.internal.cli.util.CommandStringBuilder;
 import org.apache.geode.test.dunit.VM;
-import org.apache.geode.test.dunit.rules.LocatorServerConfigurationRule;
+import org.apache.geode.test.dunit.rules.LocatorServerStartupRule;
 import org.apache.geode.test.junit.categories.DistributedTest;
 
 
@@ -57,8 +57,7 @@ public class LuceneClusterConfigurationDUnitTest extends CliCommandTestBase {
   private String groupName = "Lucene";
 
   @Rule
-  public LocatorServerConfigurationRule ls = new LocatorServerConfigurationRule(
-      this);
+  public LocatorServerStartupRule ls = new LocatorServerStartupRule();
 
   @Test
   public void indexGetsCreatedUsingClusterConfiguration()
@@ -78,7 +77,7 @@ public class LuceneClusterConfigurationDUnitTest extends CliCommandTestBase {
     // configuration.
     VM vm2 = startNodeUsingClusterConfiguration(2, false);
     vm2.invoke(() -> {
-      LuceneService luceneService = LuceneServiceProvider.get(getCache());
+      LuceneService luceneService = LuceneServiceProvider.get(ls.serverStarter.cache);
       final LuceneIndex index = luceneService.getIndex(INDEX_NAME, REGION_NAME);
       assertNotNull(index);
       validateIndexFields(new String[] { "field1", "field2", "field3" }, index);
@@ -104,7 +103,7 @@ public class LuceneClusterConfigurationDUnitTest extends CliCommandTestBase {
     // configuration.
     VM vm2 = startNodeUsingClusterConfiguration(2, false);
     vm2.invoke(() -> {
-      LuceneService luceneService = LuceneServiceProvider.get(getCache());
+      LuceneService luceneService = LuceneServiceProvider.get(ls.serverStarter.cache);
       final LuceneIndex index = luceneService.getIndex(INDEX_NAME, REGION_NAME);
       assertNotNull(index);
       String[] fields = new String[] { "field1", "field2", "field3" };
@@ -139,7 +138,7 @@ public class LuceneClusterConfigurationDUnitTest extends CliCommandTestBase {
 
     // VM2 should have lucene index created using gfsh execution.
     vm2.invoke(() -> {
-      LuceneService luceneService = LuceneServiceProvider.get(getCache());
+      LuceneService luceneService = LuceneServiceProvider.get(ls.serverStarter.cache);
       final LuceneIndex index = luceneService.getIndex(INDEX_NAME, REGION_NAME);
       assertNotNull(index);
       validateIndexFields(new String[] { "field1", "field2", "field3" }, index);
@@ -147,7 +146,7 @@ public class LuceneClusterConfigurationDUnitTest extends CliCommandTestBase {
 
     // The Lucene index is present in vm3.
     vm3.invoke(() -> {
-      LuceneService luceneService = LuceneServiceProvider.get(getCache());
+      LuceneService luceneService = LuceneServiceProvider.get(ls.serverStarter.cache);
       final LuceneIndex index = luceneService.getIndex(INDEX_NAME, REGION_NAME);
       assertNotNull(index);
     });
@@ -176,7 +175,7 @@ public class LuceneClusterConfigurationDUnitTest extends CliCommandTestBase {
 
     // VM2 should have lucene index created using gfsh execution
     vm2.invoke(() -> {
-      LuceneService luceneService = LuceneServiceProvider.get(getCache());
+      LuceneService luceneService = LuceneServiceProvider.get(ls.serverStarter.cache);
       final LuceneIndex index = luceneService.getIndex(INDEX_NAME, REGION_NAME);
       assertNotNull(index);
       validateIndexFields(new String[] { "field1", "field2", "field3" }, index);
@@ -184,7 +183,7 @@ public class LuceneClusterConfigurationDUnitTest extends CliCommandTestBase {
 
     // The Lucene index should not be present in vm3.
     vm3.invoke(() -> {
-      LuceneService luceneService = LuceneServiceProvider.get(getCache());
+      LuceneService luceneService = LuceneServiceProvider.get(ls.serverStarter.cache);
       final LuceneIndex index = luceneService.getIndex(INDEX_NAME, REGION_NAME);
       assertNull(index);
     });
@@ -213,7 +212,7 @@ public class LuceneClusterConfigurationDUnitTest extends CliCommandTestBase {
 
     // VM2 should have lucene index created using gfsh execution
     vm2.invoke(() -> {
-      LuceneService luceneService = LuceneServiceProvider.get(getCache());
+      LuceneService luceneService = LuceneServiceProvider.get(ls.serverStarter.cache);
       final LuceneIndex index = luceneService.getIndex(INDEX_NAME, REGION_NAME);
       assertNotNull(index);
       validateIndexFields(new String[] { "field1", "field2", "field3" }, index);
@@ -221,7 +220,7 @@ public class LuceneClusterConfigurationDUnitTest extends CliCommandTestBase {
 
     // The Lucene index should not be present in vm3.
     vm3.invoke(() -> {
-      LuceneService luceneService = LuceneServiceProvider.get(getCache());
+      LuceneService luceneService = LuceneServiceProvider.get(ls.serverStarter.cache);
       final LuceneIndex index = luceneService.getIndex(INDEX_NAME, REGION_NAME);
       assertNull(index);
     });
@@ -242,7 +241,7 @@ public class LuceneClusterConfigurationDUnitTest extends CliCommandTestBase {
     if (addGroup) {
       nodeProperties.setProperty(GROUPS, groupName);
     }
-    return ls.getServerVM(vmIndex, nodeProperties);
+    return ls.getServerVM(vmIndex, nodeProperties, ls.getLocatorPort(0));
   }
 
   private VM startLocatorWithClusterConfigurationEnabled() throws Exception {
@@ -268,7 +267,7 @@ public class LuceneClusterConfigurationDUnitTest extends CliCommandTestBase {
     locatorProps.setProperty(HTTP_SERVICE_PORT, String.valueOf(httpPort));
     locatorProps.setProperty(CLUSTER_CONFIGURATION_DIR,
  dir.getCanonicalPath());
-    return ls.getLocatorVM(locatorProps);
+    return ls.getLocatorVM(0, locatorProps);
   }
 
   private void createLuceneIndexUsingGfsh(boolean addGroup) throws Exception {
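
A condensed sketch of the LocatorServerStartupRule pattern this diff converges on; only getLocatorVM, getServerVM, getLocatorPort and the serverStarter.cache field are taken from the hunks above, while the test class, method names and empty properties are illustrative:

import java.util.Properties;

import org.junit.Rule;
import org.junit.Test;

import org.apache.geode.cache.lucene.LuceneService;
import org.apache.geode.cache.lucene.LuceneServiceProvider;
import org.apache.geode.test.dunit.VM;
import org.apache.geode.test.dunit.rules.LocatorServerStartupRule;

public class LocatorServerStartupRuleSketchTest {

  @Rule
  public LocatorServerStartupRule ls = new LocatorServerStartupRule();

  @Test
  public void startLocatorThenServer() throws Exception {
    // locator in DUnit VM 0, then a server in VM 1 pointed at that locator
    ls.getLocatorVM(0, new Properties());
    VM server = ls.getServerVM(1, new Properties(), ls.getLocatorPort(0));

    server.invoke(() -> {
      // inside the server VM, the running cache is reachable through the rule
      LuceneService luceneService = LuceneServiceProvider.get(ls.serverStarter.cache);
      // ... create or query Lucene indexes against luceneService here
    });
  }
}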

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java
----------------------------------------------------------------------
diff --git a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java
index 35d8fb4..2bcb31b 100644
--- a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java
+++ b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java
@@ -140,7 +140,7 @@ public abstract class CommonCrudController extends AbstractBaseController {
     @ApiResponse( code = 404, message = "Region or key(s) does not exist" ),
     @ApiResponse( code = 500, message = "GemFire throws an error or exception" )      
   } )
-  @PreAuthorize("@securityService.authorizeKeys('WRITE', #region, #keys)")
+  @PreAuthorize("@securityService.authorize('WRITE', #region, #keys)")
   public ResponseEntity<?> delete(@PathVariable("region") String region,
                                   @PathVariable("keys") final String[] keys){
     logger.debug("Delete data for key {} on region {}", ArrayUtils.toString((Object[])keys), region);
@@ -169,7 +169,7 @@ public abstract class CommonCrudController extends AbstractBaseController {
     @ApiResponse( code = 404, message = "Region does not exist" ),
     @ApiResponse( code = 500, message = "if GemFire throws an error or exception" )   
   } )
-  @PreAuthorize("@securityService.authorize('DATA', 'WRITE', #regon)")
+  @PreAuthorize("@securityService.authorize('DATA', 'WRITE', #region)")
   public ResponseEntity<?> delete(@PathVariable("region") String region) {
     logger.debug("Deleting all data in Region ({})...", region);
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/PdxBasedCrudController.java
----------------------------------------------------------------------
diff --git a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/PdxBasedCrudController.java b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/PdxBasedCrudController.java
index 3003b3d..ebb8ccc 100644
--- a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/PdxBasedCrudController.java
+++ b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/PdxBasedCrudController.java
@@ -227,7 +227,7 @@ public class PdxBasedCrudController extends CommonCrudController {
     @ApiResponse( code = 404, message = "Region does not exist." ),
     @ApiResponse( code = 500, message = "GemFire throws an error or exception.")  
   } )
-  @PreAuthorize("@securityService.authorizeKeys('READ', #region, #keys)")
+  @PreAuthorize("@securityService.authorize('READ', #region, #keys)")
   public ResponseEntity<?> read(
       @PathVariable("region") String region,
       @PathVariable("keys") final String[] keys,
@@ -311,7 +311,7 @@ public class PdxBasedCrudController extends CommonCrudController {
     @ApiResponse( code = 409, message = "For CAS, @old value does not match to the current value in region" ),
     @ApiResponse( code = 500, message = "GemFire throws an error or exception.")
   } )
-  @PreAuthorize("@securityService.authorizeKeys('WRITE', #region, #keys)")
+  @PreAuthorize("@securityService.authorize('WRITE', #region, #keys)")
   public ResponseEntity<?> update(@PathVariable("region") String region,
       @PathVariable("keys") final String[] keys,
       @RequestParam(value = "op", defaultValue = "PUT") final String opValue,

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/security/RestSecurityService.java
----------------------------------------------------------------------
diff --git a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/security/RestSecurityService.java b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/security/RestSecurityService.java
index 3d09f09..2247de0 100644
--- a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/security/RestSecurityService.java
+++ b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/security/RestSecurityService.java
@@ -44,7 +44,7 @@ public class RestSecurityService {
     }
   }
 
-  public boolean authorizeKeys(String operation, String region, String[] keys) {
+  public boolean authorize(String operation, String region, String[] keys) {
     boolean authorized = false;
     for(String key:keys){
       authorized = authorize("DATA", operation, region, key);
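
For context on how the renamed method is reached: in the @PreAuthorize expressions above, '@securityService' resolves to the Spring bean with that name and '#region' / '#keys' bind to the annotated method's parameters. A minimal sketch of that binding (the class below is hypothetical, not Geode source):

import org.springframework.security.access.prepost.PreAuthorize;
import org.springframework.stereotype.Component;

@Component
public class PreAuthorizeBindingSketch {

  // Spring evaluates the SpEL before the method body runs; the call only
  // proceeds if securityService.authorize(...) returns true.
  @PreAuthorize("@securityService.authorize('WRITE', #region, #keys)")
  public void deleteKeys(String region, String[] keys) {
    // delete the given keys from the region
  }
}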


[25/50] [abbrv] incubator-geode git commit: GEODE-2007: fix unchecked warnings

Posted by kl...@apache.org.
GEODE-2007: fix unchecked warnings


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/a53c4b15
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/a53c4b15
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/a53c4b15

Branch: refs/heads/feature/GEODE-1930
Commit: a53c4b1509867a1847b8a21ef8b926911f874bb1
Parents: 7330733
Author: Kirk Lund <kl...@apache.org>
Authored: Mon Oct 17 12:49:00 2016 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Mon Oct 17 16:30:19 2016 -0700

----------------------------------------------------------------------
 .../apache/geode/modules/session/catalina/DeltaSession7.java  | 3 +--
 .../modules/session/Tomcat8SessionsClientServerDUnitTest.java | 3 ++-
 .../org/apache/geode/internal/cache/GemFireCacheImpl.java     | 2 +-
 .../org/apache/geode/internal/cache/ha/HARegionQueue.java     | 7 ++++---
 .../cache/tier/sockets/DeltaPropagationWithCQDUnitTest.java   | 4 ++--
 .../test/java/org/apache/geode/OldClientSupportDUnitTest.java | 4 ++--
 6 files changed, 12 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a53c4b15/extensions/geode-modules-tomcat7/src/main/java/org/apache/geode/modules/session/catalina/DeltaSession7.java
----------------------------------------------------------------------
diff --git a/extensions/geode-modules-tomcat7/src/main/java/org/apache/geode/modules/session/catalina/DeltaSession7.java b/extensions/geode-modules-tomcat7/src/main/java/org/apache/geode/modules/session/catalina/DeltaSession7.java
index 0df05ff..c0dfe23 100644
--- a/extensions/geode-modules-tomcat7/src/main/java/org/apache/geode/modules/session/catalina/DeltaSession7.java
+++ b/extensions/geode-modules-tomcat7/src/main/java/org/apache/geode/modules/session/catalina/DeltaSession7.java
@@ -535,8 +535,7 @@ public class DeltaSession7 extends StandardSession implements DataSerializable,
     }
   }
 
-  @SuppressWarnings({"unchecked", "rawtypes"})
-  protected ConcurrentMap readInAttributes(final DataInput in) throws IOException, ClassNotFoundException {
+  protected ConcurrentMap<String, Object> readInAttributes(final DataInput in) throws IOException, ClassNotFoundException {
     return DataSerializer.readObject(in);
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a53c4b15/extensions/geode-modules-tomcat8/src/test/java/org/apache/geode/modules/session/Tomcat8SessionsClientServerDUnitTest.java
----------------------------------------------------------------------
diff --git a/extensions/geode-modules-tomcat8/src/test/java/org/apache/geode/modules/session/Tomcat8SessionsClientServerDUnitTest.java b/extensions/geode-modules-tomcat8/src/test/java/org/apache/geode/modules/session/Tomcat8SessionsClientServerDUnitTest.java
index 8b29048..384689e 100644
--- a/extensions/geode-modules-tomcat8/src/test/java/org/apache/geode/modules/session/Tomcat8SessionsClientServerDUnitTest.java
+++ b/extensions/geode-modules-tomcat8/src/test/java/org/apache/geode/modules/session/Tomcat8SessionsClientServerDUnitTest.java
@@ -19,6 +19,7 @@ package org.apache.geode.modules.session;
 import static org.apache.geode.distributed.ConfigurationProperties.*;
 import static org.apache.geode.internal.cache.CacheServerLauncher.serverPort;
 
+import java.util.List;
 import java.util.Properties;
 
 import org.junit.experimental.categories.Category;
@@ -49,7 +50,7 @@ public class Tomcat8SessionsClientServerDUnitTest extends TestSessionsTomcat8Bas
   @Override
   public void preTearDown() {
     vm0.invoke(() -> {
-      GemFireCacheImpl.getInstance().getCacheServers().forEach(e -> ((CacheServer)e).stop());
+      (GemFireCacheImpl.getInstance().getCacheServers()).forEach(cacheServer -> cacheServer.stop());
     });
     server.stopContainer();
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a53c4b15/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java
index 6c195e7..f673ded 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java
@@ -4015,7 +4015,7 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
     }
   }
 
-  public List getCacheServers() {
+  public List<CacheServer> getCacheServers() {
     List cacheServersWithoutReceiver = null;
     if (!allCacheServers.isEmpty()) {
     Iterator allCacheServersIterator = allCacheServers.iterator();
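
With getCacheServers() now declared to return List<CacheServer>, callers can iterate without raw types or casts. A small illustrative fragment (not from this commit), assuming a cache with servers is already running in the JVM:

import java.util.List;

import org.apache.geode.cache.server.CacheServer;
import org.apache.geode.internal.cache.GemFireCacheImpl;

public class ListCacheServersSketch {
  public static void main(String[] args) {
    // the typed return value removes the unchecked casts flagged by GEODE-2007
    List<CacheServer> servers = GemFireCacheImpl.getInstance().getCacheServers();
    for (CacheServer server : servers) {
      System.out.println("cache server listening on port " + server.getPort());
    }
  }
}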

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a53c4b15/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java b/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java
index 027a1b5..f2b1185 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java
@@ -22,6 +22,7 @@ import org.apache.geode.cache.TimeoutException;
 import org.apache.geode.cache.query.internal.CqQueryVsdStats;
 import org.apache.geode.cache.query.internal.cq.CqService;
 import org.apache.geode.cache.query.internal.cq.InternalCqQuery;
+import org.apache.geode.cache.server.CacheServer;
 import org.apache.geode.cache.util.CacheListenerAdapter;
 import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.distributed.internal.DM;
@@ -2987,10 +2988,10 @@ protected boolean checkEventForRemoval(Long counter, ThreadIdentifier threadid,
                 && !queueRemovalMessageList.isEmpty()) { // messages exist
               QueueRemovalMessage qrm = new QueueRemovalMessage();
               qrm.resetRecipients();
-              List<CacheServerImpl> servers = this.cache.getCacheServers();
+              List<CacheServer> servers = this.cache.getCacheServers();
               List<DistributedMember> recipients = new LinkedList();
-              for (CacheServerImpl server: servers) {
-                recipients.addAll(server.getCacheServerAdvisor().adviseBridgeServers());
+              for (CacheServer server: servers) {
+                recipients.addAll(CacheServerImpl.class.cast(server).getCacheServerAdvisor().adviseBridgeServers());
               }
               qrm.setRecipients(recipients);
               qrm.setMessagesList(queueRemovalMessageList);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a53c4b15/geode-cq/src/test/java/org/apache/geode/internal/cache/tier/sockets/DeltaPropagationWithCQDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-cq/src/test/java/org/apache/geode/internal/cache/tier/sockets/DeltaPropagationWithCQDUnitTest.java b/geode-cq/src/test/java/org/apache/geode/internal/cache/tier/sockets/DeltaPropagationWithCQDUnitTest.java
index 3990265..119b382 100644
--- a/geode-cq/src/test/java/org/apache/geode/internal/cache/tier/sockets/DeltaPropagationWithCQDUnitTest.java
+++ b/geode-cq/src/test/java/org/apache/geode/internal/cache/tier/sockets/DeltaPropagationWithCQDUnitTest.java
@@ -200,10 +200,10 @@ public class DeltaPropagationWithCQDUnitTest extends JUnit4DistributedTestCase {
 
   public static void verifyFullValueRequestsFromClients(Long expected)
       throws Exception {
-    List<CacheServerImpl> servers = ((GemFireCacheImpl)cache).getCacheServers();
+    List<CacheServer> servers = ((GemFireCacheImpl)cache).getCacheServers();
     assertEquals("expected one server but found these: " + servers, 1, servers.size());
 
-    CacheClientProxy[] proxies = servers.get(0).getAcceptor().getCacheClientNotifier()
+    CacheClientProxy[] proxies = CacheServerImpl.class.cast(servers.get(0)).getAcceptor().getCacheClientNotifier()
         .getClientProxies().toArray(new CacheClientProxy[0]);
     
     // find the proxy for the client that processed the CQs - it will have

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a53c4b15/geode-old-client-support/src/test/java/org/apache/geode/OldClientSupportDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-old-client-support/src/test/java/org/apache/geode/OldClientSupportDUnitTest.java b/geode-old-client-support/src/test/java/org/apache/geode/OldClientSupportDUnitTest.java
index 0b48e16..36d13b5 100644
--- a/geode-old-client-support/src/test/java/org/apache/geode/OldClientSupportDUnitTest.java
+++ b/geode-old-client-support/src/test/java/org/apache/geode/OldClientSupportDUnitTest.java
@@ -102,8 +102,8 @@ public class OldClientSupportDUnitTest extends JUnit4CacheTestCase {
     }
   }
   
-  private Object instantiate(Class aClass) throws Exception {
-    Constructor c = null;
+  private Object instantiate(Class<?> aClass) throws Exception {
+    Constructor<?> c = null;
     try {
       c = aClass.getConstructor();
       return c.newInstance();


[31/50] [abbrv] incubator-geode git commit: GEODE-388: Deprecating DynamicRegionFactory

Posted by kl...@apache.org.
GEODE-388: Deprecating DynamicRegionFactory

Marking DynamicRegionFactory as deprecated.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/ad43d447
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/ad43d447
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/ad43d447

Branch: refs/heads/feature/GEODE-1930
Commit: ad43d4472c3bc9bb4500b34e2aec58f7fcb406d3
Parents: bc7a675
Author: Dan Smith <up...@apache.org>
Authored: Mon Oct 17 16:28:51 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Wed Oct 19 10:19:16 2016 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/geode/cache/DynamicRegionFactory.java   | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ad43d447/geode-core/src/main/java/org/apache/geode/cache/DynamicRegionFactory.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/cache/DynamicRegionFactory.java b/geode-core/src/main/java/org/apache/geode/cache/DynamicRegionFactory.java
index 3cfa73b..a4d84a6 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/DynamicRegionFactory.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/DynamicRegionFactory.java
@@ -31,6 +31,7 @@ import org.apache.geode.SystemFailure;
 import org.apache.geode.cache.client.Pool;
 import org.apache.geode.cache.client.PoolManager;
 import org.apache.geode.cache.client.internal.ServerRegionProxy;
+import org.apache.geode.cache.execute.FunctionService;
 import org.apache.geode.cache.wan.GatewaySender;
 import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.distributed.internal.InternalDistributedSystem;
@@ -126,9 +127,12 @@ import org.apache.geode.security.GemFireSecurityException;
  * not directly access this Region; instead use the methods on this factory.
  * </ul>
  * @since GemFire 4.3
+ * @deprecated This class is deprecated. Use {@link FunctionService} to create regions on
+ * other members instead.
  *
  */
 @SuppressWarnings("deprecation")
+@Deprecated
 public abstract class DynamicRegionFactory  {
 
   public static final String dynamicRegionListName = "__DynamicRegions";
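
The new @deprecated tag points at FunctionService as the replacement. A rough sketch of that approach, creating a region on whichever members execute the function; the class name, region shortcut and argument handling are illustrative choices, not part of this commit:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.FunctionContext;

public class CreateRegionFunction implements Function {

  @Override
  public void execute(FunctionContext context) {
    String regionName = (String) context.getArguments();
    Cache cache = CacheFactory.getAnyInstance();
    if (cache.getRegion(regionName) == null) {
      // each member that runs the function creates the region locally
      cache.createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
    }
    context.getResultSender().lastResult(Boolean.TRUE);
  }

  @Override
  public String getId() {
    return CreateRegionFunction.class.getName();
  }

  @Override
  public boolean hasResult() {
    return true;
  }

  @Override
  public boolean optimizeForWrite() {
    return false;
  }

  @Override
  public boolean isHA() {
    return false;
  }
}

// Caller side (sketch): FunctionService.onMembers().withArgs("exampleRegion")
//     .execute(new CreateRegionFunction()).getResult();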


[32/50] [abbrv] incubator-geode git commit: GEODE-1983: Swagger is broken with integrated security

Posted by kl...@apache.org.
GEODE-1983: Swagger is broken with integrated security

* This required a simple configuration change.
* This closes #263


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/7511ffac
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/7511ffac
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/7511ffac

Branch: refs/heads/feature/GEODE-1930
Commit: 7511ffac940ef63850c5a3f83857efbae3ac3b18
Parents: ad43d44
Author: Kevin Duling <kd...@pivotal.io>
Authored: Tue Oct 18 10:33:37 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Wed Oct 19 12:36:34 2016 -0700

----------------------------------------------------------------------
 .../internal/web/security/RestSecurityConfiguration.java     | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7511ffac/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/security/RestSecurityConfiguration.java
----------------------------------------------------------------------
diff --git a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/security/RestSecurityConfiguration.java b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/security/RestSecurityConfiguration.java
index f3b5c4d..4550ff2 100644
--- a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/security/RestSecurityConfiguration.java
+++ b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/security/RestSecurityConfiguration.java
@@ -17,8 +17,6 @@
  */
 package org.apache.geode.rest.internal.web.security;
 
-import org.apache.geode.internal.security.IntegratedSecurityService;
-import org.apache.geode.internal.security.SecurityService;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.ComponentScan;
@@ -31,6 +29,9 @@ import org.springframework.security.config.annotation.web.configuration.EnableWe
 import org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter;
 import org.springframework.security.config.http.SessionCreationPolicy;
 
+import org.apache.geode.internal.security.IntegratedSecurityService;
+import org.apache.geode.internal.security.SecurityService;
+
 @Configuration
 @EnableWebSecurity
 @EnableGlobalMethodSecurity(prePostEnabled = true)
@@ -56,8 +57,7 @@ public class RestSecurityConfiguration extends WebSecurityConfigurerAdapter {
   protected void configure(HttpSecurity http) throws Exception {
     http.sessionManagement().sessionCreationPolicy(SessionCreationPolicy.STATELESS)
         .and()
-        .authorizeRequests()
-        .antMatchers("/ping").permitAll()
+        .authorizeRequests().antMatchers("/ping", "/api-docs/**", "/docs/**").permitAll()
         .anyRequest().authenticated()
         .and()
         .formLogin()


[17/50] [abbrv] incubator-geode git commit: Merge remote-tracking branch 'origin/develop' into develop

Posted by kl...@apache.org.
Merge remote-tracking branch 'origin/develop' into develop


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/3068fb69
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/3068fb69
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/3068fb69

Branch: refs/heads/feature/GEODE-1930
Commit: 3068fb6946795f9f9790d0fb29d7c5f8068f99ac
Parents: 40c1917 de62159
Author: Dan Smith <up...@apache.org>
Authored: Fri Oct 14 16:21:32 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Fri Oct 14 16:21:32 2016 -0700

----------------------------------------------------------------------
 .gitignore                                      |    3 +
 .../internal/web/RestSecurityDUnitTest.java     |  180 ---
 .../web/RestSecurityEndpointsDUnitTest.java     |  422 -------
 .../web/RestSecurityIntegrationTest.java        |  497 +++++++++
 .../client/internal/ExecuteFunctionOp.java      |    4 +-
 .../ExecuteRegionFunctionSingleHopOp.java       |    2 -
 .../internal/index/CompactMapRangeIndex.java    |    9 +-
 .../internal/InternalDistributedSystem.java     |   68 +-
 .../membership/InternalDistributedMember.java   |    7 +-
 .../cache/AbstractBucketRegionQueue.java        |   34 +-
 .../geode/internal/cache/BucketRegionQueue.java |   59 +-
 .../geode/internal/cache/GemFireCacheImpl.java  |    6 +-
 .../cache/execute/ServerFunctionExecutor.java   |    5 +-
 .../execute/ServerRegionFunctionExecutor.java   |    3 +-
 .../util/SynchronizedResultCollector.java       |   57 +
 .../internal/cache/tier/sockets/HandShake.java  |    8 +-
 .../parallel/ParallelGatewaySenderQueue.java    |   35 +-
 .../wan/serial/SerialGatewaySenderQueue.java    |   26 +-
 .../security/IntegratedSecurityService.java     |   29 +-
 .../internal/security/SecurityService.java      |    7 +-
 .../geode/security/ResourcePermission.java      |    4 +-
 .../AutoConnectionSourceImplJUnitTest.java      |   27 +
 .../MapRangeIndexMaintenanceJUnitTest.java      |  617 ++++++++++-
 .../cache/execute/FunctionServiceBase.java      |    1 +
 .../ha/BlockingHARegionQueueJUnitTest.java      |  182 ++-
 .../internal/cache/ha/Bug48571DUnitTest.java    |   13 +-
 .../cache/ha/HARegionQueueJUnitTest.java        |  252 ++---
 .../tier/sockets/HAInterestPart2DUnitTest.java  |    3 +-
 .../ParallelGatewaySenderQueueJUnitTest.java    |  134 ++-
 .../management/ClientHealthStatsDUnitTest.java  |    2 +
 .../cli/commands/QueueCommandsDUnitTest.java    |    1 +
 .../SharedConfigurationUsingDirDUnitTest.java   |    1 +
 .../security/AccessControlMBeanJUnitTest.java   |    7 +-
 ...CacheServerMBeanAuthenticationJUnitTest.java |    7 +-
 .../CacheServerMBeanAuthorizationJUnitTest.java |   13 +-
 .../CacheServerMBeanShiroJUnitTest.java         |   30 +-
 .../security/CacheServerStartupRule.java        |   74 ++
 .../security/CliCommandsSecurityTest.java       |    9 +-
 .../security/DataCommandsSecurityTest.java      |   11 +-
 .../DiskStoreMXBeanSecurityJUnitTest.java       |    9 +-
 .../GatewayReceiverMBeanSecurityTest.java       |    9 +-
 .../GatewaySenderMBeanSecurityTest.java         |    9 +-
 .../security/GfshCommandsPostProcessorTest.java |   30 +-
 .../security/GfshCommandsSecurityTest.java      |   48 +-
 .../security/GfshShellConnectionRule.java       |    5 +-
 .../security/JMXConnectionConfiguration.java    |   33 -
 .../security/JavaRmiServerNameTest.java         |   28 +-
 .../JsonAuthorizationCacheStartRule.java        |   86 --
 .../LockServiceMBeanAuthorizationJUnitTest.java |   11 +-
 .../security/MBeanSecurityJUnitTest.java        |   10 +-
 .../security/MBeanServerConnectionRule.java     |  130 ---
 .../ManagerMBeanAuthorizationJUnitTest.java     |    9 +-
 .../security/MemberMBeanSecurityJUnitTest.java  |   13 +-
 .../security/ResourcePermissionTest.java        |   25 +-
 .../internal/security/ShiroCacheStartRule.java  |   64 --
 .../security/AbstractSecureServerDUnitTest.java |  104 +-
 ...lusterConfigWithEmbededLocatorDUnitTest.java |   67 ++
 .../ClusterConfigWithoutSecurityDUnitTest.java  |  100 ++
 .../security/IntegratedClientAuthDUnitTest.java |   19 +-
 .../NoShowValue1PostProcessorDUnitTest.java     |    8 +-
 .../security/PDXPostProcessorDUnitTest.java     |   18 +-
 .../geode/security/PostProcessorDUnitTest.java  |   10 +-
 .../SecurityClusterConfigDUnitTest.java         |  134 +--
 .../SecurityWithoutClusterConfigDUnitTest.java  |  100 +-
 .../security/StartServerAuthorizationTest.java  |   72 +-
 .../dunit/rules/ConnectionConfiguration.java    |   34 +
 .../rules/LocatorServerConfigurationRule.java   |  141 ---
 .../dunit/rules/LocatorServerStartupRule.java   |  133 +++
 .../geode/test/dunit/rules/LocatorStarter.java  |   74 ++
 .../dunit/rules/MBeanServerConnectionRule.java  |  132 +++
 .../geode/test/dunit/rules/ServerStarter.java   |   99 ++
 .../geode/security/CQClientAuthDunitTest.java   |    6 +-
 .../security/CQPDXPostProcessorDUnitTest.java   |   19 +-
 .../security/CQPostProcessorDunitTest.java      |   10 +-
 geode-junit/build.gradle                        |    5 +-
 .../test/junit/rules/TemporaryFileRule.java     |  111 ++
 .../test/junit/rules/TemporaryFileRuleTest.java |  130 +++
 .../LuceneClusterConfigurationDUnitTest.java    |   47 +-
 geode-pulse/build.gradle                        |    6 +-
 .../src/main/webapp/WEB-INF/spring-security.xml |    6 +-
 .../tools/pulse/testbed/driver/PulseUITest.java |   13 +-
 .../pulse/tests/DataBrowserResultLoader.java    |    2 +
 .../tools/pulse/tests/PulseAbstractTest.java    | 1048 ------------------
 .../geode/tools/pulse/tests/PulseAuthTest.java  |   33 -
 .../tools/pulse/tests/PulseAutomatedTest.java   |  784 -------------
 .../geode/tools/pulse/tests/PulseBaseTest.java  |  693 ------------
 .../tools/pulse/tests/PulseNoAuthTest.java      |   33 -
 .../tools/pulse/tests/ui/PulseAbstractTest.java |  985 ++++++++++++++++
 .../pulse/tests/ui/PulseAnonymousUserTest.java  |  149 +++
 .../tools/pulse/tests/ui/PulseAuthTest.java     |   34 +
 .../pulse/tests/ui/PulseAutomatedTest.java      |  768 +++++++++++++
 .../tools/pulse/tests/ui/PulseBaseTest.java     |  697 ++++++++++++
 .../tools/pulse/tests/ui/PulseNoAuthTest.java   |   34 +
 ...oncurrentParallelGatewaySenderDUnitTest.java |    1 +
 .../ConcurrentWANPropagation_1_DUnitTest.java   |   37 +-
 .../ParallelWANPropagationDUnitTest.java        |    1 -
 .../web/controllers/AbstractBaseController.java |   32 +-
 .../web/controllers/BaseControllerAdvice.java   |   43 +-
 .../web/controllers/CommonCrudController.java   |   55 +-
 .../controllers/FunctionAccessController.java   |   56 +-
 .../web/controllers/PdxBasedCrudController.java |   53 +-
 .../web/controllers/QueryAccessController.java  |   73 +-
 .../web/security/GeodeAuthentication.java       |   37 -
 .../security/GeodeAuthenticationProvider.java   |   21 +-
 .../internal/web/security/GeodeAuthority.java   |   47 -
 .../web/security/RestSecurityService.java       |   56 +
 gradle/dependency-versions.properties           |    1 +
 107 files changed, 5931 insertions(+), 4808 deletions(-)
----------------------------------------------------------------------



[34/50] [abbrv] incubator-geode git commit: GEODE-1959: prompt for password when starting a server if username is specified

Posted by kl...@apache.org.
GEODE-1959: prompt for password when starting a server if username is specified


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/b2e77685
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/b2e77685
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/b2e77685

Branch: refs/heads/feature/GEODE-1930
Commit: b2e77685907c51b1af346e6f8b8da3f5b598b361
Parents: 11ef3eb
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Tue Oct 18 09:13:26 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Wed Oct 19 21:16:23 2016 -0700

----------------------------------------------------------------------
 .../LauncherLifecycleCommandsDUnitTest.java     | 22 ++++++
 .../geode/distributed/ServerLauncher.java       | 78 ++++++++++++++------
 .../membership/gms/membership/GMSJoinLeave.java |  7 +-
 .../cli/commands/LauncherLifecycleCommands.java | 29 +++++++-
 .../internal/cli/commands/ShellCommands.java    | 48 +++---------
 .../internal/cli/i18n/CliStrings.java           | 13 +++-
 .../management/internal/cli/shell/Gfsh.java     | 41 ++++++----
 .../cli/commands/golden-help-offline.properties |  9 +++
 8 files changed, 166 insertions(+), 81 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b2e77685/geode-assembly/src/test/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommandsDUnitTest.java b/geode-assembly/src/test/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommandsDUnitTest.java
index 490e309..933d152 100644
--- a/geode-assembly/src/test/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommandsDUnitTest.java
+++ b/geode-assembly/src/test/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommandsDUnitTest.java
@@ -437,6 +437,28 @@ public class LauncherLifecycleCommandsDUnitTest extends CliCommandTestBase {
   }
 
   @Test
+  public void testStartServerFailsFastOnMissingPassword() throws IOException {
+
+    CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_SERVER);
+
+    String pathName = getClass().getSimpleName().concat("_").concat(getTestMethodName());
+    final File workingDirectory = temporaryFolder.newFolder(pathName);
+
+    command.addOption(CliStrings.START_SERVER__NAME, pathName);
+    command.addOption(CliStrings.START_SERVER__DIR, workingDirectory.getCanonicalPath());
+    command.addOption(CliStrings.START_SERVER__USERNAME, "test");
+
+    CommandResult result = executeCommand(command.toString());
+
+    assertNotNull(result);
+    assertEquals(Result.Status.ERROR, result.getStatus());
+
+    String resultString = toString(result);
+
+    assertTrue(resultString, resultString.contains("password must be specified"));
+  }
+
+  @Test
   public void test005StartServerFailsFastOnMissingGemFireSecurityPropertiesFile() throws IOException {
     String gemfireSecuritiesPropertiesFile = "/path/to/missing/gemfire-securities.properties";
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b2e77685/geode-core/src/main/java/org/apache/geode/distributed/ServerLauncher.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/ServerLauncher.java b/geode-core/src/main/java/org/apache/geode/distributed/ServerLauncher.java
index a3d3845..088b670 100755
--- a/geode-core/src/main/java/org/apache/geode/distributed/ServerLauncher.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/ServerLauncher.java
@@ -19,6 +19,32 @@ package org.apache.geode.distributed;
 
 import static org.apache.geode.distributed.ConfigurationProperties.*;
 
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.ServiceLoader;
+import java.util.TreeMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+
+import joptsimple.OptionException;
+import joptsimple.OptionParser;
+import joptsimple.OptionSet;
+
 import org.apache.geode.SystemFailure;
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.partition.PartitionRegionHelper;
@@ -27,14 +53,31 @@ import org.apache.geode.distributed.internal.DefaultServerLauncherCacheProvider;
 import org.apache.geode.distributed.internal.DistributionConfig;
 import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.internal.GemFireVersion;
-import org.apache.geode.internal.net.SocketCreator;
-import org.apache.geode.internal.cache.*;
+import org.apache.geode.internal.cache.AbstractCacheServer;
+import org.apache.geode.internal.cache.CacheConfig;
+import org.apache.geode.internal.cache.CacheServerLauncher;
+import org.apache.geode.internal.cache.GemFireCacheImpl;
+import org.apache.geode.internal.cache.PartitionedRegion;
 import org.apache.geode.internal.cache.tier.sockets.CacheServerHelper;
 import org.apache.geode.internal.i18n.LocalizedStrings;
 import org.apache.geode.internal.lang.ObjectUtils;
 import org.apache.geode.internal.lang.StringUtils;
 import org.apache.geode.internal.lang.SystemUtils;
-import org.apache.geode.internal.process.*;
+import org.apache.geode.internal.net.SocketCreator;
+import org.apache.geode.internal.process.ClusterConfigurationNotAvailableException;
+import org.apache.geode.internal.process.ConnectionFailedException;
+import org.apache.geode.internal.process.ControlNotificationHandler;
+import org.apache.geode.internal.process.ControllableProcess;
+import org.apache.geode.internal.process.FileAlreadyExistsException;
+import org.apache.geode.internal.process.MBeanInvocationFailedException;
+import org.apache.geode.internal.process.PidUnavailableException;
+import org.apache.geode.internal.process.ProcessController;
+import org.apache.geode.internal.process.ProcessControllerFactory;
+import org.apache.geode.internal.process.ProcessControllerParameters;
+import org.apache.geode.internal.process.ProcessLauncherContext;
+import org.apache.geode.internal.process.ProcessType;
+import org.apache.geode.internal.process.StartupStatusListener;
+import org.apache.geode.internal.process.UnableToControlProcessException;
 import org.apache.geode.internal.util.IOUtils;
 import org.apache.geode.lang.AttachAPINotFoundException;
 import org.apache.geode.management.internal.cli.i18n.CliStrings;
@@ -42,25 +85,8 @@ import org.apache.geode.management.internal.cli.json.GfJsonArray;
 import org.apache.geode.management.internal.cli.json.GfJsonException;
 import org.apache.geode.management.internal.cli.json.GfJsonObject;
 import org.apache.geode.pdx.PdxSerializer;
-import joptsimple.OptionException;
-import joptsimple.OptionParser;
-import joptsimple.OptionSet;
-
-import javax.management.MalformedObjectNameException;
-import javax.management.ObjectName;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.*;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
-
-import static org.apache.geode.distributed.ConfigurationProperties.SERVER_BIND_ADDRESS;
+import org.apache.geode.security.AuthenticationRequiredException;
+import org.apache.geode.security.GemFireSecurityException;
 
 /**
  * The ServerLauncher class is a launcher class with main method to start a GemFire Server (implying a GemFire Cache
@@ -730,6 +756,14 @@ public class ServerLauncher extends AbstractLauncher<String> {
 
         return new ServerState(this, Status.ONLINE);
       }
+      catch(AuthenticationRequiredException e){
+        failOnStart(e);
+        throw new AuthenticationRequiredException("user/password required. Please start your server with --user and --password. "+ e.getMessage());
+      }
+      catch(GemFireSecurityException e){
+        failOnStart(e);
+        throw new GemFireSecurityException(e.getMessage());
+      }
       catch (IOException e) {
         failOnStart(e);
         throw new RuntimeException(LocalizedStrings.Launcher_Command_START_IO_ERROR_MESSAGE.toLocalizedString(

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b2e77685/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/gms/membership/GMSJoinLeave.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/gms/membership/GMSJoinLeave.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/gms/membership/GMSJoinLeave.java
index 89a9a37..f5198bb 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/gms/membership/GMSJoinLeave.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/gms/membership/GMSJoinLeave.java
@@ -41,6 +41,8 @@ import java.util.concurrent.Future;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.logging.log4j.Logger;
+
 import org.apache.geode.GemFireConfigException;
 import org.apache.geode.SystemConnectException;
 import org.apache.geode.distributed.DistributedMember;
@@ -70,8 +72,8 @@ import org.apache.geode.distributed.internal.membership.gms.messages.ViewAckMess
 import org.apache.geode.distributed.internal.tcpserver.TcpClient;
 import org.apache.geode.internal.Version;
 import org.apache.geode.internal.i18n.LocalizedStrings;
+import org.apache.geode.security.AuthenticationRequiredException;
 import org.apache.geode.security.GemFireSecurityException;
-import org.apache.logging.log4j.Logger;
 
 /**
  * GMSJoinLeave handles membership communication with other processes in the
@@ -413,6 +415,9 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
           || failReason.contains("15806")) {
         throw new SystemConnectException(failReason);
       }
+      else if(failReason.contains("Failed to find credentials")){
+        throw new AuthenticationRequiredException(failReason);
+      }
       throw new GemFireSecurityException(failReason);
     }
     

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b2e77685/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommands.java
index 4ffe082..892a92d 100755
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommands.java
@@ -17,6 +17,7 @@
 package org.apache.geode.management.internal.cli.commands;
 
 import static org.apache.geode.distributed.ConfigurationProperties.*;
+import static org.apache.geode.management.internal.cli.i18n.CliStrings.START_SERVER__PASSWORD;
 
 import java.awt.Desktop;
 import java.io.BufferedReader;
@@ -115,6 +116,7 @@ import org.apache.geode.management.internal.cli.util.VisualVmNotFoundException;
 import org.apache.geode.management.internal.configuration.domain.SharedConfigurationStatus;
 import org.apache.geode.management.internal.configuration.messages.SharedConfigurationStatusRequest;
 import org.apache.geode.management.internal.configuration.messages.SharedConfigurationStatusResponse;
+import org.apache.geode.management.internal.security.ResourceConstants;
 import org.apache.geode.security.AuthenticationFailedException;
 
 /**
@@ -1498,11 +1500,28 @@ public class LauncherLifecycleCommands extends AbstractCommandsSupport {
           @CliOption(key = CliStrings.START_SERVER__HTTP_SERVICE_BIND_ADDRESS,
            unspecifiedDefaultValue = CacheServer.HTTP_SERVICE_DEFAULT_BIND_ADDRESS,
            help = CliStrings.START_SERVER__HTTP_SERVICE_BIND_ADDRESS__HELP)
-           final String httpServiceBindAddress)
+      final String httpServiceBindAddress,
+          @CliOption(key = CliStrings.START_SERVER__USERNAME,
+           unspecifiedDefaultValue = "",
+           help = CliStrings.START_SERVER__USERNAME__HELP)
+      final String userName,
+          @CliOption(key = START_SERVER__PASSWORD,
+           unspecifiedDefaultValue = "",
+           help = CliStrings.START_SERVER__PASSWORD__HELP)
+      String passwordToUse)
   // NOTICE: keep the parameters in alphabetical order based on their CliStrings.START_SERVER_* text
   {
-
     try {
+      // prompt for password if username is specified in the command
+      if (!StringUtils.isBlank(userName)) {
+        if (StringUtils.isBlank(passwordToUse)) {
+          passwordToUse = getGfsh().readPassword(START_SERVER__PASSWORD + ": ");
+        }
+        if (StringUtils.isBlank(passwordToUse)) {
+          return ResultBuilder.createConnectionErrorResult(CliStrings.START_SERVER__MSG__PASSWORD_MUST_BE_SPECIFIED);
+        }
+      }
+
       if (workingDirectory == null) {
         // attempt to use or make sub-directory using memberName...
         File serverWorkingDirectory = new File(memberName);
@@ -1560,10 +1579,14 @@ public class LauncherLifecycleCommands extends AbstractCommandsSupport {
       gemfireProperties.setProperty(USE_CLUSTER_CONFIGURATION, StringUtils.valueOf(requestSharedConfiguration, Boolean.TRUE.toString()));
       gemfireProperties.setProperty(LOCK_MEMORY, StringUtils.valueOf(lockMemory, StringUtils.EMPTY_STRING));
       gemfireProperties.setProperty(OFF_HEAP_MEMORY_SIZE, StringUtils.valueOf(offHeapMemorySize, StringUtils.EMPTY_STRING));
-
       gemfireProperties.setProperty(START_DEV_REST_API, StringUtils.valueOf(startRestApi, StringUtils.EMPTY_STRING));
       gemfireProperties.setProperty(HTTP_SERVICE_PORT,  StringUtils.valueOf(httpServicePort, StringUtils.EMPTY_STRING));
       gemfireProperties.setProperty(HTTP_SERVICE_BIND_ADDRESS,  StringUtils.valueOf(httpServiceBindAddress, StringUtils.EMPTY_STRING));
+      // if username is specified in the command line, it will overwrite what's set in the properties file
+      if(!StringUtils.isBlank(userName)){
+        gemfireProperties.setProperty(ResourceConstants.USER_NAME, userName);
+        gemfireProperties.setProperty(ResourceConstants.PASSWORD, passwordToUse);
+      }
 
 
       // read the OSProcess enable redirect system property here -- TODO: replace with new GFSH argument

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b2e77685/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ShellCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ShellCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ShellCommands.java
index 792a8ab..ee09167 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ShellCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ShellCommands.java
@@ -88,14 +88,6 @@ import org.apache.geode.management.internal.web.shell.HttpOperationInvoker;
 import org.apache.geode.management.internal.web.shell.RestHttpOperationInvoker;
 import org.apache.geode.security.AuthenticationFailedException;
 
-import org.springframework.shell.core.CommandMarker;
-import org.springframework.shell.core.ExitShellRequest;
-import org.springframework.shell.core.annotation.CliAvailabilityIndicator;
-import org.springframework.shell.core.annotation.CliCommand;
-import org.springframework.shell.core.annotation.CliOption;
-
-import static org.apache.geode.distributed.ConfigurationProperties.*;
-
 /**
  *
  * @since GemFire 7.0
@@ -200,7 +192,7 @@ public class ShellCommands implements CommandMarker {
     try {
       if (userName != null && userName.length() > 0) {
         if (passwordToUse == null || passwordToUse.length() == 0) {
-          passwordToUse = this.readPassword(gfsh, "password: ");
+          passwordToUse = gfsh.readPassword(CliStrings.CONNECT__PASSWORD + ": ");
         }
         if (passwordToUse == null || passwordToUse.length() == 0) {
           return ResultBuilder.createConnectionErrorResult(CliStrings.CONNECT__MSG__JMX_PASSWORD_MUST_BE_SPECIFIED);
@@ -282,8 +274,8 @@ public class ShellCommands implements CommandMarker {
 
       // otherwise, prompt for username and password and retry the connection
       try {
-        userName = this.readText(gfsh, "username: ");
-        passwordToUse = this.readPassword(gfsh, "password: ");
+        userName = gfsh.readText(CliStrings.CONNECT__USERNAME + ": ");
+        passwordToUse = gfsh.readPassword(CliStrings.CONNECT__PASSWORD + ": ");
         return httpConnect(sslConfigProps, useSsl, url, userName, passwordToUse);
       }
       catch (IOException ioe) {
@@ -370,8 +362,8 @@ public class ShellCommands implements CommandMarker {
 
       // otherwise, prompt for username and password and retry the connection
       try {
-        userName = this.readText(gfsh, "username: ");
-        passwordToUse = this.readPassword(gfsh, "password: ");
+        userName = gfsh.readText(CliStrings.CONNECT__USERNAME + ": ");
+        passwordToUse = gfsh.readPassword(CliStrings.CONNECT__PASSWORD + ": ");
         return jmxConnect(sslConfigProps, hostPortToConnect, null, useSsl, userName, passwordToUse, gfSecurityPropertiesPath, true);
       }
       catch (IOException ioe) {
@@ -522,7 +514,7 @@ public class ShellCommands implements CommandMarker {
 
       if (numTimesPrompted > 0) {
         //NOTE: sslConfigProps map was empty
-        keystoreToUse = readText(gfshInstance, CliStrings.CONNECT__KEY_STORE + ": ");
+        keystoreToUse = gfshInstance.readText(CliStrings.CONNECT__KEY_STORE + ": ");
       }
       if (keystoreToUse != null && keystoreToUse.length() > 0) {
         if (keystorePasswordToUse == null || keystorePasswordToUse.length() == 0) {
@@ -530,7 +522,7 @@ public class ShellCommands implements CommandMarker {
           keystorePasswordToUse = sslConfigProps.get(Gfsh.SSL_KEYSTORE_PASSWORD);
           if (keystorePasswordToUse == null || keystorePasswordToUse.length() == 0) {
             // not even in properties file, prompt user for it
-            keystorePasswordToUse = readPassword(gfshInstance, CliStrings.CONNECT__KEY_STORE_PASSWORD + ": ");
+            keystorePasswordToUse = gfshInstance.readPassword(CliStrings.CONNECT__KEY_STORE_PASSWORD + ": ");
             sslConfigProps.put(Gfsh.SSL_KEYSTORE_PASSWORD, keystorePasswordToUse);
           }
         }
@@ -541,7 +533,7 @@ public class ShellCommands implements CommandMarker {
       }
 
       if (numTimesPrompted > 0) {
-        truststoreToUse = readText(gfshInstance, CliStrings.CONNECT__TRUST_STORE + ": ");
+        truststoreToUse = gfshInstance.readText(CliStrings.CONNECT__TRUST_STORE + ": ");
       }
       if (truststoreToUse != null && truststoreToUse.length() > 0) {
         if (truststorePasswordToUse == null || truststorePasswordToUse.length() == 0) {
@@ -549,7 +541,7 @@ public class ShellCommands implements CommandMarker {
           truststorePasswordToUse = sslConfigProps.get(Gfsh.SSL_TRUSTSTORE_PASSWORD);
           if (truststorePasswordToUse == null || truststorePasswordToUse.length() == 0) {
             // not even in properties file, prompt user for it
-            truststorePasswordToUse = readPassword(gfshInstance, CliStrings.CONNECT__TRUST_STORE_PASSWORD + ": ");
+            truststorePasswordToUse = gfshInstance.readPassword(CliStrings.CONNECT__TRUST_STORE_PASSWORD + ": ");
             sslConfigProps.put(Gfsh.SSL_TRUSTSTORE_PASSWORD, truststorePasswordToUse);
           }
         }
@@ -560,7 +552,7 @@ public class ShellCommands implements CommandMarker {
       }
 
       if (numTimesPrompted > 0) {
-        sslCiphersToUse = readText(gfshInstance, CliStrings.CONNECT__SSL_CIPHERS + ": ");
+        sslCiphersToUse = gfshInstance.readText(CliStrings.CONNECT__SSL_CIPHERS + ": ");
       }
       if (sslCiphersToUse != null && sslCiphersToUse.length() > 0) {
         //sslConfigProps.put(DistributionConfig.CLUSTER_SSL_CIPHERS_NAME, sslCiphersToUse);
@@ -568,7 +560,7 @@ public class ShellCommands implements CommandMarker {
       }
 
       if (numTimesPrompted > 0) {
-        sslProtocolsToUse = readText(gfshInstance, CliStrings.CONNECT__SSL_PROTOCOLS + ": ");
+        sslProtocolsToUse = gfshInstance.readText(CliStrings.CONNECT__SSL_PROTOCOLS + ": ");
       }
       if (sslProtocolsToUse != null && sslProtocolsToUse.length() > 0) {
         //sslConfigProps.put(DistributionConfig.CLUSTER_SSL_PROTOCOLS_NAME, sslProtocolsToUse);
@@ -585,24 +577,6 @@ public class ShellCommands implements CommandMarker {
     return CliStrings.format(CliStrings.GFSH__PLEASE_CHECK_LOGS_AT_0, logFilePath);
   }
 
-  private String readText(Gfsh gfsh, String textToPrompt) throws IOException {
-    if (!gfsh.isHeadlessMode() || !gfsh.isQuietMode()) {
-      return gfsh.interact(textToPrompt);
-    }
-    else {
-      return null;
-    }
-  }
-
-  private String readPassword(Gfsh gfsh, String textToPrompt) throws IOException {
-    if (!gfsh.isHeadlessMode() || !gfsh.isQuietMode()) {
-      return gfsh.readWithMask(textToPrompt, '*');
-    }
-    else {
-      return null;
-    }
-  }
-
   /* package-private */
   static Map<String, String> loadPropertiesFromURL(URL gfSecurityPropertiesUrl) {
     Map<String, String> propsMap = Collections.emptyMap();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b2e77685/geode-core/src/main/java/org/apache/geode/management/internal/cli/i18n/CliStrings.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/i18n/CliStrings.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/i18n/CliStrings.java
index 51887cf..0a6330a 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/i18n/CliStrings.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/i18n/CliStrings.java
@@ -16,6 +16,10 @@
  */
 package org.apache.geode.management.internal.cli.i18n;
 
+import static org.apache.geode.distributed.ConfigurationProperties.*;
+
+import java.text.MessageFormat;
+
 import org.apache.geode.cache.PartitionAttributesFactory;
 import org.apache.geode.cache.server.CacheServer;
 import org.apache.geode.distributed.ConfigurationProperties;
@@ -24,10 +28,6 @@ import org.apache.geode.distributed.internal.SharedConfiguration;
 import org.apache.geode.internal.cache.xmlcache.CacheXml;
 import org.apache.geode.management.internal.cli.shell.Gfsh;
 
-import java.text.MessageFormat;
-
-import static org.apache.geode.distributed.ConfigurationProperties.*;
-
 /**-
  *  * Contains 'String' constants used as key to the Localized strings to be used
  * in classes under <code>org.apache.geode.management.internal.cli</code>
@@ -2203,6 +2203,11 @@ public class CliStrings {
   public static final String START_SERVER__HTTP_SERVICE_PORT__HELP = "Port on which HTTP Service will listen on";
   public static final String START_SERVER__HTTP_SERVICE_BIND_ADDRESS = "http-service-bind-address";
   public static final String START_SERVER__HTTP_SERVICE_BIND_ADDRESS__HELP = "The IP address on which the HTTP Service will be bound.  By default, the Server is bound to all local addresses.";
+  public static final String START_SERVER__USERNAME = "user";
+  public static final String START_SERVER__USERNAME__HELP = "User name to securely connect to the cluster. If the --password parameter is not specified then it will be prompted for.";
+  public static final String START_SERVER__PASSWORD = "password";
+  public static final String START_SERVER__PASSWORD__HELP = "Password to securely connect to the cluster.";
+  public static final String START_SERVER__MSG__PASSWORD_MUST_BE_SPECIFIED = "password must be specified.";
   /**
    * Creates a MessageFormat with the given pattern and uses it to format the given argument.
    *

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b2e77685/geode-core/src/main/java/org/apache/geode/management/internal/cli/shell/Gfsh.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/shell/Gfsh.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/shell/Gfsh.java
index 467682d..e729f20 100755
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/shell/Gfsh.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/shell/Gfsh.java
@@ -20,7 +20,6 @@ import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
-import java.io.InputStream;
 import java.io.PrintStream;
 import java.net.URL;
 import java.text.MessageFormat;
@@ -37,6 +36,18 @@ import java.util.logging.Level;
 import java.util.logging.LogManager;
 import java.util.logging.Logger;
 
+import jline.Terminal;
+import jline.console.ConsoleReader;
+import org.springframework.shell.core.AbstractShell;
+import org.springframework.shell.core.CommandMarker;
+import org.springframework.shell.core.Converter;
+import org.springframework.shell.core.ExecutionStrategy;
+import org.springframework.shell.core.ExitShellRequest;
+import org.springframework.shell.core.JLineLogHandler;
+import org.springframework.shell.core.JLineShell;
+import org.springframework.shell.core.Parser;
+import org.springframework.shell.event.ShellStatus.Status;
+
 import org.apache.geode.internal.Banner;
 import org.apache.geode.internal.GemFireVersion;
 import org.apache.geode.internal.lang.ClassUtils;
@@ -62,19 +73,6 @@ import org.apache.geode.management.internal.cli.shell.jline.GfshUnsupportedTermi
 import org.apache.geode.management.internal.cli.shell.unsafe.GfshSignalHandler;
 import org.apache.geode.management.internal.cli.util.CommentSkipHelper;
 
-import org.springframework.shell.core.AbstractShell;
-import org.springframework.shell.core.CommandMarker;
-import org.springframework.shell.core.Converter;
-import org.springframework.shell.core.ExecutionStrategy;
-import org.springframework.shell.core.ExitShellRequest;
-import org.springframework.shell.core.JLineLogHandler;
-import org.springframework.shell.core.JLineShell;
-import org.springframework.shell.core.Parser;
-import org.springframework.shell.event.ShellStatus.Status;
-
-import jline.Terminal;
-import jline.console.ConsoleReader;
-
 /**
  * Extends an interactive shell provided by <a
  * href="https://github.com/SpringSource/spring-shell">Spring Shell</a> library.
@@ -324,6 +322,21 @@ public class Gfsh extends JLineShell {
     return signalHandler;
   }
 
+  public String readPassword(String textToPrompt) throws IOException {
+    if(isHeadlessMode && isQuietMode())
+      return null;
+
+    return readWithMask(textToPrompt, '*');
+  }
+
+  public String readText(String textToPrompt) throws IOException {
+    if(isHeadlessMode && isQuietMode())
+      return null;
+
+    return interact(textToPrompt);
+
+  }
+
   /**
    * Starts this GemFire Shell with console.
    */

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b2e77685/geode-core/src/test/resources/org/apache/geode/management/internal/cli/commands/golden-help-offline.properties
----------------------------------------------------------------------
diff --git a/geode-core/src/test/resources/org/apache/geode/management/internal/cli/commands/golden-help-offline.properties b/geode-core/src/test/resources/org/apache/geode/management/internal/cli/commands/golden-help-offline.properties
index 28083f3..40c28d2 100644
--- a/geode-core/src/test/resources/org/apache/geode/management/internal/cli/commands/golden-help-offline.properties
+++ b/geode-core/src/test/resources/org/apache/geode/management/internal/cli/commands/golden-help-offline.properties
@@ -2522,6 +2522,7 @@ SYNTAX\n\
 \ \ \ \ [--server-port=value] [--socket-buffer-size=value] [--spring-xml-location=value]\n\
 \ \ \ \ [--statistic-archive-file=value] [--use-cluster-configuration(=value)?]\n\
 \ \ \ \ [--start-rest-api(=value)?] [--http-service-port=value] [--http-service-bind-address=value]\n\
+\ \ \ \ [--user=value] [--password=value]\n\
 PARAMETERS\n\
 \ \ \ \ assign-buckets\n\
 \ \ \ \ \ \ \ \ Whether to assign buckets to the partitioned regions of the cache on server start.\n\
@@ -2735,6 +2736,14 @@ PARAMETERS\n\
 \ \ \ \ \ \ \ \ The IP address on which the HTTP Service will be bound.  By default, the Server is bound to\n\
 \ \ \ \ \ \ \ \ all local addresses.\n\
 \ \ \ \ \ \ \ \ Required: false\n\
+\ \ \ \ user\n\
+\ \ \ \ \ \ \ \ User name to securely connect to the cluster. If the --password parameter is not specified\n\
+\ \ \ \ \ \ \ \ then it will be prompted for.\n\
+\ \ \ \ \ \ \ \ Required: false\n\
+\ \ \ \ password\n\
+\ \ \ \ \ \ \ \ Password to securely connect to the cluster.\n\
+\ \ \ \ \ \ \ \ Required: false\n\
+
 
 start-vsd.help=\
 NAME\n\


[30/50] [abbrv] incubator-geode git commit: Adding a docker container to build and view the geode docs

Posted by kl...@apache.org.
Adding a docker container to build and view the geode docs


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/bc7a675a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/bc7a675a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/bc7a675a

Branch: refs/heads/feature/GEODE-1930
Commit: bc7a675a7c2d819c5d99f070d29e485cf1c0ad64
Parents: e130e5b
Author: Dan Smith <up...@apache.org>
Authored: Fri Oct 14 17:54:12 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Wed Oct 19 10:19:16 2016 -0700

----------------------------------------------------------------------
 dev-tools/docker/docs/Dockerfile            | 25 ++++++++++
 dev-tools/docker/docs/build-docs.sh         | 32 +++++++++++++
 dev-tools/docker/docs/build-image-common.sh | 61 ++++++++++++++++++++++++
 dev-tools/docker/docs/view-docs.sh          | 35 ++++++++++++++
 4 files changed, 153 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bc7a675a/dev-tools/docker/docs/Dockerfile
----------------------------------------------------------------------
diff --git a/dev-tools/docker/docs/Dockerfile b/dev-tools/docker/docs/Dockerfile
new file mode 100644
index 0000000..67a31f4
--- /dev/null
+++ b/dev-tools/docker/docs/Dockerfile
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM ruby:2.3.0
+MAINTAINER Geode Community <de...@geode.incubator.apache.org>
+
+LABEL Vendor="Apache Geode (incubating)"
+LABEL version=unstable
+
+ADD Gemfile Gemfile
+ADD Gemfile.lock Gemfile.lock
+RUN bundle install

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bc7a675a/dev-tools/docker/docs/build-docs.sh
----------------------------------------------------------------------
diff --git a/dev-tools/docker/docs/build-docs.sh b/dev-tools/docker/docs/build-docs.sh
new file mode 100755
index 0000000..4b670b0
--- /dev/null
+++ b/dev-tools/docker/docs/build-docs.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e -x -u
+
+. $SCRIPT_DIR/build-image-common.sh
+
+docker run -i -t \
+  --rm=true \
+  -w "/home/${USER_NAME}/incubator-geode/geode-book" \
+  -u "${USER_NAME}" \
+  -v "$PWD:/home/${USER_NAME}/incubator-geode" \
+  -v "/home/${USER_NAME}/.m2:/home/${USER_NAME}/.m2" \
+  ${IMAGE_NAME}-${USER_NAME} \
+  bundle exec bookbinder bind local
+
+popd
+

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bc7a675a/dev-tools/docker/docs/build-image-common.sh
----------------------------------------------------------------------
diff --git a/dev-tools/docker/docs/build-image-common.sh b/dev-tools/docker/docs/build-image-common.sh
new file mode 100644
index 0000000..ff0de73
--- /dev/null
+++ b/dev-tools/docker/docs/build-image-common.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e -x -u
+
+export DOCKER_ENV_VERSION="0.1"
+
+SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+
+#Stupid OSX has a different mktemp command
+TMP_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'geodedocs'`
+
+function cleanup() {
+  rm -rf $TMP_DIR
+}
+
+trap cleanup EXIT
+
+IMAGE_NAME="geode/docsbuild:${DOCKER_ENV_VERSION}"
+
+pushd ${TMP_DIR}
+cp $SCRIPT_DIR/Dockerfile .
+cp $SCRIPT_DIR/../../../geode-book/Gemfile* .
+
+docker build -t ${IMAGE_NAME} .
+
+popd
+
+if [ "$(uname -s)" == "Linux" ]; then
+  USER_NAME=${SUDO_USER:=$USER}
+  USER_ID=$(id -u "${USER_NAME}")
+  GROUP_ID=$(id -g "${USER_NAME}")
+else # boot2docker uid and gid
+  USER_NAME=$USER
+  USER_ID=1000
+  GROUP_ID=50
+fi
+
+docker build -t "${IMAGE_NAME}-${USER_NAME}" - <<UserSpecificDocker
+FROM ${IMAGE_NAME} 
+RUN groupadd --non-unique -g ${GROUP_ID} ${USER_NAME}
+RUN useradd -g ${GROUP_ID} -u ${USER_ID} -k /root -m ${USER_NAME}
+ENV HOME /home/${USER_NAME}
+UserSpecificDocker
+
+# Go to root
+pushd ${SCRIPT_DIR}/../../../

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bc7a675a/dev-tools/docker/docs/view-docs.sh
----------------------------------------------------------------------
diff --git a/dev-tools/docker/docs/view-docs.sh b/dev-tools/docker/docs/view-docs.sh
new file mode 100755
index 0000000..f107ccd
--- /dev/null
+++ b/dev-tools/docker/docs/view-docs.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e -x -u
+
+SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+
+. $SCRIPT_DIR/build-image-common.sh
+
+docker run -i -t \
+  --rm=true \
+  -w "/home/${USER_NAME}/incubator-geode/geode-book/final_app/public" \
+  -u "${USER_NAME}" \
+  -v "$PWD:/home/${USER_NAME}/incubator-geode" \
+  -v "/home/${USER_NAME}/.m2:/home/${USER_NAME}/.m2" \
+  -p 127.0.0.1:8080:8080 \
+  ${IMAGE_NAME}-${USER_NAME} \
+  python -m SimpleHTTPServer 8080
+
+popd
+


[28/50] [abbrv] incubator-geode git commit: GEODE-1353: Added listeners to slow down the receiver.

Posted by kl...@apache.org.
GEODE-1353: Added listeners to slow down the receiver.

	* Added an afterCreate listener to slow down the receiver.
	* This avoids the need for a very large number of puts.
	* Once the remote region holds more than 5 entries, the destroy of that region can be initiated.
	* The number of puts has been reduced from 20,000 to 2,000.
	* This makes sure the queue is not empty (a sketch of such a slow-down listener follows below).
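
The slow-down itself comes from WANTestBase.addListenerToSleepAfterCreateEvent(1000, ...), a test helper that is not part of this diff. The listener below is a minimal, hypothetical sketch of what such a helper presumably installs on the receiving region (the class name and the installation comment are illustrative, not the committed WANTestBase code); it simply sleeps in afterCreate so the remote site drains slowly:

import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.util.CacheListenerAdapter;

public class SleepAfterCreateListener extends CacheListenerAdapter<Object, Object> {
  private final long sleepMillis;

  public SleepAfterCreateListener(long sleepMillis) {
    this.sleepMillis = sleepMillis;
  }

  @Override
  public void afterCreate(EntryEvent<Object, Object> event) {
    try {
      // Pause on every create so the sender's queue stays populated while the
      // remote region destroy races with the in-flight puts.
      Thread.sleep(sleepMillis);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }
}

// Illustrative installation on the remote-site region (vm2 in the test):
//   region.getAttributesMutator().addCacheListener(new SleepAfterCreateListener(1000));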


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/56836e59
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/56836e59
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/56836e59

Branch: refs/heads/feature/GEODE-1930
Commit: 56836e59f39a273432b6e64904c41c7854f6446f
Parents: 5c50954
Author: nabarun <nn...@pivotal.io>
Authored: Tue Oct 4 16:21:26 2016 -0700
Committer: nabarun <nn...@pivotal.io>
Committed: Tue Oct 18 14:13:19 2016 -0700

----------------------------------------------------------------------
 .../wan/serial/SerialWANStatsDUnitTest.java     | 43 ++++++++++++--------
 1 file changed, 25 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/56836e59/geode-wan/src/test/java/org/apache/geode/internal/cache/wan/serial/SerialWANStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-wan/src/test/java/org/apache/geode/internal/cache/wan/serial/SerialWANStatsDUnitTest.java b/geode-wan/src/test/java/org/apache/geode/internal/cache/wan/serial/SerialWANStatsDUnitTest.java
index 4db4890..ceca847 100644
--- a/geode-wan/src/test/java/org/apache/geode/internal/cache/wan/serial/SerialWANStatsDUnitTest.java
+++ b/geode-wan/src/test/java/org/apache/geode/internal/cache/wan/serial/SerialWANStatsDUnitTest.java
@@ -379,54 +379,61 @@ public class SerialWANStatsDUnitTest extends WANTestBase {
    * 1 region and sender configured on local site and 1 region and a 
    * receiver configured on remote site. Puts to the local region are in progress.
    * Remote region is destroyed in the middle.
+   *
+   * Better fix: slow down the receiver after every create event, so a huge number of puts is not required.
+   *
    * 
    * @throws Exception
    */
-  @Category(FlakyTest.class) // GEODE-1353
   @Test
   public void testReplicatedSerialPropagationWithRemoteRegionDestroy() throws Exception {
-  int numEntries = 20000;
+  int numEntries = 2000;
     Integer lnPort = (Integer)vm0.invoke(() -> WANTestBase.createFirstLocatorWithDSId( 1 ));
     Integer nyPort = (Integer)vm1.invoke(() -> WANTestBase.createFirstRemoteLocator( 2, lnPort ));
 
     //these are part of remote site
     vm2.invoke(() -> WANTestBase.createCache( nyPort ));
+
+    //create one RR (RR_1) on remote site
+    vm2.invoke(() -> WANTestBase.createReplicatedRegion(
+      testName + "_RR_1", null, isOffHeap()  ));
+
+
     vm2.invoke(() -> WANTestBase.createReceiver());
 
+    //This slows down the receiver
+    vm2.invoke(() -> addListenerToSleepAfterCreateEvent(1000, testName + "_RR_1"));
+
+
     //these are part of local site
     vm4.invoke(() -> WANTestBase.createCache( lnPort ));
     vm5.invoke(() -> WANTestBase.createCache( lnPort ));
     vm6.invoke(() -> WANTestBase.createCache( lnPort ));
     vm7.invoke(() -> WANTestBase.createCache( lnPort ));
 
+    //create one RR (RR_1) on local site
+    vm4.invoke(() -> WANTestBase.createReplicatedRegion(
+      testName + "_RR_1", "ln", isOffHeap()  ));
+    vm5.invoke(() -> WANTestBase.createReplicatedRegion(
+      testName + "_RR_1", "ln", isOffHeap()  ));
+    vm6.invoke(() -> WANTestBase.createReplicatedRegion(
+      testName + "_RR_1", "ln", isOffHeap()  ));
+    vm7.invoke(() -> WANTestBase.createReplicatedRegion(
+      testName + "_RR_1", "ln", isOffHeap()  ));
+
     //senders are created on local site
     vm4.invoke(() -> WANTestBase.createSender( "ln", 2,
         false, 100, 100, false, false, null, true ));
     vm5.invoke(() -> WANTestBase.createSender( "ln", 2,
         false, 100, 100, false, false, null, true ));
 
-    //create one RR (RR_1) on remote site
-    vm2.invoke(() -> WANTestBase.createReplicatedRegion(
-        testName + "_RR_1", null, isOffHeap()  ));
-    //This is to cause a scenario where we have received at least X events and want to slow the receiver
-    vm2.invoke(() -> WANTestBase.longPauseAfterNumEvents(500, 200));
     //start the senders on local site
     startSenderInVMs("ln", vm4, vm5);
 
-    //create one RR (RR_1) on local site
-    vm4.invoke(() -> WANTestBase.createReplicatedRegion(
-        testName + "_RR_1", "ln", isOffHeap()  ));
-    vm5.invoke(() -> WANTestBase.createReplicatedRegion(
-        testName + "_RR_1", "ln", isOffHeap()  ));
-    vm6.invoke(() -> WANTestBase.createReplicatedRegion(
-        testName + "_RR_1", "ln", isOffHeap()  ));
-    vm7.invoke(() -> WANTestBase.createReplicatedRegion(
-        testName + "_RR_1", "ln", isOffHeap()  ));
-
     //start puts in RR_1 in another thread
     AsyncInvocation inv1 = vm4.invokeAsync(() -> WANTestBase.doPuts( testName + "_RR_1", numEntries ));
     //destroy RR_1 in remote site
-    vm2.invoke(() -> WANTestBase.destroyRegion( testName + "_RR_1", 500));
+    vm2.invoke(() -> WANTestBase.destroyRegion( testName + "_RR_1", 5));
 
     try {
       inv1.join();


[07/50] [abbrv] incubator-geode git commit: Merge feature/GEODE-1952 into feature/GEODE-1952-3

Posted by kl...@apache.org.
Merge feature/GEODE-1952 into feature/GEODE-1952-3


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/952e7e32
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/952e7e32
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/952e7e32

Branch: refs/heads/feature/GEODE-1930
Commit: 952e7e324026709325dd347fd0ca9db6cb6418b7
Parents: 38aa36f 14a32e2
Author: Karen Miller <km...@pivotal.io>
Authored: Fri Oct 14 14:44:27 2016 -0700
Committer: Karen Miller <km...@pivotal.io>
Committed: Fri Oct 14 14:44:27 2016 -0700

----------------------------------------------------------------------
 geode-book/.gitignore                           |    2 +
 geode-book/Gemfile                              |    5 +
 geode-book/Gemfile.lock                         |  203 ++
 geode-book/README.md                            |    1 +
 geode-book/config.yml                           |   20 +
 .../master_middleman/source/images/favicon.ico  |  Bin 0 -> 1317 bytes
 .../master_middleman/source/index.html.erb      |    7 +
 .../master_middleman/source/javascripts/book.js |   16 +
 .../source/javascripts/waypoints/context.js     |  300 ++
 .../source/javascripts/waypoints/group.js       |  105 +
 .../javascripts/waypoints/noframeworkAdapter.js |  213 ++
 .../source/javascripts/waypoints/sticky.js      |   63 +
 .../source/javascripts/waypoints/waypoint.js    |  160 +
 .../source/layouts/_book-footer.erb             |    7 +
 .../master_middleman/source/layouts/_title.erb  |    6 +
 .../source/stylesheets/book-styles.css.scss     |    3 +
 .../stylesheets/partials/_book-base-values.scss |    0
 .../source/stylesheets/partials/_book-vars.scss |   19 +
 .../source/subnavs/geode-subnav.erb             | 3082 ++++++++++++++++
 geode-book/redirects.rb                         |    3 +
 geode-docs/.gitignore                           |    6 +
 geode-docs/CONTRIBUTE.md                        |   63 +
 geode-docs/README.md                            |   53 +
 geode-docs/about_geode.html.md.erb              |   26 +
 geode-docs/basic_config/book_intro.html.md.erb  |   40 +
 .../chapter_overview.html.md.erb                |   40 +
 ...uted_system_member_configuration.html.md.erb |   51 +
 .../config_concepts/local_vs_remote.html.md.erb |   29 +
 .../chapter_overview.html.md.erb                |   32 +
 .../managing_data_entries.html.md.erb           |  146 +
 .../using_custom_classes.html.md.erb            |   51 +
 .../data_regions/chapter_overview.html.md.erb   |   65 +
 .../create_a_region_with_API.html.md.erb        |   80 +
 .../create_a_region_with_cacheXML.html.md.erb   |   85 +
 .../create_a_region_with_gfsh.html.md.erb       |   55 +
 .../creating_custom_attributes.html.md.erb      |   64 +
 .../managing_data_regions.html.md.erb           |  222 ++
 .../managing_region_attributes.html.md.erb      |  113 +
 .../new_region_existing_data.html.md.erb        |   28 +
 .../data_regions/region_naming.html.md.erb      |   31 +
 .../data_regions/region_shortcuts.html.md.erb   |  115 +
 .../store_retrieve_region_shortcuts.html.md.erb |   77 +
 .../setting_distributed_properties.html.md.erb  |   81 +
 .../the_cache/chapter_overview.html.md.erb      |   48 +
 .../intro_cache_management.html.md.erb          |   96 +
 .../managing_a_client_cache.html.md.erb         |   84 +
 .../managing_a_multiuser_cache.html.md.erb      |   66 +
 .../managing_a_peer_server_cache.html.md.erb    |   81 +
 .../managing_a_secure_cache.html.md.erb         |   67 +
 .../setting_cache_initializer.html.md.erb       |   76 +
 .../setting_cache_properties.html.md.erb        |   39 +
 .../configuring/chapter_overview.html.md.erb    |   84 +
 .../deploying_application_jars.html.md.erb      |  131 +
 .../cluster_config/export-import.html.md.erb    |   56 +
 .../gfsh_config_troubleshooting.html.md.erb     |   75 +
 .../gfsh_load_from_shared_dir.html.md.erb       |   44 +
 .../cluster_config/gfsh_persist.html.md.erb     |  125 +
 .../cluster_config/gfsh_remote.html.md.erb      |   78 +
 .../persisting_configurations.html.md.erb       |  337 ++
 .../using_member_groups.html.md.erb             |   44 +
 .../running/change_file_spec.html.md.erb        |   57 +
 .../running/default_file_specs.html.md.erb      |   76 +
 .../deploy_config_files_intro.html.md.erb       |   34 +
 .../running/deploying_config_files.html.md.erb  |   45 +
 .../deploying_config_jar_files.html.md.erb      |   52 +
 .../running/firewall_ports_config.html.md.erb   |   32 +
 .../running/firewalls_connections.html.md.erb   |   35 +
 .../running/firewalls_multisite.html.md.erb     |   87 +
 .../running/firewalls_ports.html.md.erb         |  246 ++
 .../running/managing_output_files.html.md.erb   |   33 +
 .../running/running_the_cacheserver.html.md.erb |  199 +
 .../running/running_the_locator.html.md.erb     |  257 ++
 .../starting_up_shutting_down.html.md.erb       |  146 +
 geode-docs/developing/book_intro.html.md.erb    |   74 +
 .../chapter_overview.html.md.erb                |   38 +
 .../continuous_querying_whats_next.html.md.erb  |   88 +
 .../how_continuous_querying_works.html.md.erb   |   98 +
 ...implementing_continuous_querying.html.md.erb |  202 ++
 .../PDX_Serialization_Features.html.md.erb      |   40 +
 .../auto_serialization.html.md.erb              |  141 +
 ...ation_with_class_pattern_strings.html.md.erb |   85 +
 .../chapter_overview.html.md.erb                |   40 +
 .../data_serialization_options.html.md.erb      |   68 +
 .../extending_the_autoserializer.html.md.erb    |  123 +
 .../gemfire_data_serialization.html.md.erb      |   52 +
 .../gemfire_pdx_serialization.html.md.erb       |   64 +
 .../java_serialization.html.md.erb              |   29 +
 .../jsonformatter_pdxinstances.html.md.erb      |   46 +
 .../persist_pdx_metadata_to_disk.html.md.erb    |   53 +
 .../program_application_for_pdx.html.md.erb     |  107 +
 .../use_pdx_high_level_steps.html.md.erb        |   49 +
 .../use_pdx_serializable.html.md.erb            |  115 +
 .../use_pdx_serializer.html.md.erb              |  145 +
 .../using_PdxInstanceFactory.html.md.erb        |   51 +
 .../using_pdx_region_entry_keys.html.md.erb     |   31 +
 .../chapter_overview.html.md.erb                |   48 +
 .../delta_propagation_example.html.md.erb       |  130 +
 .../delta_propagation_properties.html.md.erb    |   96 +
 .../errors_in_delta_propagation.html.md.erb     |   35 +
 .../how_delta_propagation_works.html.md.erb     |   69 +
 .../implementing_delta_propagation.html.md.erb  |   41 +
 .../when_to_use_delta_prop.html.md.erb          |   34 +
 .../chapter_overview.html.md.erb                |   44 +
 .../choosing_level_of_dist.html.md.erb          |   36 +
 .../how_distribution_works.html.md.erb          |   48 +
 .../how_region_versioning_works.html.md.erb     |  127 +
 .../how_region_versioning_works_wan.html.md.erb |   42 +
 .../how_replication_works.html.md.erb           |   51 +
 .../locking_in_global_regions.html.md.erb       |  109 +
 .../managing_distributed_regions.html.md.erb    |   64 +
 .../region_entry_versions.html.md.erb           |   51 +
 .../cache_event_handler_examples.html.md.erb    |  155 +
 .../events/chapter_overview.html.md.erb         |   44 +
 ...re_client_server_event_messaging.html.md.erb |   81 +
 ...figure_multisite_event_messaging.html.md.erb |   39 +
 .../configure_p2p_event_messaging.html.md.erb   |   50 +
 ...uring_gateway_concurrency_levels.html.md.erb |  158 +
 ..._highly_available_gateway_queues.html.md.erb |  119 +
 ...iguring_highly_available_servers.html.md.erb |   55 +
 ...conflate_multisite_gateway_queue.html.md.erb |  130 +
 ...nflate_server_subscription_queue.html.md.erb |   53 +
 .../events/event_handler_overview.html.md.erb   |   40 +
 .../filtering_multisite_events.html.md.erb      |  126 +
 .../ha_event_messaging_whats_next.html.md.erb   |   95 +
 .../events/how_cache_events_work.html.md.erb    |   71 +
 ...client_server_distribution_works.html.md.erb |  137 +
 .../events/how_events_work.html.md.erb          |  111 +
 ...how_multisite_distribution_works.html.md.erb |   68 +
 ...mplementing_cache_event_handlers.html.md.erb |  153 +
 ..._durable_client_server_messaging.html.md.erb |  199 +
 ...nting_write_behind_event_handler.html.md.erb |  245 ++
 ...t_server_subscription_queue_size.html.md.erb |   74 +
 ...ist_of_event_handlers_and_events.html.md.erb |  181 +
 .../resolving_multisite_conflicts.html.md.erb   |   80 +
 ..._client_message_tracking_timeout.html.md.erb |   43 +
 ...ne_client_server_event_messaging.html.md.erb |   37 +
 ..._callbacks_that_modify_the_cache.html.md.erb |   65 +
 .../eviction/chapter_overview.html.md.erb       |   34 +
 .../configuring_data_eviction.html.md.erb       |   88 +
 .../eviction/how_eviction_works.html.md.erb     |   36 +
 .../expiration/chapter_overview.html.md.erb     |   32 +
 .../configuring_data_expiration.html.md.erb     |   83 +
 .../expiration/how_expiration_works.html.md.erb |   70 +
 .../function_exec/chapter_overview.html.md.erb  |   36 +
 .../function_execution.html.md.erb              |  254 ++
 .../how_function_execution_works.html.md.erb    |  131 +
 .../chapter_overview.html.md.erb                |   40 +
 .../chapter_overview.html.md.erb                |   34 +
 .../how_data_loaders_work.html.md.erb           |   52 +
 .../implementing_data_loaders.html.md.erb       |   88 +
 .../sync_outside_data.html.md.erb               |   36 +
 .../chapter_overview.html.md.erb                |   60 +
 .../checking_region_redundancy.html.md.erb      |   55 +
 ...locating_partitioned_region_data.html.md.erb |  128 +
 .../configure_pr_single_hop.html.md.erb         |   39 +
 .../configuring_bucket_for_pr.html.md.erb       |   70 +
 .../configuring_ha_for_pr.html.md.erb           |   58 +
 ...partitioning_and_data_colocation.html.md.erb |   58 +
 .../how_partitioning_works.html.md.erb          |   58 +
 .../how_pr_ha_works.html.md.erb                 |   61 +
 .../how_pr_single_hop_works.html.md.erb         |   48 +
 .../join_query_partitioned_regions.html.md.erb  |   97 +
 .../managing_partitioned_regions.html.md.erb    |   42 +
 .../moving_partitioned_data.html.md.erb         |   76 +
 ...partitioning_and_data_colocation.html.md.erb |   36 +
 .../overview_how_pr_ha_works.html.md.erb        |   32 +
 ...overview_how_pr_single_hop_works.html.md.erb |   32 +
 .../rebalancing_pr_data.html.md.erb             |  106 +
 .../set_crash_redundancy_recovery.html.md.erb   |   60 +
 .../set_enforce_unique_host.html.md.erb         |   34 +
 .../set_join_redundancy_recovery.html.md.erb    |   66 +
 .../set_pr_redundancy.html.md.erb               |   51 +
 .../set_redundancy_zones.html.md.erb            |   40 +
 ...using_custom_partition_resolvers.html.md.erb |  221 ++
 .../advanced_querying.html.md.erb               |   48 +
 .../case_sensitivity.html.md.erb                |   36 +
 .../query_additional/literals.html.md.erb       |   82 +
 .../query_additional/operators.html.md.erb      |   57 +
 .../order_by_on_partitioned_regions.html.md.erb |   35 +
 ...tioned_region_key_or_field_value.html.md.erb |   83 +
 ...tioned_region_query_restrictions.html.md.erb |   50 +
 .../query_debugging.html.md.erb                 |  104 +
 .../query_language_features.html.md.erb         |   41 +
 .../query_on_a_single_node.html.md.erb          |  172 +
 .../supported_keywords.html.md.erb              |   48 +
 .../using_query_bind_parameters.html.md.erb     |   65 +
 .../create_multiple_indexes.html.md.erb         |   78 +
 .../query_index/creating_an_index.html.md.erb   |  111 +
 .../creating_hash_indexes.html.md.erb           |   68 +
 .../creating_key_indexes.html.md.erb            |   66 +
 .../creating_map_indexes.html.md.erb            |   61 +
 .../query_index/index_samples.html.md.erb       |   80 +
 ...indexes_on_single_region_queries.html.md.erb |   50 +
 .../indexes_with_overflow_regions.html.md.erb   |   58 +
 .../query_index/indexing_guidelines.html.md.erb |   41 +
 .../query_index/maintaining_indexes.html.md.erb |   69 +
 .../query_index/query_index.html.md.erb         |   79 +
 .../query_index/query_index_hints.html.md.erb   |   40 +
 ...ng_indexes_with_equijoin_queries.html.md.erb |   69 +
 ...quijoin_queries_multiple_regions.html.md.erb |   79 +
 .../query_select/aggregates.html.md.erb         |  109 +
 .../query_select/the_from_clause.html.md.erb    |  103 +
 .../the_import_statement.html.md.erb            |   31 +
 .../the_select_statement.html.md.erb            |  220 ++
 .../query_select/the_where_clause.html.md.erb   |  353 ++
 .../chapter_overview.html.md.erb                |   38 +
 .../comments_in_query_strings.html.md.erb       |   30 +
 .../monitor_queries_for_low_memory.html.md.erb  |   41 +
 .../oql_compared_to_sql.html.md.erb             |   31 +
 .../performance_considerations.html.md.erb      |   33 +
 .../querying_basics/query_basics.html.md.erb    |   57 +
 ...query_grammar_and_reserved_words.html.md.erb |  163 +
 .../querying_partitioned_regions.html.md.erb    |   41 +
 .../querying_basics/reserved_words.html.md.erb  |  129 +
 ...ictions_and_unsupported_features.html.md.erb |   35 +
 .../querying_basics/running_a_query.html.md.erb |   87 +
 .../supported_character_sets.html.md.erb        |   24 +
 .../what_is_a_query_string.html.md.erb          |   50 +
 .../region_options/chapter_overview.html.md.erb |   40 +
 .../data_hosts_and_accessors.html.md.erb        |   31 +
 .../dynamic_region_creation.html.md.erb         |  197 +
 .../region_options/region_types.html.md.erb     |  146 +
 .../storage_distribution_options.html.md.erb    |   40 +
 .../chapter_overview.html.md.erb                |   41 +
 .../how_persist_overflow_work.html.md.erb       |   64 +
 .../overflow_config_examples.html.md.erb        |   53 +
 .../storing_data_on_disk.html.md.erb            |   79 +
 .../transactions/JTA_transactions.html.md.erb   |  243 ++
 .../transactions/about_transactions.html.md.erb |   47 +
 .../cache_plugins_with_jta.html.md.erb          |   28 +
 .../cache_transaction_performance.html.md.erb   |   29 +
 .../transactions/cache_transactions.html.md.erb |   51 +
 ...ache_transactions_by_region_type.html.md.erb |  156 +
 .../transactions/chapter_overview.html.md.erb   |   48 +
 .../client_server_transactions.html.md.erb      |   55 +
 ...guring_db_connections_using_JNDI.html.md.erb |  330 ++
 ...data_location_cache_transactions.html.md.erb |   32 +
 .../how_cache_transactions_work.html.md.erb     |   73 +
 .../jca_adapter_example.html.md.erb             |   51 +
 ...onitor_troubleshoot_transactions.html.md.erb |   56 +
 .../run_a_cache_transaction.html.md.erb         |   90 +
 ...che_transaction_with_external_db.html.md.erb |   54 +
 .../transaction_coding_examples.html.md.erb     |   44 +
 .../transaction_event_management.html.md.erb    |   56 +
 .../transaction_jta_gemfire_example.html.md.erb |   48 +
 .../transaction_semantics.html.md.erb           |   54 +
 ...ansaction_suspend_resume_example.html.md.erb |   38 +
 ...ctional_and_nontransactional_ops.html.md.erb |  117 +
 .../transactional_function_example.html.md.erb  |   72 +
 .../transactions_overview.html.md.erb           |   67 +
 .../transactions/turning_off_jta.html.md.erb    |   40 +
 .../working_with_transactions.html.md.erb       |  229 ++
 .../15_minute_quickstart_gfsh.html.md.erb       |  516 +++
 .../getting_started/book_intro.html.md.erb      |   40 +
 .../getting_started/geode_overview.html.md.erb  |   37 +
 .../installation/install_standalone.html.md.erb |  138 +
 .../getting_started/product_intro.html.md.erb   |  101 +
 .../querying_quick_reference.html.md.erb        |  711 ++++
 .../getting_started/setup_classpath.html.md.erb |  122 +
 .../host_machine.html.md.erb                    |   49 +
 .../uninstall_gemfire.html.md.erb               |   26 +
 .../images/ClientServerAdvancedTopics-5.gif     |  Bin 0 -> 10798 bytes
 .../images/ClientServerAdvancedTopics-6.gif     |  Bin 0 -> 12056 bytes
 .../images/ClientServerAdvancedTopics-7.gif     |  Bin 0 -> 5000 bytes
 geode-docs/images/ContinuousQuerying-1.gif      |  Bin 0 -> 6955 bytes
 geode-docs/images/ContinuousQuerying-3.gif      |  Bin 0 -> 9141 bytes
 geode-docs/images/DataManagement-9.png          |  Bin 0 -> 48188 bytes
 geode-docs/images/DeltaPropagation-1.gif        |  Bin 0 -> 7593 bytes
 geode-docs/images/DeltaPropagation-3.gif        |  Bin 0 -> 6843 bytes
 geode-docs/images/Events-2.gif                  |  Bin 0 -> 8793 bytes
 geode-docs/images/Events-3.gif                  |  Bin 0 -> 6432 bytes
 geode-docs/images/FuncExecOnMembers.png         |  Bin 0 -> 64959 bytes
 .../images/FuncExecOnRegionHAWithFilter.png     |  Bin 0 -> 80141 bytes
 .../images/FuncExecOnRegionNoMetadata.png       |  Bin 0 -> 70177 bytes
 .../images/FuncExecOnRegionPeersWithFilter.png  |  Bin 0 -> 86576 bytes
 .../images/FuncExecOnRegionWithFilter.png       |  Bin 0 -> 60773 bytes
 .../images/FuncExecOnRegionWithMetadata.png     |  Bin 0 -> 59576 bytes
 geode-docs/images/FuncExecOnServers.png         |  Bin 0 -> 57470 bytes
 geode-docs/images/Gemcached.png                 |  Bin 0 -> 87366 bytes
 geode-docs/images/JConsole.png                  |  Bin 0 -> 64272 bytes
 geode-docs/images/MultiSite-4.gif               |  Bin 0 -> 4991 bytes
 .../images/MultisiteConcurrency_WAN_Gateway.png |  Bin 0 -> 103701 bytes
 geode-docs/images/SQLite_Persistence_Mgr.png    |  Bin 0 -> 58388 bytes
 geode-docs/images/Transaction-simple.png        |  Bin 0 -> 119831 bytes
 geode-docs/images/consistent_multisite.png      |  Bin 0 -> 39442 bytes
 geode-docs/images/diskStores-1.gif              |  Bin 0 -> 7292 bytes
 geode-docs/images/diskStores-3.gif              |  Bin 0 -> 5898 bytes
 geode-docs/images/jconsole_mbeans.png           |  Bin 0 -> 211394 bytes
 geode-docs/images/jvisualvm.png                 |  Bin 0 -> 90153 bytes
 geode-docs/images/logging-1.gif                 |  Bin 0 -> 2778 bytes
 geode-docs/images/member_view_list.png          |  Bin 0 -> 107811 bytes
 .../images/multisite-topology-avoid-3.png       |  Bin 0 -> 9794 bytes
 .../images/multisite-topology-hybrid-1.png      |  Bin 0 -> 7627 bytes
 .../images/multisite-topology-hybrid-2.png      |  Bin 0 -> 7631 bytes
 .../images/multisite-topology-parallel.png      |  Bin 0 -> 7838 bytes
 geode-docs/images/multisite-topology-serial.png |  Bin 0 -> 6886 bytes
 geode-docs/images/parallel_sender.png           |  Bin 0 -> 36089 bytes
 geode-docs/images/pulse-data-browser.png        |  Bin 0 -> 99987 bytes
 geode-docs/images/pulse-region-detail.png       |  Bin 0 -> 61149 bytes
 geode-docs/images/pulse_alerts_widget.png       |  Bin 0 -> 137883 bytes
 geode-docs/images/pulse_cluster_view.png        |  Bin 0 -> 154531 bytes
 geode-docs/images/pulse_data_view.png           |  Bin 0 -> 138140 bytes
 geode-docs/images/pulse_locator.png             |  Bin 0 -> 228513 bytes
 geode-docs/images/pulse_member_view.png         |  Bin 0 -> 132309 bytes
 .../images/rest_example_java_packages.png       |  Bin 0 -> 30206 bytes
 geode-docs/images/security-1.gif                |  Bin 0 -> 8343 bytes
 geode-docs/images/security-3.gif                |  Bin 0 -> 8287 bytes
 geode-docs/images/security-4.gif                |  Bin 0 -> 7028 bytes
 geode-docs/images/security-5.gif                |  Bin 0 -> 7408 bytes
 geode-docs/images/serial_sender.png             |  Bin 0 -> 32385 bytes
 geode-docs/images/statistics-1.gif              |  Bin 0 -> 8644 bytes
 geode-docs/images/swagger_home.png              |  Bin 0 -> 187378 bytes
 geode-docs/images/swagger_post_region.png       |  Bin 0 -> 170963 bytes
 .../images/swagger_post_region_response.png     |  Bin 0 -> 176783 bytes
 geode-docs/images/swagger_v1.png                |  Bin 0 -> 149806 bytes
 geode-docs/images/swagger_v1_response.png       |  Bin 0 -> 143134 bytes
 geode-docs/images/transactions-client-1.png     |  Bin 0 -> 113816 bytes
 geode-docs/images/transactions_jca_adapter.png  |  Bin 0 -> 104775 bytes
 geode-docs/images/transactions_jta.png          |  Bin 0 -> 104780 bytes
 .../images/transactions_jta_app_server.png      |  Bin 0 -> 91885 bytes
 geode-docs/images_svg/JMX_Architecture.svg      |    3 +
 geode-docs/images_svg/MBeans.svg                |    3 +
 .../async_system_queue_conflation.svg           |    3 +
 geode-docs/images_svg/cache_data_loader.svg     |    3 +
 geode-docs/images_svg/cache_data_loader_2.svg   |    3 +
 .../images_svg/client_server_deployment.svg     |    3 +
 .../images_svg/client_server_event_dist.svg     |    3 +
 .../client_server_message_tracking.svg          |    3 +
 geode-docs/images_svg/cluster-group-config.svg  |    3 +
 .../images_svg/cluster_config_overview.svg      |    3 +
 .../colocated_partitioned_regions.svg           |    3 +
 geode-docs/images_svg/cs_connection_pool.svg    |    3 +
 geode-docs/images_svg/cs_locator_discovery.svg  |    3 +
 geode-docs/images_svg/cs_subscriptions.svg      |    3 +
 geode-docs/images_svg/cs_topology.svg           |    3 +
 geode-docs/images_svg/custom_partitioned.svg    |    3 +
 geode-docs/images_svg/developing_overflow.svg   |    3 +
 .../images_svg/developing_persistence.svg       |    3 +
 .../developing_persistence_and_overflow.svg     |    3 +
 geode-docs/images_svg/distributed_how_1.svg     |    3 +
 geode-docs/images_svg/distributed_how_2.svg     |    3 +
 geode-docs/images_svg/distributed_how_3.svg     |    3 +
 geode-docs/images_svg/distributed_preload.svg   |    3 +
 geode-docs/images_svg/distributed_replica.svg   |    3 +
 .../images_svg/distributed_replica_preload.svg  |    3 +
 geode-docs/images_svg/expiration.svg            |    3 +
 .../images_svg/how_partitioning_works_1.svg     |    3 +
 .../images_svg/how_partitioning_works_2.svg     |    3 +
 .../images_svg/http_module_cs_with_locator.svg  |    3 +
 .../images_svg/http_module_p2p_with_locator.svg |    3 +
 geode-docs/images_svg/locator_discovery.svg     |    3 +
 geode-docs/images_svg/member_severe_alert.svg   |    3 +
 .../images_svg/network_partition_scenario.svg   |    3 +
 geode-docs/images_svg/p2p_topology.svg          |    3 +
 geode-docs/images_svg/partitioned_data_HA.svg   |    3 +
 .../images_svg/partitioned_data_buckets_1.svg   |    3 +
 .../images_svg/partitioned_data_buckets_2.svg   |    3 +
 .../images_svg/region_entry_versions_1.svg      |    3 +
 .../images_svg/region_entry_versions_2.svg      |    3 +
 .../images_svg/region_entry_versions_3.svg      |    3 +
 .../images_svg/server_client_event_dist.svg     |    3 +
 geode-docs/images_svg/server_discovery.svg      |    3 +
 geode-docs/images_svg/server_grouping.svg       |    3 +
 .../images_svg/transactions_partitioned_1.svg   |    3 +
 .../images_svg/transactions_partitioned_2.svg   |    3 +
 .../images_svg/transactions_replicate_1.svg     |    3 +
 .../images_svg/transactions_replicate_2.svg     |    3 +
 .../images_svg/transactions_replicate_3.svg     |    3 +
 .../images_svg/transactions_replicate_4.svg     |    3 +
 .../transactions_replicate_local_1.svg          |    3 +
 .../transactions_replicate_no_ack_1.svg         |    3 +
 .../transactions_replicate_no_ack_2.svg         |    3 +
 .../images_svg/tune_cs_event_messaging.svg      |    3 +
 .../unbalanced_network_capacity_probs.svg       |    3 +
 .../autoreconnect/member-reconnect.html.md.erb  |   59 +
 geode-docs/managing/book_intro.html.md.erb      |   69 +
 .../chapter_overview.html.md.erb                |   51 +
 .../exporting_a_snapshot.html.md.erb            |   74 +
 .../filtering_snapshot_entries.html.md.erb      |   46 +
 .../importing_a_snapshot.html.md.erb            |   81 +
 .../read_snapshots_programmatically.html.md.erb |   43 +
 ...using_cache_and_region_snapshots.html.md.erb |   40 +
 .../backup_restore_disk_store.html.md.erb       |  189 +
 .../disk_storage/chapter_overview.html.md.erb   |   56 +
 .../compacting_disk_stores.html.md.erb          |  133 +
 .../disk_free_space_monitoring.html.md.erb      |   57 +
 .../disk_store_configuration_params.html.md.erb |  123 +
 .../file_names_and_extensions.html.md.erb       |   96 +
 .../handling_missing_disk_stores.html.md.erb    |   72 +
 .../how_disk_stores_work.html.md.erb            |   60 +
 ...eping_offline_disk_store_in_sync.html.md.erb |   65 +
 .../managing_disk_buffer_flushes.html.md.erb    |   44 +
 .../managing_disk_stores.html.md.erb            |   42 +
 .../managing_disk_stores_cmds.html.md.erb       |   62 +
 .../disk_storage/operation_logs.html.md.erb     |   69 +
 ...ize_availability_and_performance.html.md.erb |   32 +
 .../overview_using_disk_stores.html.md.erb      |   36 +
 ...starting_system_with_disk_stores.html.md.erb |  128 +
 .../disk_storage/using_disk_stores.html.md.erb  |  216 ++
 .../using_the_default_disk_store.html.md.erb    |   70 +
 .../validating_disk_store.html.md.erb           |   37 +
 .../heap_use/heap_management.html.md.erb        |  225 ++
 .../managing/heap_use/lock_memory.html.md.erb   |   52 +
 .../heap_use/off_heap_management.html.md.erb    |  209 ++
 .../logging/configuring_log4j2.html.md.erb      |   68 +
 .../logging/how_logging_works.html.md.erb       |   39 +
 .../logging/log_collection_utility.html.md.erb  |   71 +
 geode-docs/managing/logging/logging.html.md.erb |   48 +
 .../logging/logging_categories.html.md.erb      |  247 ++
 .../logging/logging_whats_next.html.md.erb      |   56 +
 .../logging/setting_up_logging.html.md.erb      |   76 +
 .../configuring_rmi_connector.html.md.erb       |   34 +
 .../gfsh_and_management_api.html.md.erb         |   69 +
 .../management/jmx_manager_node.html.md.erb     |   37 +
 .../jmx_manager_operations.html.md.erb          |  212 ++
 .../list_of_mbean_notifications.html.md.erb     |   82 +
 .../management/list_of_mbeans.html.md.erb       |   38 +
 .../management/list_of_mbeans_full.html.md.erb  |  227 ++
 .../management_and_monitoring.html.md.erb       |   52 +
 ...nagement_and_monitoring_features.html.md.erb |   41 +
 .../management_system_overview.html.md.erb      |  112 +
 .../management/mbean_architecture.html.md.erb   |   76 +
 .../management/mbean_notifications.html.md.erb  |   34 +
 .../management/mbeans_jconsole.html.md.erb      |   53 +
 .../managing/management/mm_overview.html.md.erb |   94 +
 ...tification_federation_and_alerts.html.md.erb |   54 +
 .../management/programming_example.html.md.erb  |  237 ++
 .../monitor_tune/cache_consistency.html.md.erb  |   80 +
 .../monitor_tune/chapter_overview.html.md.erb   |   60 +
 .../gemfire_performance_on_vsphere.html.md.erb  |   64 +
 ...erformance_on_vsphere_guidelines.html.md.erb |  136 +
 .../multicast_communication.html.md.erb         |   46 +
 ...ication_configuring_speed_limits.html.md.erb |   51 +
 ...unication_provisioning_bandwidth.html.md.erb |   60 +
 ...unication_runtime_considerations.html.md.erb |   47 +
 ...n_testing_multicast_speed_limits.html.md.erb |  145 +
 ...st_communication_troubleshooting.html.md.erb |   38 +
 .../performance_controls.html.md.erb            |   46 +
 ..._controls_controlling_socket_use.html.md.erb |   51 +
 ...ance_controls_data_serialization.html.md.erb |   26 +
 ...e_controls_increasing_cache_hits.html.md.erb |   28 +
 ...controls_managing_slow_receivers.html.md.erb |   73 +
 ..._controls_setting_cache_timeouts.html.md.erb |   41 +
 .../monitor_tune/slow_messages.html.md.erb      |   38 +
 .../monitor_tune/slow_receivers.html.md.erb     |   34 +
 .../slow_receivers_managing.html.md.erb         |  116 +
 ...ow_receivers_preventing_problems.html.md.erb |   45 +
 .../socket_communication.html.md.erb            |   48 +
 ...cation_ephemeral_tcp_port_limits.html.md.erb |   58 +
 ...ommunication_have_enough_sockets.html.md.erb |  185 +
 ...tion_setting_socket_buffer_sizes.html.md.erb |  144 +
 ...ion_tcpip_p2p_handshake_timeouts.html.md.erb |   38 +
 .../socket_tcp_keepalive.html.md.erb            |   31 +
 .../sockets_and_gateways.html.md.erb            |  122 +
 .../system_member_performance.html.md.erb       |   42 +
 ...mance_connection_thread_settings.html.md.erb |   32 +
 ...rmance_distributed_system_member.html.md.erb |   28 +
 ...ystem_member_performance_garbage.html.md.erb |   53 +
 ...ber_performance_jvm_mem_settings.html.md.erb |   78 +
 .../monitor_tune/udp_communication.html.md.erb  |   50 +
 .../chapter_overview.html.md.erb                |   48 +
 .../failure_detection.html.md.erb               |   62 +
 .../handling_network_partitioning.html.md.erb   |   63 +
 ...rk_partitioning_management_works.html.md.erb |   59 +
 ...ators_lead_members_and_weighting.html.md.erb |   79 +
 .../network_partitioning_scenarios.html.md.erb  |   53 +
 .../preventing_network_partitions.html.md.erb   |   28 +
 .../region_compression.html.md.erb              |  226 ++
 .../authentication_examples.html.md.erb         |   70 +
 .../authentication_overview.html.md.erb         |   43 +
 .../security/authorization_example.html.md.erb  |   70 +
 .../security/authorization_overview.html.md.erb |   34 +
 .../security/chapter_overview.html.md.erb       |   47 +
 .../security/enable_security.html.md.erb        |   73 +
 .../security/encrypting_passwords.html.md.erb   |   49 +
 .../encrypting_with_diffie_helman.html.md.erb   |   66 +
 .../implementing_authentication.html.md.erb     |  142 +
 .../implementing_authorization.html.md.erb      |  265 ++
 .../security/implementing_security.html.md.erb  |   80 +
 .../security/implementing_ssl.html.md.erb       |  226 ++
 .../security/post_processing.html.md.erb        |   67 +
 .../security/properties_file.html.md.erb        |   34 +
 .../security/security-audit.html.md.erb         |   64 +
 .../security_audit_overview.html.md.erb         |   39 +
 .../security/security_intro.html.md.erb         |   38 +
 .../managing/security/ssl_example.html.md.erb   |  105 +
 .../managing/security/ssl_overview.html.md.erb  |   45 +
 .../application_defined_statistics.html.md.erb  |   39 +
 .../statistics/chapter_overview.html.md.erb     |   42 +
 .../statistics/how_statistics_work.html.md.erb  |   34 +
 .../setting_up_statistics.html.md.erb           |  151 +
 ...ient_region_and_entry_statistics.html.md.erb |   42 +
 .../statistics/viewing_statistics.html.md.erb   |   24 +
 .../chapter_overview.html.md.erb                |   60 +
 .../diagnosing_system_probs.html.md.erb         |  437 +++
 ...ent_and_recover_disk_full_errors.html.md.erb |   45 +
 ...ducing_troubleshooting_artifacts.html.md.erb |   92 +
 ...ring_conflicting_data_exceptions.html.md.erb |   75 +
 .../recovering_from_app_crashes.html.md.erb     |   32 +
 .../recovering_from_cs_crashes.html.md.erb      |   54 +
 .../recovering_from_machine_crashes.html.md.erb |   62 +
 .../recovering_from_network_outages.html.md.erb |   73 +
 .../recovering_from_p2p_crashes.html.md.erb     |  231 ++
 .../system_failure_and_recovery.html.md.erb     |  283 ++
 geode-docs/prereq_and_install.html.md.erb       |   40 +
 geode-docs/reference/book_intro.html.md.erb     |   48 +
 .../statistics/statistics_list.html.md.erb      | 1293 +++++++
 .../topics/cache-elements-list.html.md.erb      |  185 +
 .../reference/topics/cache_xml.html.md.erb      | 3107 ++++++++++++++++
 .../chapter_overview_cache_xml.html.md.erb      |   47 +
 ...chapter_overview_regionshortcuts.html.md.erb |  107 +
 .../client-cache-elements-list.html.md.erb      |  137 +
 .../reference/topics/client-cache.html.md.erb   | 2683 ++++++++++++++
 .../reference/topics/elements_ref.html.md.erb   |  117 +
 .../topics/gemfire_properties.html.md.erb       |  640 ++++
 .../reference/topics/gfe_cache_xml.html.md.erb  | 3414 ++++++++++++++++++
 .../reference/topics/glossary.html.md.erb       |  618 ++++
 ...handling_exceptions_and_failures.html.md.erb |   32 +
 ...mory_requirements_for_cache_data.html.md.erb |  301 ++
 ...on-ascii_strings_in_config_files.html.md.erb |   43 +
 .../region_shortcuts_reference.html.md.erb      | 1499 ++++++++
 .../topics/region_shortcuts_table.html.md.erb   |  519 +++
 geode-docs/rest_apps/book_intro.html.md.erb     |   59 +
 .../rest_apps/chapter_overview.html.md.erb      |   32 +
 .../rest_apps/delete_all_data.html.md.erb       |   56 +
 .../rest_apps/delete_data_for_key.html.md.erb   |   56 +
 .../delete_data_for_multiple_keys.html.md.erb   |   56 +
 .../rest_apps/delete_named_query.html.md.erb    |   60 +
 .../rest_apps/develop_rest_apps.html.md.erb     |  683 ++++
 .../get_execute_adhoc_query.html.md.erb         |  120 +
 geode-docs/rest_apps/get_functions.html.md.erb  |   67 +
 geode-docs/rest_apps/get_queries.html.md.erb    |   72 +
 .../rest_apps/get_region_data.html.md.erb       |  132 +
 ...et_region_data_for_multiple_keys.html.md.erb |  238 ++
 .../rest_apps/get_region_key_data.html.md.erb   |   87 +
 .../rest_apps/get_region_keys.html.md.erb       |   67 +
 geode-docs/rest_apps/get_regions.html.md.erb    |   95 +
 geode-docs/rest_apps/get_servers.html.md.erb    |   64 +
 .../rest_apps/head_region_size.html.md.erb      |   62 +
 geode-docs/rest_apps/ping_service.html.md.erb   |   54 +
 .../rest_apps/post_create_query.html.md.erb     |  106 +
 .../post_execute_functions.html.md.erb          |  142 +
 .../rest_apps/post_execute_query.html.md.erb    |  172 +
 .../rest_apps/post_if_absent_data.html.md.erb   |  144 +
 .../put_multiple_values_for_keys.html.md.erb    |  103 +
 .../rest_apps/put_replace_data.html.md.erb      |   83 +
 .../rest_apps/put_update_cas_data.html.md.erb   |  215 ++
 .../rest_apps/put_update_data.html.md.erb       |   82 +
 .../rest_apps/put_update_query.html.md.erb      |   85 +
 geode-docs/rest_apps/rest_admin.html.md.erb     |   32 +
 .../rest_apps/rest_api_reference.html.md.erb    |   43 +
 geode-docs/rest_apps/rest_examples.html.md.erb  |  708 ++++
 geode-docs/rest_apps/rest_functions.html.md.erb |   32 +
 geode-docs/rest_apps/rest_prereqs.html.md.erb   |   37 +
 geode-docs/rest_apps/rest_queries.html.md.erb   |   48 +
 geode-docs/rest_apps/rest_regions.html.md.erb   |   82 +
 geode-docs/rest_apps/setup_config.html.md.erb   |  179 +
 .../rest_apps/troubleshooting.html.md.erb       |  169 +
 geode-docs/rest_apps/using_swagger.html.md.erb  |   71 +
 geode-docs/tools_modules/book_intro.html.md.erb |   47 +
 .../gemcached/about_gemcached.html.md.erb       |   46 +
 .../gemcached/advantages.html.md.erb            |   36 +
 .../gemcached/chapter_overview.html.md.erb      |   40 +
 .../gemcached/deploying_gemcached.html.md.erb   |   97 +
 .../tools_modules/gfsh/about_gfsh.html.md.erb   |   42 +
 .../gfsh/cache_xml_2_gfsh.html.md.erb           |  105 +
 .../gfsh/chapter_overview.html.md.erb           |   66 +
 .../gfsh/command-pages/alter.html.md.erb        |  520 +++
 .../gfsh/command-pages/backup.html.md.erb       |   85 +
 .../gfsh/command-pages/change.html.md.erb       |   98 +
 .../gfsh/command-pages/clear.html.md.erb        |   50 +
 .../gfsh/command-pages/close.html.md.erb        |  141 +
 .../gfsh/command-pages/compact.html.md.erb      |  114 +
 .../gfsh/command-pages/configure.html.md.erb    |   85 +
 .../gfsh/command-pages/connect.html.md.erb      |  172 +
 .../gfsh/command-pages/create.html.md.erb       |  954 +++++
 .../gfsh/command-pages/debug.html.md.erb        |   54 +
 .../gfsh/command-pages/define.html.md.erb       |   68 +
 .../gfsh/command-pages/deploy.html.md.erb       |   78 +
 .../gfsh/command-pages/describe.html.md.erb     |  409 +++
 .../gfsh/command-pages/destroy.html.md.erb      |  178 +
 .../gfsh/command-pages/disconnect.html.md.erb   |   56 +
 .../gfsh/command-pages/echo.html.md.erb         |   66 +
 .../gfsh/command-pages/encrypt.html.md.erb      |   57 +
 .../gfsh/command-pages/execute.html.md.erb      |   59 +
 .../gfsh/command-pages/exit.html.md.erb         |   40 +
 .../gfsh/command-pages/export.html.md.erb       |  271 ++
 .../gfsh/command-pages/gc.html.md.erb           |   58 +
 .../gfsh/command-pages/get.html.md.erb          |   67 +
 .../gfsh/command-pages/help.html.md.erb         |   77 +
 .../gfsh/command-pages/hint.html.md.erb         |   78 +
 .../gfsh/command-pages/history.html.md.erb      |   59 +
 .../gfsh/command-pages/import.html.md.erb       |  100 +
 .../gfsh/command-pages/list.html.md.erb         |  474 +++
 .../gfsh/command-pages/load-balance.html.md.erb |   64 +
 .../gfsh/command-pages/locate.html.md.erb       |   72 +
 .../gfsh/command-pages/netstat.html.md.erb      |  139 +
 .../gfsh/command-pages/pause.html.md.erb        |   51 +
 .../gfsh/command-pages/pdx.html.md.erb          |   90 +
 .../gfsh/command-pages/put.html.md.erb          |   78 +
 .../gfsh/command-pages/query.html.md.erb        |   69 +
 .../gfsh/command-pages/rebalance.html.md.erb    |   73 +
 .../gfsh/command-pages/remove.html.md.erb       |   63 +
 .../gfsh/command-pages/resume.html.md.erb       |   51 +
 .../gfsh/command-pages/revoke.html.md.erb       |   65 +
 .../gfsh/command-pages/run.html.md.erb          |  105 +
 .../gfsh/command-pages/set.html.md.erb          |   70 +
 .../gfsh/command-pages/sh.html.md.erb           |   63 +
 .../gfsh/command-pages/show.html.md.erb         |  302 ++
 .../gfsh/command-pages/shutdown.html.md.erb     |   62 +
 .../gfsh/command-pages/sleep.html.md.erb        |   57 +
 .../gfsh/command-pages/start.html.md.erb        |  776 ++++
 .../gfsh/command-pages/status.html.md.erb       |  298 ++
 .../gfsh/command-pages/stop.html.md.erb         |  227 ++
 .../gfsh/command-pages/undeploy.html.md.erb     |   77 +
 .../gfsh/command-pages/validate.html.md.erb     |   48 +
 .../gfsh/command-pages/version.html.md.erb      |   60 +
 .../gfsh/command_scripting.html.md.erb          |   37 +
 .../gfsh/configuring_gfsh.html.md.erb           |  129 +
 .../gfsh/getting_started_gfsh.html.md.erb       |  156 +
 .../gfsh/gfsh_command_index.html.md.erb         |  224 ++
 .../gfsh/gfsh_quick_reference.html.md.erb       |   58 +
 .../gfsh/os_command_line_execution.html.md.erb  |   50 +
 .../gfsh/quick_ref_commands_by_area.html.md.erb |  318 ++
 .../gfsh/starting_gfsh.html.md.erb              |   75 +
 .../tools_modules/gfsh/tour_of_gfsh.html.md.erb |  457 +++
 .../useful_gfsh_shell_variables.html.md.erb     |   72 +
 .../chapter_overview.html.md.erb                |   60 +
 .../common_gemfire_topologies.html.md.erb       |   36 +
 .../http_why_use_gemfire.html.md.erb            |   56 +
 .../interactive_mode_ref.html.md.erb            |  142 +
 .../http_session_mgmt/quick_start.html.md.erb   |  118 +
 .../session_mgmt_tcserver.html.md.erb           |   38 +
 .../session_mgmt_tomcat.html.md.erb             |   38 +
 .../session_mgmt_weblogic.html.md.erb           |   34 +
 .../session_state_log_files.html.md.erb         |  111 +
 .../tc_additional_info.html.md.erb              |   52 +
 .../tc_changing_gf_default_cfg.html.md.erb      |   98 +
 .../tc_installing_the_module.html.md.erb        |   38 +
 .../tc_setting_up_the_module.html.md.erb        |  139 +
 .../tomcat_changing_gf_default_cfg.html.md.erb  |  170 +
 .../tomcat_installing_the_module.html.md.erb    |   38 +
 .../tomcat_setting_up_the_module.html.md.erb    |  120 +
 ...weblogic_changing_gf_default_cfg.html.md.erb |  179 +
 ...gic_common_configuration_changes.html.md.erb |   52 +
 .../weblogic_setting_up_the_module.html.md.erb  |  213 ++
 .../pulse/chapter_overview.html.md.erb          |   49 +
 .../tools_modules/pulse/quickstart.html.md.erb  |  827 +++++
 .../pulse/system_requirements.html.md.erb       |   35 +
 .../tools_modules/redis_adapter.html.md.erb     |   90 +
 .../topologies_and_comm/book_intro.html.md.erb  |   42 +
 .../chapter_overview.html.md.erb                |   52 +
 ...nt_server_example_configurations.html.md.erb |  164 +
 .../client_server_whats_next.html.md.erb        |   56 +
 ...gure_servers_into_logical_groups.html.md.erb |   54 +
 ...etting_up_a_client_server_system.html.md.erb |   87 +
 ...tandard_client_server_deployment.html.md.erb |   35 +
 .../chapter_overview.html.md.erb                |   44 +
 .../multisite_topologies.html.md.erb            |   67 +
 .../setting_up_a_multisite_system.html.md.erb   |  381 ++
 .../chapter_overview.html.md.erb                |   36 +
 .../configuring_peer_member_groups.html.md.erb  |   60 +
 .../setting_up_a_p2p_system.html.md.erb         |   42 +
 .../setting_up_peer_communication.html.md.erb   |   64 +
 .../topology_concepts/IPv4_and_IPv6.html.md.erb |   49 +
 .../chapter_overview.html.md.erb                |   48 +
 .../how_communication_works.html.md.erb         |   62 +
 .../how_member_discovery_works.html.md.erb      |   60 +
 .../how_multisite_systems_work.html.md.erb      |   44 +
 .../how_server_discovery_works.html.md.erb      |   77 +
 ...how_the_pool_manages_connections.html.md.erb |   78 +
 .../member_communication.html.md.erb            |   46 +
 .../multisite_overview.html.md.erb              |  122 +
 .../topology_types.html.md.erb                  |   48 +
 .../using_bind_addresses.html.md.erb            |  112 +
 675 files changed, 68806 insertions(+)
----------------------------------------------------------------------




[14/50] [abbrv] incubator-geode git commit: GEODE-1993: refactor tests to use rules rather than abstract classes

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/GfshShellConnectionRule.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/GfshShellConnectionRule.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/GfshShellConnectionRule.java
index 4d1bae9..da7a883 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/security/GfshShellConnectionRule.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/security/GfshShellConnectionRule.java
@@ -25,10 +25,11 @@ import org.apache.geode.management.internal.cli.result.CommandResult;
 import org.apache.geode.management.internal.cli.result.ErrorResultData;
 import org.apache.geode.management.internal.cli.result.ResultBuilder;
 import org.apache.geode.management.internal.cli.util.CommandStringBuilder;
+import org.apache.geode.test.dunit.rules.ConnectionConfiguration;
 import org.apache.geode.test.junit.rules.DescribedExternalResource;
 
 /**
- * Class which eases the creation of MBeans for security testing. When combined with {@link JMXConnectionConfiguration}
+ * Class which eases the creation of MBeans for security testing. When combined with {@link ConnectionConfiguration}
  * it allows for the creation of per-test connections with different user/password combinations.
  */
 public class GfshShellConnectionRule extends DescribedExternalResource {
@@ -53,7 +54,7 @@ public class GfshShellConnectionRule extends DescribedExternalResource {
   }
 
   protected void before(Description description) throws Throwable {
-    JMXConnectionConfiguration config = description.getAnnotation(JMXConnectionConfiguration.class);
+    ConnectionConfiguration config = description.getAnnotation(ConnectionConfiguration.class);
     if(config==null)
       return;
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/JMXConnectionConfiguration.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/JMXConnectionConfiguration.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/JMXConnectionConfiguration.java
deleted file mode 100644
index 4f57baa..0000000
--- a/geode-core/src/test/java/org/apache/geode/management/internal/security/JMXConnectionConfiguration.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.geode.management.internal.security;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-/**
- * This annotation is intended to be used with {@link MBeanServerConnectionRule} in order to configure a per-test JMX
- * connection with a specific user and password.
- */
-@Retention(RetentionPolicy.RUNTIME)
-@Target({ElementType.METHOD})
-public @interface JMXConnectionConfiguration {
-  String user();
-  String password();
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/JavaRmiServerNameTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/JavaRmiServerNameTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/JavaRmiServerNameTest.java
index c544e6f..e885344 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/security/JavaRmiServerNameTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/security/JavaRmiServerNameTest.java
@@ -23,11 +23,13 @@ import static org.junit.Assert.*;
 
 import java.util.Properties;
 
+import org.junit.After;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import org.apache.geode.cache.CacheFactory;
 import org.apache.geode.internal.AvailablePort;
+import org.apache.geode.test.dunit.rules.ServerStarter;
 import org.apache.geode.test.junit.categories.IntegrationTest;
 
 @Category(IntegrationTest.class)
@@ -36,20 +38,26 @@ public class JavaRmiServerNameTest {
   private static final String JMX_HOST = "myHostname";
 
   private static int jmxManagerPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
+  static Properties properties = new Properties(){{
+    setProperty(JMX_MANAGER_PORT, AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET)+"");
+    setProperty("jmx-manager-hostname-for-clients", JMX_HOST);
+  }};
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    ServerStarter serverStarter = new ServerStarter(properties);
+    serverStarter.startServer();
+  }
 
   //https://issues.apache.org/jira/browse/GEODE-1548
   @Test
   public void testThatJavaRmiServerNameGetsSet() {
-    Properties properties = new Properties();
-    properties.put(LOCATORS, "");
-    properties.put(MCAST_PORT, "0");
-    properties.put(JMX_MANAGER, "true");
-    properties.put(JMX_MANAGER_START, "true");
-    properties.put(JMX_MANAGER_PORT, String.valueOf(jmxManagerPort));
-    properties.put("jmx-manager-hostname-for-clients", JMX_HOST);
-
-    new CacheFactory(properties).create();
     assertEquals(JMX_HOST, System.getProperty("java.rmi.server.hostname"));
   }
 
+  @After
+  public void after(){
+    System.setProperty("java.rmi.server.hostname", "");
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/JsonAuthorizationCacheStartRule.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/JsonAuthorizationCacheStartRule.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/JsonAuthorizationCacheStartRule.java
deleted file mode 100644
index 136319c..0000000
--- a/geode-core/src/test/java/org/apache/geode/management/internal/security/JsonAuthorizationCacheStartRule.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.geode.management.internal.security;
-
-import static org.apache.geode.distributed.ConfigurationProperties.*;
-
-import java.util.Properties;
-
-import org.apache.geode.security.templates.SampleSecurityManager;
-import org.junit.rules.ExternalResource;
-
-import org.apache.geode.cache.Cache;
-import org.apache.geode.cache.CacheFactory;
-
-public class JsonAuthorizationCacheStartRule extends ExternalResource {
-
-  private Cache cache;
-  private int jmxManagerPort = 0;
-  private int httpPort = 0;
-  private String jsonFile;
-  private Class postProcessor;
-
-  public JsonAuthorizationCacheStartRule(int jmxManagerPort, String jsonFile, Class postProcessor) {
-    this.jmxManagerPort = jmxManagerPort;
-    this.jsonFile = jsonFile;
-    this.postProcessor = postProcessor;
-  }
-
-  public JsonAuthorizationCacheStartRule(int jmxManagerPort, String jsonFile) {
-    this.jmxManagerPort = jmxManagerPort;
-    this.jsonFile = jsonFile;
-  }
-
-  public JsonAuthorizationCacheStartRule(int jmxManagerPort, int httpPort, String jsonFile) {
-    this.jmxManagerPort = jmxManagerPort;
-    this.httpPort = httpPort;
-    this.jsonFile = jsonFile;
-  }
-
-  protected void before() throws Throwable {
-    Properties properties = new Properties();
-    properties.put(SampleSecurityManager.SECURITY_JSON, jsonFile);
-    properties.put(NAME, JsonAuthorizationCacheStartRule.class.getSimpleName());
-    properties.put(LOCATORS, "");
-    properties.put(MCAST_PORT, "0");
-    properties.put(JMX_MANAGER, "true");
-    properties.put(JMX_MANAGER_START, "true");
-    properties.put(JMX_MANAGER_PORT, String.valueOf(jmxManagerPort));
-    properties.put(HTTP_SERVICE_PORT, String.valueOf(httpPort));
-    properties.put(SECURITY_MANAGER, SampleSecurityManager.class.getName());
-
-    if (postProcessor!=null) {
-      properties.put(SECURITY_POST_PROCESSOR, postProcessor.getName());
-    }
-
-    cache = new CacheFactory(properties).create();
-    cache.addCacheServer().start();
-    cache.createRegionFactory().create("region1");
-  }
-
-  public Cache getCache() {
-    return cache;
-  }
-
-  /**
-   * Override to tear down your specific external resource.
-   */
-  protected void after() {
-    cache.close();
-    cache = null;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/LockServiceMBeanAuthorizationJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/LockServiceMBeanAuthorizationJUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/LockServiceMBeanAuthorizationJUnitTest.java
index 1377fb6..2862369 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/security/LockServiceMBeanAuthorizationJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/security/LockServiceMBeanAuthorizationJUnitTest.java
@@ -31,6 +31,8 @@ import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.distributed.internal.locks.DLockService;
 import org.apache.geode.internal.AvailablePort;
 import org.apache.geode.management.LockServiceMXBean;
+import org.apache.geode.test.dunit.rules.ConnectionConfiguration;
+import org.apache.geode.test.dunit.rules.MBeanServerConnectionRule;
 import org.apache.geode.test.junit.categories.IntegrationTest;
 import org.apache.geode.test.junit.categories.SecurityTest;
 
@@ -42,8 +44,7 @@ public class LockServiceMBeanAuthorizationJUnitTest {
   private LockServiceMXBean lockServiceMBean;
 
   @ClassRule
-  public static JsonAuthorizationCacheStartRule serverRule = new JsonAuthorizationCacheStartRule(
-      jmxManagerPort, "org/apache/geode/management/internal/security/cacheServer.json");
+  public static CacheServerStartupRule serverRule = CacheServerStartupRule.withDefaultSecurityJson(jmxManagerPort);
 
   @Rule
   public MBeanServerConnectionRule connectionRule = new MBeanServerConnectionRule(jmxManagerPort);
@@ -65,7 +66,7 @@ public class LockServiceMBeanAuthorizationJUnitTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "data-admin", password = "1234567")
+  @ConnectionConfiguration(user = "data-admin", password = "1234567")
   public void testAllAccess() throws Exception {
     lockServiceMBean.becomeLockGrantor();
     lockServiceMBean.fetchGrantorMember();
@@ -75,14 +76,14 @@ public class LockServiceMBeanAuthorizationJUnitTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "cluster-admin", password = "1234567")
+  @ConnectionConfiguration(user = "cluster-admin", password = "1234567")
   public void testSomeAccess() throws Exception {
     assertThatThrownBy(() -> lockServiceMBean.becomeLockGrantor());
     lockServiceMBean.getMemberCount();
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "data-user", password = "1234567")
+  @ConnectionConfiguration(user = "data-user", password = "1234567")
   public void testNoAccess() throws Exception {
     assertThatThrownBy(() -> lockServiceMBean.becomeLockGrantor()).hasMessageContaining(TestCommand.dataManage.toString());
     assertThatThrownBy(() -> lockServiceMBean.fetchGrantorMember()).hasMessageContaining(TestCommand.clusterRead.toString());
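
The refactored test above combines three rules: CacheServerStartupRule as a class rule, MBeanServerConnectionRule as a per-test rule, and the ConnectionConfiguration annotation for per-test credentials. A minimal illustrative sketch of that pattern follows; it is not part of this commit, the class and test names are made up, and CacheServerStartupRule is assumed to live in the same package since the tests reference it without an import.

// Minimal illustrative sketch, not part of this commit. Assumes the rules
// used in the refactored tests above: CacheServerStartupRule (assumed to be
// in this package), plus MBeanServerConnectionRule and ConnectionConfiguration
// from org.apache.geode.test.dunit.rules.
package org.apache.geode.management.internal.security;

import static org.junit.Assert.assertNotNull;

import javax.management.MBeanServerConnection;

import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import org.apache.geode.internal.AvailablePort;
import org.apache.geode.test.dunit.rules.ConnectionConfiguration;
import org.apache.geode.test.dunit.rules.MBeanServerConnectionRule;
import org.apache.geode.test.junit.categories.IntegrationTest;
import org.apache.geode.test.junit.categories.SecurityTest;

@Category({ IntegrationTest.class, SecurityTest.class })
public class ExampleMBeanAuthorizationSketchTest {

  private static int jmxManagerPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);

  // Starts a cache server with a JMX manager secured by the default security JSON.
  @ClassRule
  public static CacheServerStartupRule serverRule =
      CacheServerStartupRule.withDefaultSecurityJson(jmxManagerPort);

  // Opens a JMX connection before each test, using the credentials supplied by
  // the @ConnectionConfiguration annotation on that test method.
  @Rule
  public MBeanServerConnectionRule connectionRule = new MBeanServerConnectionRule(jmxManagerPort);

  @Test
  @ConnectionConfiguration(user = "data-user", password = "1234567")
  public void connectsWithPerTestCredentials() throws Exception {
    MBeanServerConnection connection = connectionRule.getMBeanServerConnection();
    assertNotNull(connection);
  }
}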

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/MBeanSecurityJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/MBeanSecurityJUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/MBeanSecurityJUnitTest.java
index 4beff0b..9614bf8 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/security/MBeanSecurityJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/security/MBeanSecurityJUnitTest.java
@@ -40,6 +40,8 @@ import org.apache.geode.management.ManagementException;
 import org.apache.geode.management.ManagementService;
 import org.apache.geode.management.MemberMXBean;
 import org.apache.geode.management.internal.MBeanJMXAdapter;
+import org.apache.geode.test.dunit.rules.ConnectionConfiguration;
+import org.apache.geode.test.dunit.rules.MBeanServerConnectionRule;
 import org.apache.geode.test.junit.categories.IntegrationTest;
 import org.apache.geode.test.junit.categories.SecurityTest;
 
@@ -49,7 +51,7 @@ public class MBeanSecurityJUnitTest {
   private static int jmxManagerPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
 
   @ClassRule
-  public static JsonAuthorizationCacheStartRule serverRule = new JsonAuthorizationCacheStartRule(jmxManagerPort, "org/apache/geode/management/internal/security/cacheServer.json");
+  public static CacheServerStartupRule serverRule = CacheServerStartupRule.withDefaultSecurityJson(jmxManagerPort);
 
   @Rule
   public MBeanServerConnectionRule connectionRule = new MBeanServerConnectionRule(jmxManagerPort);
@@ -58,7 +60,7 @@ public class MBeanSecurityJUnitTest {
    * No user can call createBean or unregisterBean of GemFire Domain
    */
   @Test
-  @JMXConnectionConfiguration(user = "super-user", password = "1234567")
+  @ConnectionConfiguration(user = "super-user", password = "1234567")
   public void testNoAccessWithWhoever() throws Exception{
     MBeanServerConnection con = connectionRule.getMBeanServerConnection();
     assertThatThrownBy(
@@ -79,7 +81,7 @@ public class MBeanSecurityJUnitTest {
    * looks like everyone can query for beans, but the AccessControlMXBean is filtered from the result
    */
   @Test
-  @JMXConnectionConfiguration(user = "stranger", password = "1234567")
+  @ConnectionConfiguration(user = "stranger", password = "1234567")
   public void testQueryBean() throws MalformedObjectNameException, IOException {
     MBeanServerConnection con = connectionRule.getMBeanServerConnection();
     Set<ObjectInstance> objects = con.queryMBeans(ObjectName.getInstance(ResourceConstants.OBJECT_NAME_ACCESSCONTROL), null);
@@ -106,7 +108,7 @@ public class MBeanSecurityJUnitTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "stranger", password = "1234567")
+  @ConnectionConfiguration(user = "stranger", password = "1234567")
   public void testServerSideCalls(){
     // calls through ManagementService is not going through authorization checks
     ManagementService service = ManagementService.getManagementService(serverRule.getCache());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/MBeanServerConnectionRule.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/MBeanServerConnectionRule.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/MBeanServerConnectionRule.java
deleted file mode 100644
index 9243032..0000000
--- a/geode-core/src/test/java/org/apache/geode/management/internal/security/MBeanServerConnectionRule.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.geode.management.internal.security;
-
-import static org.junit.Assert.*;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-import javax.management.JMX;
-import javax.management.MBeanServerConnection;
-import javax.management.MalformedObjectNameException;
-import javax.management.ObjectInstance;
-import javax.management.ObjectName;
-import javax.management.Query;
-import javax.management.QueryExp;
-import javax.management.remote.JMXConnector;
-import javax.management.remote.JMXConnectorFactory;
-import javax.management.remote.JMXServiceURL;
-
-import org.junit.runner.Description;
-
-import org.apache.geode.test.junit.rules.DescribedExternalResource;
-
-/**
- * Class which eases the creation of MBeans for security testing. When combined with {@link JMXConnectionConfiguration}
- * it allows for the creation of per-test connections with different user/password combinations.
- */
-public class MBeanServerConnectionRule extends DescribedExternalResource {
-
-  private final int jmxServerPort;
-  private JMXConnector jmxConnector;
-  private MBeanServerConnection con;
-
-  /**
-   * Rule constructor
-   *
-   * @param port The JMX server port to connect to
-   */
-  public MBeanServerConnectionRule(int port) {
-    this.jmxServerPort = port;
-  }
-
-  /**
-   * Retrieve a new proxy MBean
-   *
-   * @return A new proxy MBean of the same type with which the class was constructed
-   */
-  public <T> T getProxyMBean(Class<T> proxyClass, String beanQueryName) throws MalformedObjectNameException, IOException {
-    ObjectName name = null;
-    QueryExp query = null;
-
-    if (proxyClass != null) {
-      query = Query.isInstanceOf(Query.value(proxyClass.getName()));
-    }
-
-    if (beanQueryName != null) {
-      name = ObjectName.getInstance(beanQueryName);
-    }
-
-    Set<ObjectInstance> beans = con.queryMBeans(name, query);
-    assertEquals("failed to find only one instance of type " + proxyClass.getName() + " with name " + beanQueryName, 1, beans.size());
-
-    return JMX.newMXBeanProxy(con, ((ObjectInstance) beans.toArray()[0]).getObjectName(), proxyClass);
-  }
-
-  public AccessControlMXBean getAccessControlMBean() throws Exception{
-    return JMX.newMXBeanProxy(con, new ObjectName("GemFire:service=AccessControl,type=Distributed"), AccessControlMXBean.class);
-  }
-
-  /**
-   * Retrieve a new proxy MBean
-   *
-   * @return A new proxy MBean of the same type with which the class was constructed
-   */
-  public <T> T getProxyMBean(Class<T> proxyClass) throws MalformedObjectNameException, IOException {
-    return getProxyMBean(proxyClass, null);
-  }
-
-  public <T> T getProxyMBean(String beanQueryName) throws MalformedObjectNameException, IOException {
-    return getProxyMBean(null, beanQueryName);
-  }
-
-  public MBeanServerConnection getMBeanServerConnection() throws IOException {
-    return con;
-  }
-
-  protected void before(Description description) throws Throwable {
-    JMXConnectionConfiguration config = description.getAnnotation(JMXConnectionConfiguration.class);
-    Map<String, String[]> env = new HashMap<>();
-    if (config != null) {
-      String user = config.user();
-      String password = config.password();
-      env.put(JMXConnector.CREDENTIALS, new String[] { user, password });
-
-      JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://:" + jmxServerPort + "/jmxrmi");
-      jmxConnector = JMXConnectorFactory.connect(url, env);
-      con = jmxConnector.getMBeanServerConnection();
-    }
-  }
-
-  /**
-   * Override to tear down your specific external resource.
-   */
-  protected void after(Description description) throws Throwable {
-    if (jmxConnector != null) {
-      jmxConnector.close();
-      jmxConnector = null;
-    }
-
-    con = null;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/ManagerMBeanAuthorizationJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/ManagerMBeanAuthorizationJUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/ManagerMBeanAuthorizationJUnitTest.java
index 873b649..ed653f9 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/security/ManagerMBeanAuthorizationJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/security/ManagerMBeanAuthorizationJUnitTest.java
@@ -33,6 +33,8 @@ import org.junit.experimental.categories.Category;
 import org.apache.geode.internal.AvailablePort;
 import org.apache.geode.management.ManagerMXBean;
 import org.apache.geode.management.internal.beans.ManagerMBean;
+import org.apache.geode.test.dunit.rules.ConnectionConfiguration;
+import org.apache.geode.test.dunit.rules.MBeanServerConnectionRule;
 import org.apache.geode.test.junit.categories.IntegrationTest;
 import org.apache.geode.test.junit.categories.SecurityTest;
 
@@ -44,8 +46,7 @@ public class ManagerMBeanAuthorizationJUnitTest {
   private ManagerMXBean managerMXBean;
 
   @ClassRule
-  public static JsonAuthorizationCacheStartRule serverRule = new JsonAuthorizationCacheStartRule(
-      jmxManagerPort, "org/apache/geode/management/internal/security/cacheServer.json");
+  public static CacheServerStartupRule serverRule = CacheServerStartupRule.withDefaultSecurityJson(jmxManagerPort);
 
   @Rule
   public MBeanServerConnectionRule connectionRule = new MBeanServerConnectionRule(jmxManagerPort);
@@ -64,7 +65,7 @@ public class ManagerMBeanAuthorizationJUnitTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "cluster-admin", password = "1234567")
+  @ConnectionConfiguration(user = "cluster-admin", password = "1234567")
   public void testAllAccess() throws Exception {
     managerMXBean.setPulseURL("foo");
     managerMXBean.start();
@@ -73,7 +74,7 @@ public class ManagerMBeanAuthorizationJUnitTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "data-admin", password = "1234567")
+  @ConnectionConfiguration(user = "data-admin", password = "1234567")
   public void testSomeAccess() throws Exception {
     assertThatThrownBy(() -> managerMXBean.start()).hasMessageContaining(TestCommand.clusterManage.toString());
     assertThatThrownBy(() -> managerMXBean.getPulseURL()).hasMessageContaining(TestCommand.clusterWrite.toString());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/MemberMBeanSecurityJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/MemberMBeanSecurityJUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/MemberMBeanSecurityJUnitTest.java
index e5cbd15..8d7dbe5 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/security/MemberMBeanSecurityJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/security/MemberMBeanSecurityJUnitTest.java
@@ -26,6 +26,8 @@ import org.junit.experimental.categories.Category;
 
 import org.apache.geode.internal.AvailablePort;
 import org.apache.geode.management.MemberMXBean;
+import org.apache.geode.test.dunit.rules.ConnectionConfiguration;
+import org.apache.geode.test.dunit.rules.MBeanServerConnectionRule;
 import org.apache.geode.test.junit.categories.IntegrationTest;
 import org.apache.geode.test.junit.categories.SecurityTest;
 
@@ -37,8 +39,7 @@ public class MemberMBeanSecurityJUnitTest {
   private MemberMXBean bean;
 
   @ClassRule
-  public static JsonAuthorizationCacheStartRule serverRule = new JsonAuthorizationCacheStartRule(
-      jmxManagerPort, "org/apache/geode/management/internal/security/cacheServer.json");
+  public static CacheServerStartupRule serverRule = CacheServerStartupRule.withDefaultSecurityJson(jmxManagerPort);
 
   @Rule
   public MBeanServerConnectionRule connectionRule = new MBeanServerConnectionRule(jmxManagerPort);
@@ -49,7 +50,7 @@ public class MemberMBeanSecurityJUnitTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "super-user", password = "1234567")
+  @ConnectionConfiguration(user = "super-user", password = "1234567")
   public void testAllAccess() throws Exception {
     bean.shutDownMember();
     bean.compactAllDiskStores();
@@ -67,7 +68,7 @@ public class MemberMBeanSecurityJUnitTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "cluster-admin", password = "1234567")
+  @ConnectionConfiguration(user = "cluster-admin", password = "1234567")
   public void testClusterAdmin() throws Exception {
     assertThatThrownBy(() -> bean.compactAllDiskStores()).hasMessageContaining(TestCommand.dataManage.toString());
     bean.shutDownMember();
@@ -84,7 +85,7 @@ public class MemberMBeanSecurityJUnitTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "data-admin", password = "1234567")
+  @ConnectionConfiguration(user = "data-admin", password = "1234567")
   public void testDataAdmin() throws Exception {
     bean.compactAllDiskStores();
     assertThatThrownBy(() -> bean.shutDownMember()).hasMessageContaining(TestCommand.clusterManage.toString());
@@ -94,7 +95,7 @@ public class MemberMBeanSecurityJUnitTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "data-user", password = "1234567")
+  @ConnectionConfiguration(user = "data-user", password = "1234567")
   public void testDataUser() throws Exception {
     assertThatThrownBy(() -> bean.shutDownMember()).hasMessageContaining(TestCommand.clusterManage.toString());
     assertThatThrownBy(() -> bean.createManager()).hasMessageContaining(TestCommand.clusterManage.toString());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/ResourcePermissionTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/ResourcePermissionTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/ResourcePermissionTest.java
index d6491ff..8378876 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/security/ResourcePermissionTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/security/ResourcePermissionTest.java
@@ -18,19 +18,18 @@ package org.apache.geode.management.internal.security;
 
 import static org.junit.Assert.*;
 
-import org.apache.geode.security.ResourcePermission;
-import org.apache.geode.security.ResourcePermission.Operation;
-import org.apache.geode.security.ResourcePermission.Resource;
 import org.apache.shiro.authz.permission.WildcardPermission;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import org.apache.geode.security.ResourcePermission;
+import org.apache.geode.security.ResourcePermission.Operation;
+import org.apache.geode.security.ResourcePermission.Resource;
 import org.apache.geode.test.junit.categories.SecurityTest;
 import org.apache.geode.test.junit.categories.UnitTest;
 
 @Category({ UnitTest.class, SecurityTest.class })
 public class ResourcePermissionTest {
-
   private ResourcePermission context;
 
   @Test
@@ -90,5 +89,23 @@ public class ResourcePermissionTest {
 
     context = new ResourcePermission("DATA", "MANAGE", "REGIONA");
     assertEquals("DATA:MANAGE:REGIONA", context.toString());
+
+    context = new ResourcePermission("data", "manage");
+    assertEquals("DATA:MANAGE", context.toString());
+  }
+
+  @Test
+  public void testImples(){
+    WildcardPermission role = new WildcardPermission("*:read");
+    role.implies(new ResourcePermission("data", "read"));
+    role.implies(new ResourcePermission("cluster", "read"));
+
+    role = new WildcardPermission("*:read:*");
+    role.implies(new ResourcePermission("data", "read", "testRegion"));
+    role.implies(new ResourcePermission("cluster", "read", "anotherRegion", "key1"));
+
+    role = new WildcardPermission("data:*:testRegion");
+    role.implies(new ResourcePermission("data", "read", "testRegion"));
+    role.implies(new ResourcePermission("data", "write", "testRegion"));
   }
 }
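
The new testImples() method above calls implies() but never asserts the returned booleans. A stricter variant could capture and assert them, roughly as in the sketch below; it is illustrative only, not part of this commit, and the expected results assume Shiro's standard WildcardPermission part-matching semantics together with the ResourcePermission constructors exercised in the test.

// Illustrative sketch only, not part of this commit. Assumes Shiro's standard
// WildcardPermission matching semantics for the permissions built above.
package org.apache.geode.management.internal.security;

import static org.junit.Assert.assertTrue;

import org.apache.shiro.authz.permission.WildcardPermission;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import org.apache.geode.security.ResourcePermission;
import org.apache.geode.test.junit.categories.SecurityTest;
import org.apache.geode.test.junit.categories.UnitTest;

@Category({ UnitTest.class, SecurityTest.class })
public class ResourcePermissionImpliesSketchTest {

  @Test
  public void wildcardRolesImplyConcreteResourcePermissions() {
    // A role allowed to read everything should imply concrete read permissions.
    WildcardPermission readAnything = new WildcardPermission("*:read");
    assertTrue(readAnything.implies(new ResourcePermission("data", "read")));
    assertTrue(readAnything.implies(new ResourcePermission("cluster", "read")));

    // A role scoped to one region should imply both read and write on that region.
    WildcardPermission allOfTestRegion = new WildcardPermission("data:*:testRegion");
    assertTrue(allOfTestRegion.implies(new ResourcePermission("data", "read", "testRegion")));
    assertTrue(allOfTestRegion.implies(new ResourcePermission("data", "write", "testRegion")));
  }
}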

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/ShiroCacheStartRule.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/ShiroCacheStartRule.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/ShiroCacheStartRule.java
deleted file mode 100644
index 848c05c..0000000
--- a/geode-core/src/test/java/org/apache/geode/management/internal/security/ShiroCacheStartRule.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.geode.management.internal.security;
-
-import static org.apache.geode.distributed.ConfigurationProperties.*;
-
-import java.util.Properties;
-
-import org.junit.rules.ExternalResource;
-
-import org.apache.geode.cache.Cache;
-import org.apache.geode.cache.CacheFactory;
-
-public class ShiroCacheStartRule extends ExternalResource {
-  private Cache cache;
-  private int jmxManagerPort;
-  private String shiroFile;
-
-  public ShiroCacheStartRule(int jmxManagerPort, String shiroFile) {
-    this.jmxManagerPort = jmxManagerPort;
-    this.shiroFile = shiroFile;
-  }
-
-  protected void before() throws Throwable {
-    Properties properties = new Properties();
-    properties.put(NAME, ShiroCacheStartRule.class.getSimpleName());
-    properties.put(LOCATORS, "");
-    properties.put(MCAST_PORT, "0");
-    properties.put(JMX_MANAGER, "true");
-    properties.put(JMX_MANAGER_START, "true");
-    properties.put(JMX_MANAGER_PORT, String.valueOf(jmxManagerPort));
-    properties.put(HTTP_SERVICE_PORT, "0");
-    properties.put(SECURITY_SHIRO_INIT, shiroFile);
-
-    cache = new CacheFactory(properties).create();
-    cache.addCacheServer().start();
-  }
-
-  public Cache getCache(){
-    return cache;
-  }
-
-  /**
-   * Override to tear down your specific external resource.
-   */
-  protected void after() {
-    cache.close();
-    cache = null;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/security/AbstractSecureServerDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/security/AbstractSecureServerDUnitTest.java b/geode-core/src/test/java/org/apache/geode/security/AbstractSecureServerDUnitTest.java
index d2e4440..2cf804b 100644
--- a/geode-core/src/test/java/org/apache/geode/security/AbstractSecureServerDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/security/AbstractSecureServerDUnitTest.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.geode.security;
 
 import static org.apache.geode.distributed.ConfigurationProperties.*;
@@ -27,24 +28,20 @@ import java.util.Properties;
 import org.assertj.core.api.ThrowableAssert.ThrowingCallable;
 import org.junit.Before;
 
-import org.apache.geode.cache.Cache;
-import org.apache.geode.cache.CacheFactory;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionShortcut;
 import org.apache.geode.cache.client.ClientCache;
 import org.apache.geode.cache.client.ClientCacheFactory;
 import org.apache.geode.cache.client.ClientRegionShortcut;
-import org.apache.geode.cache.server.CacheServer;
-import org.apache.geode.distributed.ConfigurationProperties;
 import org.apache.geode.security.templates.SampleSecurityManager;
 import org.apache.geode.security.templates.UserPasswordAuthInit;
 import org.apache.geode.test.dunit.Host;
 import org.apache.geode.test.dunit.IgnoredException;
-import org.apache.geode.test.dunit.Invoke;
 import org.apache.geode.test.dunit.VM;
-import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
+import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
+import org.apache.geode.test.dunit.rules.ServerStarter;
 
-public class AbstractSecureServerDUnitTest extends JUnit4CacheTestCase {
+public abstract class AbstractSecureServerDUnitTest extends JUnit4DistributedTestCase {
 
   protected static final String REGION_NAME = "AuthRegion";
 
@@ -52,87 +49,41 @@ public class AbstractSecureServerDUnitTest extends JUnit4CacheTestCase {
   protected VM client2 = null;
   protected VM client3 = null;
   protected int serverPort;
-
-  // child classes can customize these parameters
-  protected Class postProcessor = null;
   protected boolean pdxPersistent = false;
-  protected int jmxPort = 0;
-  protected int restPort = 0;
-  protected Map<String, Object> values;
-  protected volatile Properties dsProperties;
 
-  public AbstractSecureServerDUnitTest(){
-    values = new HashMap();
+  // override this in child classes
+  public Properties getProperties(){
+    return new Properties() {{
+      setProperty(SECURITY_MANAGER, SampleSecurityManager.class.getName());
+      setProperty(SampleSecurityManager.SECURITY_JSON, "org/apache/geode/management/internal/security/clientServer.json");
+    }};
+  }
+
+  // override this if you want a different set of initial data
+  public Map<String, String> getData(){
+    Map<String, String> data = new HashMap();
     for(int i=0; i<5; i++){
-      values.put("key"+i, "value"+i);
+      data.put("key"+i, "value"+i);
     }
+    return data;
   }
 
   @Before
   public void before() throws Exception {
-    IgnoredException.addIgnoredException("No longer connected to localhost");
+    ServerStarter serverStarter = new ServerStarter(getProperties());
+    serverStarter.startServer(0, pdxPersistent);
+    serverPort = serverStarter.server.getPort();
+    Region region = serverStarter.cache.createRegionFactory(RegionShortcut.REPLICATE).create(REGION_NAME);
+    for(Entry entry:getData().entrySet()){
+      region.put(entry.getKey(), entry.getValue());
+    }
 
+    IgnoredException.addIgnoredException("No longer connected to localhost");
+    IgnoredException.addIgnoredException(AuthenticationFailedException.class.getName());
     final Host host = Host.getHost(0);
     this.client1 = host.getVM(1);
     this.client2 = host.getVM(2);
     this.client3 = host.getVM(3);
-
-    Properties props = new Properties();
-    props.setProperty(SampleSecurityManager.SECURITY_JSON, "org/apache/geode/management/internal/security/clientServer.json");
-    props.setProperty(SECURITY_MANAGER, SampleSecurityManager.class.getName());
-    props.setProperty(LOCATORS, "");
-    props.setProperty(MCAST_PORT, "0");
-    if (postProcessor!=null) {
-      props.setProperty(SECURITY_POST_PROCESSOR, postProcessor.getName());
-    }
-    props.setProperty(SECURITY_LOG_LEVEL, "finest");
-
-    props.setProperty("security-pdx", pdxPersistent+"");
-    if(jmxPort>0){
-      props.put(JMX_MANAGER, "true");
-      props.put(JMX_MANAGER_START, "true");
-      props.put(JMX_MANAGER_PORT, String.valueOf(jmxPort));
-    }
-
-    if(restPort>0){
-      props.setProperty(START_DEV_REST_API, "true");
-      props.setProperty(HTTP_SERVICE_BIND_ADDRESS, "localhost");
-      props.setProperty(HTTP_SERVICE_PORT, restPort+"");
-    }
-
-    props.put(ConfigurationProperties.ENABLE_NETWORK_PARTITION_DETECTION, "false");
-    
-    this.dsProperties = props;
-
-    getSystem(props);
-
-    CacheFactory cf = new CacheFactory();
-    cf.setPdxPersistent(pdxPersistent);
-    cf.setPdxReadSerialized(pdxPersistent);
-    Cache cache = getCache(cf);
-
-    Region region = cache.createRegionFactory(RegionShortcut.REPLICATE).create(REGION_NAME);
-
-    CacheServer server = cache.addCacheServer();
-    server.setPort(0);
-    server.start();
-
-    this.serverPort = server.getPort();
-
-    for(Entry entry:values.entrySet()){
-      region.put(entry.getKey(), entry.getValue());
-    }
-  }
-
-  @Override
-  public Properties getDistributedSystemProperties() {
-    return dsProperties;
-  }
-
-  @Override
-  public void preTearDownCacheTestCase() throws Exception {
-    Invoke.invokeInEveryVM(()->closeCache());
-    closeCache();
   }
 
   public static void assertNotAuthorized(ThrowingCallable shouldRaiseThrowable, String permString) {
@@ -146,8 +97,7 @@ public class AbstractSecureServerDUnitTest extends JUnit4CacheTestCase {
     props.setProperty(LOG_LEVEL, "fine");
     props.setProperty(LOCATORS, "");
     props.setProperty(MCAST_PORT, "0");
-    props.setProperty(SECURITY_CLIENT_AUTH_INIT, UserPasswordAuthInit.class.getName() + ".create");
-    props.setProperty(SECURITY_LOG_LEVEL, "finest");
+    props.setProperty(SECURITY_CLIENT_AUTH_INIT, UserPasswordAuthInit.class.getName());
     return props;
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/security/ClusterConfigWithoutSecurityDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/security/ClusterConfigWithoutSecurityDUnitTest.java b/geode-core/src/test/java/org/apache/geode/security/ClusterConfigWithoutSecurityDUnitTest.java
index 3854bb1..72dbd1a 100644
--- a/geode-core/src/test/java/org/apache/geode/security/ClusterConfigWithoutSecurityDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/security/ClusterConfigWithoutSecurityDUnitTest.java
@@ -30,13 +30,13 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import org.apache.geode.GemFireConfigException;
-import org.apache.geode.cache.CacheFactory;
-import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.internal.i18n.LocalizedStrings;
 import org.apache.geode.security.templates.SimpleSecurityManager;
 import org.apache.geode.test.dunit.IgnoredException;
 import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
-import org.apache.geode.test.dunit.rules.LocatorServerConfigurationRule;
+import org.apache.geode.test.dunit.rules.LocatorServerStartupRule;
+import org.apache.geode.test.dunit.rules.ServerStarter;
 import org.apache.geode.test.junit.categories.DistributedTest;
 import org.apache.geode.test.junit.categories.SecurityTest;
 
@@ -45,13 +45,13 @@ import org.apache.geode.test.junit.categories.SecurityTest;
 public class ClusterConfigWithoutSecurityDUnitTest extends JUnit4DistributedTestCase {
 
   @Rule
-  public LocatorServerConfigurationRule lsRule = new LocatorServerConfigurationRule(this);
+  public LocatorServerStartupRule lsRule = new LocatorServerStartupRule();
 
   @Before
   public void before() throws Exception {
     IgnoredException.addIgnoredException(LocalizedStrings.GEMFIRE_CACHE_SECURITY_MISCONFIGURATION.toString());
     IgnoredException.addIgnoredException(LocalizedStrings.GEMFIRE_CACHE_SECURITY_MISCONFIGURATION_2.toString());
-    lsRule.getLocatorVM(new Properties());
+    lsRule.getLocatorVM(0, new Properties());
   }
 
   @After
@@ -70,10 +70,9 @@ public class ClusterConfigWithoutSecurityDUnitTest extends JUnit4DistributedTest
     props.setProperty("use-cluster-configuration", "false");
 
     // initial security properties should only contain initial set of values
-    InternalDistributedSystem ds = lsRule.getSystem(props);
-    assertEquals(2, ds.getSecurityProperties().size());
-
-    CacheFactory.create(ds);
+    ServerStarter serverStarter = new ServerStarter(props);
+    serverStarter.startServer(lsRule.getLocatorPort(0));
+    DistributedSystem ds = serverStarter.cache.getDistributedSystem();
 
     // after cache is created, the configuration won't change
     Properties secProps = ds.getSecurityProperties();
@@ -91,12 +90,11 @@ public class ClusterConfigWithoutSecurityDUnitTest extends JUnit4DistributedTest
     props.setProperty("security-manager", "mySecurityManager");
     props.setProperty("use-cluster-configuration", "true");
 
-    InternalDistributedSystem ds = lsRule.getSystem(props);
-
-    assertThatThrownBy(() -> CacheFactory.create(ds)).isInstanceOf(GemFireConfigException.class)
-                                                     .hasMessage(LocalizedStrings.GEMFIRE_CACHE_SECURITY_MISCONFIGURATION
-                                                       .toLocalizedString());
+    ServerStarter serverStarter = new ServerStarter(props);
 
+    assertThatThrownBy(() -> serverStarter.startServer(lsRule.getLocatorPort(0)))
+      .isInstanceOf(GemFireConfigException.class)
+      .hasMessage(LocalizedStrings.GEMFIRE_CACHE_SECURITY_MISCONFIGURATION.toLocalizedString());
   }
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/security/IntegratedClientAuthDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/security/IntegratedClientAuthDUnitTest.java b/geode-core/src/test/java/org/apache/geode/security/IntegratedClientAuthDUnitTest.java
index 2aa633c..3a066ca 100644
--- a/geode-core/src/test/java/org/apache/geode/security/IntegratedClientAuthDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/security/IntegratedClientAuthDUnitTest.java
@@ -17,6 +17,7 @@
 package org.apache.geode.security;
 
 import static com.googlecode.catchexception.CatchException.*;
+import static com.googlecode.catchexception.apis.BDDCatchException.caughtException;
 import static org.assertj.core.api.Assertions.*;
 
 import org.junit.Test;
@@ -24,42 +25,28 @@ import org.junit.experimental.categories.Category;
 
 import org.apache.geode.cache.client.ClientCache;
 import org.apache.geode.cache.client.ClientCacheFactory;
-import org.apache.geode.cache.client.ClientRegionFactory;
-import org.apache.geode.cache.client.ClientRegionShortcut;
-import org.apache.geode.test.dunit.IgnoredException;
 import org.apache.geode.test.junit.categories.DistributedTest;
-import org.apache.geode.test.junit.categories.FlakyTest;
 import org.apache.geode.test.junit.categories.SecurityTest;
 
 @Category({ DistributedTest.class, SecurityTest.class })
 public class IntegratedClientAuthDUnitTest extends AbstractSecureServerDUnitTest {
 
-  @Category(FlakyTest.class) // GEODE-1877
   @Test
   public void authWithCorrectPasswordShouldPass() {
     client1.invoke("logging in super-user with correct password", () -> {
       ClientCache cache = new ClientCacheFactory(createClientProperties("super-user", "1234567")).setPoolSubscriptionEnabled(true)
                                                                                                  .addPoolServer("localhost", serverPort)
                                                                                                  .create();
-
-      ClientRegionFactory<String, String> crf = cache.createClientRegionFactory(ClientRegionShortcut.PROXY);
-
-      crf.create(REGION_NAME);
     });
   }
 
-  @Category(FlakyTest.class) // GEODE-1875
   @Test
   public void authWithIncorrectPasswordShouldFail() {
-    IgnoredException.addIgnoredException(AuthenticationFailedException.class.getName());
-
     client2.invoke("logging in super-user with wrong password", () -> {
-      AuthenticationFailedException expected = new AuthenticationFailedException("Authentication error. Please check your credentials.");
-
-      catchException(new ClientCacheFactory(createClientProperties("super-user", "wrong")).setPoolSubscriptionEnabled(true)
+      catchException(new ClientCacheFactory(createClientProperties("data", "wrong")).setPoolSubscriptionEnabled(true)
                                                                                           .addPoolServer("localhost", serverPort))
         .create();
-      assertThat((Throwable) caughtException()).hasCause(expected);
+      assertThat((Throwable) caughtException()).isInstanceOf(AuthenticationFailedException.class);
     });
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/security/NoShowValue1PostProcessorDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/security/NoShowValue1PostProcessorDUnitTest.java b/geode-core/src/test/java/org/apache/geode/security/NoShowValue1PostProcessorDUnitTest.java
index d2a9887..932235b 100644
--- a/geode-core/src/test/java/org/apache/geode/security/NoShowValue1PostProcessorDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/security/NoShowValue1PostProcessorDUnitTest.java
@@ -16,11 +16,13 @@
  */
 package org.apache.geode.security;
 
+import static org.apache.geode.distributed.ConfigurationProperties.SECURITY_POST_PROCESSOR;
 import static org.junit.Assert.*;
 
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.Properties;
 
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -36,8 +38,10 @@ import org.apache.geode.test.junit.categories.SecurityTest;
 @Category({ DistributedTest.class, SecurityTest.class })
 public class NoShowValue1PostProcessorDUnitTest extends AbstractSecureServerDUnitTest {
 
-  public NoShowValue1PostProcessorDUnitTest(){
-    this.postProcessor = NoShowValue1PostProcessor.class;
+  public Properties getProperties(){
+    Properties  properties = super.getProperties();
+    properties.setProperty(SECURITY_POST_PROCESSOR, NoShowValue1PostProcessor.class.getName());
+    return properties;
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/security/PDXPostProcessorDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/security/PDXPostProcessorDUnitTest.java b/geode-core/src/test/java/org/apache/geode/security/PDXPostProcessorDUnitTest.java
index cf0df1b..7423fdc 100644
--- a/geode-core/src/test/java/org/apache/geode/security/PDXPostProcessorDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/security/PDXPostProcessorDUnitTest.java
@@ -17,12 +17,15 @@
 
 package org.apache.geode.security;
 
+import static org.apache.geode.distributed.ConfigurationProperties.*;
 import static org.junit.Assert.*;
 
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.Iterator;
+import java.util.Map;
+import java.util.Properties;
 import java.util.concurrent.TimeUnit;
 
 import com.jayway.awaitility.Awaitility;
@@ -59,6 +62,7 @@ import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactor
 @Parameterized.UseParametersRunnerFactory(CategoryWithParameterizedRunnerFactory.class)
 public class PDXPostProcessorDUnitTest extends AbstractSecureServerDUnitTest {
   private static byte[] BYTES = PDXPostProcessor.BYTES;
+  private int jmxPort = AvailablePortHelper.getRandomAvailableTCPPort();
 
   @Parameterized.Parameters
   public static Collection<Object[]> parameters(){
@@ -66,11 +70,19 @@ public class PDXPostProcessorDUnitTest extends AbstractSecureServerDUnitTest {
     return Arrays.asList(params);
   }
 
+  public Properties getProperties(){
+    Properties  properties = super.getProperties();
+    properties.setProperty(SECURITY_POST_PROCESSOR, PDXPostProcessor.class.getName());
+    properties.setProperty(JMX_MANAGER_PORT, jmxPort+"");
+    properties.setProperty("security-pdx", pdxPersistent+"");
+    return properties;
+  }
+
+  public Map<String, String> getData(){
+    return new HashMap();
+  }
   public PDXPostProcessorDUnitTest(boolean pdxPersistent){
-    this.postProcessor = PDXPostProcessor.class;
     this.pdxPersistent = pdxPersistent;
-    this.jmxPort = AvailablePortHelper.getRandomAvailableTCPPort();
-    values = new HashMap();
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/security/PostProcessorDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/security/PostProcessorDUnitTest.java b/geode-core/src/test/java/org/apache/geode/security/PostProcessorDUnitTest.java
index a7cdb0f..760c292 100644
--- a/geode-core/src/test/java/org/apache/geode/security/PostProcessorDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/security/PostProcessorDUnitTest.java
@@ -16,13 +16,14 @@
  */
 package org.apache.geode.security;
 
+import static org.apache.geode.distributed.ConfigurationProperties.*;
 import static org.junit.Assert.*;
 
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.Properties;
 
-import org.apache.geode.security.templates.SamplePostProcessor;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -36,14 +37,17 @@ import org.apache.geode.cache.client.Pool;
 import org.apache.geode.cache.client.PoolManager;
 import org.apache.geode.cache.query.SelectResults;
 import org.apache.geode.cache.util.CacheListenerAdapter;
+import org.apache.geode.security.templates.SamplePostProcessor;
 import org.apache.geode.test.junit.categories.DistributedTest;
 import org.apache.geode.test.junit.categories.SecurityTest;
 
 @Category({ DistributedTest.class, SecurityTest.class })
 public class PostProcessorDUnitTest extends AbstractSecureServerDUnitTest {
 
-  public PostProcessorDUnitTest(){
-    this.postProcessor = SamplePostProcessor.class;
+  public Properties getProperties(){
+    Properties  properties = super.getProperties();
+    properties.setProperty(SECURITY_POST_PROCESSOR, SamplePostProcessor.class.getName());
+    return properties;
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/security/SecurityClusterConfigDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/security/SecurityClusterConfigDUnitTest.java b/geode-core/src/test/java/org/apache/geode/security/SecurityClusterConfigDUnitTest.java
index 54c02f7..5364c91 100644
--- a/geode-core/src/test/java/org/apache/geode/security/SecurityClusterConfigDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/security/SecurityClusterConfigDUnitTest.java
@@ -29,13 +29,13 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import org.apache.geode.GemFireConfigException;
-import org.apache.geode.cache.CacheFactory;
-import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.internal.i18n.LocalizedStrings;
 import org.apache.geode.security.templates.SimpleSecurityManager;
 import org.apache.geode.test.dunit.IgnoredException;
 import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
-import org.apache.geode.test.dunit.rules.LocatorServerConfigurationRule;
+import org.apache.geode.test.dunit.rules.LocatorServerStartupRule;
+import org.apache.geode.test.dunit.rules.ServerStarter;
 import org.apache.geode.test.junit.categories.DistributedTest;
 import org.apache.geode.test.junit.categories.SecurityTest;
 
@@ -43,7 +43,7 @@ import org.apache.geode.test.junit.categories.SecurityTest;
 public class SecurityClusterConfigDUnitTest extends JUnit4DistributedTestCase {
 
   @Rule
-  public LocatorServerConfigurationRule lsRule = new LocatorServerConfigurationRule(this);
+  public LocatorServerStartupRule lsRule = new LocatorServerStartupRule();
 
   @Before
   public void before() throws Exception {
@@ -55,7 +55,7 @@ public class SecurityClusterConfigDUnitTest extends JUnit4DistributedTestCase {
     props.put(JMX_MANAGER_START, "false");
     props.put(JMX_MANAGER_PORT, 0);
     props.setProperty(SECURITY_POST_PROCESSOR, PDXPostProcessor.class.getName());
-    lsRule.getLocatorVM(props);
+    lsRule.getLocatorVM(0, props);
   }
 
   @Test
@@ -67,10 +67,9 @@ public class SecurityClusterConfigDUnitTest extends JUnit4DistributedTestCase {
     props.setProperty("use-cluster-configuration", "true");
 
     // initial security properties should only contain initial set of values
-    InternalDistributedSystem ds = lsRule.getSystem(props);
-    assertEquals(2, ds.getSecurityProperties().size());
-
-    CacheFactory.create(ds);
+    ServerStarter serverStarter = new ServerStarter(props);
+    serverStarter.startServer(lsRule.getLocatorPort(0));
+    DistributedSystem ds = serverStarter.cache.getDistributedSystem();
 
     // after cache is created, we got the security props passed in by cluster config
     Properties secProps = ds.getSecurityProperties();
@@ -90,9 +89,9 @@ public class SecurityClusterConfigDUnitTest extends JUnit4DistributedTestCase {
     props.setProperty(SECURITY_MANAGER, SimpleSecurityManager.class.getName());
 
     // initial security properties should only contain initial set of values
-    InternalDistributedSystem ds = lsRule.getSystem(props);
-
-    CacheFactory.create(ds);
+    ServerStarter serverStarter = new ServerStarter(props);
+    serverStarter.startServer(lsRule.getLocatorPort(0));
+    DistributedSystem ds = serverStarter.cache.getDistributedSystem();
 
     // after cache is created, we got the security props passed in by cluster config
     Properties secProps = ds.getSecurityProperties();
@@ -111,11 +110,12 @@ public class SecurityClusterConfigDUnitTest extends JUnit4DistributedTestCase {
     props.setProperty("use-cluster-configuration", "true");
 
     // initial security properties should only contain initial set of values
-    InternalDistributedSystem ds = lsRule.getSystem(props);
+    ServerStarter serverStarter = new ServerStarter(props);
 
-    assertThatThrownBy(() -> CacheFactory.create(ds)).isInstanceOf(GemFireConfigException.class)
-                                                     .hasMessage(LocalizedStrings.GEMFIRE_CACHE_SECURITY_MISCONFIGURATION
-                                                       .toLocalizedString());
+    assertThatThrownBy(() -> serverStarter.startServer(lsRule.getLocatorPort(0)))
+      .isInstanceOf(GemFireConfigException.class)
+      .hasMessage(LocalizedStrings.GEMFIRE_CACHE_SECURITY_MISCONFIGURATION
+        .toLocalizedString());
 
   }
 
@@ -130,10 +130,11 @@ public class SecurityClusterConfigDUnitTest extends JUnit4DistributedTestCase {
     props.setProperty("use-cluster-configuration", "true");
 
     // initial security properties should only contain initial set of values
-    InternalDistributedSystem ds = lsRule.getSystem(props);
+    ServerStarter serverStarter = new ServerStarter(props);
 
-    assertThatThrownBy(() -> CacheFactory.create(ds)).isInstanceOf(GemFireConfigException.class)
-                                                     .hasMessage(LocalizedStrings.GEMFIRE_CACHE_SECURITY_MISCONFIGURATION
+    assertThatThrownBy(() -> serverStarter.startServer(lsRule.getLocatorPort(0)))
+      .isInstanceOf(GemFireConfigException.class)
+      .hasMessage(LocalizedStrings.GEMFIRE_CACHE_SECURITY_MISCONFIGURATION
                                                        .toLocalizedString());
 
   }
@@ -148,10 +149,11 @@ public class SecurityClusterConfigDUnitTest extends JUnit4DistributedTestCase {
     props.setProperty("security-manager", "mySecurityManager");
     props.setProperty("use-cluster-configuration", "false");
 
-    InternalDistributedSystem ds = lsRule.getSystem(props);
+    ServerStarter serverStarter = new ServerStarter(props);
 
-    assertThatThrownBy(() -> CacheFactory.create(ds)).isInstanceOf(GemFireConfigException.class)
-                                                     .hasMessage(LocalizedStrings.GEMFIRE_CACHE_SECURITY_MISCONFIGURATION_2
+    assertThatThrownBy(() -> serverStarter.startServer(lsRule.getLocatorPort(0)))
+      .isInstanceOf(GemFireConfigException.class)
+      .hasMessage(LocalizedStrings.GEMFIRE_CACHE_SECURITY_MISCONFIGURATION_2
                                                        .toLocalizedString());
 
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/security/SecurityWithoutClusterConfigDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/security/SecurityWithoutClusterConfigDUnitTest.java b/geode-core/src/test/java/org/apache/geode/security/SecurityWithoutClusterConfigDUnitTest.java
index d5f8686..d3ed823 100644
--- a/geode-core/src/test/java/org/apache/geode/security/SecurityWithoutClusterConfigDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/security/SecurityWithoutClusterConfigDUnitTest.java
@@ -27,14 +27,14 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import org.apache.geode.cache.CacheFactory;
-import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.internal.i18n.LocalizedStrings;
 import org.apache.geode.security.templates.SampleSecurityManager;
 import org.apache.geode.security.templates.SimpleSecurityManager;
 import org.apache.geode.test.dunit.IgnoredException;
 import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
-import org.apache.geode.test.dunit.rules.LocatorServerConfigurationRule;
+import org.apache.geode.test.dunit.rules.LocatorServerStartupRule;
+import org.apache.geode.test.dunit.rules.ServerStarter;
 import org.apache.geode.test.junit.categories.DistributedTest;
 import org.apache.geode.test.junit.categories.SecurityTest;
 
@@ -43,7 +43,7 @@ import org.apache.geode.test.junit.categories.SecurityTest;
 public class SecurityWithoutClusterConfigDUnitTest extends JUnit4DistributedTestCase {
 
   @Rule
-  public LocatorServerConfigurationRule lsRule = new LocatorServerConfigurationRule(this);
+  public LocatorServerStartupRule lsRule = new LocatorServerStartupRule();
 
   @Before
   public void before() throws Exception {
@@ -53,7 +53,7 @@ public class SecurityWithoutClusterConfigDUnitTest extends JUnit4DistributedTest
     props.setProperty(SECURITY_MANAGER, SimpleSecurityManager.class.getName());
     props.setProperty(SECURITY_POST_PROCESSOR, PDXPostProcessor.class.getName());
     props.setProperty(ENABLE_CLUSTER_CONFIGURATION, "false");
-    lsRule.getLocatorVM(props);
+    lsRule.getLocatorVM(0, props);
   }
 
   @Test
@@ -69,11 +69,11 @@ public class SecurityWithoutClusterConfigDUnitTest extends JUnit4DistributedTest
     props.setProperty("use-cluster-configuration", "true");
 
     // initial security properties should only contain initial set of values
-    InternalDistributedSystem ds = lsRule.getSystem(props);
+    ServerStarter serverStarter = new ServerStarter(props);
+    serverStarter.startServer(lsRule.getLocatorPort(0));
+    DistributedSystem ds = serverStarter.cache.getDistributedSystem();
     assertEquals(3, ds.getSecurityProperties().size());
 
-    CacheFactory.create(ds);
-
     // after cache is created, we got the security props passed in by cluster config
     Properties secProps = ds.getSecurityProperties();
     assertEquals(3, secProps.size());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/security/StartServerAuthorizationTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/security/StartServerAuthorizationTest.java b/geode-core/src/test/java/org/apache/geode/security/StartServerAuthorizationTest.java
index 953cdb7..f6928bf 100644
--- a/geode-core/src/test/java/org/apache/geode/security/StartServerAuthorizationTest.java
+++ b/geode-core/src/test/java/org/apache/geode/security/StartServerAuthorizationTest.java
@@ -30,7 +30,8 @@ import org.junit.experimental.categories.Category;
 import org.apache.geode.security.templates.SimpleSecurityManager;
 import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
-import org.apache.geode.test.dunit.rules.LocatorServerConfigurationRule;
+import org.apache.geode.test.dunit.rules.LocatorServerStartupRule;
+import org.apache.geode.test.dunit.rules.ServerStarter;
 import org.apache.geode.test.junit.categories.DistributedTest;
 import org.apache.geode.test.junit.categories.SecurityTest;
 
@@ -38,17 +39,13 @@ import org.apache.geode.test.junit.categories.SecurityTest;
 public class StartServerAuthorizationTest extends JUnit4DistributedTestCase {
 
   @Rule
-  public LocatorServerConfigurationRule lsRule = new LocatorServerConfigurationRule(this);
+  public LocatorServerStartupRule lsRule = new LocatorServerStartupRule();
 
   @Before
   public void before() throws Exception {
     Properties props = new Properties();
     props.setProperty(SECURITY_MANAGER, SimpleSecurityManager.class.getName());
-    props.put(JMX_MANAGER, "true");
-    props.put(JMX_MANAGER_START, "true");
-    props.put(JMX_MANAGER_PORT, 0);
-    props.setProperty(SECURITY_POST_PROCESSOR, PDXPostProcessor.class.getName());
-    lsRule.getLocatorVM(props);
+    lsRule.getLocatorVM(0, props);
   }
 
   @Test
@@ -60,7 +57,8 @@ public class StartServerAuthorizationTest extends JUnit4DistributedTestCase {
 
     VM server = lsRule.getNodeVM(1);
     server.invoke(()->{
-      assertThatThrownBy(()->lsRule.getSystem(props)).isInstanceOf(GemFireSecurityException.class).hasMessageContaining("Security check failed. Authentication error. Please check your credentials");
+      ServerStarter serverStarter = new ServerStarter(props);
+      assertThatThrownBy(()->serverStarter.startServer(lsRule.getLocatorPort(0))).isInstanceOf(GemFireSecurityException.class).hasMessageContaining("Security check failed. Authentication error. Please check your credentials");
     });
   }
 
@@ -74,7 +72,8 @@ public class StartServerAuthorizationTest extends JUnit4DistributedTestCase {
 
     VM server = lsRule.getNodeVM(1);
     server.invoke(()->{
-      assertThatThrownBy(()->lsRule.getSystem(props)).isInstanceOf(GemFireSecurityException.class).hasMessageContaining("user not authorized for CLUSTER:MANAGE");
+      ServerStarter serverStarter = new ServerStarter(props);
+      assertThatThrownBy(()->serverStarter.startServer(lsRule.getLocatorPort(0))).isInstanceOf(GemFireSecurityException.class).hasMessageContaining("user not authorized for CLUSTER:MANAGE");
     });
 
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/test/dunit/rules/ConnectionConfiguration.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/ConnectionConfiguration.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/ConnectionConfiguration.java
new file mode 100644
index 0000000..3ba300f
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/ConnectionConfiguration.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.geode.test.dunit.rules;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * This annotation is intended to be used with {@link MBeanServerConnectionRule} in order to configure a per-test JMX
+ * connection with a specific user and password.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.METHOD})
+public @interface ConnectionConfiguration {
+  String user();
+  String password();
+}
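
A minimal usage sketch for the ConnectionConfiguration annotation above, combined with the MBeanServerConnectionRule added later in this commit; the test class name, JMX port, and the MBean being looked up are illustrative assumptions, not part of this change:

import static org.junit.Assert.assertNotNull;

import org.junit.Rule;
import org.junit.Test;

import org.apache.geode.management.MemberMXBean;
import org.apache.geode.test.dunit.rules.ConnectionConfiguration;
import org.apache.geode.test.dunit.rules.MBeanServerConnectionRule;

public class ExampleJmxConnectionTest {

  // assumes a JMX manager is already listening on this port
  private static final int JMX_PORT = 1099;

  @Rule
  public MBeanServerConnectionRule connectionRule = new MBeanServerConnectionRule(JMX_PORT);

  @Test
  @ConnectionConfiguration(user = "cluster-admin", password = "1234567")
  public void connectsWithPerTestCredentials() throws Exception {
    // the rule reads the annotation and opens the JMX connection as cluster-admin
    MemberMXBean bean = connectionRule.getProxyMBean(MemberMXBean.class);
    assertNotNull(bean);
  }
}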

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/test/dunit/rules/LocatorServerConfigurationRule.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/LocatorServerConfigurationRule.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/LocatorServerConfigurationRule.java
deleted file mode 100644
index 7f52ce1..0000000
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/LocatorServerConfigurationRule.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.geode.test.dunit.rules;
-
-import static org.apache.geode.distributed.ConfigurationProperties.*;
-import static org.apache.geode.test.dunit.Host.*;
-import static org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase.*;
-import static org.junit.Assert.*;
-
-import java.io.IOException;
-import java.io.Serializable;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.Properties;
-import java.util.concurrent.TimeUnit;
-
-import com.jayway.awaitility.Awaitility;
-import org.junit.rules.ExternalResource;
-
-import org.apache.geode.distributed.Locator;
-import org.apache.geode.distributed.internal.InternalDistributedSystem;
-import org.apache.geode.distributed.internal.InternalLocator;
-import org.apache.geode.test.dunit.Host;
-import org.apache.geode.test.dunit.VM;
-import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
-import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
-
-
-public class LocatorServerConfigurationRule extends ExternalResource implements Serializable {
-
-  private int locatorPort = 0;
-
-  private boolean locatorInitialized = false;
-
-  private JUnit4DistributedTestCase testCase;
-
-  public LocatorServerConfigurationRule(JUnit4DistributedTestCase testCase) {
-    this.testCase = testCase;
-  }
-
-  Host host = getHost(0);
-  VM locator = host.getVM(0);
-
-  @Override
-  protected void before() {
-    // Add initialization requirement if any.
-    disconnectAllFromDS();
-  }
-
-  @Override
-  protected void after() {
-    disconnectAllFromDS();
-  }
-
-  /**
-   * Returns getHost(0).getVM(0) as a locator instance with the given
-   * configuration properties.
-   * @param locatorProperties
-   *
-   * @return VM locator vm
-   *
-   * @throws IOException
-   */
-  public VM getLocatorVM(Properties locatorProperties) throws IOException {
-    if (!locatorProperties.containsKey(MCAST_PORT)) {
-      locatorProperties.setProperty(MCAST_PORT, "0");
-    }
-
-    locatorPort = locator.invoke(() -> {
-      InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(0, null, locatorProperties);
-      locator.resetInternalLocatorFileNamesWithCorrectPortNumber(locatorPort);
-
-      if (locator.getConfig().getEnableClusterConfiguration()) {
-        Awaitility.await().atMost(65, TimeUnit.SECONDS).until(() -> assertTrue(locator.isSharedConfigurationRunning()));
-      }
-      return locator.getPort();
-    });
-
-    this.locatorInitialized = true;
-    return locator;
-  }
-
-  /**
-   * Returns a node VM with given configuration properties.
-   * @param index valid 1 to 3 (returns getHist(0).getVM(index)
-   * @param properties
-   *
-   * @return VM node vm
-   */
-  public VM getServerVM(int index, Properties properties) {
-    assertTrue("Locator not initialized. Initialize locator by calling getLocatorVM()", this.locatorInitialized);
-    assertTrue("VM with index 0 is used for locator service.", (index != 0));
-    VM nodeVM = getNodeVM(index);
-    nodeVM.invoke(() -> {
-      getSystem(properties);
-    });
-    return nodeVM;
-  }
-
-  /**
-   * this will simply returns the node
-   * @param index
-   * @return
-   */
-  public VM getNodeVM(int index){
-    return host.getVM(index);
-  }
-
-  public InternalDistributedSystem getSystem(Properties properties){
-    if (!properties.containsKey(MCAST_PORT)) {
-      properties.setProperty(MCAST_PORT, "0");
-    }
-    properties.setProperty(LOCATORS, getHostName() + "[" + locatorPort + "]");
-    InternalDistributedSystem ds = testCase.getSystem(properties);
-    if(testCase instanceof JUnit4CacheTestCase){
-      ((JUnit4CacheTestCase)testCase).getCache();
-    }
-    return ds;
-  }
-
-  public int getLocatorPort(){
-    return locatorPort;
-  }
-
-  private String getHostName() {
-    try {
-      return InetAddress.getLocalHost().getHostName();
-    } catch (UnknownHostException ignore) {
-      return "localhost";
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/test/dunit/rules/LocatorServerStartupRule.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/LocatorServerStartupRule.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/LocatorServerStartupRule.java
new file mode 100644
index 0000000..71894c8
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/LocatorServerStartupRule.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.geode.test.dunit.rules;
+
+import static org.apache.geode.distributed.ConfigurationProperties.*;
+import static org.apache.geode.test.dunit.Host.*;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.Properties;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.rules.ExternalResource;
+
+import org.apache.geode.test.dunit.Host;
+import org.apache.geode.test.dunit.Invoke;
+import org.apache.geode.test.dunit.VM;
+
+
+/**
+ * This rule can help you start up locators/servers in different VMs.
+ * You can start multiple locator/server combinations.
+ */
+public class LocatorServerStartupRule extends ExternalResource implements Serializable {
+
+  private Host host = getHost(0);
+
+  public int[] locatorPorts = new int[4];
+
+
+  // these are only available in each VM
+  public static ServerStarter serverStarter;
+  public static LocatorStarter locatorStarter;
+
+  @Before
+  public void before() {
+    after();
+  }
+
+  @After
+  public void after() {
+    stop();
+    Invoke.invokeInEveryVM("Stop each VM", ()->stop());
+  }
+
+  /**
+   * Returns getHost(0).getVM(index) as a locator VM started with the given
+   * configuration properties.
+   * @param locatorProperties
+   *
+   * @return VM locator vm
+   *
+   * @throws IOException
+   */
+  public VM getLocatorVM(int index, Properties locatorProperties) throws IOException {
+    VM locatorVM = host.getVM(index);
+    int locatorPort = locatorVM.invoke(() -> {
+      locatorStarter = new LocatorStarter(locatorProperties);
+      locatorStarter.startLocator();
+      return locatorStarter.locator.getPort();
+    });
+    locatorPorts[index] = locatorPort;
+    return locatorVM;
+  }
+
+  /**
+   * starts a cache server that does not connect to a locator
+   * @return VM node vm
+   */
+
+  public VM getServerVM(int index, Properties properties) {
+    return getServerVM(index, properties, 0);
+  }
+
+  /**
+   * starts a cache server that connects to the locator running at the given port.
+   * @param index
+   * @param properties
+   * @param locatorPort
+   * @return
+   */
+  public VM getServerVM(int index, Properties properties, int locatorPort) {
+    VM nodeVM = getNodeVM(index);
+    properties.setProperty(NAME, "server-"+index);
+    nodeVM.invoke(() -> {
+      serverStarter = new ServerStarter(properties);
+      serverStarter.startServer(locatorPort);
+    });
+    return nodeVM;
+  }
+
+
+
+  /**
+   * simply returns the VM at the given index
+   * @param index
+   * @return
+   */
+  public VM getNodeVM(int index){
+    return host.getVM(index);
+  }
+
+  public int getLocatorPort(int index){
+    return locatorPorts[index];
+  }
+
+
+  public final void stop(){
+    if(serverStarter!=null) {
+      serverStarter.after();
+    }
+    if(locatorStarter!=null){
+      locatorStarter.after();
+    }
+  }
+
+}
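
A hedged sketch of how a DUnit test might use this rule; the test class name and the empty property sets are assumptions for illustration:

import java.util.Properties;

import org.junit.Rule;
import org.junit.Test;

import org.apache.geode.test.dunit.VM;
import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
import org.apache.geode.test.dunit.rules.LocatorServerStartupRule;

public class ExampleStartupDUnitTest extends JUnit4DistributedTestCase {

  @Rule
  public LocatorServerStartupRule lsRule = new LocatorServerStartupRule();

  @Test
  public void startsLocatorAndServerInSeparateVMs() throws Exception {
    // VM 0 hosts the locator; VM 1 hosts a server that joins it
    lsRule.getLocatorVM(0, new Properties());
    VM serverVM = lsRule.getServerVM(1, new Properties(), lsRule.getLocatorPort(0));

    // serverVM.invoke(...) can now run assertions inside the server VM
  }
}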

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/test/dunit/rules/LocatorStarter.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/LocatorStarter.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/LocatorStarter.java
new file mode 100644
index 0000000..02ba672
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/LocatorStarter.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.geode.test.dunit.rules;
+
+import static org.apache.geode.distributed.ConfigurationProperties.*;
+import static org.junit.Assert.*;
+
+import java.io.Serializable;
+import java.util.Properties;
+import java.util.concurrent.TimeUnit;
+
+import com.jayway.awaitility.Awaitility;
+import org.junit.rules.ExternalResource;
+
+import org.apache.geode.distributed.Locator;
+import org.apache.geode.distributed.internal.InternalLocator;
+
+/**
+ * This is a rule to start up a locator in your current VM. It's useful for your
+ * Integration Tests.
+ *
+ * If you need a rule that starts a server/locator in a different VM for distributed tests,
+ * you should use LocatorServerStartupRule.
+ *
+ * This rule does not have a before(), because you may choose to start the locator at a
+ * different point in your test. You may also use this class outside of a rule, or in your
+ * own rule (see LocatorServerStartupRule); in that case you will need to call after() manually.
+ */
+
+public class LocatorStarter extends ExternalResource implements Serializable {
+
+  public InternalLocator locator;
+
+  private Properties properties;
+
+  public LocatorStarter(Properties properties){
+    this.properties = properties;
+  }
+
+  public void startLocator() throws Exception{
+    if (!properties.containsKey(MCAST_PORT)) {
+      properties.setProperty(MCAST_PORT, "0");
+    }
+    locator = (InternalLocator) Locator.startLocatorAndDS(0, null, properties);
+    int locatorPort = locator.getPort();
+    locator.resetInternalLocatorFileNamesWithCorrectPortNumber(locatorPort);
+
+    if (locator.getConfig().getEnableClusterConfiguration()) {
+      Awaitility.await().atMost(65, TimeUnit.SECONDS).until(() -> assertTrue(locator.isSharedConfigurationRunning()));
+    }
+  }
+
+  @Override
+  public void after(){
+    if(locator!=null){
+      locator.stop();
+    }
+  }
+}
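
A short integration-test sketch for the LocatorStarter helper above, used outside of a rule as its javadoc allows; the test class name is an assumption:

import java.util.Properties;

import org.junit.After;
import org.junit.Test;

import org.apache.geode.test.dunit.rules.LocatorStarter;

public class ExampleLocatorStarterIntegrationTest {

  private LocatorStarter locatorStarter;

  @Test
  public void startsLocatorInThisVM() throws Exception {
    locatorStarter = new LocatorStarter(new Properties());
    locatorStarter.startLocator(); // MCAST_PORT defaults to "0" inside the helper
    // locatorStarter.locator now exposes the running InternalLocator
  }

  @After
  public void tearDown() {
    if (locatorStarter != null) {
      locatorStarter.after(); // stops the locator, since the class is not used as a rule here
    }
  }
}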

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/test/dunit/rules/MBeanServerConnectionRule.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/MBeanServerConnectionRule.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/MBeanServerConnectionRule.java
new file mode 100644
index 0000000..ace0c53
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/MBeanServerConnectionRule.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.geode.test.dunit.rules;
+
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import javax.management.JMX;
+import javax.management.MBeanServerConnection;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectInstance;
+import javax.management.ObjectName;
+import javax.management.Query;
+import javax.management.QueryExp;
+import javax.management.remote.JMXConnector;
+import javax.management.remote.JMXConnectorFactory;
+import javax.management.remote.JMXServiceURL;
+
+import org.junit.runner.Description;
+
+import org.apache.geode.management.internal.security.AccessControlMXBean;
+import org.apache.geode.test.junit.rules.DescribedExternalResource;
+
+/**
+ * Class which eases the creation of MBean proxies and connections for security testing. When combined with {@link ConnectionConfiguration}
+ * it allows for the creation of per-test connections with different user/password combinations.
+ */
+public class MBeanServerConnectionRule extends DescribedExternalResource {
+
+  private final int jmxServerPort;
+  private JMXConnector jmxConnector;
+  private MBeanServerConnection con;
+
+  /**
+   * Rule constructor
+   *
+   * @param port The JMX server port to connect to
+   */
+  public MBeanServerConnectionRule(int port) {
+    this.jmxServerPort = port;
+  }
+
+  /**
+   * Retrieve a new proxy MBean
+   *
+   * @return A new proxy MBean of the same type with which the class was constructed
+   */
+  public <T> T getProxyMBean(Class<T> proxyClass, String beanQueryName) throws MalformedObjectNameException, IOException {
+    ObjectName name = null;
+    QueryExp query = null;
+
+    if (proxyClass != null) {
+      query = Query.isInstanceOf(Query.value(proxyClass.getName()));
+    }
+
+    if (beanQueryName != null) {
+      name = ObjectName.getInstance(beanQueryName);
+    }
+
+    Set<ObjectInstance> beans = con.queryMBeans(name, query);
+    assertEquals("failed to find only one instance of type " + proxyClass.getName() + " with name " + beanQueryName, 1, beans.size());
+
+    return JMX.newMXBeanProxy(con, ((ObjectInstance) beans.toArray()[0]).getObjectName(), proxyClass);
+  }
+
+  public AccessControlMXBean getAccessControlMBean() throws Exception{
+    return JMX.newMXBeanProxy(con, new ObjectName("GemFire:service=AccessControl,type=Distributed"), AccessControlMXBean.class);
+  }
+
+  /**
+   * Retrieve a new proxy MBean
+   *
+   * @return A new proxy MBean of the same type with which the class was constructed
+   */
+  public <T> T getProxyMBean(Class<T> proxyClass) throws MalformedObjectNameException, IOException {
+    return getProxyMBean(proxyClass, null);
+  }
+
+  public <T> T getProxyMBean(String beanQueryName) throws MalformedObjectNameException, IOException {
+    return getProxyMBean(null, beanQueryName);
+  }
+
+  public MBeanServerConnection getMBeanServerConnection() throws IOException {
+    return con;
+  }
+
+  protected void before(Description description) throws Throwable {
+    ConnectionConfiguration config = description.getAnnotation(ConnectionConfiguration.class);
+    Map<String, String[]> env = new HashMap<>();
+    if (config != null) {
+      String user = config.user();
+      String password = config.password();
+      env.put(JMXConnector.CREDENTIALS, new String[] { user, password });
+
+      JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://:" + jmxServerPort + "/jmxrmi");
+      jmxConnector = JMXConnectorFactory.connect(url, env);
+      con = jmxConnector.getMBeanServerConnection();
+    }
+  }
+
+  /**
+   * Override to tear down your specific external resource.
+   */
+  protected void after(Description description) throws Throwable {
+    if (jmxConnector != null) {
+      jmxConnector.close();
+      jmxConnector = null;
+    }
+
+    con = null;
+  }
+
+}
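
Building on the annotation sketch earlier, a brief illustration of the lookup methods this rule offers; the port, user, and object-name pattern are assumptions:

import static org.junit.Assert.assertNotNull;

import org.junit.Rule;
import org.junit.Test;

import org.apache.geode.management.MemberMXBean;
import org.apache.geode.management.internal.security.AccessControlMXBean;
import org.apache.geode.test.dunit.rules.ConnectionConfiguration;
import org.apache.geode.test.dunit.rules.MBeanServerConnectionRule;

public class ExampleBeanLookupTest {

  @Rule
  public MBeanServerConnectionRule connectionRule = new MBeanServerConnectionRule(1099); // assumed port

  @Test
  @ConnectionConfiguration(user = "data-user", password = "1234567")
  public void looksUpBeansInDifferentWays() throws Exception {
    // proxy for the distributed AccessControl MBean registered by the JMX manager
    AccessControlMXBean accessControl = connectionRule.getAccessControlMBean();
    assertNotNull(accessControl);

    // proxy located by type alone, or by type plus an object-name pattern
    MemberMXBean byType = connectionRule.getProxyMBean(MemberMXBean.class);
    MemberMXBean byPattern = connectionRule.getProxyMBean(MemberMXBean.class, "GemFire:type=Member,*");
    assertNotNull(byType);
    assertNotNull(byPattern);
  }
}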

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/test/dunit/rules/ServerStarter.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/ServerStarter.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/ServerStarter.java
new file mode 100644
index 0000000..910f232
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/ServerStarter.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.geode.test.dunit.rules;
+
+import static org.apache.geode.distributed.ConfigurationProperties.*;
+
+import java.io.Serializable;
+import java.util.Properties;
+
+import org.junit.rules.ExternalResource;
+
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.CacheFactory;
+import org.apache.geode.cache.server.CacheServer;
+
+
+/**
+ * This is a rule that starts up a server in your current VM. It is useful for
+ * Integration Tests.
+ *
+ * If you need a rule that starts a server or locator in a different VM for Distributed tests,
+ * you should use LocatorServerStartupRule instead.
+ *
+ * This rule does not have a before(), because you may choose to start the server at different
+ * points in your tests. If you use this class outside of a rule, or inside a rule of your own
+ * (see LocatorServerStartupRule), you will need to call after() manually.
+ */
+public class ServerStarter extends ExternalResource implements Serializable{
+
+  public Cache cache;
+  public CacheServer server;
+
+  private  Properties properties;
+
+  public ServerStarter(Properties properties){
+    this.properties = properties;
+  }
+
+  public void startServer() throws Exception {
+    startServer(0, false);
+  }
+
+  public void startServer(int locatorPort) throws Exception {
+    startServer(locatorPort, false);
+  }
+
+  public void startServer(int locatorPort, boolean pdxPersistent) throws Exception {
+    if (!properties.containsKey(MCAST_PORT)) {
+      properties.setProperty(MCAST_PORT, "0");
+    }
+    if (!properties.containsKey(NAME)) {
+      properties.setProperty(NAME, this.getClass().getName());
+    }
+    if (locatorPort>0) {
+      properties.setProperty(LOCATORS, "localhost["+locatorPort+"]");
+    }
+    else {
+      properties.setProperty(LOCATORS, "");
+    }
+    if(properties.containsKey(JMX_MANAGER_PORT)){
+      int jmxPort = Integer.parseInt(properties.getProperty(JMX_MANAGER_PORT));
+      if(jmxPort>0) {
+        if (!properties.containsKey(JMX_MANAGER))
+          properties.put(JMX_MANAGER, "true");
+        if (!properties.containsKey(JMX_MANAGER_START))
+          properties.put(JMX_MANAGER_START, "true");
+      }
+    }
+
+    CacheFactory cf = new CacheFactory(properties);
+    cf.setPdxReadSerialized(pdxPersistent);
+    cf.setPdxPersistent(pdxPersistent);
+
+    cache = cf.create();
+    server = cache.addCacheServer();
+    server.setPort(0);
+    server.start();
+  }
+
+  public void after(){
+    if(cache!=null) cache.close();
+    if(server!=null) server.stop();
+  }
+}
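
A hypothetical usage sketch for the ServerStarter rule added above (the test class name and assertions are invented for illustration; the rule itself and its public cache/server fields come from the diff):

import static org.assertj.core.api.Assertions.assertThat;

import java.util.Properties;

import org.junit.Rule;
import org.junit.Test;

import org.apache.geode.test.dunit.rules.ServerStarter;

public class ServerStarterExampleTest {

  // The rule starts nothing by itself; after() closes the cache and stops the server.
  @Rule
  public ServerStarter serverStarter = new ServerStarter(new Properties());

  @Test
  public void serverAcceptsConnectionsAfterStart() throws Exception {
    // startServer() fills in MCAST_PORT=0, a NAME and an empty LOCATORS entry for us.
    serverStarter.startServer();

    assertThat(serverStarter.cache).isNotNull();
    assertThat(serverStarter.server.isRunning()).isTrue();
  }
}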



[21/50] [abbrv] incubator-geode git commit: GEODE-1883 AuthInitializer should be made optional

Posted by kl...@apache.org.
GEODE-1883 AuthInitializer should be made optional

The documentation of how a cache client sets its credentials
for authentication is revised so that it no longer states that
the client can simply set the two security properties. Once this
bug is fixed, the documentation should be revised again.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/f1df6fc5
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/f1df6fc5
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/f1df6fc5

Branch: refs/heads/feature/GEODE-1930
Commit: f1df6fc5920d0d1eebd210e816e61ad44074d39d
Parents: cf09ac9
Author: Karen Miller <km...@pivotal.io>
Authored: Mon Oct 17 12:58:33 2016 -0700
Committer: Karen Miller <km...@pivotal.io>
Committed: Mon Oct 17 12:58:33 2016 -0700

----------------------------------------------------------------------
 .../implementing_authentication.html.md.erb     | 22 ++++++++++++++++++++
 1 file changed, 22 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f1df6fc5/geode-docs/managing/security/implementing_authentication.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/security/implementing_authentication.html.md.erb b/geode-docs/managing/security/implementing_authentication.html.md.erb
index 374a95e..c66be1a 100644
--- a/geode-docs/managing/security/implementing_authentication.html.md.erb
+++ b/geode-docs/managing/security/implementing_authentication.html.md.erb
@@ -77,6 +77,7 @@ sender or receiver.
 
 ## How a Cache Client Sets Its Credential
 
+<!--  Revised for GEODE-1883
 In order to connect with a locator or a server that does authentication,
 a client will need to set its credential, composed of the two properties
 `security-username` and `security-password`.
@@ -93,6 +94,27 @@ as in the example
 The user name and password are stored in the clear, so the
 `gfsecurity.properties` file must be protected by restricting access with
 file system permissions.
+To accomplish this:
+
+- Implement the `getCredentials` method of the `AuthInitialize` interface
+for the client.
+This callback's location is defined in the property `security-client-auth-init`,
+as in the example
+
+     ``` pre
+     security-client-auth-init=com.example.security.ClientAuthInitialize
+     ```
+The implementation of `getCredentials` may then acquire values for
+the properties `security-username` and `security-password` in whatever way
+it wishes.
+It might look up values in a database or another external resource,
+or it might prompt for values.
+-->
+
+In order to connect with a locator or a server that does authentication,
+a client will need to set its credential, composed of the two properties
+`security-username` and `security-password`.
+To accomplish this:
 
 - Implement the `getCredentials` method of the `AuthInitialize` interface
 for the client.
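
For context only, a client-side AuthInitialize implementation of the kind the revised documentation describes might look roughly like the sketch below. The class name matches the example in the doc (com.example.security.ClientAuthInitialize); the environment-variable credential source is an illustrative assumption, and the method signatures assume the classic two-LogWriter init and three-argument getCredentials form of the interface.

package com.example.security;

import java.util.Properties;

import org.apache.geode.LogWriter;
import org.apache.geode.distributed.DistributedMember;
import org.apache.geode.security.AuthInitialize;
import org.apache.geode.security.AuthenticationFailedException;

public class ClientAuthInitialize implements AuthInitialize {

  @Override
  public void init(LogWriter systemLogger, LogWriter securityLogger)
      throws AuthenticationFailedException {
    // no initialization needed for this sketch
  }

  @Override
  public Properties getCredentials(Properties securityProps, DistributedMember server,
      boolean isPeer) throws AuthenticationFailedException {
    // Look the values up wherever makes sense: a vault, a prompt, an env variable, ...
    String user = System.getenv("GEODE_USER");          // illustrative credential source
    String password = System.getenv("GEODE_PASSWORD");
    if (user == null || password == null) {
      throw new AuthenticationFailedException("GEODE_USER/GEODE_PASSWORD not set");
    }

    Properties credentials = new Properties();
    credentials.setProperty("security-username", user);
    credentials.setProperty("security-password", password);
    return credentials;
  }

  @Override
  public void close() {
    // nothing to release
  }
}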


[50/50] [abbrv] incubator-geode git commit: Convert from ManagementTestCase to ManagementTestRule

Posted by kl...@apache.org.
Convert from ManagementTestCase to ManagementTestRule


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/24f496df
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/24f496df
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/24f496df

Branch: refs/heads/feature/GEODE-1930
Commit: 24f496df4ab628dc95b931000733269869fdf117
Parents: 56917a2
Author: Kirk Lund <kl...@apache.org>
Authored: Mon Oct 31 13:45:28 2016 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Mon Oct 31 13:55:09 2016 -0700

----------------------------------------------------------------------
 .../RestAPIsAndInterOpsDUnitTest.java           |   43 +-
 geode-core/build.gradle                         |    1 +
 .../org/apache/geode/management/JVMMetrics.java |    3 +-
 .../org/apache/geode/management/OSMetrics.java  |    3 +-
 .../internal/SystemManagementService.java       |    2 +-
 .../DistributedLockServiceDUnitTest.java        |    2 +-
 .../cache/ConnectDisconnectDUnitTest.java       |    2 +-
 ...gionBucketCreationDistributionDUnitTest.java |    2 +-
 .../cache/locks/TXLockServiceDUnitTest.java     |    4 +-
 .../management/CacheManagementDUnitTest.java    |  937 ++++-----
 .../management/ClientHealthStatsDUnitTest.java  |  597 +++---
 .../management/CompositeTypeTestDUnitTest.java  |  202 +-
 .../management/DLockManagementDUnitTest.java    |  549 ++----
 .../management/DiskManagementDUnitTest.java     |  821 +++-----
 .../management/DistributedSystemDUnitTest.java  | 1038 ++++------
 .../geode/management/JMXMBeanDUnitTest.java     |    2 +-
 .../management/LocatorManagementDUnitTest.java  |  429 ++--
 .../geode/management/ManagementTestBase.java    |  577 ++----
 .../geode/management/ManagementTestRule.java    |  430 ++++
 .../org/apache/geode/management/Manager.java    |   31 +
 .../org/apache/geode/management/Member.java     |   31 +
 .../management/OffHeapManagementDUnitTest.java  |  723 ++++---
 .../geode/management/QueryDataDUnitTest.java    | 1297 +++++-------
 .../management/RegionManagementDUnitTest.java   | 1856 ++++++++----------
 .../stats/DistributedSystemStatsDUnitTest.java  |  110 +-
 .../QueryDataFunctionApplyLimitClauseTest.java  |   10 +-
 .../internal/pulse/TestClientIdsDUnitTest.java  |   52 +-
 .../pulse/TestSubscriptionsDUnitTest.java       |  291 +--
 .../geode/test/dunit/AsyncInvocation.java       |   59 +-
 .../org/apache/geode/test/dunit/Invoke.java     |    4 +-
 .../java/org/apache/geode/test/dunit/VM.java    |    7 +-
 .../java/org/apache/geode/test/dunit/Wait.java  |    2 +
 .../cache/internal/JUnit4CacheTestCase.java     |    5 +
 .../internal/JUnit4DistributedTestCase.java     |    4 +-
 .../dunit/rules/DistributedDisconnectRule.java  |    4 +-
 .../DistributedRestoreSystemProperties.java     |    4 +-
 .../geode/test/dunit/rules/DistributedRule.java |   68 +
 .../test/dunit/rules/DistributedRunRules.java   |   76 +
 .../test/dunit/rules/DistributedStatement.java  |   76 +
 .../test/dunit/rules/DistributedTestRule.java   |  192 ++
 .../DistributedUseJacksonForJsonPathRule.java   |   51 +
 .../dunit/rules/DistributedWrapperRule.java     |   52 +
 .../geode/test/dunit/rules/RemoteInvoker.java   |   16 +-
 .../apache/geode/test/dunit/rules/WhichVMs.java |   58 +
 .../rules/tests/DistributedTestRuleTest.java    |   54 +
 .../test/dunit/standalone/DUnitLauncher.java    |    9 +-
 geode-junit/build.gradle                        |    1 +
 .../junit/rules/UseJacksonForJsonPathRule.java  |  128 ++
 .../SerializableExternalResource.java           |   22 +
 .../serializable/SerializableStatement.java     |   27 +
 .../management/LuceneManagementDUnitTest.java   |   20 +-
 gradle/dependency-versions.properties           |    3 +-
 52 files changed, 5054 insertions(+), 5933 deletions(-)
----------------------------------------------------------------------
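
A condensed, hypothetical sketch of the rule-based pattern the diffs below adopt (the test class name and assertion are invented; ManagementTestRule, @Manager and @Member are the new classes introduced by this commit):

import static org.assertj.core.api.Assertions.assertThat;

import java.io.Serializable;

import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import org.apache.geode.management.ManagementTestRule;
import org.apache.geode.management.Manager;
import org.apache.geode.management.Member;
import org.apache.geode.test.dunit.VM;
import org.apache.geode.test.junit.categories.DistributedTest;

@Category(DistributedTest.class)
@SuppressWarnings("serial")
public class ManagementRulePatternDUnitTest implements Serializable {

  @Manager
  private VM managerVM;    // injected by ManagementTestRule

  @Member
  private VM[] memberVMs;  // injected by ManagementTestRule

  @Rule
  public ManagementTestRule managementTestRule = ManagementTestRule.builder().build();

  @Test
  public void membersAreVisibleFromTheManager() throws Exception {
    managementTestRule.createMembers();
    managementTestRule.createManagers();

    managerVM.invoke(() -> {
      assertThat(managementTestRule.getOtherNormalMembers()).isNotEmpty();
    });
  }
}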


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/controllers/RestAPIsAndInterOpsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/controllers/RestAPIsAndInterOpsDUnitTest.java b/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/controllers/RestAPIsAndInterOpsDUnitTest.java
index 0299615..baefcba 100644
--- a/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/controllers/RestAPIsAndInterOpsDUnitTest.java
+++ b/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/controllers/RestAPIsAndInterOpsDUnitTest.java
@@ -67,7 +67,6 @@ import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.internal.AvailablePort;
 import org.apache.geode.internal.AvailablePortHelper;
 import org.apache.geode.internal.cache.GemFireCacheImpl;
-import org.apache.geode.management.ManagementTestBase;
 import org.apache.geode.pdx.PdxInstance;
 import org.apache.geode.test.dunit.Host;
 import org.apache.geode.test.dunit.NetworkUtils;
@@ -80,28 +79,14 @@ import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactor
  *
  * @since GemFire 8.0
  */
-
 @Category(DistributedTest.class)
 @RunWith(Parameterized.class)
 @Parameterized.UseParametersRunnerFactory(CategoryWithParameterizedRunnerFactory.class)
+@SuppressWarnings("serial")
 public class RestAPIsAndInterOpsDUnitTest extends LocatorTestBase {
 
-  private static final long serialVersionUID = -254776154266339226L;
-
-  @Parameterized.Parameter
-  public String urlContext;
-
-  @Parameterized.Parameters
-  public static Collection<String> data() {
-    return Arrays.asList("/geode", "/gemfire-api");
-  }
-
-  private ManagementTestBase helper;
-
   public static final String PEOPLE_REGION_NAME = "People";
 
-  //private static RestTemplate restTemplate;
-
   private static final String findAllPeopleQuery = "/queries?id=findAllPeople&q=SELECT%20*%20FROM%20/People";
   private static final String findPeopleByGenderQuery = "/queries?id=filterByGender&q=SELECT%20*%20from%20/People%20where%20gender=$1";
   private static final String findPeopleByLastNameQuery = "/queries?id=filterByLastName&q=SELECT%20*%20from%20/People%20where%20lastName=$1";
@@ -182,20 +167,12 @@ public class RestAPIsAndInterOpsDUnitTest extends LocatorTestBase {
       + " \"middleName\": \"kiran12\"," + " \"lastName\": \"Patel\","
       + " \"birthDate\": \"23/08/2012\"," + "\"gender\": \"MALE\"" + "}" + "]";
 
-  public RestAPIsAndInterOpsDUnitTest() {
-    super();
-    this.helper = new ManagementTestBase() {{}};
-
-  }
-
-  @Override
-  public final void preSetUp() throws Exception {
-    disconnectAllFromDS();
-  }
+  @Parameterized.Parameter
+  public String urlContext;
 
-  @Override
-  protected final void postTearDownLocatorTestBase() throws Exception {
-    disconnectAllFromDS();
+  @Parameterized.Parameters
+  public static Collection<String> data() {
+    return Arrays.asList("/geode", "/gemfire-api");
   }
 
   public String startBridgeServerWithRestService(final String hostName, final String[] groups, final String locators, final String[] regions,
@@ -842,14 +819,6 @@ public class RestAPIsAndInterOpsDUnitTest extends LocatorTestBase {
 
     //Querying
     doQueryOpsUsingRestApis(restEndpoint);
-
-    // stop the client and make sure the bridge server notifies
-    // stopBridgeMemberVM(client);
-    helper.closeCache(locator);
-    helper.closeCache(manager);
-    helper.closeCache(server);
-    helper.closeCache(client);
-
   }
 
   private void createClientCache(final String host, final int port) throws Exception {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/build.gradle
----------------------------------------------------------------------
diff --git a/geode-core/build.gradle b/geode-core/build.gradle
index 067bafc..383e071 100755
--- a/geode-core/build.gradle
+++ b/geode-core/build.gradle
@@ -114,6 +114,7 @@ dependencies {
 
   // Test Dependencies
   // External
+  testCompile 'com.jayway.jsonpath:json-path-assert:' + project.'json-path-assert.version'
   testCompile 'org.apache.bcel:bcel:' + project.'bcel.version'
   testRuntime 'org.apache.derby:derby:' + project.'derby.version'
   testCompile 'org.mockito:mockito-core:' + project.'mockito-core.version'

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/main/java/org/apache/geode/management/JVMMetrics.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/JVMMetrics.java b/geode-core/src/main/java/org/apache/geode/management/JVMMetrics.java
index b11cfef..992ef99 100644
--- a/geode-core/src/main/java/org/apache/geode/management/JVMMetrics.java
+++ b/geode-core/src/main/java/org/apache/geode/management/JVMMetrics.java
@@ -17,6 +17,7 @@
 package org.apache.geode.management;
 
 import java.beans.ConstructorProperties;
+import java.io.Serializable;
 
 import org.apache.geode.cache.Region;
 
@@ -28,7 +29,7 @@ import org.apache.geode.cache.Region;
  * @since GemFire 7.0
  *
  */
-public class JVMMetrics {
+public class JVMMetrics implements Serializable {
 
   /**
    * Number of GCs performed

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/main/java/org/apache/geode/management/OSMetrics.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/OSMetrics.java b/geode-core/src/main/java/org/apache/geode/management/OSMetrics.java
index 07dab6c..48974ba 100644
--- a/geode-core/src/main/java/org/apache/geode/management/OSMetrics.java
+++ b/geode-core/src/main/java/org/apache/geode/management/OSMetrics.java
@@ -17,6 +17,7 @@
 package org.apache.geode.management;
 
 import java.beans.ConstructorProperties;
+import java.io.Serializable;
 
 /**
  * Composite data type used to distribute metrics for the operating system hosting
@@ -25,7 +26,7 @@ import java.beans.ConstructorProperties;
  * @since GemFire 7.0
  *
  */
-public class OSMetrics {
+public class OSMetrics implements Serializable {
   
   /**
    * Maximum number file descriptor which can be opened

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/main/java/org/apache/geode/management/internal/SystemManagementService.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/SystemManagementService.java b/geode-core/src/main/java/org/apache/geode/management/internal/SystemManagementService.java
index 29bbb15..65b71d6 100755
--- a/geode-core/src/main/java/org/apache/geode/management/internal/SystemManagementService.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/SystemManagementService.java
@@ -357,7 +357,7 @@ public final class SystemManagementService extends BaseManagementService {
   }
 
 
-  public <T> T getMBeanProxy(ObjectName objectName, Class<T> interfaceClass) {
+  public <T> T getMBeanProxy(ObjectName objectName, Class<T> interfaceClass) { // TODO: this is too generic
     if (!isStartedAndOpen()) {
       return null;
     }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/distributed/DistributedLockServiceDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/distributed/DistributedLockServiceDUnitTest.java b/geode-core/src/test/java/org/apache/geode/distributed/DistributedLockServiceDUnitTest.java
index 25d6013..e391aba 100755
--- a/geode-core/src/test/java/org/apache/geode/distributed/DistributedLockServiceDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/distributed/DistributedLockServiceDUnitTest.java
@@ -100,7 +100,7 @@ public class DistributedLockServiceDUnitTest extends JUnit4DistributedTestCase {
   @Override
   public final void preTearDown() throws Exception {
     Invoke.invokeInEveryVM(() -> destroyAllDLockServices());
-//    invokeInEveryVM(DistributedLockServiceDUnitTest.class,
+//    invokeInEveryVMAndController(DistributedLockServiceDUnitTest.class,
 //                    "remoteDumpAllDLockServices"); 
                     
     //InternalDistributedLockService.destroyAll();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/internal/cache/ConnectDisconnectDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/ConnectDisconnectDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/ConnectDisconnectDUnitTest.java
index 9b6030a..162c3a5 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/ConnectDisconnectDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/ConnectDisconnectDUnitTest.java
@@ -42,7 +42,7 @@ public class ConnectDisconnectDUnitTest extends JUnit4CacheTestCase {
   // see bugs #50785 and #46438
   @Test
   public void testManyConnectsAndDisconnects() throws Throwable {
-//    invokeInEveryVM(new SerializableRunnable() {
+//    invokeInEveryVMAndController(new SerializableRunnable() {
 //
 //      @Override
 //      public void run() {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionBucketCreationDistributionDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionBucketCreationDistributionDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionBucketCreationDistributionDUnitTest.java
index 91bafea..07546a0 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionBucketCreationDistributionDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionBucketCreationDistributionDUnitTest.java
@@ -462,7 +462,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends Partit
     
 //    final int bucketPerHost = (int) Math.ceil(((double) maxBuckets / Host.getHostCount()));
 
-//    invokeInEveryVM(new SerializableRunnable("") {
+//    invokeInEveryVMAndController(new SerializableRunnable("") {
 //      
 //    }
     

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/internal/cache/locks/TXLockServiceDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/locks/TXLockServiceDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/locks/TXLockServiceDUnitTest.java
index b835cbc..3b54cc6 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/locks/TXLockServiceDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/locks/TXLockServiceDUnitTest.java
@@ -98,7 +98,7 @@ public class TXLockServiceDUnitTest extends JUnit4DistributedTestCase {
 
   @Override
   public final void preTearDown() throws Exception {
-//    invokeInEveryVM(TXLockServiceDUnitTest.class,
+//    invokeInEveryVMAndController(TXLockServiceDUnitTest.class,
 //                    "remoteDumpAllDLockServices");
                     
     Invoke.invokeInEveryVM(TXLockServiceDUnitTest.class,
@@ -123,7 +123,7 @@ public class TXLockServiceDUnitTest extends JUnit4DistributedTestCase {
   @Test
   public void testGetAndDestroy() {
     forEachVMInvoke("checkGetAndDestroy", new Object[] {});
-    /*invokeInEveryVM(TXLockServiceDUnitTest.class,
+    /*invokeInEveryVMAndController(TXLockServiceDUnitTest.class,
                     "destroyServices"); 
     forEachVMInvoke("checkGetAndDestroy", new Object[] {});*/
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/management/CacheManagementDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/CacheManagementDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/CacheManagementDUnitTest.java
index 8c57aab..0e750e9 100644
--- a/geode-core/src/test/java/org/apache/geode/management/CacheManagementDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/CacheManagementDUnitTest.java
@@ -16,11 +16,14 @@
  */
 package org.apache.geode.management;
 
-import static org.apache.geode.distributed.ConfigurationProperties.*;
 import static com.jayway.awaitility.Awaitility.*;
+import static java.util.concurrent.TimeUnit.*;
+import static org.apache.geode.distributed.ConfigurationProperties.*;
+import static org.assertj.core.api.Assertions.*;
 import static org.hamcrest.Matchers.*;
-import static org.junit.Assert.*;
 
+import java.io.Serializable;
+import java.lang.management.ManagementFactory;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -28,13 +31,16 @@ import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
-import java.util.concurrent.TimeUnit;
-import javax.management.InstanceNotFoundException;
+
 import javax.management.JMException;
 import javax.management.Notification;
 import javax.management.NotificationListener;
 import javax.management.ObjectName;
 
+import com.jayway.awaitility.Awaitility;
+import com.jayway.awaitility.core.ConditionFactory;
+import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -45,54 +51,69 @@ import org.apache.geode.cache.RegionShortcut;
 import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.distributed.internal.DistributionConfig;
 import org.apache.geode.distributed.internal.InternalDistributedSystem;
-import org.apache.geode.internal.cache.GemFireCacheImpl;
+import org.apache.geode.management.internal.LocalManager;
 import org.apache.geode.management.internal.MBeanJMXAdapter;
 import org.apache.geode.management.internal.ManagementConstants;
 import org.apache.geode.management.internal.NotificationHub.NotificationHubListener;
 import org.apache.geode.management.internal.SystemManagementService;
-import org.apache.geode.test.dunit.AsyncInvocation;
-import org.apache.geode.test.dunit.LogWriterUtils;
-import org.apache.geode.test.dunit.SerializableRunnable;
+import org.apache.geode.test.dunit.Invoke;
 import org.apache.geode.test.dunit.VM;
-import org.apache.geode.test.dunit.Wait;
-import org.apache.geode.test.dunit.WaitCriterion;
 import org.apache.geode.test.junit.categories.DistributedTest;
+import org.apache.geode.test.junit.rules.serializable.SerializableTemporaryFolder;
+import org.apache.geode.test.junit.rules.serializable.SerializableTestName;
 
 /**
  * This class checks and verifies various data and operations exposed through
  * MemberMXBean interface.
  * <p>
- * Goal of the Test : MemberMBean gets created once cache is created. Data like
+ * <p>Goal of the Test : MemberMBean gets created once cache is created. Data like
  * config data and stats are of proper value To check proper federation of
  * MemberMBean including remote ops and remote data access
+ * <p>
+ * <p>This test is a mess and needs to be rewritten.
  */
 @Category(DistributedTest.class)
-public class CacheManagementDUnitTest extends ManagementTestBase {
+@SuppressWarnings({ "serial", "unused" })
+public class CacheManagementDUnitTest implements Serializable {
+
+  /** used in memberVMs */
+  private static final String NOTIFICATION_REGION_NAME = "NotifTestRegion_";
+
+  /** used in managerVM */
+  private static final List<Notification> notifications = new ArrayList<>();
+
+  @Manager
+  private VM managerVM;
+
+  @Member
+  private VM[] memberVMs;
 
-  private final String VERIFY_CONFIG_METHOD = "verifyConfigData";
+  @Rule
+  public ManagementTestRule managementTestRule = ManagementTestRule.builder().build();
 
-  private final String VERIFY_REMOTE_CONFIG_METHOD = "verifyConfigDataRemote";
+  @Rule
+  public SerializableTemporaryFolder temporaryFolder = new SerializableTemporaryFolder();
 
-  static final List<Notification> notifList = new ArrayList<Notification>();
+  @Rule
+  public SerializableTestName testName = new SerializableTestName();
 
-  // This must be bigger than the dunit ack-wait-threshold for the revoke
-  // tests. The command line is setting the ack-wait-threshold to be
-  // 60 seconds.
-  private static final int MAX_WAIT = 70 * 1000;
+  @Before
+  public void before() throws Exception {
+    this.managerVM.invoke(() -> notifications.clear());
+  }
 
   @Test
   public void testGemFireConfigData() throws Exception {
-    initManagement(false);
+    this.managementTestRule.createMembers();
+    this.managementTestRule.createManagers();
 
-    Map<DistributedMember, DistributionConfig> configMap = new HashMap<DistributedMember, DistributionConfig>();
-    for (VM vm : getManagedNodeList()) {
-      Map<DistributedMember, DistributionConfig> configMapMember = (Map<DistributedMember, DistributionConfig>) vm.invoke(CacheManagementDUnitTest.class, VERIFY_CONFIG_METHOD);
+    Map<DistributedMember, DistributionConfig> configMap = new HashMap<>();
+    for (VM memberVM : this.memberVMs) {
+      Map<DistributedMember, DistributionConfig> configMapMember = memberVM.invoke(() -> verifyConfigData());
       configMap.putAll(configMapMember);
     }
 
-    Object[] args = new Object[1];
-    args[0] = configMap;
-    getManagingNode().invoke(CacheManagementDUnitTest.class, VERIFY_REMOTE_CONFIG_METHOD, args);
+    this.managerVM.invoke(() -> verifyConfigDataRemote(configMap));
   }
 
   /**
@@ -100,29 +121,34 @@ public class CacheManagementDUnitTest extends ManagementTestBase {
    */
   @Test
   public void testMemberMBeanOperations() throws Exception {
-    initManagement(false);
+    int i = 1;
+    for (VM memberVM : this.memberVMs) {
+      Properties props = new Properties();
+      props.setProperty(LOG_FILE, this.temporaryFolder.newFile(this.testName.getMethodName() + "-VM" + i + ".log").getAbsolutePath());
+      this.managementTestRule.createMember(memberVM, props);
+      i++;
+    }
 
-    for (VM vm : managedNodeList) {
+    this.managementTestRule.createManagers();
 
-      //Do some operations to fill the logs
+    for (VM memberVM : this.memberVMs) {
+      String logMessage = "This line should be in the log";
+      memberVM.invoke(() -> this.managementTestRule.getCache().getLogger().info(logMessage));
 
-      createLocalRegion(vm, "testRegion");
+      String log = memberVM.invoke(() -> fetchLog(30));
+      assertThat(log).isNotNull();
+      assertThat(log).contains(logMessage);
 
-      String log = (String) vm.invoke(() -> CacheManagementDUnitTest.fetchLog());
-      assertNotNull(log);
-      LogWriterUtils.getLogWriter().info("<ExpectedString> Log Of Member is " + log.toString() + "</ExpectedString> ");
+      JVMMetrics jvmMetrics = memberVM.invoke(() -> fetchJVMMetrics());
 
-      vm.invoke(() -> CacheManagementDUnitTest.fetchJVMMetrics());
+      OSMetrics osMetrics = memberVM.invoke(() -> fetchOSMetrics());
 
-      vm.invoke(() -> CacheManagementDUnitTest.fetchOSMetrics());
+      // TODO: need assertions
 
-      vm.invoke(() -> CacheManagementDUnitTest.shutDownMember());
+      memberVM.invoke(() -> shutDownMember());
     }
 
-    VM managingNode = getManagingNode();
-    Object[] args = new Object[1];
-    args[0] = 1;// Only locator member wont be shutdown
-    managingNode.invoke(CacheManagementDUnitTest.class, "assertExpectedMembers", args);
+    this.managerVM.invoke(() -> verifyExpectedMembers(0));
   }
 
   /**
@@ -130,263 +156,254 @@ public class CacheManagementDUnitTest extends ManagementTestBase {
    */
   @Test
   public void testMemberMBeanOpsRemote() throws Exception {
-    initManagement(false);
-    getManagingNode().invoke(() -> CacheManagementDUnitTest.invokeRemoteOps());
+    this.managementTestRule.createMembers();
+    this.managementTestRule.createManagers();
+    this.managerVM.invoke(() -> invokeRemoteMemberMXBeanOps());
   }
 
   /**
-   * Creates and starts a manager.
+   * Creates and starts a managerVM.
    * Multiple Managers
    */
   @Test
   public void testManager() throws Exception {
-    List<VM> managedNodeList = getManagedNodeList();
-    VM node1 = managedNodeList.get(0);
-    VM node2 = managedNodeList.get(1);
-    VM node3 = managedNodeList.get(2);
-    VM managingNode = getManagingNode();
+    this.managementTestRule.createMember(this.memberVMs[0]);
+    this.managementTestRule.createMember(this.memberVMs[1]);
 
-    createCache(node1);
-    createCache(node2);
-    createManagementCache(node3);
+    this.managementTestRule.createManager(this.memberVMs[2], false);
 
-    // Only creates a cache in Managing Node
-    // Does not start the manager
-    createManagementCache(managingNode);
+    this.managementTestRule.createManager(this.managerVM, false);
 
-    node3.invoke(() -> CacheManagementDUnitTest.startManager());
+    this.memberVMs[2].invoke(() -> startManager());
 
-    // Now start Managing node manager. System will have two Managers now which
+    // Now start Managing node managerVM. System will have two Managers now which
     // should be OK
-    DistributedMember member = getMember(node3);
-    startManagingNode(managingNode);
-    checkManagerView(managingNode, member);
-    stopManagingNode(managingNode);
+    DistributedMember member = this.managementTestRule.getDistributedMember(this.memberVMs[2]);
+    this.managementTestRule.startManager(this.managerVM);
+
+    verifyManagerStarted(this.managerVM, member);
+    this.managementTestRule.stopManager(this.managerVM);
   }
 
   /**
-   * Creates and starts a manager.
+   * Creates and starts a managerVM.
    * Multiple Managers
    */
   @Test
   public void testManagerShutdown() throws Exception {
-    List<VM> managedNodeList = getManagedNodeList();
-    VM node1 = managedNodeList.get(0);
-    VM node2 = managedNodeList.get(1);
-    VM node3 = managedNodeList.get(2);
-    VM managingNode = getManagingNode();
+    this.managementTestRule.createMember(this.memberVMs[0]);
+    this.managementTestRule.createMember(this.memberVMs[1]);
+    this.managementTestRule.createMember(this.memberVMs[2]);
 
-    createCache(node1);
-    createCache(node2);
-    createCache(node3);
+    this.managementTestRule.createManager(this.managerVM, false);
+    this.managementTestRule.startManager(this.managerVM);
 
-    // Only creates a cache in Managing Node
-    // Does not start the manager
-    createManagementCache(managingNode);
+    verifyManagerStarted(this.managerVM, this.managementTestRule.getDistributedMember(this.memberVMs[0]));
 
-    startManagingNode(managingNode);
-    DistributedMember member = getMember(managingNode);
-    checkManagerView(managingNode, member);
-    stopManagingNode(managingNode);
-    checkNonManagerView(managingNode);
+    this.managementTestRule.stopManager(this.managerVM);
+    verifyManagerStopped(this.managerVM, this.memberVMs.length);
   }
 
   @Test
-  public void testServiceCloseManagedNode() throws Exception {
-    List<VM> managedNodeList = getManagedNodeList();
-    VM node1 = managedNodeList.get(0);
-    VM node2 = managedNodeList.get(1);
-    VM node3 = managedNodeList.get(2);
-    VM managingNode = getManagingNode();
+  public void closeCacheShouldStopLocalManager() throws Exception {
+    this.managementTestRule.createMember(this.memberVMs[0]);
+    this.managementTestRule.createMember(this.memberVMs[1]);
 
-    createCache(node1);
-    createCache(node2);
-    createManagementCache(node3);
+    this.managementTestRule.createManager(this.memberVMs[2], false);
 
     // Only creates a cache in Managing Node
-    // Does not start the manager
-    createManagementCache(managingNode);
-
-    node3.invoke(() -> CacheManagementDUnitTest.startManager());
-
-    closeCache(node3);
-    validateServiceResource(node3);
+    // Does not start the managerVM
+    this.managementTestRule.createManager(this.managerVM, false);
+
+    this.memberVMs[2].invoke(() -> startManager());
+
+    this.memberVMs[2].invoke(() -> {
+      SystemManagementService service = this.managementTestRule.getSystemManagementService();
+      LocalManager localManager = service.getLocalManager();
+      this.managementTestRule.getCache().close();
+      assertThat(localManager.isRunning()).isFalse();
+      assertThat(service.isManager()).isFalse();
+      assertThat(service.getLocalManager()).isNull();
+    });
   }
 
   @Test
   public void testGetMBean() throws Exception {
-    List<VM> managedNodeList = getManagedNodeList();
-    VM node1 = managedNodeList.get(0);
-    VM node2 = managedNodeList.get(1);
-    VM node3 = managedNodeList.get(2);
-    VM managingNode = getManagingNode();
+    this.managementTestRule.createMember(this.memberVMs[0]);
+    this.managementTestRule.createMember(this.memberVMs[1]);
+    this.managementTestRule.createMember(this.memberVMs[2]);
 
-    createCache(node1);
-    createCache(node2);
-    createCache(node3);
+    this.managementTestRule.createManager(this.managerVM, false);
 
-    createManagementCache(managingNode);
+    this.managementTestRule.startManager(this.managerVM);
 
-    startManagingNode(managingNode);
-
-    checkGetMBean(managingNode);
+    verifyGetMBeanInstance(this.managerVM);
   }
 
   @Test
   public void testQueryMBeans() throws Exception {
-    List<VM> managedNodeList = getManagedNodeList();
-    VM node1 = managedNodeList.get(0);
-    VM node2 = managedNodeList.get(1);
-    VM node3 = managedNodeList.get(2);
-    VM managingNode = getManagingNode();
-
-    createCache(node1);
-    createCache(node2);
-    createCache(node3);
+    this.managementTestRule.createMember(this.memberVMs[0]);
+    this.managementTestRule.createMember(this.memberVMs[1]);
+    this.managementTestRule.createMember(this.memberVMs[2]);
 
-    createManagementCache(managingNode);
+    this.managementTestRule.createManager(this.managerVM, false);
 
-    startManagingNode(managingNode);
+    this.managementTestRule.startManager(this.managerVM);
 
-    checkQueryMBeans(managingNode);
+    verifyQueryMBeans(this.managerVM);
   }
 
-  protected void checkQueryMBeans(final VM vm) {
-    SerializableRunnable validateServiceResource = new SerializableRunnable("Check Query MBeans") {
-      public void run() {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-
-        Set<DistributedMember> otherMembers = cache.getDistributionManager().getOtherNormalDistributionManagerIds();
-
-        Set<ObjectName> superSet = new HashSet<ObjectName>();
+  @Test
+  public void testNotification() throws Exception {
+    // Step : 1 : Create Managed Node Caches
+    this.managementTestRule.createMember(this.memberVMs[0]);
+    this.managementTestRule.createMember(this.memberVMs[1]);
+    this.managementTestRule.createMember(this.memberVMs[2]);
 
-        for (DistributedMember member : otherMembers) {
+    // Step : 2 : Create Managing Node Cache, start managerVM, add a notification
+    // handler to DistributedSystemMXBean
+    this.managementTestRule.createManager(this.managerVM, false);
+    this.managementTestRule.startManager(this.managerVM);
+    attachListenerToDistributedSystemMXBean(this.managerVM);
 
-          ObjectName memberMBeanName = managementService.getMemberMBeanName(member);
+    // Step : 3 : Verify Notification count, notification region sizes
+    verifyNotificationsAndRegionSize(this.memberVMs[0], this.memberVMs[1], this.memberVMs[2], this.managerVM);
+  }
 
-          waitForProxy(memberMBeanName, MemberMXBean.class);
-          Set<ObjectName> names = managementService.queryMBeanNames(member);
-          superSet.addAll(names);
-          assertTrue(names.contains(memberMBeanName));
+  @Test
+  public void testNotificationManagingNodeFirst() throws Exception {
+    // Step : 1 : Create Managing Node Cache, start managerVM, add a notification
+    // handler to DistributedSystemMXBean
+    this.managementTestRule.createManager(this.managerVM, false);
+    this.managementTestRule.startManager(this.managerVM);
 
-        }
+    attachListenerToDistributedSystemMXBean(this.managerVM);
 
-        Set<ObjectName> names = managementService.queryMBeanNames(cache.getDistributedSystem().getDistributedMember());
-        assertTrue(!superSet.contains(names));
-      }
-    };
-    vm.invoke(validateServiceResource);
+    // Step : 2 : Create Managed Node Caches
+    this.managementTestRule.createMember(this.memberVMs[0]);
+    this.managementTestRule.createMember(this.memberVMs[1]);
+    this.managementTestRule.createMember(this.memberVMs[2]);
 
+    // Step : 3 : Verify Notification count, notification region sizes
+    verifyNotificationsAndRegionSize(this.memberVMs[0], this.memberVMs[1], this.memberVMs[2], this.managerVM);
   }
 
-  protected void checkGetMBean(final VM vm) {
-    SerializableRunnable validateServiceResource = new SerializableRunnable("Check Get MBean") {
-      public void run() {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        Set<DistributedMember> otherMembers = cache.getDistributionManager().getOtherNormalDistributionManagerIds();
+  @Test
+  public void testRedundancyZone() throws Exception {
+    String redundancyZone = "ARMY_ZONE";
+
+    Properties props = new Properties();
+    props.setProperty(REDUNDANCY_ZONE, redundancyZone);
 
-        for (DistributedMember member : otherMembers) {
+    this.managementTestRule.createMember(this.memberVMs[0], props);
 
-          ObjectName memberMBeanName = managementService.getMemberMBeanName(member);
+    this.memberVMs[0].invoke("verifyRedundancyZone", () -> {
+      ManagementService service = this.managementTestRule.getExistingManagementService();
+      MemberMXBean memberMXBean = service.getMemberMXBean();
+      assertThat(memberMXBean.getRedundancyZone()).isEqualTo(redundancyZone);
+    });
+  }
 
-          waitForProxy(memberMBeanName, MemberMXBean.class);
+  private void verifyQueryMBeans(final VM vm) {
+    vm.invoke("validateQueryMBeans", () -> {
+      ManagementService service = this.managementTestRule.getManagementService();
+      Set<DistributedMember> otherMembers = this.managementTestRule.getOtherNormalMembers();
+      Set<ObjectName> superSet = new HashSet<>();
 
-          MemberMXBean bean = managementService.getMBeanInstance(memberMBeanName, MemberMXBean.class);
-          assertNotNull(bean);
-        }
+      for (DistributedMember member : otherMembers) {
+        ObjectName memberMBeanName = service.getMemberMBeanName(member);
 
-        DistributedMember thisMember = cache.getDistributedSystem().getDistributedMember();
-        ObjectName memberMBeanName = managementService.getMemberMBeanName(thisMember);
-        MemberMXBean bean = managementService.getMBeanInstance(memberMBeanName, MemberMXBean.class);
-        assertNotNull(bean);
+        awaitMemberMXBeanProxy(member);
 
+        Set<ObjectName> objectNames = service.queryMBeanNames(member);
+        superSet.addAll(objectNames);
+        assertThat(objectNames.contains(memberMBeanName)).isTrue();
       }
-    };
-    vm.invoke(validateServiceResource);
+
+      Set<ObjectName> names = service.queryMBeanNames(this.managementTestRule.getDistributedMember());
+      ObjectName[] arrayOfNames = names.toArray(new ObjectName[names.size()]);
+
+      assertThat(superSet).doesNotContain(arrayOfNames); // TODO: what value does this method have?
+    });
   }
 
-  protected void validateServiceResource(final VM vm) {
-    SerializableRunnable validateServiceResource = new SerializableRunnable("Valideate Management Service Resource") {
-      public void run() {
+  private void verifyGetMBeanInstance(final VM vm) {
+    vm.invoke("verifyGetMBeanInstance", () -> {
+      ManagementService service = this.managementTestRule.getManagementService();
+      Set<DistributedMember> otherMembers = this.managementTestRule.getOtherNormalMembers();
+
+      for (DistributedMember member : otherMembers) {
+        ObjectName memberMBeanName = service.getMemberMBeanName(member);
 
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        assertNull(cache);
-        assertFalse(managementService.isManager());
+        awaitMemberMXBeanProxy(member);
 
-        SystemManagementService service = (SystemManagementService) managementService;
-        assertNull(service.getLocalManager());
+        MemberMXBean memberMXBean = service.getMBeanInstance(memberMBeanName, MemberMXBean.class);
+        assertThat(memberMXBean).isNotNull();
       }
-    };
-    vm.invoke(validateServiceResource);
+
+      DistributedMember distributedMember = this.managementTestRule.getDistributedMember();
+      ObjectName memberMBeanName = service.getMemberMBeanName(distributedMember);
+      MemberMXBean memberMXBean = service.getMBeanInstance(memberMBeanName, MemberMXBean.class);
+      assertThat(memberMXBean).isNotNull();
+    });
   }
 
-  /**
-   * Creates a Distributed Region
-   */
-  protected AsyncInvocation checkManagerView(final VM vm, final DistributedMember oneManager) {
-    SerializableRunnable createRegion = new SerializableRunnable("Check Manager View") {
-      public void run() {
-
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        SystemManagementService service = (SystemManagementService) getManagementService();
-        ObjectName memberMBeanName = service.getMemberMBeanName(oneManager);
-        MemberMXBean bean = service.getMBeanProxy(memberMBeanName, MemberMXBean.class);
-        assertNotNull(bean);
-        //Ensure Data getting federated from Managing node
-        long t1 = bean.getMemberUpTime();
-        try {
-          this.wait(ManagementConstants.REFRESH_TIME * 3);
-        } catch (InterruptedException e) {
-          fail("interrupted");
-        }
-        long t2 = bean.getMemberUpTime();
+  private void verifyManagerStarted(final VM managerVM, final DistributedMember otherMember) {
+    managerVM.invoke("verifyManagerStarted", () -> {
+      SystemManagementService service = this.managementTestRule.getSystemManagementService();
+      assertThat(service.isManager()).isTrue();
 
-        assertTrue(t2 > t1);
+      assertThat(service.getLocalManager().isRunning()).isTrue();
 
-      }
-    };
-    return vm.invokeAsync(createRegion);
+      assertThat(service.getLocalManager().getFederationSheduler().isShutdown()).isFalse();
+
+      ObjectName memberMBeanName = service.getMemberMBeanName(otherMember);
+
+      await().until(() -> assertThat(service.getMBeanProxy(memberMBeanName, MemberMXBean.class)).isNotNull());
+      MemberMXBean memberMXBean = service.getMBeanProxy(memberMBeanName, MemberMXBean.class);
+
+      //Ensure Data getting federated from Managing node
+      long start = memberMXBean.getMemberUpTime();
+      await().until(() -> assertThat(memberMXBean.getMemberUpTime()).isGreaterThan(start));
+    });
   }
 
   /**
    * Add any Manager clean up asserts here
    */
-  protected void checkNonManagerView(final VM vm) {
-    SerializableRunnable checkNonManagerView = new SerializableRunnable("Check Non Manager View") {
-      public void run() {
-
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        assertNotNull(cache);
-        assertFalse(managementService.isManager());
-
-        SystemManagementService service = (SystemManagementService) managementService;
-        assertTrue(service.getLocalManager().isRunning());
-        assertFalse(service.getLocalManager().getFederationSheduler().isShutdown());
-
-        // Check for Proxies
-        Set<DistributedMember> otherMembers = cache.getDistributionManager().getOtherNormalDistributionManagerIds();
-        assertTrue(otherMembers.size() > 0);
-        for (DistributedMember member : otherMembers) {
-          Set<ObjectName> proxyNames = service.getFederatingManager().getProxyFactory().findAllProxies(member);
-          assertTrue(proxyNames.isEmpty());
-          ObjectName proxyMBeanName = service.getMemberMBeanName(member);
-          assertFalse(MBeanJMXAdapter.mbeanServer.isRegistered(proxyMBeanName));
-        }
+  private void verifyManagerStopped(final VM managerVM, final int otherMembersCount) {
+    managerVM.invoke("verifyManagerStopped", () -> {
+      SystemManagementService service = this.managementTestRule.getSystemManagementService();
+
+      assertThat(service.isManager()).isFalse();
+      assertThat(service.getLocalManager().isRunning()).isTrue();
+      assertThat(service.getLocalManager().getFederationSheduler().isShutdown()).isFalse();
 
+      // Check for Proxies
+      Set<DistributedMember> otherMembers = this.managementTestRule.getOtherNormalMembers();
+      assertThat(otherMembers).hasSize(otherMembersCount);
+
+      for (DistributedMember member : otherMembers) {
+        Set<ObjectName> proxyNames = service.getFederatingManager().getProxyFactory().findAllProxies(member);
+        assertThat(proxyNames).isEmpty();
+
+        ObjectName proxyMBeanName = service.getMemberMBeanName(member);
+        assertThat(MBeanJMXAdapter.mbeanServer.isRegistered(proxyMBeanName)).isFalse();
       }
-    };
-    vm.invoke(checkNonManagerView);
+    });
   }
 
-  public static Map<DistributedMember, DistributionConfig> verifyConfigData() {
-    GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-    ManagementService service = getManagementService();
-    DistributionConfig config = ((InternalDistributedSystem) cache.getDistributedSystem()).getConfig();
+  private Map<DistributedMember, DistributionConfig> verifyConfigData() {
+    ManagementService service = this.managementTestRule.getManagementService();
+    InternalDistributedSystem ids = (InternalDistributedSystem) this.managementTestRule.getCache().getDistributedSystem();
+    DistributionConfig config = ids.getConfig();
+
     MemberMXBean bean = service.getMemberMXBean();
     GemFireProperties data = bean.listGemFireProperties();
-    assertConfigEquals(config, data);
-    Map<DistributedMember, DistributionConfig> configMap = new HashMap<DistributedMember, DistributionConfig>();
-    configMap.put(cache.getMyId(), config);
+    verifyGemFirePropertiesData(config, data);
+
+    Map<DistributedMember, DistributionConfig> configMap = new HashMap<>();
+    configMap.put(ids.getDistributedMember(), config);
     return configMap;
   }
 
@@ -394,15 +411,15 @@ public class CacheManagementDUnitTest extends ManagementTestBase {
    * This is to check whether the config data has been propagated to the
    * Managing node properly or not.
    */
-  public static void verifyConfigDataRemote(Map<DistributedMember, DistributionConfig> configMap) throws Exception {
-    GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-    Set<DistributedMember> otherMemberSet = cache.getDistributionManager().getOtherNormalDistributionManagerIds();
+  private void verifyConfigDataRemote(final Map<DistributedMember, DistributionConfig> configMap) throws Exception {
+    Set<DistributedMember> otherMembers = this.managementTestRule.getOtherNormalMembers();
+
+    for (DistributedMember member : otherMembers) {
+      MemberMXBean memberMXBean = awaitMemberMXBeanProxy(member);
 
-    for (DistributedMember member : otherMemberSet) {
-      MemberMXBean bean = MBeanUtil.getMemberMbeanProxy(member);
-      GemFireProperties data = bean.listGemFireProperties();
+      GemFireProperties data = memberMXBean.listGemFireProperties();
       DistributionConfig config = configMap.get(member);
-      assertConfigEquals(config, data);
+      verifyGemFirePropertiesData(config, data);
     }
   }
 
@@ -410,358 +427,238 @@ public class CacheManagementDUnitTest extends ManagementTestBase {
    * Asserts that distribution config and gemfireProperty composite types hold
    * the same values
    */
-  public static void assertConfigEquals(DistributionConfig config, GemFireProperties data) {
+  private void verifyGemFirePropertiesData(final DistributionConfig config, final GemFireProperties data) {
+    assertThat(data.getMemberName()).isEqualTo(config.getName());
 
-    assertEquals(data.getMemberName(), config.getName());
     // **TODO **
     String memberGroups = null;
 
-    assertEquals(data.getMcastPort(), config.getMcastPort());
-    assertEquals(data.getMcastAddress(), config.getMcastAddress().getHostAddress());
-    assertEquals(data.getBindAddress(), config.getBindAddress());
-    assertEquals(data.getTcpPort(), config.getTcpPort());
-    assertEquals(removeVMDir(data.getCacheXMLFile()), removeVMDir(config.getCacheXmlFile().getAbsolutePath()));
+    assertThat(data.getMcastPort()).isEqualTo(config.getMcastPort());
+    assertThat(data.getMcastAddress()).isEqualTo(config.getMcastAddress().getHostAddress());
+    assertThat(data.getBindAddress()).isEqualTo(config.getBindAddress());
+    assertThat(data.getTcpPort()).isEqualTo(config.getTcpPort());
+    assertThat(removeVMDir(data.getCacheXMLFile())).isEqualTo(removeVMDir(config.getCacheXmlFile().getAbsolutePath()));
+
     // **TODO **
-    assertEquals(data.getMcastTTL(), config.getMcastTtl());
-    assertEquals(data.getServerBindAddress(), config.getServerBindAddress());
-    assertEquals(data.getLocators(), config.getLocators());
+    assertThat(data.getMcastTTL()).isEqualTo(config.getMcastTtl());
+    assertThat(data.getServerBindAddress()).isEqualTo(config.getServerBindAddress());
+    assertThat(data.getLocators()).isEqualTo(config.getLocators());
+
     //The start locator may contain a directory
-    assertEquals(removeVMDir(data.getStartLocator()), removeVMDir(config.getStartLocator()));
-    assertEquals(removeVMDir(data.getLogFile()), removeVMDir(config.getLogFile().getAbsolutePath()));
-    assertEquals(data.getLogLevel(), config.getLogLevel());
-    assertEquals(data.isStatisticSamplingEnabled(), config.getStatisticSamplingEnabled());
-    assertEquals(removeVMDir(data.getStatisticArchiveFile()), removeVMDir(config.getStatisticArchiveFile().getAbsolutePath()));
+    assertThat(removeVMDir(data.getStartLocator())).isEqualTo(removeVMDir(config.getStartLocator()));
+    assertThat(removeVMDir(data.getLogFile())).isEqualTo(removeVMDir(config.getLogFile().getAbsolutePath()));
+    assertThat(data.getLogLevel()).isEqualTo(config.getLogLevel());
+    assertThat(data.isStatisticSamplingEnabled()).isEqualTo(config.getStatisticSamplingEnabled());
+    assertThat(removeVMDir(data.getStatisticArchiveFile())).isEqualTo(removeVMDir(config.getStatisticArchiveFile().getAbsolutePath()));
+
     // ** TODO **
     String includeFile = null;
-    assertEquals(data.getAckWaitThreshold(), config.getAckWaitThreshold());
-    assertEquals(data.getAckSevereAlertThreshold(), config.getAckSevereAlertThreshold());
-    assertEquals(data.getArchiveFileSizeLimit(), config.getArchiveFileSizeLimit());
-    assertEquals(data.getArchiveDiskSpaceLimit(), config.getArchiveDiskSpaceLimit());
-    assertEquals(data.getLogFileSizeLimit(), config.getLogFileSizeLimit());
-    assertEquals(data.getLogDiskSpaceLimit(), config.getLogDiskSpaceLimit());
-    assertEquals(data.isClusterSSLEnabled(), config.getClusterSSLEnabled());
-
-    assertEquals(data.getClusterSSLCiphers(), config.getClusterSSLCiphers());
-    assertEquals(data.getClusterSSLProtocols(), config.getClusterSSLProtocols());
-    assertEquals(data.isClusterSSLRequireAuthentication(), config.getClusterSSLRequireAuthentication());
-    assertEquals(data.getSocketLeaseTime(), config.getSocketLeaseTime());
-    assertEquals(data.getSocketBufferSize(), config.getSocketBufferSize());
-    assertEquals(data.getMcastSendBufferSize(), config.getMcastSendBufferSize());
-    assertEquals(data.getMcastRecvBufferSize(), config.getMcastRecvBufferSize());
-    assertEquals(data.getMcastByteAllowance(), config.getMcastFlowControl().getByteAllowance());
-    assertEquals(data.getMcastRechargeThreshold(), config.getMcastFlowControl().getRechargeThreshold(), 0);
-    assertEquals(data.getMcastRechargeBlockMs(), config.getMcastFlowControl().getRechargeBlockMs());
-    assertEquals(data.getUdpFragmentSize(), config.getUdpFragmentSize());
-    assertEquals(data.getUdpSendBufferSize(), config.getUdpSendBufferSize());
-    assertEquals(data.getUdpRecvBufferSize(), config.getUdpRecvBufferSize());
-    assertEquals(data.isDisableTcp(), config.getDisableTcp());
-    assertEquals(data.isEnableTimeStatistics(), config.getEnableTimeStatistics());
-    assertEquals(data.isEnableNetworkPartitionDetection(), config.getEnableNetworkPartitionDetection());
-    assertEquals(data.getMemberTimeout(), config.getMemberTimeout());
-
-    int[] configPortRange = config.getMembershipPortRange();
-    int[] dataPortRange = data.getMembershipPortRange();
-
-    assertEquals(dataPortRange.length, configPortRange.length);
-    for (int i = 0; i < dataPortRange.length; i++) {
-      assertEquals(dataPortRange[i], configPortRange[i]);
-    }
-    assertEquals(data.isConserveSockets(), config.getConserveSockets());
-    assertEquals(data.getRoles(), config.getRoles());
-    assertEquals(data.getMaxWaitTimeForReconnect(), config.getMaxWaitTimeForReconnect());
-    assertEquals(data.getMaxNumReconnectTries(), config.getMaxNumReconnectTries());
-    assertEquals(data.getAsyncDistributionTimeout(), config.getAsyncDistributionTimeout());
-    assertEquals(data.getAsyncQueueTimeout(), config.getAsyncQueueTimeout());
-    assertEquals(data.getAsyncMaxQueueSize(), config.getAsyncMaxQueueSize());
-    assertEquals(data.getClientConflation(), config.getClientConflation());
-    assertEquals(data.getDurableClientId(), config.getDurableClientId());
-    assertEquals(data.getDurableClientTimeout(), config.getDurableClientTimeout());
-    assertEquals(data.getSecurityClientAuthInit(), config.getSecurityClientAuthInit());
-    assertEquals(data.getSecurityClientAuthenticator(), config.getSecurityClientAuthenticator());
-    assertEquals(data.getSecurityClientDHAlgo(), config.getSecurityClientDHAlgo());
-    assertEquals(data.getSecurityPeerAuthInit(), config.getSecurityPeerAuthInit());
-    assertEquals(data.getSecurityClientAuthenticator(), config.getSecurityPeerAuthenticator());
-    assertEquals(data.getSecurityClientAccessor(), config.getSecurityClientAccessor());
-    assertEquals(data.getSecurityClientAccessorPP(), config.getSecurityClientAccessorPP());
-    assertEquals(data.getSecurityLogLevel(), config.getSecurityLogLevel());
-    assertEquals(removeVMDir(data.getSecurityLogFile()), removeVMDir(config.getSecurityLogFile().getAbsolutePath()));
-    assertEquals(data.getSecurityPeerMembershipTimeout(), config.getSecurityPeerMembershipTimeout());
-    assertEquals(data.isRemoveUnresponsiveClient(), config.getRemoveUnresponsiveClient());
-    assertEquals(data.isDeltaPropagation(), config.getDeltaPropagation());
-    assertEquals(data.getRedundancyZone(), config.getRedundancyZone());
-    assertEquals(data.isEnforceUniqueHost(), config.getEnforceUniqueHost());
-    assertEquals(data.getStatisticSampleRate(), config.getStatisticSampleRate());
+    assertThat(data.getAckWaitThreshold()).isEqualTo(config.getAckWaitThreshold());
+    assertThat(data.getAckSevereAlertThreshold()).isEqualTo(config.getAckSevereAlertThreshold());
+    assertThat(data.getArchiveFileSizeLimit()).isEqualTo(config.getArchiveFileSizeLimit());
+    assertThat(data.getArchiveDiskSpaceLimit()).isEqualTo(config.getArchiveDiskSpaceLimit());
+    assertThat(data.getLogFileSizeLimit()).isEqualTo(config.getLogFileSizeLimit());
+    assertThat(data.getLogDiskSpaceLimit()).isEqualTo(config.getLogDiskSpaceLimit());
+    assertThat(data.isClusterSSLEnabled()).isEqualTo(config.getClusterSSLEnabled());
+
+    assertThat(data.getClusterSSLCiphers()).isEqualTo(config.getClusterSSLCiphers());
+    assertThat(data.getClusterSSLProtocols()).isEqualTo(config.getClusterSSLProtocols());
+    assertThat(data.isClusterSSLRequireAuthentication()).isEqualTo(config.getClusterSSLRequireAuthentication());
+    assertThat(data.getSocketLeaseTime()).isEqualTo(config.getSocketLeaseTime());
+    assertThat(data.getSocketBufferSize()).isEqualTo(config.getSocketBufferSize());
+    assertThat(data.getMcastSendBufferSize()).isEqualTo(config.getMcastSendBufferSize());
+    assertThat(data.getMcastRecvBufferSize()).isEqualTo(config.getMcastRecvBufferSize());
+    assertThat(data.getMcastByteAllowance()).isEqualTo(config.getMcastFlowControl().getByteAllowance());
+    assertThat(data.getMcastRechargeThreshold()).isEqualTo(config.getMcastFlowControl().getRechargeThreshold());
+    assertThat(data.getMcastRechargeBlockMs()).isEqualTo(config.getMcastFlowControl().getRechargeBlockMs());
+    assertThat(data.getUdpFragmentSize()).isEqualTo(config.getUdpFragmentSize());
+    assertThat(data.getUdpSendBufferSize()).isEqualTo(config.getUdpSendBufferSize());
+    assertThat(data.getUdpRecvBufferSize()).isEqualTo(config.getUdpRecvBufferSize());
+    assertThat(data.isDisableTcp()).isEqualTo(config.getDisableTcp());
+    assertThat(data.isEnableTimeStatistics()).isEqualTo(config.getEnableTimeStatistics());
+    assertThat(data.isEnableNetworkPartitionDetection()).isEqualTo(config.getEnableNetworkPartitionDetection());
+    assertThat(data.getMemberTimeout()).isEqualTo(config.getMemberTimeout());
+
+    assertThat(data.getMembershipPortRange()).containsExactly(config.getMembershipPortRange());
+
+    assertThat(data.isConserveSockets()).isEqualTo(config.getConserveSockets());
+    assertThat(data.getRoles()).isEqualTo(config.getRoles());
+    assertThat(data.getMaxWaitTimeForReconnect()).isEqualTo(config.getMaxWaitTimeForReconnect());
+    assertThat(data.getMaxNumReconnectTries()).isEqualTo(config.getMaxNumReconnectTries());
+    assertThat(data.getAsyncDistributionTimeout()).isEqualTo(config.getAsyncDistributionTimeout());
+    assertThat(data.getAsyncMaxQueueSize()).isEqualTo(config.getAsyncMaxQueueSize());
+    assertThat(data.getClientConflation()).isEqualTo(config.getClientConflation());
+    assertThat(data.getDurableClientId()).isEqualTo(config.getDurableClientId());
+    assertThat(data.getDurableClientTimeout()).isEqualTo(config.getDurableClientTimeout());
+    assertThat(data.getSecurityClientAuthInit()).isEqualTo(config.getSecurityClientAuthInit());
+    assertThat(data.getSecurityClientAuthenticator()).isEqualTo(config.getSecurityClientAuthenticator());
+    assertThat(data.getSecurityClientDHAlgo()).isEqualTo(config.getSecurityClientDHAlgo());
+    assertThat(data.getSecurityPeerAuthInit()).isEqualTo(config.getSecurityPeerAuthInit());
+    assertThat(data.getSecurityClientAuthenticator()).isEqualTo(config.getSecurityPeerAuthenticator());
+    assertThat(data.getSecurityClientAccessor()).isEqualTo(config.getSecurityClientAccessor());
+    assertThat(data.getSecurityClientAccessorPP()).isEqualTo(config.getSecurityClientAccessorPP());
+    assertThat(data.getSecurityLogLevel()).isEqualTo(config.getSecurityLogLevel());
+    assertThat(removeVMDir(data.getSecurityLogFile())).isEqualTo(removeVMDir(config.getSecurityLogFile().getAbsolutePath()));
+    assertThat(data.getSecurityPeerMembershipTimeout()).isEqualTo(config.getSecurityPeerMembershipTimeout());
+    assertThat(data.isRemoveUnresponsiveClient()).isEqualTo(config.getRemoveUnresponsiveClient());
+    assertThat(data.isDeltaPropagation()).isEqualTo(config.getDeltaPropagation());
+    assertThat(data.getRedundancyZone()).isEqualTo(config.getRedundancyZone());
+    assertThat(data.isEnforceUniqueHost()).isEqualTo(config.getEnforceUniqueHost());
+    assertThat(data.getStatisticSampleRate()).isEqualTo(config.getStatisticSampleRate());
   }
 
   private static String removeVMDir(String string) {
     return string.replaceAll("vm.", "");
   }
 
-  public static void startManager() {
-    MemberMXBean bean = getManagementService().getMemberMXBean();
-    // When the cache is created if jmx-manager is true then we create the manager.
-    // So it may already exist when we get here.
-    if (!bean.isManagerCreated()) {
-      if (!bean.createManager()) {
-        fail("Could not create Manager");
-      } else if (!bean.isManagerCreated()) {
-        fail("Should have been a manager after createManager returned true.");
-      }
+  private void startManager() throws JMException {
+    ManagementService service = this.managementTestRule.getManagementService();
+    MemberMXBean memberMXBean = service.getMemberMXBean();
+    if (memberMXBean.isManagerCreated()) {
+      return;
     }
-    ManagerMXBean mngrBean = getManagementService().getManagerMXBean();
-    try {
-      mngrBean.start();
-    } catch (JMException e) {
-      fail("Could not start Manager " + e);
-    }
-    assertTrue(mngrBean.isRunning());
-    assertTrue(getManagementService().isManager());
-    assertTrue(bean.isManager());
-  }
-
-  public static void isManager() {
-    MemberMXBean bean = getManagementService().getMemberMXBean();
-    if (bean.createManager()) {
-      ManagerMXBean mngrBean = getManagementService().getManagerMXBean();
-      try {
-        mngrBean.start();
-      } catch (JMException e) {
-        fail("Could not start Manager " + e);
-      }
-    } else {
-      fail(" Could not create Manager");
-    }
-  }
 
-  public static String fetchLog() {
-    MemberMXBean bean = getManagementService().getMemberMXBean();
-    String log = bean.showLog(30);
-    return log;
-  }
-
-  public static void fetchJVMMetrics() {
-    MemberMXBean bean = getManagementService().getMemberMXBean();
-    JVMMetrics metrics = bean.showJVMMetrics();
+    // TODO: cleanup this mess
+    // When the cache is created, if jmx-manager is true then the manager is created as well,
+    // so it may already exist when we get here.
 
-    LogWriterUtils.getLogWriter().info("<ExpectedString> JVMMetrics is " + metrics.toString() + "</ExpectedString> ");
-  }
+    assertThat(memberMXBean.createManager()).isTrue();
+    assertThat(memberMXBean.isManagerCreated()).isTrue();
 
-  public static void fetchOSMetrics() {
-    MemberMXBean bean = getManagementService().getMemberMXBean();
-    OSMetrics metrics = bean.showOSMetrics();
+    ManagerMXBean managerMXBean = service.getManagerMXBean();
+    managerMXBean.start();
 
-    LogWriterUtils.getLogWriter().info("<ExpectedString> OSMetrics is " + metrics.toString() + "</ExpectedString> ");
+    assertThat(managerMXBean.isRunning()).isTrue();
+    assertThat(memberMXBean.isManager()).isTrue();
+    assertThat(service.isManager()).isTrue();
   }
 
-  public static void shutDownMember() {
-    MemberMXBean bean = getManagementService().getMemberMXBean();
-    bean.shutDownMember();
+  private String fetchLog(final int numberOfLines) {
+    ManagementService service = this.managementTestRule.getManagementService();
+    MemberMXBean memberMXBean = service.getMemberMXBean();
+    return memberMXBean.showLog(numberOfLines);
   }
 
-  public static void assertExpectedMembers(int expectedMemberCount) {
-    Wait.waitForCriterion(new WaitCriterion() {
-      public String description() {
-        return "Waiting all nodes to shutDown";
-      }
-
-      public boolean done() {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        Set<DistributedMember> setOfOtherMembers = cache.getDistributedSystem().getAllOtherMembers();
-        boolean done = (setOfOtherMembers != null && setOfOtherMembers.size() == 1);
-        return done;
-      }
-
-    }, MAX_WAIT, 500, true);
+  private JVMMetrics fetchJVMMetrics() {
+    ManagementService service = this.managementTestRule.getManagementService();
+    MemberMXBean memberMXBean = service.getMemberMXBean();
+    JVMMetrics metrics = memberMXBean.showJVMMetrics();
+    return metrics;
   }
 
-  public static void invokeRemoteOps() throws Exception {
-    GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-    Set<DistributedMember> otherMemberSet = cache.getDistributionManager().getOtherNormalDistributionManagerIds();
-
-    for (DistributedMember member : otherMemberSet) {
-      MemberMXBean bean = MBeanUtil.getMemberMbeanProxy(member);
-      JVMMetrics metrics = bean.showJVMMetrics();
-
-      LogWriterUtils.getLogWriter().info("<ExpectedString> JVMMetrics is " + metrics.toString() + "</ExpectedString> ");
-      LogWriterUtils.getLogWriter().info("<ExpectedString> OSMetrics is " + metrics.toString() + "</ExpectedString> ");
-
-      LogWriterUtils.getLogWriter().info("<ExpectedString> Boolean Data Check " + bean.isManager() + "</ExpectedString> ");
-    }
+  private OSMetrics fetchOSMetrics() {
+    ManagementService service = this.managementTestRule.getManagementService();
+    MemberMXBean memberMXBean = service.getMemberMXBean();
+    OSMetrics metrics = memberMXBean.showOSMetrics();
+    return metrics;
   }
 
-  @Test
-  public void testNotification() throws Exception {
-    List<VM> managedNodeList = getManagedNodeList();
-    VM node1 = managedNodeList.get(0);
-    VM node2 = managedNodeList.get(1);
-    VM node3 = managedNodeList.get(2);
-    VM managingNode = getManagingNode();
-
-    // Step : 1 : Create Managed Node Caches
-    createCache(node1);
-    createCache(node2);
-    createCache(node3);
-
-    // Step : 2 : Create Managing Node Cache, start manager, add a notification
-    // handler to DistributedSystemMXBean
-    createManagementCache(managingNode);
-    startManagingNode(managingNode);
-    attchListenerToDSMBean(managingNode);
-
-    // Step : 3 : Verify Notification count, notification region sizes
-    countNotificationsAndCheckRegionSize(node1, node2, node3, managingNode);
+  private void shutDownMember() {
+    ManagementService service = this.managementTestRule.getManagementService();
+    MemberMXBean memberMXBean = service.getMemberMXBean();
+    memberMXBean.shutDownMember();
   }
 
-  @Test
-  public void testNotificationManagingNodeFirst() throws Exception {
-    List<VM> managedNodeList = getManagedNodeList();
-    VM node1 = managedNodeList.get(0);
-    VM node2 = managedNodeList.get(1);
-    VM node3 = managedNodeList.get(2);
-    VM managingNode = getManagingNode();
-
-    // Step : 1 : Create Managing Node Cache, start manager, add a notification
-    // handler to DistributedSystemMXBean
-    createManagementCache(managingNode);
-    startManagingNode(managingNode);
-    attchListenerToDSMBean(managingNode);
-
-    // Step : 2 : Create Managed Node Caches
-    createCache(node1);
-    createCache(node2);
-    createCache(node3);
-
-    // Step : 3 : Verify Notification count, notification region sizes
-    countNotificationsAndCheckRegionSize(node1, node2, node3, managingNode);
+  private void verifyExpectedMembers(final int otherMembersCount) {
+    String alias = "awaiting " + this.managementTestRule.getOtherNormalMembers() + " to have size " + otherMembersCount;
+    await(alias).until(() -> assertThat(this.managementTestRule.getOtherNormalMembers()).hasSize(otherMembersCount));
   }
 
-  @Test
-  public void testRedundancyZone() throws Exception {
-    List<VM> managedNodeList = getManagedNodeList();
-    VM node1 = managedNodeList.get(0);
-    VM node2 = managedNodeList.get(1);
-    VM node3 = managedNodeList.get(2);
-    Properties props = new Properties();
-    props.setProperty(REDUNDANCY_ZONE, "ARMY_ZONE");
+  private void invokeRemoteMemberMXBeanOps() throws Exception {
+    Set<DistributedMember> otherMembers = this.managementTestRule.getOtherNormalMembers();
 
-    createCache(node1, props);
+    for (DistributedMember member : otherMembers) {
+      MemberMXBean memberMXBean = awaitMemberMXBeanProxy(member);
 
-    node1.invoke(new SerializableRunnable("Assert Redundancy Zone") {
+      JVMMetrics metrics = memberMXBean.showJVMMetrics();
 
-      public void run() {
-        ManagementService service = ManagementService.getExistingManagementService(getCache());
-        MemberMXBean bean = service.getMemberMXBean();
-        assertEquals("ARMY_ZONE", bean.getRedundancyZone());
-      }
-    });
-  }
+      String value = metrics.toString();
+      boolean isManager = memberMXBean.isManager();
 
-  protected void attchListenerToDSMBean(final VM vm) {
-    SerializableRunnable attchListenerToDSMBean = new SerializableRunnable("Attach Listener to DS MBean") {
-      public void run() {
-        assertTrue(managementService.isManager());
-        DistributedSystemMXBean dsMBean = managementService.getDistributedSystemMXBean();
-
-        // First clear the notification list
-        notifList.clear();
-
-        NotificationListener nt = new NotificationListener() {
-          @Override
-          public void handleNotification(Notification notification, Object handback) {
-            if (notification.getType().equals(JMXNotificationType.REGION_CREATED)) {
-              notifList.add(notification);
-            }
-          }
-        };
-
-        try {
-          mbeanServer.addNotificationListener(MBeanJMXAdapter.getDistributedSystemName(), nt, null, null);
-        } catch (InstanceNotFoundException e) {
-          throw new AssertionError("Failed With Exception ", e);
-        }
+      // TODO: need assertions
 
-      }
-    };
-    vm.invoke(attchListenerToDSMBean);
+      //("<ExpectedString> JVMMetrics is " + metrics.toString() + "</ExpectedString> ");
+      //("<ExpectedString> OSMetrics is " + metrics.toString() + "</ExpectedString> ");
+      //("<ExpectedString> Boolean Data Check " + bean.isManager() + "</ExpectedString> ");
+    }
   }
 
-  public void waitForManagerToRegisterListener() {
-    SystemManagementService service = (SystemManagementService) getManagementService();
-    final Map<ObjectName, NotificationHubListener> hubMap = service.getNotificationHub().getListenerObjectMap();
-
-    Wait.waitForCriterion(new WaitCriterion() {
-      public String description() {
-        return "Waiting for manager to register the listener";
-      }
+  private void attachListenerToDistributedSystemMXBean(final VM managerVM) {
+    managerVM.invoke("attachListenerToDistributedSystemMXBean", () -> {
+      ManagementService service = this.managementTestRule.getManagementService();
+      assertThat(service.isManager()).isTrue();
 
-      public boolean done() {
-        boolean done = (1 == hubMap.size());
-        return done;
-      }
+      NotificationListener listener = (final Notification notification, final Object handback) -> {
+        if (notification.getType().equals(JMXNotificationType.REGION_CREATED)) {
+          notifications.add(notification);
+        }
+      };
 
-    }, MAX_WAIT, 500, true);
+      ManagementFactory.getPlatformMBeanServer().addNotificationListener(MBeanJMXAdapter.getDistributedSystemName(), listener, null, null);
+    });
   }
 
-  public void countNotificationsAndCheckRegionSize(VM node1, VM node2, VM node3, VM managingNode) {
-
-    DistributedMember member1 = getMember(node1);
-    DistributedMember member2 = getMember(node2);
-    DistributedMember member3 = getMember(node3);
-
-    final String appender1 = MBeanJMXAdapter.getUniqueIDForMember(member1);
-    final String appender2 = MBeanJMXAdapter.getUniqueIDForMember(member2);
-    final String appender3 = MBeanJMXAdapter.getUniqueIDForMember(member3);
+  private void verifyNotificationsAndRegionSize(final VM memberVM1, final VM memberVM2, final VM memberVM3, final VM managerVM) {
+    DistributedMember member1 = this.managementTestRule.getDistributedMember(memberVM1);
+    DistributedMember member2 = this.managementTestRule.getDistributedMember(memberVM2);
+    DistributedMember member3 = this.managementTestRule.getDistributedMember(memberVM3);
 
-    node1.invoke("Create Regions", () -> createNotifTestRegion(appender1));
-    node2.invoke("Create Regions", () -> createNotifTestRegion(appender2));
-    node3.invoke("Create Regions", () -> createNotifTestRegion(appender3));
+    String memberId1 = MBeanJMXAdapter.getUniqueIDForMember(member1);
+    String memberId2 = MBeanJMXAdapter.getUniqueIDForMember(member2);
+    String memberId3 = MBeanJMXAdapter.getUniqueIDForMember(member3);
 
-    managingNode.invoke(new SerializableRunnable("Validate Notification Count") {
+    memberVM1.invoke("createNotificationRegion", () -> createNotificationRegion(memberId1));
+    memberVM2.invoke("createNotificationRegion", () -> createNotificationRegion(memberId2));
+    memberVM3.invoke("createNotificationRegion", () -> createNotificationRegion(memberId3));
 
-      public void run() {
+    managerVM.invoke("verify notifications size", () -> {
+      await().until(() -> assertThat(notifications.size()).isEqualTo(45));
 
-        Wait.waitForCriterion(new WaitCriterion() {
-          public String description() {
-            return "Waiting for all the RegionCreated notification to reach the manager " + notifList.size();
-          }
+      Cache cache = this.managementTestRule.getCache();
 
-          public boolean done() {
-            boolean done = (45 == notifList.size());
-            return done;
-          }
+      Region region1 = cache.getRegion(ManagementConstants.NOTIFICATION_REGION + "_" + memberId1);
+      Region region2 = cache.getRegion(ManagementConstants.NOTIFICATION_REGION + "_" + memberId2);
+      Region region3 = cache.getRegion(ManagementConstants.NOTIFICATION_REGION + "_" + memberId3);
 
-        }, MAX_WAIT, 500, true);
+      // Even though we got 15 notifications, only 10 should remain due to the
+      // eviction attributes set on the notification region
 
-        assertEquals(45, notifList.size());
-        Cache cache = getCache();
-        SystemManagementService service = (SystemManagementService) getManagementService();
+      await().until(() -> assertThat(region1).hasSize(10));
+      await().until(() -> assertThat(region2).hasSize(10));
+      await().until(() -> assertThat(region3).hasSize(10));
+    });
+  }
 
-        Region member1NotifRegion = cache.getRegion(ManagementConstants.NOTIFICATION_REGION + "_" + appender1);
-        Region member2NotifRegion = cache.getRegion(ManagementConstants.NOTIFICATION_REGION + "_" + appender2);
-        Region member3NotifRegion = cache.getRegion(ManagementConstants.NOTIFICATION_REGION + "_" + appender3);
+  private void createNotificationRegion(final String memberId) {
+    SystemManagementService service = this.managementTestRule.getSystemManagementService();
+    Map<ObjectName, NotificationHubListener> notificationHubListenerMap = service.getNotificationHub().getListenerObjectMap();
 
-        // Even though we got 15 notification only 10 should be there due to
-        // eviction attributes set in notification region
+    await().until(() -> assertThat(notificationHubListenerMap.size()).isEqualTo(1));
 
-        waitAtMost(5, TimeUnit.SECONDS).untilCall(to(member1NotifRegion).size(), equalTo(10));
-        waitAtMost(5, TimeUnit.SECONDS).untilCall(to(member2NotifRegion).size(), equalTo(10));
-        waitAtMost(5, TimeUnit.SECONDS).untilCall(to(member3NotifRegion).size(), equalTo(10));
-      }
-    });
+    RegionFactory regionFactory = this.managementTestRule.getCache().createRegionFactory(RegionShortcut.REPLICATE);
+    for (int i = 1; i <= 15; i++) {
+      regionFactory.create(NOTIFICATION_REGION_NAME + i);
+    }
+    Region region = this.managementTestRule.getCache().getRegion(ManagementConstants.NOTIFICATION_REGION + "_" + memberId);
 
+    assertThat(region).isEmpty();
   }
 
-  private void createNotifTestRegion(final String appender1) {
-    Cache cache = getCache();
+  private MemberMXBean awaitMemberMXBeanProxy(final DistributedMember member) {
+    SystemManagementService service = this.managementTestRule.getSystemManagementService();
+    ObjectName objectName = service.getMemberMBeanName(member);
 
-    waitForManagerToRegisterListener();
-    RegionFactory rf = cache.createRegionFactory(RegionShortcut.REPLICATE);
-    for (int i = 1; i <= 15; i++) {
-      rf.create("NotifTestRegion_" + i);
-    }
-    Region member1NotifRegion = cache.getRegion(ManagementConstants.NOTIFICATION_REGION + "_" + appender1);
+    String alias = "awaiting MemberMXBean proxy for " + member;
+    await(alias).until(() -> assertThat(service.getMBeanProxy(objectName, MemberMXBean.class)).isNotNull());
 
-    assertEquals(0, member1NotifRegion.size());
+    return service.getMBeanProxy(objectName, MemberMXBean.class);
   }
 
+  private ConditionFactory await() {
+    return Awaitility.await().atMost(2, MINUTES);
+  }
+
+  private ConditionFactory await(final String alias) {
+    return Awaitility.await(alias).atMost(2, MINUTES);
+  }
 }


[29/50] [abbrv] incubator-geode git commit: GEODE-1927: more protection from seeing com.gemstone.gemfire packaged objects

Posted by kl...@apache.org.
GEODE-1927: more protection from seeing com.gemstone.gemfire packaged objects

DeadlockDetector can read an archive of dependencies collected across the
distributed system.  This change adds a small ObjectInputStream customization to
the method that reads these archives so it can handle archives created before the
package rename.
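
For illustration only (this sketch is not part of the commit; the committed code is in
the diff below), the idea is an ObjectInputStream whose resolveClass() maps class
descriptors written under the old "com.gemstone.gemfire" prefix onto the renamed
"org.apache.geode" classes, falling back to the default lookup if the renamed class is
not on the classpath.  The class name RenamingObjectInputStream below is hypothetical:

import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.io.ObjectStreamClass;

// Sketch only -- a hypothetical stand-in for the stream added by this commit.
class RenamingObjectInputStream extends ObjectInputStream {

  private static final String OLD_PREFIX = "com.gemstone.gemfire";
  private static final String NEW_PREFIX = "org.apache.geode";

  RenamingObjectInputStream(InputStream in) throws IOException {
    super(in);
  }

  @Override
  protected Class<?> resolveClass(ObjectStreamClass desc) throws IOException, ClassNotFoundException {
    String name = desc.getName();
    if (name.startsWith(OLD_PREFIX)) {
      try {
        // Prefer the class under the renamed package.
        return Class.forName(NEW_PREFIX + name.substring(OLD_PREFIX.length()));
      } catch (ClassNotFoundException ignored) {
        // Fall through to the default resolution below.
      }
    }
    return super.resolveClass(desc);
  }
}

Reading an old archive then only requires swapping this stream in, e.g.
new RenamingObjectInputStream(new BufferedInputStream(new FileInputStream(file))).readObject().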


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/e130e5b6
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/e130e5b6
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/e130e5b6

Branch: refs/heads/feature/GEODE-1930
Commit: e130e5b62bcb2d3c561be90f46407dc49f100b4a
Parents: 56836e5
Author: Bruce Schuchardt <bs...@pivotal.io>
Authored: Tue Oct 18 15:47:20 2016 -0700
Committer: Bruce Schuchardt <bs...@pivotal.io>
Committed: Tue Oct 18 15:49:59 2016 -0700

----------------------------------------------------------------------
 .../internal/deadlock/DeadlockDetector.java     | 33 ++++++++++++++++++--
 1 file changed, 30 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e130e5b6/geode-core/src/main/java/org/apache/geode/distributed/internal/deadlock/DeadlockDetector.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/deadlock/DeadlockDetector.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/deadlock/DeadlockDetector.java
index 2c70418..65a521a 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/deadlock/DeadlockDetector.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/deadlock/DeadlockDetector.java
@@ -19,7 +19,10 @@ package org.apache.geode.distributed.internal.deadlock;
 import java.io.BufferedInputStream;
 import java.io.File;
 import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
 import java.io.ObjectInputStream;
+import java.io.ObjectStreamClass;
 import java.io.Serializable;
 import java.lang.management.LockInfo;
 import java.lang.management.ManagementFactory;
@@ -314,13 +317,37 @@ public class DeadlockDetector {
       System.exit(-1);
     }
 
-    ObjectInputStream ois = new ObjectInputStream(new BufferedInputStream(new FileInputStream(file)));
+    ObjectInputStream ois = new DDObjectInputStream(new BufferedInputStream(new FileInputStream(file)));
     DependencyGraph graph = (DependencyGraph) ois.readObject();
 
     return graph;
   }
-  
-  
+
+  private static class DDObjectInputStream extends ObjectInputStream {
+
+    /**
+     * Creates a new <code>DDObjectInputStream</code> that delegates
+     * its behavior to a given <code>InputStream</code>.
+     */
+    public DDObjectInputStream(InputStream stream) throws IOException {
+      super(stream);
+    }
+
+    @Override
+    protected Class resolveClass(ObjectStreamClass desc) throws IOException, ClassNotFoundException {
+
+      String className = desc.getName();
+      if (className.startsWith("com.gemstone.gemfire")) {
+        className = "org.apache.geode" + className.substring("com.gemstone.gemfire".length());
+      }
+      try {
+        Class clazz = Class.forName(className);
+        return clazz;
+      } catch (ClassNotFoundException ex) {
+        return super.resolveClass(desc);
+      }
+    }
+  }
+
   private static void printHelp() {
     System.out.println("DeadlockDetector reads serialized graphs of the state of the distributed");
     System.out.println("system created by collectDependencies.");


[47/50] [abbrv] incubator-geode git commit: Convert from ManagementTestCase to ManagementTestRule

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/management/LocatorManagementDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/LocatorManagementDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/LocatorManagementDUnitTest.java
index a426096..a91a2e1 100644
--- a/geode-core/src/test/java/org/apache/geode/management/LocatorManagementDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/LocatorManagementDUnitTest.java
@@ -16,54 +16,82 @@
  */
 package org.apache.geode.management;
 
+import static java.util.concurrent.TimeUnit.*;
 import static org.apache.geode.distributed.ConfigurationProperties.*;
-import static org.junit.Assert.*;
+import static org.apache.geode.internal.AvailablePortHelper.*;
+import static org.apache.geode.test.dunit.Host.*;
+import static org.apache.geode.test.dunit.NetworkUtils.*;
+import static org.assertj.core.api.Assertions.*;
 
 import java.io.File;
-import java.io.IOException;
+import java.io.Serializable;
 import java.net.InetAddress;
-import java.net.UnknownHostException;
 import java.util.Properties;
 
+import javax.management.ObjectName;
+
+import com.jayway.awaitility.Awaitility;
+import com.jayway.awaitility.core.ConditionFactory;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.distributed.Locator;
 import org.apache.geode.distributed.internal.DistributionConfig;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.distributed.internal.InternalLocator;
-import org.apache.geode.internal.AvailablePortHelper;
 import org.apache.geode.internal.cache.GemFireCacheImpl;
-import org.apache.geode.management.internal.ManagementConstants;
-import org.apache.geode.test.dunit.Assert;
+import org.apache.geode.management.internal.SystemManagementService;
 import org.apache.geode.test.dunit.Host;
-import org.apache.geode.test.dunit.LogWriterUtils;
-import org.apache.geode.test.dunit.SerializableCallable;
 import org.apache.geode.test.dunit.VM;
-import org.apache.geode.test.dunit.Wait;
-import org.apache.geode.test.dunit.WaitCriterion;
 import org.apache.geode.test.junit.categories.DistributedTest;
+import org.apache.geode.test.junit.rules.serializable.SerializableTemporaryFolder;
+import org.apache.geode.test.junit.rules.serializable.SerializableTestName;
 
 /**
- * Test cases
- * 
- * DistributedSystem Cache Locator no no yes yes no yes yes yes yes
+ * Distributed tests for managing {@code Locator} with {@link LocatorMXBean}.
  */
 @Category(DistributedTest.class)
-public class LocatorManagementDUnitTest extends ManagementTestBase {
+@SuppressWarnings({ "serial", "unused" })
+public class LocatorManagementDUnitTest implements Serializable {
+
+  private static final int MAX_WAIT_MILLIS = 120 * 1000;
+
+  private static final int ZERO = 0;
+
+  @Manager
+  private VM managerVM;
+  @Member
+  private VM[] membersVM;
+  private VM locatorVM;
 
-  private static final int MAX_WAIT = 8 * ManagementConstants.REFRESH_TIME;
+  private String hostName;
+  private int port;
 
-  private VM locator;
+  @Rule
+  public ManagementTestRule managementTestRule = ManagementTestRule.builder().build();
 
-  @Override
-  protected final void postSetUpManagementTestBase() throws Exception {
-    locator = managedNode1;
+  @Rule
+  public SerializableTemporaryFolder temporaryFolder = new SerializableTemporaryFolder();
+
+  @Rule
+  public SerializableTestName testName = new SerializableTestName();
+
+  @Before
+  public void before() throws Exception {
+//    this.managerVM = managingNode;
+//    this.membersVM = getManagedNodeList().toArray(new VM[getManagedNodeList().size()]);
+    this.locatorVM = this.membersVM[0];
+    this.hostName = getServerHostName(getHost(0));
+    this.port = getRandomAvailableTCPPort();
   }
 
-  @Override
-  protected final void preTearDownManagementTestBase() throws Exception {
-    stopLocator(locator);
+  @After
+  public void after() throws Exception {
+    stopLocator(this.locatorVM);
   }
 
   /**
@@ -72,48 +100,52 @@ public class LocatorManagementDUnitTest extends ManagementTestBase {
    */
   @Test
   public void testPeerLocation() throws Exception {
-    int locPort = AvailablePortHelper.getRandomAvailableTCPPort();
-    startLocator(locator, true, locPort);
-    locatorMBeanExist(locator, locPort, true);
+    startLocator(this.locatorVM, true, this.port);
+
+    verifyLocalLocatorMXBean(this.locatorVM, this.port, true);
 
-    Host host = Host.getHost(0);
-    String host0 = getServerHostName(host);
     Properties props = new Properties();
     props.setProperty(MCAST_PORT, "0");
-    props.setProperty(LOCATORS, host0 + "[" + locPort
-        + "]");
+    props.setProperty(LOCATORS, this.hostName + "[" + this.port + "]");
     props.setProperty(JMX_MANAGER, "true");
     props.setProperty(JMX_MANAGER_START, "false");
     props.setProperty(JMX_MANAGER_PORT, "0");
     props.setProperty(JMX_MANAGER_HTTP_PORT, "0");
-    createCache(managingNode, props);
-    startManagingNode(managingNode);
-    DistributedMember locatorMember = getMember(locator);
-    remoteLocatorMBeanExist(managingNode,locatorMember);
 
+    this.managementTestRule.createManager(this.managerVM, props, false);
+    this.managementTestRule.startManager(this.managerVM);
+
+    verifyRemoteLocatorMXBeanProxy(this.managerVM, this.managementTestRule.getDistributedMember(this.locatorVM));
   }
 
   @Test
   public void testPeerLocationWithPortZero() throws Exception {
-    // Start the locator with port=0
-    int locPort = startLocator(locator, true, 0);
-    locatorMBeanExist(locator, locPort, true);
+    this.port = startLocator(this.locatorVM, true, ZERO);
+    //this.locatorVM.invoke(() -> this.managementTestRule.getCache());
+
+    this.locatorVM.invoke(() -> assertHasCache());
+
+    verifyLocalLocatorMXBean(this.locatorVM, this.port, true);
 
-    Host host = Host.getHost(0);
-    String host0 = getServerHostName(host);
     Properties props = new Properties();
     props.setProperty(MCAST_PORT, "0");
-    props.setProperty(LOCATORS, host0 + "[" + locPort
-        + "]");
+    props.setProperty(LOCATORS, this.hostName + "[" + this.port + "]");
     props.setProperty(JMX_MANAGER, "true");
     props.setProperty(JMX_MANAGER_START, "false");
     props.setProperty(JMX_MANAGER_PORT, "0");
     props.setProperty(JMX_MANAGER_HTTP_PORT, "0");
-    createCache(managingNode, props);
-    startManagingNode(managingNode);
-    DistributedMember locatorMember = getMember(locator);
-    remoteLocatorMBeanExist(managingNode,locatorMember);
 
+    this.managementTestRule.createManager(this.managerVM, props, false);
+    this.managementTestRule.startManager(this.managerVM);
+
+    verifyRemoteLocatorMXBeanProxy(this.managerVM, this.managementTestRule.getDistributedMember(this.locatorVM));
+  }
+
+  private void assertHasCache() {
+    assertThat(GemFireCacheImpl.getInstance()).isNotNull();
+    assertThat(GemFireCacheImpl.getInstance().isClosed()).isFalse();
+    assertThat(InternalDistributedSystem.getAnyInstance()).isNotNull();
+    assertThat(InternalDistributedSystem.getAnyInstance().isConnected()).isTrue();
   }
 
   /**
@@ -121,276 +153,199 @@ public class LocatorManagementDUnitTest extends ManagementTestBase {
    */
   @Test
   public void testColocatedLocator() throws Exception {
-    initManagement(false);
-    int locPort = AvailablePortHelper.getRandomAvailableTCPPort();
-    startLocator(locator, false, locPort);
-    locatorMBeanExist(locator, locPort, false);
+    this.managementTestRule.createMembers();
+    this.managementTestRule.createManagers();
+
+    startLocator(this.locatorVM, false, this.port);
 
+    verifyLocalLocatorMXBean(this.locatorVM, this.port, false);
   }
 
   @Test
   public void testColocatedLocatorWithPortZero() throws Exception {
-    initManagement(false);
-    int locPort = startLocator(locator, false, 0);
-    locatorMBeanExist(locator, locPort, false);
+    this.managementTestRule.createMembers();
+    this.managementTestRule.createManagers();
+
+    this.port = startLocator(this.locatorVM, false, ZERO);
 
+    verifyLocalLocatorMXBean(this.locatorVM, this.port, false);
   }
 
   @Test
   public void testListManagers() throws Exception {
-    initManagement(false);
-    int locPort = AvailablePortHelper.getRandomAvailableTCPPort();
-    startLocator(locator, false, locPort);
-    listManagers(locator, locPort, false);
+    this.managementTestRule.createMembers();
+    this.managementTestRule.createManagers();
+
+    startLocator(this.locatorVM, false, this.port);
+
+    verifyListManagers(this.locatorVM);
   }
 
   @Test
   public void testListManagersWithPortZero() throws Exception {
-    initManagement(false);
-    int locPort = startLocator(locator, false, 0);
-    listManagers(locator, locPort, false);
+    this.managementTestRule.createMembers();
+    this.managementTestRule.createManagers();
+
+    this.port = startLocator(this.locatorVM, false, ZERO);
+
+    verifyListManagers(this.locatorVM);
   }
 
   @Test
   public void testWillingManagers() throws Exception {
-    int locPort = AvailablePortHelper.getRandomAvailableTCPPort();
-    startLocator(locator, true, locPort);
+    startLocator(this.locatorVM, true, this.port);
 
-    Host host = Host.getHost(0);
-    String host0 = getServerHostName(host);
-    
     Properties props = new Properties();
     props.setProperty(MCAST_PORT, "0");
-    props.setProperty(LOCATORS, host0 + "[" + locPort
-        + "]");
+    props.setProperty(LOCATORS, this.hostName + "[" + this.port + "]");
     props.setProperty(JMX_MANAGER, "true");
 
-    createCache(managedNode2, props);
-    createCache(managedNode3, props);
+    this.managementTestRule.createMember(this.membersVM[1], props);
+    this.managementTestRule.createMember(this.membersVM[2], props);
 
-    listWillingManagers(locator, locPort, false);
+    verifyListPotentialManagers(this.locatorVM);
   }
 
   @Test
   public void testWillingManagersWithPortZero() throws Exception {
-    int locPort = startLocator(locator, true, 0);
-
-    Host host = Host.getHost(0);
-    String host0 = getServerHostName(host);
+    this.port = startLocator(this.locatorVM, true, 0);
 
     Properties props = new Properties();
     props.setProperty(MCAST_PORT, "0");
-    props.setProperty(LOCATORS, host0 + "[" + locPort
-        + "]");
+    props.setProperty(LOCATORS, this.hostName + "[" + this.port + "]");
     props.setProperty(JMX_MANAGER, "true");
 
-    createCache(managedNode2, props);
-    createCache(managedNode3, props);
+    this.managementTestRule.createMember(this.membersVM[1], props);
+    this.managementTestRule.createMember(this.membersVM[2], props);
 
-    listWillingManagers(locator, locPort, false);
+    verifyListPotentialManagers(this.locatorVM);
   }
 
   /**
    * Starts a locator with given configuration.
    * If DS is already started it will use the same DS
-   * 
-   * @param vm
-   *          reference to VM
    */
-  protected Integer startLocator(final VM vm, final boolean isPeer, final int port) {
-
-    return (Integer) vm.invoke(new SerializableCallable("Start Locator In VM") {
-
-      public Object call() throws Exception {
+  private int startLocator(final VM locatorVM, final boolean isPeer, final int port) {
+    return locatorVM.invoke("startLocator", () -> {
+      assertThat(InternalLocator.hasLocator()).isFalse();
 
-        assertFalse(InternalLocator.hasLocator());
+      Properties properties = new Properties();
+      properties.setProperty(MCAST_PORT, "0");
+      properties.setProperty(LOCATORS, "");
 
-        Properties props = new Properties();
-        props.setProperty(MCAST_PORT, "0");
+      InetAddress bindAddress = InetAddress.getByName(this.hostName);
+      File logFile = this.temporaryFolder.newFile(testName.getMethodName() + "-locator-" + port + ".log");
+      Locator locator = Locator.startLocatorAndDS(port, logFile, bindAddress, properties, isPeer, true, null);
 
-        props.setProperty(LOCATORS, "");
-        props.setProperty(LOG_LEVEL, LogWriterUtils.getDUnitLogLevel());
+      assertThat(InternalLocator.hasLocator()).isTrue();
 
-        InetAddress bindAddr = null;
-        try {
-          bindAddr = InetAddress.getByName(getServerHostName(vm.getHost()));
-        } catch (UnknownHostException uhe) {
-          Assert.fail("While resolving bind address ", uhe);
-        }
-
-        Locator locator = null;
-        try {
-          File logFile = new File(getTestMethodName() + "-locator" + port + ".log");
-          locator = Locator.startLocatorAndDS(port, logFile, bindAddr, props, isPeer, true, null);
-        } catch (IOException ex) {
-          Assert.fail("While starting locator on port " + port, ex);
-        }
+      return locator.getPort();
+    });
+  }
 
-        assertTrue(InternalLocator.hasLocator());
-        return locator.getPort();
-      }
+  private void stopLocator(final VM locatorVM) {
+    locatorVM.invoke("stopLocator", () -> {
+      assertThat(InternalLocator.hasLocator()).isTrue();
+      InternalLocator.getLocator().stop();
     });
   }
 
-  /**
-   * Creates a persistent region
-   * 
-   * @param vm
-   *          reference to VM
-   */
-  protected String stopLocator(VM vm) {
+  private void verifyLocalLocatorMXBean(final VM locatorVM, final int port, final boolean isPeer) {
+    locatorVM.invoke("verifyLocalLocatorMXBean", () -> {
+      //ManagementService service = this.managementTestRule.getExistingManagementService();
+      GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
+      ManagementService service = ManagementService.getExistingManagementService(cache);
+      assertThat(service).isNotNull();
 
-    return (String) vm.invoke(new SerializableCallable("Stop Locator In VM") {
+      LocatorMXBean locatorMXBean = service.getLocalLocatorMXBean();
+      assertThat(locatorMXBean).isNotNull();
+      assertThat(locatorMXBean.getPort()).isEqualTo(port);
 
-      public Object call() throws Exception {
+      //        LogWriterUtils.getLogWriter().info("Log of Locator" + bean.viewLog());
+      //        LogWriterUtils.getLogWriter().info("BindAddress" + bean.getBindAddress());
 
-        assertTrue(InternalLocator.hasLocator());
-        InternalLocator.getLocator().stop();
-        return null;
-      }
+      assertThat(locatorMXBean.isPeerLocator()).isEqualTo(isPeer);
     });
   }
 
-  /**
-   * Creates a persistent region
-   * 
-   * @param vm
-   *          reference to VM
-   */
-  protected void locatorMBeanExist(VM vm, final int locPort,
-      final boolean isPeer) {
-
-    vm.invoke(new SerializableCallable("Locator MBean created") {
-
-      public Object call() throws Exception {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-
-        ManagementService service = ManagementService
-            .getExistingManagementService(cache);
-        assertNotNull(service);
-        LocatorMXBean bean = service.getLocalLocatorMXBean();
-        assertNotNull(bean);
-        assertEquals(locPort, bean.getPort());
-        LogWriterUtils.getLogWriter().info("Log of Locator" + bean.viewLog());
-        LogWriterUtils.getLogWriter().info("BindAddress" + bean.getBindAddress());
-        assertEquals(isPeer, bean.isPeerLocator());
-        return null;
-      }
-    });
-  }
-
-  /**
-   * Creates a persistent region
-   * 
-   * @param vm
-   *          reference to VM
-   */
-  protected void remoteLocatorMBeanExist(VM vm, final DistributedMember member) {
-
-    vm.invoke(new SerializableCallable("Locator MBean created") {
-
-      public Object call() throws Exception {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        ManagementService service = ManagementService
-            .getExistingManagementService(cache);
-        assertNotNull(service);
-        LocatorMXBean bean = MBeanUtil.getLocatorMbeanProxy(member);
-        assertNotNull(bean);
+  private void verifyRemoteLocatorMXBeanProxy(final VM managerVM, final DistributedMember locatorMember) {
+    managerVM.invoke("verifyRemoteLocatorMXBeanProxy", () -> {
+      //ManagementService service = this.managementTestRule.getExistingManagementService();
+      GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
+      ManagementService service = ManagementService.getExistingManagementService(cache);
+      assertThat(service).isNotNull();
 
-        LogWriterUtils.getLogWriter().info("Log of Locator" + bean.viewLog());
-        LogWriterUtils.getLogWriter().info("BindAddress" + bean.getBindAddress());
+      // LocatorMXBean locatorMXBean = MBeanUtil.getLocatorMbeanProxy(locatorMember); // TODO
+      LocatorMXBean locatorMXBean = awaitLockServiceMXBeanProxy(locatorMember);
+      assertThat(locatorMXBean).isNotNull();
 
-        return null;
-      }
+      //        LogWriterUtils.getLogWriter().info("Log of Locator" + bean.viewLog());
+      //        LogWriterUtils.getLogWriter().info("BindAddress" + bean.getBindAddress());
     });
   }
 
-  /**
-   * Creates a persistent region
-   * 
-   * @param vm
-   *          reference to VM
-   */
-  protected void listManagers(VM vm, final int locPort, final boolean isPeer) {
-
-    vm.invoke(new SerializableCallable("List Managers") {
-
-      public Object call() throws Exception {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
+  private void verifyListManagers(final VM locatorVM) {
+    locatorVM.invoke("verifyListManagers", () -> {
+      //ManagementService service = this.managementTestRule.getExistingManagementService();
+      GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
+      ManagementService service = ManagementService.getExistingManagementService(cache);
+      assertThat(service).isNotNull();
 
-        ManagementService service = ManagementService
-            .getExistingManagementService(cache);
-        assertNotNull(service);
-        final LocatorMXBean bean = service.getLocalLocatorMXBean();
-        assertNotNull(bean);
+      LocatorMXBean locatorMXBean = service.getLocalLocatorMXBean();
+      assertThat(locatorMXBean).isNotNull();
 
-        Wait.waitForCriterion(new WaitCriterion() {
+      await().until(() -> assertThat(locatorMXBean.listManagers()).hasSize(1));
+    });
+  }
 
-          public String description() {
-            return "Waiting for the managers List";
-          }
+  private void verifyListPotentialManagers(final VM locatorVM) {
+    locatorVM.invoke("verifyListPotentialManagers", () -> {
+      //ManagementService service = this.managementTestRule.getExistingManagementService();
+      GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
+      ManagementService service = ManagementService.getExistingManagementService(cache);
+      assertThat(service).isNotNull();
 
-          public boolean done() {
+      //LocatorMXBean locatorMXBean = service.getLocalLocatorMXBean();
+      LocatorMXBean locatorMXBean = awaitLockServiceMXBean();
+      assertThat(locatorMXBean).isNotNull();
 
-            boolean done = bean.listManagers().length == 1;
-            return done;
-          }
+      await("listPotentialManagers has size 3").until(() -> assertThat(locatorMXBean.listPotentialManagers()).hasSize(3));
+    });
+  }
 
-        }, MAX_WAIT, 500, true);
+  private ConditionFactory await() {
+    return Awaitility.await().atMost(MAX_WAIT_MILLIS, MILLISECONDS);
+  }
 
-        return null;
-      }
-    });
+  private ConditionFactory await(final String alias) {
+    return Awaitility.await(alias).atMost(MAX_WAIT_MILLIS, MILLISECONDS);
   }
 
   /**
-   * Creates a persistent region
-   * 
-   * @param vm
-   *          reference to VM
+   * Await and return a LocatorMXBean proxy for a specific member.
    */
-  protected void listWillingManagers(VM vm, final int locPort,
-      final boolean isPeer) {
-
-    vm.invoke(new SerializableCallable("List Willing Managers") {
-
-      public Object call() throws Exception {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-
-        ManagementService service = ManagementService
-            .getExistingManagementService(cache);
-        assertNotNull(service);
-        final LocatorMXBean bean = service.getLocalLocatorMXBean();
-        assertNotNull(bean);
-
-        Wait.waitForCriterion(new WaitCriterion() {
+  private LocatorMXBean awaitLockServiceMXBeanProxy(final DistributedMember member) {
+    SystemManagementService service = this.managementTestRule.getSystemManagementService();
+    ObjectName locatorMBeanName = service.getLocatorMBeanName(member);
 
-          public String description() {
-            return "Waiting for the Willing managers List";
-          }
+    await().until(() -> assertThat(service.getMBeanProxy(locatorMBeanName, LocatorMXBean.class)).isNotNull());
 
-          public boolean done() {
+    return service.getMBeanProxy(locatorMBeanName, LocatorMXBean.class);
+  }
 
-            boolean done = bean.listPotentialManagers().length == 3;
-            return done;
-          }
+  /**
+   * Await creation of local LocatorMXBean.
+   */
+  private LocatorMXBean awaitLockServiceMXBean() {
+    SystemManagementService service = this.managementTestRule.getSystemManagementService();
 
-        }, MAX_WAIT, 500, true);
+    await().until(() -> assertThat(service.getLocalLocatorMXBean()).isNotNull());
 
-        return null;
-      }
-    });
+    return service.getLocalLocatorMXBean();
   }
-  
-  /** get the host name to use for a server cache in client/server dunit
-   * testing
-   * @param host
-   * @return the host name
-   */
+
   public static String getServerHostName(Host host) {
     return System.getProperty(DistributionConfig.GEMFIRE_PREFIX + "server-bind-address") != null ?
-        System.getProperty(DistributionConfig.GEMFIRE_PREFIX + "server-bind-address")
-        : host.getHostName();
+      System.getProperty(DistributionConfig.GEMFIRE_PREFIX + "server-bind-address") : host.getHostName();
   }
-
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/management/ManagementTestBase.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/ManagementTestBase.java b/geode-core/src/test/java/org/apache/geode/management/ManagementTestBase.java
index 77514eb..b4ee5be 100644
--- a/geode-core/src/test/java/org/apache/geode/management/ManagementTestBase.java
+++ b/geode-core/src/test/java/org/apache/geode/management/ManagementTestBase.java
@@ -19,15 +19,17 @@ package org.apache.geode.management;
 import static org.apache.geode.distributed.ConfigurationProperties.*;
 import static org.junit.Assert.*;
 
+import java.lang.management.ManagementFactory;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Properties;
 import java.util.Set;
-import javax.management.MBeanServer;
+
 import javax.management.ObjectName;
 
-import org.apache.geode.LogWriter;
+import org.junit.Rule;
+
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.CacheFactory;
 import org.apache.geode.cache.Region;
@@ -37,39 +39,32 @@ import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.internal.cache.GemFireCacheImpl;
-import org.apache.geode.internal.statistics.SampleCollector;
 import org.apache.geode.management.internal.FederatingManager;
 import org.apache.geode.management.internal.LocalManager;
-import org.apache.geode.management.internal.MBeanJMXAdapter;
 import org.apache.geode.management.internal.ManagementStrings;
 import org.apache.geode.management.internal.SystemManagementService;
 import org.apache.geode.test.dunit.Assert;
 import org.apache.geode.test.dunit.AsyncInvocation;
 import org.apache.geode.test.dunit.Host;
 import org.apache.geode.test.dunit.Invoke;
+import org.apache.geode.test.dunit.LogWriterUtils;
 import org.apache.geode.test.dunit.SerializableCallable;
 import org.apache.geode.test.dunit.SerializableRunnable;
 import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.dunit.Wait;
 import org.apache.geode.test.dunit.WaitCriterion;
+import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
 import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
+import org.apache.geode.test.dunit.rules.DistributedRestoreSystemProperties;
 
 @SuppressWarnings("serial")
-public abstract class ManagementTestBase extends JUnit4DistributedTestCase {
+public abstract class ManagementTestBase extends JUnit4CacheTestCase {
 
   private static final int MAX_WAIT = 70 * 1000;
 
-  /**
-   * log writer instance
-   */
-  private static LogWriter logWriter;
-
-  private static Properties props = new Properties();
-
-  /**
-   * Distributed System
-   */
-  protected static DistributedSystem ds;
+//  protected static DistributedSystem ds;
+  protected static ManagementService managementService;
+//  protected static Cache cache;
 
   /**
    * List containing all the Managed Node VM
@@ -81,23 +76,13 @@ public abstract class ManagementTestBase extends JUnit4DistributedTestCase {
    */
   protected static VM managingNode;
 
-  /**
-   * Management Service
-   */
-  protected static ManagementService managementService;
-
   protected static VM managedNode1;
   protected static VM managedNode2;
   protected static VM managedNode3;
   protected static VM locatorVM;
 
-  private static SampleCollector sampleCollector;
-
-  protected static MBeanServer mbeanServer = MBeanJMXAdapter.mbeanServer;
-
-  private static int mcastPort;
-
-  protected static Cache cache;
+  @Rule
+  public DistributedRestoreSystemProperties restoreSystemProperties = new DistributedRestoreSystemProperties();
 
   @Override
   public final void postSetUp() throws Exception {
@@ -122,16 +107,14 @@ public abstract class ManagementTestBase extends JUnit4DistributedTestCase {
   }
 
   @Override
-  public final void preTearDown() throws Exception {
+  public final void preTearDownCacheTestCase() throws Exception {
     preTearDownManagementTestBase();
 
-    closeAllCache();
-    managementService = null;
-
-    mcastPort = 0;
-    disconnectAllFromDS();
-    props.clear();
+  }
 
+  @Override
+  public final void postTearDownCacheTestCase() throws Exception {
+    managementService = null;
     postTearDownManagementTestBase();
   }
 
@@ -141,99 +124,49 @@ public abstract class ManagementTestBase extends JUnit4DistributedTestCase {
   protected void postTearDownManagementTestBase() throws Exception {
   }
 
-  public void closeAllCache() throws Exception {
-    closeCache(managingNode);
-    closeCache(managedNode1);
-    closeCache(managedNode2);
-    closeCache(managedNode3);
-    cache = null;
-  }
-
-  /**
-   * Enable system property gemfire.disableManagement false in each VM.
-   */
-  public void enableManagement() {
-    Invoke.invokeInEveryVM(new SerializableRunnable("Enable Management") {
-      public void run() {
-        System.setProperty(InternalDistributedSystem.DISABLE_MANAGEMENT_PROPERTY, "false");
-      }
-    });
-
-  }
-
-  /**
-   * Disable system property gemfire.disableManagement true in each VM.
-   */
-  public void disableManagement() {
-    Invoke.invokeInEveryVM(new SerializableRunnable("Disable Management") {
-      public void run() {
-        System.setProperty(InternalDistributedSystem.DISABLE_MANAGEMENT_PROPERTY, "true");
-      }
-    });
-
-  }
-
   /**
    * managingNodeFirst variable tests for two different test cases where
    * Managing & Managed Node creation time lines are reversed.
    */
-  public void initManagement(boolean managingNodeFirst) throws Exception {
-
+  protected void initManagement(final boolean managingNodeFirst) throws Exception {
     if (managingNodeFirst) {
       createManagementCache(managingNode);
       startManagingNode(managingNode);
-
       for (VM vm : managedNodeList) {
         createCache(vm);
-
       }
 
     } else {
       for (VM vm : managedNodeList) {
         createCache(vm);
-
       }
       createManagementCache(managingNode);
       startManagingNode(managingNode);
     }
   }
 
-  public void createCache(VM vm1) throws Exception {
-    vm1.invoke(new SerializableRunnable("Create Cache") {
-      public void run() {
-        createCache(false);
-      }
+  protected void createCache(final VM vm1) throws Exception {
+    vm1.invoke("Create Cache", () -> {
+      createCache(false);
     });
-
   }
 
-  public void createCache(VM vm1, final Properties props) throws Exception {
-    vm1.invoke(new SerializableRunnable("Create Cache") {
-      public void run() {
-        createCache(props);
-      }
+  protected void createCache(final VM vm1, final Properties props) throws Exception {
+    vm1.invoke("Create Cache", () -> {
+      createCache(props);
     });
-
   }
 
-  public Cache createCache(Properties props) {
-    System.setProperty("dunitLogPerTest", "true");
-    props.setProperty(LOG_FILE, getTestMethodName() + "-.log");
-    ds = getSystem(props);
-    cache = CacheFactory.create(ds);
+  private Cache createCache(final Properties props) {
+    Cache cache = getCache(props);
     managementService = ManagementService.getManagementService(cache);
-    logWriter = ds.getLogWriter();
-    assertNotNull(cache);
-    assertNotNull(managementService);
-    return cache;
-  }
 
-  public Cache getCache() {
     return cache;
   }
 
-  public Cache createCache(boolean management) {
-    System.setProperty("dunitLogPerTest", "true");
+  protected Cache createCache(final boolean management) {
+
+    Properties props = new Properties();
     if (management) {
       props.setProperty(JMX_MANAGER, "true");
       props.setProperty(JMX_MANAGER_START, "false");
@@ -243,71 +176,46 @@ public abstract class ManagementTestBase extends JUnit4DistributedTestCase {
     props.setProperty(ENABLE_TIME_STATISTICS, "true");
     props.setProperty(STATISTIC_SAMPLING_ENABLED, "true");
     props.setProperty(LOG_FILE, getTestMethodName() + "-.log");
-    ds = getSystem(props);
-    cache = CacheFactory.create(ds);
+
+    Cache cache = getCache(props);
     managementService = ManagementService.getManagementService(cache);
-    logWriter = ds.getLogWriter();
-    assertNotNull(cache);
-    assertNotNull(managementService);
+
     return cache;
   }
 
-  public void createManagementCache(VM vm1) throws Exception {
-    vm1.invoke(new SerializableRunnable("Create Management Cache") {
-      public void run() {
-        createCache(true);
-      }
+  protected void createManagementCache(final VM vm1) throws Exception {
+    vm1.invoke("Create Management Cache", () -> {
+      createCache(true);
     });
   }
 
-  public void closeCache(VM vm1) throws Exception {
-    vm1.invoke(new SerializableRunnable("Close Cache") {
-      public void run() {
-        GemFireCacheImpl existingInstance = GemFireCacheImpl.getInstance();
-        if (existingInstance != null) {
-          existingInstance.close();
-        }
-        InternalDistributedSystem ds = InternalDistributedSystem
-            .getConnectedInstance();
-        if (ds != null) {
-          ds.disconnect();
-        }
+  protected void closeCache(final VM vm1) throws Exception {
+    vm1.invoke("Close Cache", () -> {
+      GemFireCacheImpl existingInstance = GemFireCacheImpl.getInstance();
+      if (existingInstance != null) {
+        existingInstance.close();
+      }
+      InternalDistributedSystem ds = InternalDistributedSystem.getConnectedInstance();
+      if (ds != null) {
+        ds.disconnect();
       }
     });
-
-  }
-
-  public void closeCache() throws Exception {
-    GemFireCacheImpl existingInstance = GemFireCacheImpl.getInstance();
-    if (existingInstance != null) {
-      existingInstance.close();
-    }
-    InternalDistributedSystem ds = InternalDistributedSystem
-        .getConnectedInstance();
-    if (ds != null) {
-      ds.disconnect();
-    }
   }
 
-  public String getMemberId(final VM vm) {
-    SerializableCallable getMember = new SerializableCallable("getMemberId") {
-      public Object call() throws Exception {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        return cache.getDistributedSystem().getDistributedMember().getId();
-      }
-    };
-    return (String) vm.invoke(getMember);
+  protected String getMemberId(final VM vm) {
+    return vm.invoke("getMemberId", () -> {
+      GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
+      return cache.getDistributedSystem().getDistributedMember().getId();
+    });
   }
 
-  protected static void waitForProxy(final ObjectName objectName,
-      final Class interfaceClass) {
-
+  protected static void waitForProxy(final ObjectName objectName, final Class interfaceClass) {
     Wait.waitForCriterion(new WaitCriterion() {
+      @Override
       public String description() {
-        return "Waiting for the proxy of " + objectName.getCanonicalName()
-            + " to get propagated to Manager";
+        return "Waiting for the proxy of " + objectName.getCanonicalName() + " to get propagated to Manager";
       }
-
+      @Override
       public boolean done() {
         SystemManagementService service = (SystemManagementService) managementService;
         if (service.getMBeanProxy(objectName, interfaceClass) != null) {
@@ -316,218 +224,54 @@ public abstract class ManagementTestBase extends JUnit4DistributedTestCase {
           return false;
         }
       }
-
     }, MAX_WAIT, 500, true);
   }
 
-  protected void runManagementTaskAdhoc() {
-    SystemManagementService service = (SystemManagementService) managementService;
-    service.getLocalManager().runManagementTaskAdhoc();
-  }
-
   /**
    * Marks a VM as Managing
-   *
-   * @throws Exception
    */
-  public void startManagingNode(VM vm1) throws Exception {
-    vm1.invoke(new SerializableRunnable("Start Being Managing Node") {
-      public void run() {
-        startBeingManagingNode();
-      }
-    });
-
-  }
-
-  public void startBeingManagingNode() {
-    Cache existingCache = GemFireCacheImpl.getInstance();
-    if (existingCache != null && !existingCache.isClosed()) {
+  protected void startManagingNode(final VM vm1) {
+    vm1.invoke("Start Being Managing Node", () -> {
+      Cache existingCache = GemFireCacheImpl.getInstance();
+      //    if (existingCache != null && !existingCache.isClosed()) {
       managementService = ManagementService.getManagementService(existingCache);
       SystemManagementService service = (SystemManagementService) managementService;
       service.createManager();
       service.startManager();
-    }
-  }
-
-  /**
-   * Marks a VM as Managing
-   *
-   * @throws Exception
-   */
-  public void startManagingNodeAsync(VM vm1) throws Exception {
-    vm1.invokeAsync(new SerializableRunnable("Start Being Managing Node") {
-
-      public void run() {
-        Cache existingCache = GemFireCacheImpl.getInstance();
-        if (existingCache != null && !existingCache.isClosed()) {
-          managementService = ManagementService
-              .getManagementService(existingCache);
-          managementService.startManager();
-        }
-
-      }
+      //    }
     });
-
   }
 
   /**
    * Stops a VM as a Managing node
-   *
-   * @throws Exception
-   */
-  public void stopManagingNode(VM vm1) throws Exception {
-    vm1.invoke(new SerializableRunnable("Stop Being Managing Node") {
-      public void run() {
-        Cache existingCache = GemFireCacheImpl.getInstance();
-        if (existingCache != null && !existingCache.isClosed()) {
-          if (managementService.isManager()) {
-            managementService.stopManager();
-          }
-
-        }
-
-      }
-    });
-
-  }
-
-  /**
-   * Check various resources clean up Once a VM stops being managable it should
-   * remove all the artifacts of management namely a) Notification region b)
-   * Monitoring Region c) Management task should stop
-   */
-  public void checkManagedNodeCleanup(VM vm) throws Exception {
-    vm.invoke(new SerializableRunnable("Managing Node Clean up") {
-
-      public void run() {
-        Cache existingCache = GemFireCacheImpl.getInstance();
-        if (existingCache != null) {
-          // Cache is closed
-          assertEquals(true, existingCache.isClosed());
-          // ManagementService should throw exception
-          LocalManager localManager = ((SystemManagementService) managementService)
-              .getLocalManager();
-          // Check Monitoring region destroyed
-          Region monitoringRegion = localManager.getManagementResourceRepo()
-              .getLocalMonitoringRegion();
-          assertEquals(null, monitoringRegion);
-          // check Notification region is destroyed
-          Region notifRegion = localManager.getManagementResourceRepo()
-              .getLocalNotificationRegion();
-          assertEquals(null, notifRegion);
-          // check ManagementTask is stopped
-          assertEquals(true, localManager.getFederationSheduler().isShutdown());
-
-        }
-
-      }
-    });
-
-  }
-
-  /**
-   * Check various resources clean up Once a VM stops being Managing.It should
-   * remove all the artifacts of management namely a) proxies b) Monitoring
-   * Region c) Management task should stop
    */
-
-  public void checkProxyCleanup(VM vm) throws Exception {
-
-    vm.invoke(new SerializableRunnable("Managing Node Clean up") {
-
-      public void run() {
-
-        try {
-          GemFireCacheImpl existingCache = GemFireCacheImpl.getInstance();
-          if (existingCache == null) {
-            return;
-          }
-
-          assertEquals(false, existingCache.isClosed());
-          // ManagementService should not be closed
-
-          Set<DistributedMember> otherMemberSet = existingCache
-              .getDistributionManager().getOtherDistributionManagerIds();
-
-          Iterator<DistributedMember> it = otherMemberSet.iterator();
-          FederatingManager federatingManager = ((SystemManagementService) managementService)
-              .getFederatingManager();
-
-          // check Proxy factory. There should not be any proxies left
-          DistributedMember member;
-          while (it.hasNext()) {
-            member = it.next();
-
-            assertNull(federatingManager.getProxyFactory().findAllProxies(
-                member));
-          }
-
-        } catch (ManagementException e) {
-          Assert.fail("failed with ManagementException", e);
+  protected void stopManagingNode(final VM vm1) {
+    vm1.invoke("Stop Being Managing Node", () -> {
+      Cache existingCache = GemFireCacheImpl.getInstance();
+      if (existingCache != null && !existingCache.isClosed()) {
+        if (managementService.isManager()) {
+          managementService.stopManager();
         }
       }
     });
-
-  }
-
-  /**
-   * All the expected exceptions are checked here
-   *
-   * @param e
-   * @return is failed
-   */
-  public boolean checkManagementExceptions(ManagementException e) {
-
-    if (e.getMessage()
-        .equals(ManagementStrings.Management_Service_CLOSED_CACHE)
-        || e.getMessage().equals(
-        ManagementStrings.Management_Service_MANAGEMENT_SERVICE_IS_CLOSED
-            .toLocalizedString())
-        || e
-        .getMessage()
-        .equals(
-            ManagementStrings.Management_Service_MANAGEMENT_SERVICE_NOT_STARTED_YET
-                .toLocalizedString())
-        || e.getMessage().equals(
-        ManagementStrings.Management_Service_NOT_A_GEMFIRE_DOMAIN_MBEAN
-            .toLocalizedString())
-        || e.getMessage().equals(
-        ManagementStrings.Management_Service_NOT_A_MANAGING_NODE_YET
-            .toLocalizedString())
-        || e
-        .getMessage()
-        .equals(
-            ManagementStrings.Management_Service_OPERATION_NOT_ALLOWED_FOR_CLIENT_CACHE
-                .toLocalizedString())
-        || e.getMessage().equals(
-        ManagementStrings.Management_Service_PROXY_NOT_AVAILABLE
-            .toLocalizedString())) {
-
-      return false;
-    }
-    return true;
   }
 
-  public static List<VM> getManagedNodeList() {
+  protected static List<VM> getManagedNodeList() {
     return managedNodeList;
   }
 
-  public static VM getManagingNode() {
+  protected static VM getManagingNode() {
     return managingNode;
   }
 
-  public static ManagementService getManagementService() {
+  protected static ManagementService getManagementService() {
     return managementService;
   }
 
   /**
    * Creates a Distributed region
-   *
-   * @param vm         reference to VM
-   * @param regionName name of the distributed region
    */
-  protected void createDistributedRegion(VM vm, final String regionName)
-      throws Exception {
+  protected void createDistributedRegion(final VM vm, final String regionName) throws InterruptedException {
     AsyncInvocation future = createDistributedRegionAsync(vm, regionName);
     future.join(MAX_WAIT);
     if (future.isAlive()) {
@@ -540,181 +284,99 @@ public abstract class ManagementTestBase extends JUnit4DistributedTestCase {
 
   /**
    * Creates a Local region
-   *
-   * @param vm              reference to VM
-   * @param localRegionName name of the local region
    */
-  protected void createLocalRegion(VM vm, final String localRegionName)
-      throws Exception {
-    SerializableRunnable createLocalRegion = new SerializableRunnable(
-        "Create Local region") {
-      public void run() {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        SystemManagementService service = (SystemManagementService) getManagementService();
-        RegionFactory rf = cache
-            .createRegionFactory(RegionShortcut.LOCAL);
-
-        org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("Creating Local Region");
-        rf.create(localRegionName);
-
-      }
-    };
-    vm.invoke(createLocalRegion);
+  protected void createLocalRegion(final VM vm, final String localRegionName) throws Exception {
+    vm.invoke("Create Local region", () -> {
+      GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
+      SystemManagementService service = (SystemManagementService) getManagementService();
+      RegionFactory rf = cache.createRegionFactory(RegionShortcut.LOCAL);
+
+      LogWriterUtils.getLogWriter().info("Creating Local Region");
+      rf.create(localRegionName);
+    });
   }
 
   /**
    * Creates a Sub region
-   *
-   * @param vm reference to VM
-   */
-  protected void createSubRegion(VM vm, final String parentRegionPath, final String subregionName)
-      throws Exception {
-    SerializableRunnable createSubRegion = new SerializableRunnable(
-        "Create Sub region") {
-      public void run() {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        SystemManagementService service = (SystemManagementService) getManagementService();
-        Region region = cache.getRegion(parentRegionPath);
-
-        org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("Creating Sub Region");
-        region.createSubregion(subregionName, region.getAttributes());
-
-      }
-    };
-    vm.invoke(createSubRegion);
-  }
-
-  /**
-   * Puts in distributed region
-   *
-   * @param vm
    */
-  protected void putInDistributedRegion(final VM vm, final String key,
-      final String value, final String regionPath) {
-    SerializableRunnable put = new SerializableRunnable(
-        "Put In Distributed Region") {
-      public void run() {
-
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        Region region = cache.getRegion(regionPath);
-        region.put(key, value);
-
-      }
-    };
-    vm.invoke(put);
+  protected void createSubRegion(final VM vm, final String parentRegionPath, final String subregionName) throws Exception {
+    vm.invoke("Create Sub region", () -> {
+      GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
+      SystemManagementService service = (SystemManagementService) getManagementService();
+      Region region = cache.getRegion(parentRegionPath);
+
+      LogWriterUtils.getLogWriter().info("Creating Sub Region");
+      region.createSubregion(subregionName, region.getAttributes());
+    });
   }
 
   /**
    * Creates a Distributed Region
-   *
-   * @param vm
    */
-  protected AsyncInvocation createDistributedRegionAsync(final VM vm,
-      final String regionName) {
-    SerializableRunnable createRegion = new SerializableRunnable(
-        "Create Distributed region") {
-      public void run() {
-
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        SystemManagementService service = (SystemManagementService) getManagementService();
-
-        RegionFactory rf = cache.createRegionFactory(RegionShortcut.REPLICATE);
-        org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("Creating Dist Region");
-        rf.create(regionName);
-
-      }
-    };
-    return vm.invokeAsync(createRegion);
+  private AsyncInvocation createDistributedRegionAsync(final VM vm, final String regionName) {
+    return vm.invokeAsync("Create Distributed region", () -> {
+      GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
+      SystemManagementService service = (SystemManagementService) getManagementService();
+
+      RegionFactory rf = cache.createRegionFactory(RegionShortcut.REPLICATE);
+      LogWriterUtils.getLogWriter().info("Creating Dist Region");
+      rf.create(regionName);
+    });
   }
 
   /**
    * Creates a partition Region
-   *
-   * @param vm
-   */
-  protected void createPartitionRegion(final VM vm,
-      final String partitionRegionName) {
-    SerializableRunnable createParRegion = new SerializableRunnable(
-        "Create Partitioned region") {
-      public void run() {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        SystemManagementService service = (SystemManagementService) getManagementService();
-        RegionFactory rf = cache
-            .createRegionFactory(RegionShortcut.PARTITION_REDUNDANT);
-        org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("Creating Par Region");
-        rf.create(partitionRegionName);
-
-      }
-    };
-    vm.invoke(createParRegion);
-  }
-
-  /**
-   * closes a Distributed Region
-   *
-   * @param vm
    */
-  protected void closeRegion(final VM vm, final String regionPath) {
-    SerializableRunnable closeRegion = new SerializableRunnable(
-        "Close Distributed region") {
-      public void run() {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-
-        org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("Closing Dist Region");
-        Region region = cache.getRegion(regionPath);
-        region.close();
-
-      }
-    };
-    vm.invoke(closeRegion);
+  protected void createPartitionRegion(final VM vm, final String partitionRegionName) {
+    vm.invoke("Create Partitioned region", () -> {
+      GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
+      SystemManagementService service = (SystemManagementService) getManagementService();
+      RegionFactory rf = cache.createRegionFactory(RegionShortcut.PARTITION_REDUNDANT);
+      LogWriterUtils.getLogWriter().info("Creating Par Region");
+      rf.create(partitionRegionName);
+    });
   }
 
-  public void waitForAllMembers(final int expectedCount) {
+  protected void waitForAllMembers(final int expectedCount) {
     ManagementService service = getManagementService();
     final DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
 
     assertNotNull(service.getDistributedSystemMXBean());
 
     Wait.waitForCriterion(new WaitCriterion() {
+      @Override
       public String description() {
         return "Waiting All members to intimate DistributedSystemMBean";
       }
-
+      @Override
       public boolean done() {
         if (bean.listMemberObjectNames() != null) {
-
-          org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(
-              "Member Length " + bean.listMemberObjectNames().length);
-
+          LogWriterUtils.getLogWriter().info("Member Length " + bean.listMemberObjectNames().length);
         }
-
         if (bean.listMemberObjectNames().length >= expectedCount) {
           return true;
         } else {
           return false;
         }
-
       }
-
     }, MAX_WAIT, 500, true);
 
     assertNotNull(bean.getManagerObjectName());
   }
 
-  public static void waitForRefresh(final int expectedRefreshCount,
-      final ObjectName objectName) {
+  protected static void waitForRefresh(final int expectedRefreshCount, final ObjectName objectName) {
     final ManagementService service = getManagementService();
 
-    final long currentTime = System.currentTimeMillis();
-
     Wait.waitForCriterion(new WaitCriterion() {
-      int actualRefreshCount = 0;
-      long lastRefreshTime = service.getLastUpdateTime(objectName);
+      private int actualRefreshCount = 0;
+      private long lastRefreshTime = service.getLastUpdateTime(objectName);
 
+      @Override
       public String description() {
         return "Waiting For Proxy Refresh Count = " + expectedRefreshCount;
       }
 
+      @Override
       public boolean done() {
         long newRefreshTime = service.getLastUpdateTime(objectName);
         if (newRefreshTime > lastRefreshTime) {
@@ -727,12 +389,10 @@ public abstract class ManagementTestBase extends JUnit4DistributedTestCase {
         }
         return false;
       }
-
     }, MAX_WAIT, 500, true);
-
   }
 
-  public DistributedMember getMember(final VM vm) {
+  protected DistributedMember getMember(final VM vm) {
     SerializableCallable getMember = new SerializableCallable("Get Member") {
       public Object call() {
         GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
@@ -742,4 +402,13 @@ public abstract class ManagementTestBase extends JUnit4DistributedTestCase {
     };
     return (DistributedMember) vm.invoke(getMember);
   }
+
+  protected boolean mbeanExists(final ObjectName objectName) {
+    return ManagementFactory.getPlatformMBeanServer().isRegistered(objectName);
+  }
+
+  protected <T> T getMBeanProxy(final ObjectName objectName, Class<T> interfaceClass) {
+    SystemManagementService service = (SystemManagementService)ManagementService.getManagementService(getCache());
+    return service.getMBeanProxy(objectName, interfaceClass);
+  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/management/ManagementTestRule.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/ManagementTestRule.java b/geode-core/src/test/java/org/apache/geode/management/ManagementTestRule.java
new file mode 100644
index 0000000..630c95e
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/management/ManagementTestRule.java
@@ -0,0 +1,430 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.management;
+
+import static org.apache.geode.distributed.ConfigurationProperties.*;
+import static org.apache.geode.test.dunit.Host.*;
+import static org.assertj.core.api.Assertions.*;
+
+import java.io.Serializable;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Field;
+import java.util.HashSet;
+import java.util.Properties;
+import java.util.Set;
+
+import org.junit.rules.MethodRule;
+import org.junit.runners.model.FrameworkMethod;
+import org.junit.runners.model.Statement;
+
+import org.apache.geode.cache.Cache;
+import org.apache.geode.distributed.DistributedMember;
+import org.apache.geode.distributed.DistributedSystemDisconnectedException;
+import org.apache.geode.distributed.internal.DM;
+import org.apache.geode.distributed.internal.DistributionManager;
+import org.apache.geode.internal.cache.GemFireCacheImpl;
+import org.apache.geode.management.internal.SystemManagementService;
+import org.apache.geode.test.dunit.Invoke;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
+import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
+import org.apache.geode.test.dunit.standalone.DUnitLauncher;
+
+/**
+ * Overriding MethodRule is the only way to get {@code Object target}
+ */
+@SuppressWarnings("unused")
+public class ManagementTestRule implements MethodRule, Serializable {
+
+  public static Builder builder() {
+    return new Builder();
+  }
+
+  private final int managersCount;
+  private final int membersCount;
+  private final boolean start;
+  private final boolean managersFirst;
+  private final boolean createManagers;
+  private final boolean createMembers;
+
+  private JUnit4CacheTestCase helper;
+
+  private VM[] managers;
+  private VM[] members;
+
+  protected ManagementTestRule(final Builder builder) {
+    this.helper = new JUnit4CacheTestCase() {
+    };
+    this.managersCount = builder.managersCount;
+    this.membersCount = builder.membersCount;
+    this.start = builder.start;
+    this.managersFirst = builder.managersFirst;
+    this.createManagers = builder.createManagers;
+    this.createMembers = builder.createMembers;
+  }
+
+  public DistributedMember getDistributedMember() {
+    return getCache().getDistributedSystem().getDistributedMember();
+  }
+
+  public DistributedMember getDistributedMember(final VM vm) {
+    return vm.invoke("getDistributedMember", () -> getDistributedMember());
+  }
+
+  public void createManagers() {
+    for (VM manager : this.managers) {
+      manager.invoke(() -> createManager(true));
+    }
+  }
+
+  public void createMembers() {
+    for (VM member : this.members) {
+      member.invoke(() -> createMember());
+    }
+  }
+
+  public void createManager() {
+    createManager(true);
+  }
+
+  public void createManager(final Properties properties) {
+    createManager(properties, true);
+  }
+
+  public void createManager(final boolean start) {
+    createManager(new Properties(), start);
+  }
+
+  public void createManager(final Properties properties, final boolean start) {
+    setPropertyIfNotSet(properties, JMX_MANAGER, "true");
+    setPropertyIfNotSet(properties, JMX_MANAGER_START, "false");
+    setPropertyIfNotSet(properties, JMX_MANAGER_PORT, "0");
+    setPropertyIfNotSet(properties, HTTP_SERVICE_PORT, "0");
+    setPropertyIfNotSet(properties, ENABLE_TIME_STATISTICS, "true");
+    setPropertyIfNotSet(properties, STATISTIC_SAMPLING_ENABLED, "true");
+
+    this.helper.getCache(properties);
+
+    if (start) {
+      startManager();
+    }
+  }
+
+  public void createManager(final VM managerVM) {
+    managerVM.invoke("createManager", () -> createManager());
+  }
+
+  public void createManager(final VM managerVM, final boolean start) {
+    managerVM.invoke("createManager", () -> createManager(start));
+  }
+
+  public void createManager(final VM managerVM, final Properties properties) {
+    managerVM.invoke("createManager", () -> createManager(properties, true));
+  }
+
+  public void createManager(final VM managerVM, final Properties properties, final boolean start) {
+    managerVM.invoke("createManager", () -> createManager(properties, start));
+  }
+
+  public void createMember() {
+    createMember(new Properties());
+  }
+
+  public void createMember(final Properties properties) {
+    setPropertyIfNotSet(properties, JMX_MANAGER, "false");
+    setPropertyIfNotSet(properties, ENABLE_TIME_STATISTICS, "true");
+    setPropertyIfNotSet(properties, STATISTIC_SAMPLING_ENABLED, "true");
+
+    System.out.println("KIRK: creating " + properties.getProperty(NAME));
+    this.helper.getCache(properties);
+  }
+
+  public void createMember(final VM memberVM) {
+    Properties properties = new Properties();
+    properties.setProperty(NAME, "memberVM-" + memberVM.getPid());
+    memberVM.invoke("createMember", () -> createMember(properties));
+  }
+
+  public void createMember(final VM memberVM, final Properties properties) throws Exception {
+    memberVM.invoke("createMember", () -> createMember(properties));
+  }
+
+  public Cache getCache() {
+//    Cache cache = GemFireCacheImpl.getInstance();
+//    if (cache != null && !cache.isClosed()) {
+//      return cache;
+//    }
+    return this.helper.getCache();
+  }
+
+  public boolean hasCache() {
+//    Cache cache = GemFireCacheImpl.getInstance();
+//    if (cache != null && !cache.isClosed()) {
+//      return true;
+//    }
+    return this.helper.hasCache();
+  }
+
+  public Cache basicGetCache() {
+//    Cache cache = GemFireCacheImpl.getInstance();
+//    if (cache != null && !cache.isClosed()) {
+//      return cache;
+//    }
+    return this.helper.basicGetCache();
+  }
+
+  public ManagementService getManagementService() {
+    assertThat(hasCache()).isTrue();
+    return ManagementService.getManagementService(basicGetCache());
+  }
+
+  public SystemManagementService getSystemManagementService() {
+    assertThat(hasCache()).isTrue();
+    return (SystemManagementService) ManagementService.getManagementService(basicGetCache());
+  }
+
+  public ManagementService getExistingManagementService() {
+    assertThat(hasCache()).isTrue();
+    return ManagementService.getExistingManagementService(basicGetCache());
+  }
+
+  public void startManager() {
+    SystemManagementService service = getSystemManagementService();
+    service.createManager();
+    service.startManager();
+  }
+
+  public void startManager(final VM managerVM) {
+    managerVM.invoke("startManager", () -> startManager());
+  }
+
+  public void stopManager() {
+    if (getManagementService().isManager()) {
+      getManagementService().stopManager();
+    }
+  }
+
+  public void stopManager(final VM managerVM) {
+    managerVM.invoke("stopManager", () -> stopManager());
+  }
+
+  public Set<DistributedMember> getOtherNormalMembers() {
+    Set<DistributedMember> allMembers = new HashSet<>(getAllNormalMembers());
+    allMembers.remove(getDistributedMember());
+    return allMembers;
+  }
+
+  public Set<DistributedMember> getAllNormalMembers() {
+    return getDistributionManager().getNormalDistributionManagerIds(); // excludes LOCATOR_DM_TYPE
+  }
+
+  private DM getDistributionManager() {
+    return ((GemFireCacheImpl)getCache()).getDistributionManager();
+  }
+
+  public void disconnectAllFromDS() {
+    stopManagerQuietly();
+    Invoke.invokeInEveryVM("stopManager", () -> stopManagerQuietly());
+    JUnit4DistributedTestCase.disconnectFromDS();
+    Invoke.invokeInEveryVM("disconnectFromDS", () -> JUnit4DistributedTestCase.disconnectFromDS());
+  }
+
+  private void setPropertyIfNotSet(final Properties properties, final String key, final String value) {
+    if (!properties.containsKey(key)) {
+      properties.setProperty(key, value);
+    }
+  }
+
+  private void stopManagerQuietly() {
+    try {
+      if (hasCache() && !basicGetCache().isClosed()) {
+        stopManager();
+      }
+    } catch (DistributedSystemDisconnectedException | NullPointerException ignore) {
+    }
+  }
+
+  @Override
+  public Statement apply(final Statement base, final FrameworkMethod method, final Object target) {
+    return new Statement() {
+      @Override
+      public void evaluate() throws Throwable {
+        setUp(target);
+        try {
+          base.evaluate();
+        } finally {
+          tearDown();
+        }
+      }
+    };
+  }
+
+  private void setUp(final Object target) throws Exception {
+    DUnitLauncher.launchIfNeeded();
+    JUnit4DistributedTestCase.disconnectAllFromDS();
+
+    int whichVM = 0;
+
+    this.managers = new VM[this.managersCount];
+    for (int i = 0; i < this.managersCount; i++) {
+      this.managers[i] = getHost(0).getVM(whichVM);
+      whichVM++;
+    }
+
+    this.members = new VM[this.membersCount];
+    for (int i = 0; i < this.membersCount; i++) {
+      this.members[i] = getHost(0).getVM(whichVM);
+      whichVM++;
+    }
+
+    if (this.start) {
+      start();
+    }
+
+    processAnnotations(target);
+  }
+
+  private void start() {
+    if (this.createManagers && this.managersFirst) {
+      createManagers();
+    }
+    if (this.createMembers) {
+      createMembers();
+    }
+    if (this.createManagers && !this.managersFirst) {
+      createManagers();
+    }
+  }
+
+  private void tearDown() throws Exception {
+    JUnit4DistributedTestCase.disconnectAllFromDS();
+  }
+
+  private void processAnnotations(final Object target) {
+    try {
+      Class<?> clazz = target.getClass();
+
+      Field[] fields = clazz.getDeclaredFields();
+      for (Field field : fields) {
+        boolean alreadyAssigned = false;
+        for (Annotation annotation : field.getAnnotations()) {
+          if (annotation.annotationType().equals(Manager.class)) {
+            // annotated with @Manager
+            throwIfAlreadyAssigned(field, alreadyAssigned);
+            assignManagerField(target, field);
+            alreadyAssigned = true;
+          }
+          if (annotation.annotationType().equals(Member.class)) {
+            // annotated with @Member
+            throwIfAlreadyAssigned(field, alreadyAssigned);
+            assignMemberField(target, field);
+            alreadyAssigned = true;
+          }
+        }
+      }
+    } catch (IllegalAccessException e) {
+      throw new Error(e);
+    }
+  }
+
+  private void throwIfAlreadyAssigned(final Field field, final boolean alreadyAssigned) {
+    if (alreadyAssigned) {
+      throw new IllegalStateException("Field " + field.getName() + " is already annotated with " + field.getAnnotations());
+    }
+  }
+
+  private void assignManagerField(final Object target, final Field field) throws IllegalAccessException {
+    throwIfNotSameType(field, VM.class);
+
+    field.setAccessible(true);
+    if (field.getType().isArray()) {
+      field.set(target, this.managers);
+    } else {
+      field.set(target, this.managers[0]);
+    }
+  }
+
+  private void assignMemberField(final Object target, final Field field) throws IllegalAccessException {
+    throwIfNotSameType(field, VM.class);
+
+    field.setAccessible(true);
+    if (field.getType().isArray()) {
+      field.set(target, this.members);
+    } else {
+      field.set(target, this.members[0]);
+    }
+  }
+
+  private void throwIfNotSameType(final Field field, final Class clazz) {
+    if (!field.getType().equals(clazz) && // non-array
+        !field.getType().getComponentType().equals(clazz)) { // array
+      throw new IllegalArgumentException("Field " + field.getName() + " is not same type as " + clazz.getName());
+    }
+  }
+
+  public static class Builder {
+
+    private boolean start = false;
+
+    private boolean createManagers = true;
+
+    private boolean createMembers = true;
+
+    private int managersCount = 1;
+
+    private int membersCount = 3;
+
+    private boolean managersFirst = true;
+
+    protected Builder() {
+    }
+
+    public Builder createManagers(final boolean value) {
+      this.createManagers = value;
+      return this;
+    }
+
+    public Builder createMembers(final boolean value) {
+      this.createMembers = value;
+      return this;
+    }
+
+    public Builder withManagers(final int count) {
+      this.managersCount = count;
+      return this;
+    }
+
+    public Builder withMembers(final int count) {
+      this.membersCount = count;
+      return this;
+    }
+
+    public Builder managersFirst(final boolean value) {
+      this.managersFirst = value;
+      return this;
+    }
+
+    public Builder start(final boolean value) {
+      this.start = value;
+      return this;
+    }
+
+    public ManagementTestRule build() {
+      return new ManagementTestRule(this);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/management/Manager.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/Manager.java b/geode-core/src/test/java/org/apache/geode/management/Manager.java
new file mode 100644
index 0000000..0de158e
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/management/Manager.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.management;
+
+import static java.lang.annotation.ElementType.FIELD;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+@Target(FIELD)
+@Retention(RUNTIME)
+@Documented
+public @interface Manager {
+  String name() default "";
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/management/Member.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/Member.java b/geode-core/src/test/java/org/apache/geode/management/Member.java
new file mode 100644
index 0000000..e90d278
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/management/Member.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.management;
+
+import static java.lang.annotation.ElementType.FIELD;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+@Target(FIELD)
+@Retention(RUNTIME)
+@Documented
+public @interface Member {
+  String name() default "";
+}
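
Taken together, the new rule and the @Manager/@Member annotations would be used in a
DUnit test roughly like the sketch below (hypothetical test class, not part of this
commit; with start(true) the rule creates the manager and member caches and injects
the annotated VM fields before each test method):

package org.apache.geode.management;

import static org.assertj.core.api.Assertions.*;

import java.io.Serializable;

import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import org.apache.geode.distributed.DistributedMember;
import org.apache.geode.test.dunit.VM;
import org.apache.geode.test.junit.categories.DistributedTest;

@Category(DistributedTest.class)
@SuppressWarnings("serial")
public class ExampleManagementDUnitTest implements Serializable {

  @Rule
  public ManagementTestRule managementTestRule = ManagementTestRule.builder()
      .withManagers(1)
      .withMembers(3)
      .start(true)                // create managers and members before each test
      .build();

  @Manager
  private VM managerVM;           // injected by the rule

  @Member
  private VM[] memberVMs;         // injected by the rule (a single VM field also works)

  @Test
  public void everyMemberJoinsTheDistributedSystem() {
    for (VM memberVM : this.memberVMs) {
      DistributedMember member = this.managementTestRule.getDistributedMember(memberVM);
      assertThat(member).isNotNull();
    }
  }
}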


[03/50] [abbrv] incubator-geode git commit: GEODE-1981: Wrapping user ResultCollector in synchronized wrapper

Posted by kl...@apache.org.
GEODE-1981: Wrapping user ResultCollector in synchronized wrapper

When executing a function from a client, results can be added to the
result collector from multiple threads. Our docs claim the user should
not have to synchronize their result collector. One code path was already
synchronizing on the collector when adding results; however, if the
function returned an exception, we were not synchronizing.

This change adds a SynchronizedResultCollector and wraps the user's
collector in it, so there is no unsynchronized access to the result
collector.
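
For context, a minimal sketch of the client-side usage this change protects
(hypothetical collector class, region variable, and function id; not part of this
commit). The collector below keeps no locking of its own, because the client-side
executors now wrap the collector passed to withCollector() in
SynchronizedResultCollector before results or exceptions are added:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;

import org.apache.geode.cache.execute.FunctionException;
import org.apache.geode.cache.execute.ResultCollector;
import org.apache.geode.distributed.DistributedMember;

// User-supplied collector with no synchronization of its own.
public class ListResultCollector implements ResultCollector<Object, List<Object>> {

  private final List<Object> results = new ArrayList<>();

  @Override
  public List<Object> getResult() throws FunctionException {
    return results;
  }

  @Override
  public List<Object> getResult(final long timeout, final TimeUnit unit)
      throws FunctionException, InterruptedException {
    return results;
  }

  @Override
  public void addResult(final DistributedMember memberID, final Object result) {
    results.add(result); // invoked from multiple threads, but always under the wrapper's lock
  }

  @Override
  public void endResults() {
  }

  @Override
  public void clearResults() {
    results.clear();
  }
}

// Usage from client code (region and "myFunction" are placeholders):
//   ResultCollector<?, ?> rc = FunctionService.onRegion(region)
//       .withCollector(new ListResultCollector())
//       .execute("myFunction");
//   List<Object> allResults = (List<Object>) rc.getResult();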


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/f2c3ca48
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/f2c3ca48
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/f2c3ca48

Branch: refs/heads/feature/GEODE-1930
Commit: f2c3ca489cc0826a96c30c4ffa9464cab8402b94
Parents: c2ddc96
Author: Dan Smith <up...@apache.org>
Authored: Wed Oct 12 13:54:28 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Thu Oct 13 11:13:18 2016 -0700

----------------------------------------------------------------------
 .../client/internal/ExecuteFunctionOp.java      |  4 +-
 .../ExecuteRegionFunctionSingleHopOp.java       |  2 -
 .../cache/execute/ServerFunctionExecutor.java   |  5 +-
 .../execute/ServerRegionFunctionExecutor.java   |  3 +-
 .../util/SynchronizedResultCollector.java       | 57 ++++++++++++++++++++
 5 files changed, 63 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f2c3ca48/geode-core/src/main/java/org/apache/geode/cache/client/internal/ExecuteFunctionOp.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/cache/client/internal/ExecuteFunctionOp.java b/geode-core/src/main/java/org/apache/geode/cache/client/internal/ExecuteFunctionOp.java
index 6597b68..55b0fb0 100755
--- a/geode-core/src/main/java/org/apache/geode/cache/client/internal/ExecuteFunctionOp.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/client/internal/ExecuteFunctionOp.java
@@ -569,9 +569,7 @@ public class ExecuteFunctionOp {
               else {
                 DistributedMember memberID = (DistributedMember)((ArrayList)resultResponse)
                     .get(1);
-                synchronized (resultCollector) {
-                  resultCollector.addResult(memberID, result);                    
-                }
+                resultCollector.addResult(memberID, result);
                 FunctionStats.getFunctionStats(this.functionId)
                     .incResultsReceived();
               }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f2c3ca48/geode-core/src/main/java/org/apache/geode/cache/client/internal/ExecuteRegionFunctionSingleHopOp.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/cache/client/internal/ExecuteRegionFunctionSingleHopOp.java b/geode-core/src/main/java/org/apache/geode/cache/client/internal/ExecuteRegionFunctionSingleHopOp.java
index 51ea8e4..f94c598 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/client/internal/ExecuteRegionFunctionSingleHopOp.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/client/internal/ExecuteRegionFunctionSingleHopOp.java
@@ -396,10 +396,8 @@ public class ExecuteRegionFunctionSingleHopOp {
               else {
                 DistributedMember memberID = (DistributedMember)((ArrayList)resultResponse)
                     .get(1);
-                synchronized (this.resultCollector) {
                   this.resultCollector
                       .addResult(memberID, result);
-                }
                 FunctionStats.getFunctionStats(this.functionId,
                     this.executor.getRegion().getSystem()).incResultsReceived();
               }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f2c3ca48/geode-core/src/main/java/org/apache/geode/internal/cache/execute/ServerFunctionExecutor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/ServerFunctionExecutor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/ServerFunctionExecutor.java
index 4295898..1db6e86 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/ServerFunctionExecutor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/ServerFunctionExecutor.java
@@ -33,6 +33,7 @@ import org.apache.geode.cache.execute.FunctionException;
 import org.apache.geode.cache.execute.FunctionService;
 import org.apache.geode.cache.execute.ResultCollector;
 import org.apache.geode.internal.cache.TXManagerImpl;
+import org.apache.geode.internal.cache.execute.util.SynchronizedResultCollector;
 import org.apache.geode.internal.i18n.LocalizedStrings;
 /**
  * 
@@ -73,9 +74,9 @@ public class ServerFunctionExecutor extends AbstractExecution {
     this.args = args;
   }
   
-  private ServerFunctionExecutor(ServerFunctionExecutor sfe, ResultCollector rs) {
+  private ServerFunctionExecutor(ServerFunctionExecutor sfe, ResultCollector collector) {
     this(sfe);
-    this.rc = rs;
+    this.rc = collector != null ? new SynchronizedResultCollector(collector): collector;
   }
   
   private ServerFunctionExecutor(ServerFunctionExecutor sfe, MemberMappedArgument argument) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f2c3ca48/geode-core/src/main/java/org/apache/geode/internal/cache/execute/ServerRegionFunctionExecutor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/ServerRegionFunctionExecutor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/ServerRegionFunctionExecutor.java
index b5bc684..5669ad1 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/ServerRegionFunctionExecutor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/ServerRegionFunctionExecutor.java
@@ -30,6 +30,7 @@ import org.apache.geode.cache.execute.ResultCollector;
 import org.apache.geode.internal.cache.GemFireCacheImpl;
 import org.apache.geode.internal.cache.LocalRegion;
 import org.apache.geode.internal.cache.TXStateProxyImpl;
+import org.apache.geode.internal.cache.execute.util.SynchronizedResultCollector;
 import org.apache.geode.internal.i18n.LocalizedStrings;
 import org.apache.geode.internal.logging.LogService;
 
@@ -103,7 +104,7 @@ public class ServerRegionFunctionExecutor extends AbstractExecution {
     this.region = serverRegionFunctionExecutor.region;
     this.filter.clear();
     this.filter.addAll(serverRegionFunctionExecutor.filter);
-    this.rc = rc;
+    this.rc = rc != null ? new SynchronizedResultCollector(rc) : null;
     this.executeOnBucketSet = serverRegionFunctionExecutor.executeOnBucketSet;
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f2c3ca48/geode-core/src/main/java/org/apache/geode/internal/cache/execute/util/SynchronizedResultCollector.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/util/SynchronizedResultCollector.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/util/SynchronizedResultCollector.java
new file mode 100644
index 0000000..9e09679
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/util/SynchronizedResultCollector.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.cache.execute.util;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.geode.cache.execute.FunctionException;
+import org.apache.geode.cache.execute.ResultCollector;
+import org.apache.geode.distributed.DistributedMember;
+
+public class SynchronizedResultCollector<T,S> implements ResultCollector<T, S> {
+
+  public final ResultCollector<T,S> collector;
+
+  public SynchronizedResultCollector(final ResultCollector collector) {
+    this.collector = collector;
+  }
+
+  @Override
+  public synchronized S getResult() throws FunctionException {
+    return collector.getResult();
+  }
+
+  @Override
+  public synchronized S getResult(final long timeout, final TimeUnit unit) throws FunctionException, InterruptedException {
+    return collector.getResult(timeout, unit);
+  }
+
+  @Override
+  public synchronized void addResult(final DistributedMember memberID, final T resultOfSingleExecution) {
+    collector.addResult(memberID, resultOfSingleExecution);
+  }
+
+  @Override
+  public synchronized void endResults() {
+    collector.endResults();
+  }
+
+  @Override
+  public synchronized void clearResults() {
+    collector.clearResults();
+  }
+}


[24/50] [abbrv] incubator-geode git commit: GEODE-2009: add FlakyTest category to testCreateAlterDestroyUpdatesSharedConfig

Posted by kl...@apache.org.
GEODE-2009: add FlakyTest category to testCreateAlterDestroyUpdatesSharedConfig


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/7330733e
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/7330733e
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/7330733e

Branch: refs/heads/feature/GEODE-1930
Commit: 7330733e31ea9fde5452ae983bb99d255f4ed2fc
Parents: 474ff41
Author: Kirk Lund <kl...@apache.org>
Authored: Mon Oct 17 12:18:35 2016 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Mon Oct 17 16:30:19 2016 -0700

----------------------------------------------------------------------
 .../cli/commands/CreateAlterDestroyRegionCommandsDUnitTest.java     | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7330733e/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/CreateAlterDestroyRegionCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/CreateAlterDestroyRegionCommandsDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/CreateAlterDestroyRegionCommandsDUnitTest.java
index 5fa06d9..effe294 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/CreateAlterDestroyRegionCommandsDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/CreateAlterDestroyRegionCommandsDUnitTest.java
@@ -785,6 +785,7 @@ public class CreateAlterDestroyRegionCommandsDUnitTest extends CliCommandTestBas
   /**
    * Asserts that creating, altering and destroying regions correctly updates the shared configuration.
    */
+  @Category(FlakyTest.class) // GEODE-2009
   @Test
   public void testCreateAlterDestroyUpdatesSharedConfig() {
     disconnectAllFromDS();


[46/50] [abbrv] incubator-geode git commit: Convert from ManagementTestCase to ManagementTestRule

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/management/OffHeapManagementDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/OffHeapManagementDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/OffHeapManagementDUnitTest.java
index 9fb9ab0..c02a220 100644
--- a/geode-core/src/test/java/org/apache/geode/management/OffHeapManagementDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/OffHeapManagementDUnitTest.java
@@ -16,6 +16,7 @@
  */
 package org.apache.geode.management;
 
+import static com.jayway.awaitility.Awaitility.*;
 import static org.apache.geode.distributed.ConfigurationProperties.*;
 import static org.junit.Assert.*;
 
@@ -23,8 +24,11 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
+import java.util.concurrent.TimeUnit;
+
 import javax.management.Attribute;
 import javax.management.AttributeList;
+import javax.management.JMException;
 import javax.management.MBeanServer;
 import javax.management.Notification;
 import javax.management.NotificationListener;
@@ -38,102 +42,99 @@ import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.DataPolicy;
 import org.apache.geode.cache.Region;
 import org.apache.geode.distributed.internal.InternalDistributedSystem;
-import org.apache.geode.internal.cache.GemFireCacheImpl;
-import org.apache.geode.internal.offheap.OffHeapMemoryStats;
 import org.apache.geode.internal.offheap.OffHeapStorage;
 import org.apache.geode.internal.offheap.OffHeapStoredObject;
 import org.apache.geode.management.internal.MBeanJMXAdapter;
 import org.apache.geode.management.internal.beans.MemberMBean;
 import org.apache.geode.test.dunit.Host;
-import org.apache.geode.test.dunit.SerializableCallable;
 import org.apache.geode.test.dunit.SerializableRunnable;
 import org.apache.geode.test.dunit.VM;
-import org.apache.geode.test.dunit.Wait;
-import org.apache.geode.test.dunit.WaitCriterion;
 import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
 import org.apache.geode.test.junit.categories.DistributedTest;
 
 /**
  * Tests the off-heap additions to the RegionMXBean and MemberMXBean JMX interfaces.
  */
-@SuppressWarnings("serial")
 @Category(DistributedTest.class)
+@SuppressWarnings("serial")
 public class OffHeapManagementDUnitTest extends JUnit4CacheTestCase {
+
   /**
    * Specified assertion operations.
    */
-  private static enum ASSERT_OP {
+  private enum ASSERT_OP {
     EQUAL,
     GREATER_THAN,
     GREATER_THAN_OR_EQUAL,
     LESS_THAN
   }
-  
+
   /**
    * Name of off-heap test region.
    */
   private static final String OFF_HEAP_REGION_NAME = "offHeapRegion";
-  
+
   /**
    * Path of off-heap test region.
    */
   private static final String OFF_HEAP_REGION_PATH = "/" + OFF_HEAP_REGION_NAME;
-  
+
   /**
    * Expected total off-heap reserved memory (1 megabyte).
    */
   private static final long TOTAL_MEMORY = 1048576;
-  
+
   /**
    * Half of expected memory total.
    */
   private static final int HALF_TOTAL_MEMORY = (int) (TOTAL_MEMORY / 2);
-  
+
   /**
    * An arbitrary array size.
    */
   private static final int ALLOCATION_SIZE = 100000;
-  
+
   /**
    * A non-arbitrary array size.
    */
   private static final int NEW_ALLOCATION_SIZE = 400000;
-  
+
   /**
    * Java object serialization overhead.
    */
   private static final int OBJECT_OVERHEAD = 8;
-  
+
   /**
    * A region entry key.
    */
   private static final String KEY = "key";
-  
+
   /**
    * Another region entry key.
    */
   private static final String KEY2 = "key2";
-  
+
   /**
    * Yet another region entry key.
    */
   private static final String KEY3 = "key3";
-  
+
   /**
    * A region entry value.
    */
-  private static final byte[] VALUE = "Proin lobortis enim vel sem congue ut condimentum leo rhoncus. In turpis lorem, rhoncus nec rutrum vel, sodales vitae lacus. Etiam nunc ligula, scelerisque id egestas vitae, gravida non enim. Donec ac ligula purus. Mauris gravida ligula sit amet mi ornare blandit. Aliquam at velit ac enim varius malesuada ut eu tortor. Quisque diam nisi, fermentum vel accumsan at, commodo et velit.".getBytes();
-  
+  private static final byte[] VALUE = "Proin lobortis enim vel sem congue ut condimentum leo rhoncus. In turpis lorem, rhoncus nec rutrum vel, sodales vitae lacus. Etiam nunc ligula, scelerisque id egestas vitae, gravida non enim. Donec ac ligula purus. Mauris gravida ligula sit amet mi ornare blandit. Aliquam at velit ac enim varius malesuada ut eu tortor. Quisque diam nisi, fermentum vel accumsan at, commodo et velit."
+    .getBytes();
+
   /**
    * The expected size of the region entry value in off-heap memory.
    */
   private static final int OBJECT_SIZE = VALUE.length + OBJECT_OVERHEAD;
-  
+
   /**
    * Listens for off-heap JMX notifications.
    */
   private static final OffHeapNotificationListener notificationListener = new OffHeapNotificationListener();
-  
+
   /**
    * Local MBeanServer.
    */
@@ -141,42 +142,35 @@ public class OffHeapManagementDUnitTest extends JUnit4CacheTestCase {
 
   @Override
   public final void postSetUp() throws Exception {
-    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
-      @Override
-      public void run() {
-        System.setProperty(OffHeapStorage.STAY_CONNECTED_ON_OUTOFOFFHEAPMEMORY_PROPERTY, "true");
-      }
+    Host.getHost(0).getVM(0).invoke(() -> {
+      System.setProperty(OffHeapStorage.STAY_CONNECTED_ON_OUTOFOFFHEAPMEMORY_PROPERTY, "true");
     });
   }
 
   @Override
   public final void preTearDownCacheTestCase() throws Exception {
-    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
-      @Override
-      public void run() {
-        System.clearProperty(OffHeapStorage.STAY_CONNECTED_ON_OUTOFOFFHEAPMEMORY_PROPERTY);
-      }
+    Host.getHost(0).getVM(0).invoke(() -> {
+      System.clearProperty(OffHeapStorage.STAY_CONNECTED_ON_OUTOFOFFHEAPMEMORY_PROPERTY);
     });
   }
-  
+
   /**
    * Tests off-heap additions to the RegionMXBean and MemberMXBean interfaces.
-   * @throws Exception
    */
   @Test
   public void testOffHeapMBeanAttributesAndStats() throws Exception {
-    final VM vm = Host.getHost(0).getVM(0);    
+    final VM vm = Host.getHost(0).getVM(0);
 
     try {
       // Setup off-heap memory for cache
       setSystemPropertiesOnVm(vm, true, getSystemProperties());
-      
+
       // Create our off-heap region
-      assertNotNull(createOffHeapRegionOnVm(vm,OFF_HEAP_REGION_NAME,DataPolicy.REPLICATE));
-      
+      assertNotNull(createOffHeapRegionOnVm(vm, OFF_HEAP_REGION_NAME, DataPolicy.REPLICATE));
+
       // Make sure our off-heap region has off-heap enabled.
       assertOffHeapRegionAttributesOnVm(vm);
-      
+
       // Make sure our starting off heap stats are correct
       assertOffHeapMetricsOnVm(vm, TOTAL_MEMORY, 0, 0, 0);
   
@@ -187,19 +181,19 @@ public class OffHeapManagementDUnitTest extends JUnit4CacheTestCase {
       {
         doPutOnVm(vm, KEY, VALUE, OFF_HEAP_REGION_NAME, false);
         assertOffHeapMetricsOnVm(vm, (TOTAL_MEMORY - OBJECT_SIZE), OBJECT_SIZE, 1, 0);
-    
+
         doPutOnVm(vm, KEY2, VALUE, OFF_HEAP_REGION_NAME, false);
         assertOffHeapMetricsOnVm(vm, (TOTAL_MEMORY - (2 * OBJECT_SIZE)), (2 * OBJECT_SIZE), 2, 0);
-    
+
         doPutOnVm(vm, KEY3, VALUE, OFF_HEAP_REGION_NAME, false);
         assertOffHeapMetricsOnVm(vm, (TOTAL_MEMORY - (3 * OBJECT_SIZE)), (3 * OBJECT_SIZE), 3, 0);
-    
+
         doDestroyOnVm(vm, KEY3, OFF_HEAP_REGION_NAME);
         assertOffHeapMetricsOnVm(vm, (TOTAL_MEMORY - (2 * OBJECT_SIZE)), (2 * OBJECT_SIZE), 2, 0);
-    
+
         doDestroyOnVm(vm, KEY2, OFF_HEAP_REGION_NAME);
         assertOffHeapMetricsOnVm(vm, (TOTAL_MEMORY - OBJECT_SIZE), OBJECT_SIZE, 1, 0);
-    
+
         doDestroyOnVm(vm, KEY, OFF_HEAP_REGION_NAME);
         assertOffHeapMetricsOnVm(vm, TOTAL_MEMORY, 0, 0, 0);
       }
@@ -207,62 +201,61 @@ public class OffHeapManagementDUnitTest extends JUnit4CacheTestCase {
       doCleanupOnVm(vm);
     }
   }
-  
+
   /**
    * Tests the fragmentation statistic for off-heap memory.
-   * @throws Exception
    */
   @Test
   public void testFragmentationStat() throws Exception {
-    final VM vm = Host.getHost(0).getVM(0);    
+    final VM vm = Host.getHost(0).getVM(0);
 
     try {
       // Setup off-heap memory for cache
       setSystemPropertiesOnVm(vm, true, getSystemProperties());
-      
+
       // Create our off-heap region
-      assertNotNull(createOffHeapRegionOnVm(vm,OFF_HEAP_REGION_NAME,DataPolicy.REPLICATE));
-      vm.invoke(new SerializableRunnable() {      
+      assertNotNull(createOffHeapRegionOnVm(vm, OFF_HEAP_REGION_NAME, DataPolicy.REPLICATE));
+      vm.invoke(new SerializableRunnable() {
         @Override
         public void run() {
           Region region = getCache().getRegion(OFF_HEAP_REGION_NAME);
-          assertNotNull(region);    
+          assertNotNull(region);
         }
       });
-      
+
       // Make sure our off-heap region has off-heap enabled.
       assertOffHeapRegionAttributesOnVm(vm);
-      
+
       // Make sure our starting off heap stats are correct
       assertOffHeapMetricsOnVm(vm, TOTAL_MEMORY, 0, 0, 0);
-      
+
       // After allocating large chunk (equal to total memory) 
       // we should still have no fragmentation
       int largeChunk = (int) TOTAL_MEMORY - OffHeapStoredObject.HEADER_SIZE;
       doPutOnVm(vm, KEY, new byte[largeChunk], OFF_HEAP_REGION_NAME, false);
       // No compaction has run, so fragmentation should be zero
-      assertFragmentationStatOnVm(vm,0,ASSERT_OP.EQUAL);
-      
+      assertFragmentationStatOnVm(vm, 0, ASSERT_OP.EQUAL);
+
       // Allocate more memory to trigger compaction
       doPutOnVm(vm, KEY, new byte[ALLOCATION_SIZE], OFF_HEAP_REGION_NAME, true);
       // When total memory is used no fragmentation
-      assertFragmentationStatOnVm(vm,0,ASSERT_OP.EQUAL);
-      
+      assertFragmentationStatOnVm(vm, 0, ASSERT_OP.EQUAL);
+
       // After freeing all memory we should have no fragmentation
       doDestroyOnVm(vm, KEY, OFF_HEAP_REGION_NAME);
-      assertFragmentationStatOnVm(vm,0,ASSERT_OP.EQUAL);
-      
+      assertFragmentationStatOnVm(vm, 0, ASSERT_OP.EQUAL);
+
       // Allocate HALF_TOTAL_MEMORY twice and release one to create one fragment
       int halfChunk = HALF_TOTAL_MEMORY - OffHeapStoredObject.HEADER_SIZE;
       doPutOnVm(vm, KEY + "0", new byte[halfChunk], OFF_HEAP_REGION_NAME, false);
       doPutOnVm(vm, KEY + "1", new byte[halfChunk], OFF_HEAP_REGION_NAME, false);
       doDestroyOnVm(vm, KEY + "0", OFF_HEAP_REGION_NAME);
-      
+
       // Allocate largeChunk to trigger compaction and fragmentation should be zero 
       // as all free memory is available as one fragment
       doPutOnVm(vm, KEY + "1", new byte[largeChunk], OFF_HEAP_REGION_NAME, true);
-      assertFragmentationStatOnVm(vm,0,ASSERT_OP.EQUAL);
-      
+      assertFragmentationStatOnVm(vm, 0, ASSERT_OP.EQUAL);
+
       // Consume the available fragment as below
       // [16][262120][16][262120][16] = [524288] (HALF_TOTAL_MEMORY)
       int smallChunk = OffHeapStoredObject.MIN_CHUNK_SIZE - OffHeapStoredObject.HEADER_SIZE;
@@ -272,7 +265,7 @@ public class OffHeapManagementDUnitTest extends JUnit4CacheTestCase {
       doPutOnVm(vm, KEY + "S2", new byte[smallChunk], OFF_HEAP_REGION_NAME, false);
       doPutOnVm(vm, KEY + "M2", new byte[mediumChunk], OFF_HEAP_REGION_NAME, false);
       doPutOnVm(vm, KEY + "S3", new byte[smallChunk], OFF_HEAP_REGION_NAME, false);
-      
+
       // free small chunks to create gaps
       doDestroyOnVm(vm, KEY + "S1", OFF_HEAP_REGION_NAME);
       doDestroyOnVm(vm, KEY + "S2", OFF_HEAP_REGION_NAME);
@@ -285,53 +278,52 @@ public class OffHeapManagementDUnitTest extends JUnit4CacheTestCase {
        * Setup a fragmentation attribute monitor
        */
       {
-        setupOffHeapMonitorOnVm(vm,"OffHeapFragmentation",0,0);      
+        setupOffHeapMonitorOnVm(vm, "OffHeapFragmentation", 0, 0);
         clearNotificationListenerOnVm(vm);
       }
 
       // Make sure we have some fragmentation
       assertFragmentationStatOnVm(vm, 100, ASSERT_OP.EQUAL);
-      
+
       // Make sure our fragmentation monitor was triggered
       waitForNotificationListenerOnVm(vm, 5000, 500, true);
     } finally {
       doCleanupOnVm(vm);
-    }      
+    }
   }
 
   /**
-   * Tests the compation time statistic for off-heap memory.
-   * @throws Exception
+   * Tests the compaction time statistic for off-heap memory.
    */
   @Test
   public void testCompactionTimeStat() throws Exception {
-    final VM vm = Host.getHost(0).getVM(0);    
+    final VM vm = Host.getHost(0).getVM(0);
 
     try {
       // Setup off-heap memory for cache
       setSystemPropertiesOnVm(vm, true, getSystemProperties());
-      
+
       // Create our off-heap region
-      assertNotNull(createOffHeapRegionOnVm(vm,OFF_HEAP_REGION_NAME,DataPolicy.REPLICATE));
-      
+      assertNotNull(createOffHeapRegionOnVm(vm, OFF_HEAP_REGION_NAME, DataPolicy.REPLICATE));
+
       // Make sure our off-heap region has off-heap enabled.
       assertOffHeapRegionAttributesOnVm(vm);
-      
+
       // Make sure our starting off heap stats are correct
       assertOffHeapMetricsOnVm(vm, TOTAL_MEMORY, 0, 0, 0);
-      
+
       // After allocating large chunk we should still have no compaction time
       doPutOnVm(vm, KEY, new byte[HALF_TOTAL_MEMORY], OFF_HEAP_REGION_NAME, false);
-      assertCompactionTimeStatOnVm(vm,0,ASSERT_OP.EQUAL);
-      
+      assertCompactionTimeStatOnVm(vm, 0, ASSERT_OP.EQUAL);
+
       // After freeing all memory we should have no compaction time
       doDestroyOnVm(vm, KEY, OFF_HEAP_REGION_NAME);
-      assertCompactionTimeStatOnVm(vm,0,ASSERT_OP.EQUAL);
-      
+      assertCompactionTimeStatOnVm(vm, 0, ASSERT_OP.EQUAL);
+
       // Consume all off-heap memory using an allocation size
-      int numAllocations = doConsumeOffHeapMemoryOnVm(vm,ALLOCATION_SIZE);
+      int numAllocations = doConsumeOffHeapMemoryOnVm(vm, ALLOCATION_SIZE);
       assertTrue(numAllocations > 0);
-      
+
       // Randomly free 3 allocations to produce off-heap gaps
       doFreeOffHeapMemoryOnVm(vm, numAllocations, 3);
 
@@ -339,13 +331,13 @@ public class OffHeapManagementDUnitTest extends JUnit4CacheTestCase {
        * Setup a compaction time attribute monitor
        */
       {
-        setupOffHeapMonitorOnVm(vm,"OffHeapCompactionTime",0,0);      
+        setupOffHeapMonitorOnVm(vm, "OffHeapCompactionTime", 0, 0);
         clearNotificationListenerOnVm(vm);
       }
-      
+
       // Allocate enough memory to force compaction which will update compaction time stat
-      doPutOnVm(vm,KEY, new byte[NEW_ALLOCATION_SIZE], OFF_HEAP_REGION_NAME, true);
-      
+      doPutOnVm(vm, KEY, new byte[NEW_ALLOCATION_SIZE], OFF_HEAP_REGION_NAME, true);
+
       // Make sure our compaction time monitor was triggered
       waitForNotificationListenerOnVm(vm, 5000, 500, true);
 
@@ -353,29 +345,29 @@ public class OffHeapManagementDUnitTest extends JUnit4CacheTestCase {
        * Make sure we have some compaction time.  In some environments the 
        * compaction time is reported as 0 due to time sample granularity and compaction speed. 
        */
-      assertCompactionTimeStatOnVm(vm, 0, ASSERT_OP.GREATER_THAN_OR_EQUAL);      
+      assertCompactionTimeStatOnVm(vm, 0, ASSERT_OP.GREATER_THAN_OR_EQUAL);
     } finally {
       doCleanupOnVm(vm);
-    }      
+    }
   }
 
   /**
-   *  Asserts that a monitor assigned to the OffHeapObjects attribute is triggered.
+   * Asserts that a monitor assigned to the OffHeapObjects attribute is triggered.
    */
   @Test
-  public void testOffHeapObjectsMonitoring()  throws Exception {
-    final VM vm = Host.getHost(0).getVM(0);    
+  public void testOffHeapObjectsMonitoring() throws Exception {
+    final VM vm = Host.getHost(0).getVM(0);
 
     try {
       // Setup off-heap memory for cache
       setSystemPropertiesOnVm(vm, true, getSystemProperties());
-      
+
       // Create our off-heap region
-      assertNotNull(createOffHeapRegionOnVm(vm,OFF_HEAP_REGION_NAME,DataPolicy.REPLICATE));
-      
+      assertNotNull(createOffHeapRegionOnVm(vm, OFF_HEAP_REGION_NAME, DataPolicy.REPLICATE));
+
       // Make sure our off-heap region has off-heap enabled.
       assertOffHeapRegionAttributesOnVm(vm);
-      
+
       // Make sure our starting off heap stats are correct
       assertOffHeapMetricsOnVm(vm, TOTAL_MEMORY, 0, 0, 0);                
 
@@ -383,37 +375,37 @@ public class OffHeapManagementDUnitTest extends JUnit4CacheTestCase {
        * Tests off-heap objects notification
        */
       {
-        setupOffHeapMonitorOnVm(vm,"OffHeapObjects",0,-1);
-        
+        setupOffHeapMonitorOnVm(vm, "OffHeapObjects", 0, -1);
+
         clearNotificationListenerOnVm(vm);
-        
+
         doPutOnVm(vm, KEY, VALUE, OFF_HEAP_REGION_NAME, false);
-        
+
         waitForNotificationListenerOnVm(vm, 5000, 500, true);
       }
-      
+
     } finally {
       doCleanupOnVm(vm);
-    }      
+    }
   }
-  
+
   /**
    * Asserts that a monitor assigned to the OffHeapFreeSize attribute is triggered.
    */
   @Test
-  public void testOffHeapFreeSizeMonitoring()  throws Exception {
-    final VM vm = Host.getHost(0).getVM(0);    
+  public void testOffHeapFreeSizeMonitoring() throws Exception {
+    final VM vm = Host.getHost(0).getVM(0);
 
     try {
       // Setup off-heap memory for cache
       setSystemPropertiesOnVm(vm, true, getSystemProperties());
-      
+
       // Create our off-heap region
-      assertNotNull(createOffHeapRegionOnVm(vm,OFF_HEAP_REGION_NAME,DataPolicy.REPLICATE));
-      
+      assertNotNull(createOffHeapRegionOnVm(vm, OFF_HEAP_REGION_NAME, DataPolicy.REPLICATE));
+
       // Make sure our off-heap region has off-heap enabled.
       assertOffHeapRegionAttributesOnVm(vm);
-      
+
       // Make sure our starting off heap stats are correct
       assertOffHeapMetricsOnVm(vm, TOTAL_MEMORY, 0, 0, 0);                
 
@@ -421,37 +413,37 @@ public class OffHeapManagementDUnitTest extends JUnit4CacheTestCase {
        * Tests off-heap objects notification
        */
       {
-        setupOffHeapMonitorOnVm(vm,"OffHeapFreeSize",TOTAL_MEMORY,TOTAL_MEMORY);
-        
+        setupOffHeapMonitorOnVm(vm, "OffHeapFreeSize", TOTAL_MEMORY, TOTAL_MEMORY);
+
         clearNotificationListenerOnVm(vm);
-        
+
         doPutOnVm(vm, KEY, VALUE, OFF_HEAP_REGION_NAME, false);
-        
+
         waitForNotificationListenerOnVm(vm, 5000, 500, true);
       }
-      
+
     } finally {
       doCleanupOnVm(vm);
-    }      
+    }
   }
 
   /**
    * Asserts that a monitor assigned to the OffHeapAllocatedSize attribute is triggered.
    */
   @Test
-  public void testOffHeapAllocatedSizeMonitoring()  throws Exception {
-    final VM vm = Host.getHost(0).getVM(0);    
+  public void testOffHeapAllocatedSizeMonitoring() throws Exception {
+    final VM vm = Host.getHost(0).getVM(0);
 
     try {
       // Setup off-heap memory for cache
       setSystemPropertiesOnVm(vm, true, getSystemProperties());
-      
+
       // Create our off-heap region
-      assertNotNull(createOffHeapRegionOnVm(vm,OFF_HEAP_REGION_NAME,DataPolicy.REPLICATE));
-      
+      assertNotNull(createOffHeapRegionOnVm(vm, OFF_HEAP_REGION_NAME, DataPolicy.REPLICATE));
+
       // Make sure our off-heap region has off-heap enabled.
       assertOffHeapRegionAttributesOnVm(vm);
-      
+
       // Make sure our starting off heap stats are correct
       assertOffHeapMetricsOnVm(vm, TOTAL_MEMORY, 0, 0, 0);                
 
@@ -459,188 +451,182 @@ public class OffHeapManagementDUnitTest extends JUnit4CacheTestCase {
        * Tests off-heap objects notification
        */
       {
-        setupOffHeapMonitorOnVm(vm,"OffHeapAllocatedSize",0,OBJECT_SIZE);
-        
+        setupOffHeapMonitorOnVm(vm, "OffHeapAllocatedSize", 0, OBJECT_SIZE);
+
         clearNotificationListenerOnVm(vm);
-        
+
         doPutOnVm(vm, KEY, VALUE, OFF_HEAP_REGION_NAME, false);
-        
+
         waitForNotificationListenerOnVm(vm, 5000, 500, true);
       }
-      
+
     } finally {
       doCleanupOnVm(vm);
-    }      
+    }
   }
 
   /**
    * Destroys a number of entries previously allocated.
+   *
    * @param vm a virtual machine
    * @param numAllocations the number of previous off-heap allocations
    * @param numDestroys the number of destroys to perform
    */
-  protected void doFreeOffHeapMemoryOnVm(VM vm,final int numAllocations,final int numDestroys) {
-    vm.invoke(new SerializableRunnable() {      
+  private void doFreeOffHeapMemoryOnVm(final VM vm, final int numAllocations, final int numDestroys) {
+    vm.invoke(new SerializableRunnable() {
       @Override
       public void run() {
-        doFreeOffHeapMemory(numAllocations,numDestroys);
+        doFreeOffHeapMemory(numAllocations, numDestroys);
       }
     });
   }
-  
+
   /**
    * Performs some destroys to free off-heap allocations.
+   *
    * @param numAllocations the number of previous off-heap allocations
    * @param numDestroys the number of destroys to perform
    */
-  protected void doFreeOffHeapMemory(int numAllocations,int numDestroys) {
+  private void doFreeOffHeapMemory(final int numAllocations, final int numDestroys) {
     assertTrue(numDestroys <= numAllocations);
-    
+
     Region region = getCache().getRegion(OFF_HEAP_REGION_NAME);
-    assertNotNull(region);    
-    assertTrue(numDestroys <= region.size());    
-    
+    assertNotNull(region);
+    assertTrue(numDestroys <= region.size());
+
     String key = "KEY0";
     Object value = key;
     int destroyed = 0;
 
-    while(destroyed < numDestroys) {
+    while (destroyed < numDestroys) {
       key = "KEY" + ((int) (Math.random() * numAllocations));
       value = region.get(key);
-      
-      if(null != value) {
+
+      if (null != value) {
         region.destroy(key);
         ++destroyed;
       }
     }
   }
-  
+
   /**
    * Consumes off-heap memory until the allocation size cannot be satisfied.
+   *
    * @param vm a virtual machine
    * @param allocationSize the number of bytes for each allocation
+   *
    * @return the number of successful puts
    */
-  protected int doConsumeOffHeapMemoryOnVm(VM vm,final int allocationSize) {
-    return (Integer) vm.invoke(new SerializableCallable() {
-      @Override
-      public Object call() {
-        return doConsumeOffHeapMemory(allocationSize);
-      }      
-    });
+  private int doConsumeOffHeapMemoryOnVm(final VM vm, final int allocationSize) {
+    return vm.invoke(() -> doConsumeOffHeapMemory(allocationSize));
   }
-  
+
   /**
    * Consumes off-heap memory until the allocation size cannot be satisfied.
+   *
    * @param allocationSize the number of bytes for each allocation
+   *
    * @return the number of successful puts
    */
-  protected int doConsumeOffHeapMemory(int allocationSize) { // TODO:KIRK: change this to handle new OutOfOffHeapMemoryException
-    OffHeapMemoryStats stats = ((GemFireCacheImpl) getCache()).getOffHeapStore().getStats();
+  private int doConsumeOffHeapMemory(final int allocationSize) {
     int i = 0;
 
     // Loop until we fail
     try {
-      while(true) {
-        doPut("KEY" + (i++),new byte[allocationSize],OFF_HEAP_REGION_NAME, false);      
+      while (true) { // TODO: put a time limit on this just in case
+        doPut("KEY" + (i++), new byte[allocationSize], OFF_HEAP_REGION_NAME, false);
       }
     } catch (OutOfOffHeapMemoryException e) {
     }
-    
+
     return i;
   }
-    
+
   /**
    * Asserts that the compactionTime stat is available and satisfies an assert operation.
+   *
    * @param vm a virtual machine.
    * @param compactionTime total off heap compaction time.
    * @param op an assert operation.
    */
-  protected void assertCompactionTimeStatOnVm(VM vm,final long compactionTime,final ASSERT_OP op) {
-    vm.invoke(new SerializableRunnable() {
-      @Override
-      public void run() {
-        assertCompactionTimeStat(compactionTime, op);
-      }      
-    });
-  }  
-  
+  private void assertCompactionTimeStatOnVm(final VM vm, final long compactionTime, final ASSERT_OP op) {
+    vm.invoke(() -> assertCompactionTimeStat(compactionTime, op));
+  }
+
   /**
    * Asserts that the compactionTime stat is available and satisfies an assert operation.
+   *
    * @param compactionTime total off heap compaction time.
    * @param op an assert operation.
    */
-  protected void assertCompactionTimeStat(long compactionTime,ASSERT_OP op) {
-    ManagementService service = ManagementService.getExistingManagementService(getCache());    
+  private void assertCompactionTimeStat(final long compactionTime, final ASSERT_OP op) {
+    ManagementService service = ManagementService.getExistingManagementService(getCache());
     assertNotNull(service);
-    
+
     assertTrue(service.isManager());
 
-    MemberMXBean memberBean = service.getMemberMXBean();   
+    MemberMXBean memberBean = service.getMemberMXBean();
     assertNotNull(memberBean);
-    
-    switch(op) {
-    case EQUAL:
-      assertEquals(compactionTime,memberBean.getOffHeapCompactionTime());
-      break;
-    case GREATER_THAN:
-      assertTrue(compactionTime < memberBean.getOffHeapCompactionTime());
-      break;
-    case GREATER_THAN_OR_EQUAL:
-      assertTrue(compactionTime <= memberBean.getOffHeapCompactionTime());
-      break;
-    case LESS_THAN:
-      assertTrue(compactionTime > memberBean.getOffHeapCompactionTime());
-      break;
+
+    switch (op) {
+      case EQUAL:
+        assertEquals(compactionTime, memberBean.getOffHeapCompactionTime());
+        break;
+      case GREATER_THAN:
+        assertTrue(compactionTime < memberBean.getOffHeapCompactionTime());
+        break;
+      case GREATER_THAN_OR_EQUAL:
+        assertTrue(compactionTime <= memberBean.getOffHeapCompactionTime());
+        break;
+      case LESS_THAN:
+        assertTrue(compactionTime > memberBean.getOffHeapCompactionTime());
+        break;
     }
   }
-  
+
   /**
    * Asserts that the fragmentation stat is available and satisfies an assert operation.
+   *
    * @param vm a virtual machine
    * @param fragmentation a fragmentation percentage
    * @param op an assertion operation
    */
-  protected void assertFragmentationStatOnVm(VM vm,final int fragmentation, final ASSERT_OP op) {
-    vm.invoke(new SerializableRunnable() {
-      @Override
-      public void run() {
-        assertFragmentationStat(fragmentation,op);
-      }      
-    });
+  private void assertFragmentationStatOnVm(final VM vm, final int fragmentation, final ASSERT_OP op) {
+    vm.invoke(() -> assertFragmentationStat(fragmentation, op));
   }
-  
+
   /**
    * Asserts that the fragmentation stat is available and satisfies an assert operation.
+   *
    * @param fragmentation a fragmentation percentage
    * @param op an assertion operation
    */
-  protected void assertFragmentationStat(int fragmentation,ASSERT_OP op) {
-    ManagementService service = ManagementService.getExistingManagementService(getCache());    
+  private void assertFragmentationStat(final int fragmentation, final ASSERT_OP op) {
+    ManagementService service = ManagementService.getExistingManagementService(getCache());
     assertNotNull(service);
-    
+
     assertTrue(service.isManager());
 
-    MemberMXBean memberBean = service.getMemberMXBean();   
+    MemberMXBean memberBean = service.getMemberMXBean();
     assertNotNull(memberBean);
-    
-    switch(op) {
-    case EQUAL:
-      assertEquals(fragmentation,memberBean.getOffHeapFragmentation());
-      break;
-    case GREATER_THAN:
-      assertTrue(fragmentation < memberBean.getOffHeapFragmentation());
-      break;
-    case LESS_THAN:
-      assertTrue(fragmentation > memberBean.getOffHeapFragmentation());
-      break;
+
+    switch (op) {
+      case EQUAL:
+        assertEquals(fragmentation, memberBean.getOffHeapFragmentation());
+        break;
+      case GREATER_THAN:
+        assertTrue(fragmentation < memberBean.getOffHeapFragmentation());
+        break;
+      case LESS_THAN:
+        assertTrue(fragmentation > memberBean.getOffHeapFragmentation());
+        break;
     }
   }
-  
+
   /**
    * Returns off-heap system properties for enabling off-heap and the JMX system.
    */
-  protected Properties getSystemProperties() {
+  private Properties getSystemProperties() {
     Properties props = getDistributedSystemProperties();
 
     props.setProperty(OFF_HEAP_MEMORY_SIZE, "1m");
@@ -653,15 +639,11 @@ public class OffHeapManagementDUnitTest extends JUnit4CacheTestCase {
 
   /**
    * Removes off heap region on vm and disconnects.
-   * @param vm a virutal machine.
+   *
+   * @param vm a virtual machine.
    */
-  protected void doCleanupOnVm(VM vm) {
-    vm.invoke(new SerializableRunnable() {
-      @Override
-      public void run() {
-        cleanup();
-      }      
-    });
+  private void doCleanupOnVm(final VM vm) {
+    vm.invoke(() -> cleanup());
   }
 
   /**
@@ -670,339 +652,310 @@ public class OffHeapManagementDUnitTest extends JUnit4CacheTestCase {
   protected void cleanup() {
     Cache existingCache = basicGetCache();
 
-    if(null != existingCache && !existingCache.isClosed()) {
+    if (null != existingCache && !existingCache.isClosed()) {
       Region region = getCache().getRegion(OFF_HEAP_REGION_NAME);
 
       if (null != region) {
         region.destroyRegion();
       }
     }
-    
+
     disconnectFromDS();
   }
-  
+
   /**
    * Asserts that the off heap region data is available and enabled for a VM.
    */
-  @SuppressWarnings("serial")
-  protected void assertOffHeapRegionAttributesOnVm(VM vm) {
-    vm.invoke(new SerializableRunnable() {
-      @Override
-      public void run() {
-        assertOffHeapRegionAttributes();
-      }      
-    });
+  private void assertOffHeapRegionAttributesOnVm(final VM vm) {
+    vm.invoke(() -> assertOffHeapRegionAttributes());
   }
-  
+
   /**
    * Asserts that the off heap region data is available and enabled.
    */
-  protected void assertOffHeapRegionAttributes() {
-    ManagementService service = ManagementService.getExistingManagementService(getCache());    
+  private void assertOffHeapRegionAttributes() {
+    ManagementService service = ManagementService.getExistingManagementService(getCache());
     assertNotNull(service);
-    
+
     assertTrue(service.isManager());
-    
+
     RegionMXBean regionBean = service.getLocalRegionMBean(OFF_HEAP_REGION_PATH);
     assertNotNull(regionBean);
-    
+
     RegionAttributesData regionData = regionBean.listRegionAttributes();
     assertNotNull(regionData);
-    
+
     assertTrue(regionData.getOffHeap());
   }
-  
+
   /**
    * Asserts that OffHeapMetrics match input parameters for a VM.
+   *
    * @param vm a virtual machine.
    * @param freeMemory total off-heap free memory in bytes.
    * @param allocatedMemory allocated (or used) off-heap memory in bytes.
    * @param objects number of objects stored in off-heap memory.
    * @param fragmentation the fragmentation percentage.
    */
-  protected void assertOffHeapMetricsOnVm(VM vm,final long freeMemory,final long allocatedMemory,final long objects,final int fragmentation) {
-    vm.invoke(new SerializableRunnable() {
-      @Override
-      public void run() {
-        assertOffHeapMetrics(freeMemory, allocatedMemory, objects, fragmentation);
-      }      
-    });
+  private void assertOffHeapMetricsOnVm(final VM vm,
+                                        final long freeMemory,
+                                        final long allocatedMemory,
+                                        final long objects,
+                                        final int fragmentation) {
+    vm.invoke(() -> assertOffHeapMetrics(freeMemory, allocatedMemory, objects, fragmentation));
   }
-  
+
   /**
    * Asserts that OffHeapMetrics match input parameters.
+   *
    * @param freeMemory total off-heap free memory in bytes.
    * @param allocatedMemory allocated (or used) off-heap memory in bytes.
    * @param objects number of objects stored in off-heap memory.
    * @param fragmentation the fragmentation percentage.
    */
-  protected void assertOffHeapMetrics(long freeMemory,long allocatedMemory,long objects, int fragmentation) {
-    ManagementService service = ManagementService.getExistingManagementService(getCache());    
+  private void assertOffHeapMetrics(final long freeMemory, final long allocatedMemory, final long objects, final int fragmentation) {
+    ManagementService service = ManagementService.getExistingManagementService(getCache());
     assertNotNull(service);
-    
+
     assertTrue(service.isManager());
 
-    MemberMXBean memberBean = service.getMemberMXBean();   
+    MemberMXBean memberBean = service.getMemberMXBean();
     assertNotNull(memberBean);
-    
-    assertEquals(freeMemory,memberBean.getOffHeapFreeMemory());
-    assertEquals(allocatedMemory,memberBean.getOffHeapUsedMemory());
-    assertEquals(objects,memberBean.getOffHeapObjects());
-    assertEquals(fragmentation,memberBean.getOffHeapFragmentation());
+
+    assertEquals(freeMemory, memberBean.getOffHeapFreeMemory());
+    assertEquals(allocatedMemory, memberBean.getOffHeapUsedMemory());
+    assertEquals(objects, memberBean.getOffHeapObjects());
+    assertEquals(fragmentation, memberBean.getOffHeapFragmentation());
   }
-  
+
   /**
    * Creates an off-heap region on a vm.
+   *
    * @param vm a virtual machine.
    * @param name a region name.
    * @param dataPolicy a data policy.
+   *
    * @return true if successful.
    */
-  protected boolean createOffHeapRegionOnVm(final VM vm,final String name,final DataPolicy dataPolicy) {
-    return (Boolean) vm.invoke(new SerializableCallable() {
-      @Override
-      public Object call() throws Exception {
-        return (null != createOffHeapRegion(name, dataPolicy));
-      }      
-    });
+  private boolean createOffHeapRegionOnVm(final VM vm, final String name, final DataPolicy dataPolicy) {
+    return vm.invoke(() -> null != createOffHeapRegion(name, dataPolicy));
   }
-  
+
   /**
    * Creates an off-heap region.
+   *
    * @param name a region name.
    * @param dataPolicy a data policy.
+   *
    * @return the newly created region.
    */
-  protected Region createOffHeapRegion(String name,DataPolicy dataPolicy) {
+  private Region createOffHeapRegion(final String name, final DataPolicy dataPolicy) {
     return getCache().createRegionFactory().setOffHeap(true).setDataPolicy(dataPolicy).create(name);
   }
 
   /**
    * Sets the distributed system properties for a vm.
+   *
    * @param vm a virtual machine.
    * @param management starts the ManagementService when true.
    * @param props distributed system properties.
    */
-  @SuppressWarnings("serial")
-  protected void setSystemPropertiesOnVm(VM vm,final boolean management,final Properties props) {
-    vm.invoke(new SerializableRunnable() {
-      @Override
-      public void run() {
-        setSystemProperties(management,props);
-      }      
-    });
+  private void setSystemPropertiesOnVm(final VM vm, final boolean management, final Properties props) {
+    vm.invoke(() -> setSystemProperties(management, props));
   }
 
   /**
    * Sets the distributed system properties.
+   *
    * @param management starts the ManagementService when true.
    * @param props distributed system properties.
    */
-  protected void setSystemProperties(boolean management,Properties props) {
+  private void setSystemProperties(final boolean management, final Properties props) {
     getSystem(props);
-    
-    if(management) {
+
+    if (management) {
       ManagementService service = ManagementService.getManagementService(getCache());
-      if(!service.isManager()) {
+      if (!service.isManager()) {
         service.startManager();
       }
     }
   }
-  
+
   /**
    * Performs a destroy operation on a vm.
+   *
    * @param vm a virtual machine.
    * @param key the region entry to destroy.
    * @param regionName a region name.
    */
-  protected void doDestroyOnVm(final VM vm,final Object key,final String regionName) {
-    vm.invoke(new SerializableRunnable() {
-      @Override
-      public void run() {
-        doDestroy(key,regionName);
-      }      
-    });
+  private void doDestroyOnVm(final VM vm, final Object key, final String regionName) {
+    vm.invoke(() -> doDestroy(key, regionName));
   }
-  
+
   /**
    * Performs a destroy operation.
+   *
    * @param key the region entry to destroy.
    * @param regionName a region name.
    */
-  protected void doDestroy(Object key, String regionName) {
-    Region region =  getCache().getRegion(regionName);
+  private void doDestroy(final Object key, final String regionName) {
+    Region region = getCache().getRegion(regionName);
     assertNotNull(region);
-    
+
     region.destroy(key);
   }
-  
+
   /**
    * Performs a put operation on a vm.
+   *
    * @param vm a virtual machine.
    * @param key region entry key.
    * @param value region entry value.
    * @param regionName a region name.
    */
-  protected void doPutOnVm(final VM vm,final Object key,final Object value,final String regionName, final boolean expectException) {
-    vm.invoke(new SerializableRunnable() {
-      @Override
-      public void run() {
-        doPut(key,value,regionName,expectException);
-      }      
-    });
+  private void doPutOnVm(final VM vm,
+                         final Object key,
+                         final Object value,
+                         final String regionName,
+                         final boolean expectException) {
+    vm.invoke(() -> doPut(key, value, regionName, expectException));
   }
-  
+
   /**
    * Performs a put operation.
+   *
    * @param key region entry key.
    * @param value region entry value.
    * @param regionName a region name.
    */
-  protected void doPut(Object key, Object value, String regionName,boolean expectException) {
-    Region region =  getCache().getRegion(regionName);
+  private void doPut(final Object key, final Object value, final String regionName, final boolean expectException) {
+    Region region = getCache().getRegion(regionName);
     assertNotNull(region);
-    
+
     try {
       region.put(key, value);
+      if (expectException) {
+        fail("Expected OutOfOffHeapMemoryException");
+      }
     } catch (OutOfOffHeapMemoryException e) {
-      if(!expectException)
+      if (!expectException) {
         throw e;
+      }
     }
   }
 
   /**
    * Adds an off-heap notification listener to the MemberMXBean for a vm.
+   *
    * @param vm a virtual machine.
    */
-  protected void addOffHeapNotificationListenerOnVm(VM vm) {
-    vm.invoke(new SerializableRunnable() {
-      @Override
-      public void run() {
-        addOffHeapNotificationListener();
-      }      
-    });
+  private void addOffHeapNotificationListenerOnVm(final VM vm) {
+    vm.invoke(() -> addOffHeapNotificationListener());
   }
-  
+
   /**
    * Adds an off-heap notification listener to the MemberMXBean.
    */
-  protected void addOffHeapNotificationListener() {
-    ManagementService service = ManagementService.getExistingManagementService(getCache());    
+  private void addOffHeapNotificationListener() {
+    ManagementService service = ManagementService.getExistingManagementService(getCache());
     assertNotNull(service);
-    
+
     assertTrue(service.isManager());
 
-    MemberMXBean memberBean = service.getMemberMXBean();   
+    MemberMXBean memberBean = service.getMemberMXBean();
     assertNotNull(memberBean);
-    
+
     assertTrue(memberBean instanceof MemberMBean);
-    
-    ((MemberMBean) memberBean).addNotificationListener(notificationListener, null, null);    
+
+    ((MemberMBean) memberBean).addNotificationListener(notificationListener, null, null);
   }
-    
+
   /**
    * Creates and adds a generic GaugeMonitor for an attribute of the MemberMXBean on a VM.
+   *
    * @param vm a virtual machine.
    * @param attribute the attribute to monitor.
    * @param highThreshold the high threshold trigger.
    * @param lowThreshold the low threshold trigger.
    */
-  protected void setupOffHeapMonitorOnVm(VM vm,final String attribute,final long highThreshold,final long lowThreshold) {
-    vm.invoke(new SerializableRunnable() {
-      @Override
-      public void run() {
-        setupOffHeapMonitor(attribute,highThreshold,lowThreshold);
-      }      
-    });
+  private void setupOffHeapMonitorOnVm(final VM vm,
+                                       final String attribute,
+                                       final long highThreshold,
+                                       final long lowThreshold) {
+    vm.invoke(() -> setupOffHeapMonitor(attribute, highThreshold, lowThreshold));
   }
-  
+
   /**
    * Creates and adds a generic GaugeMonitor for an attribute of the MemberMXBean.
+   *
    * @param attribute the attribute to monitor.
    * @param highThreshold the high threshold trigger.
    * @param lowThreshold the low threshold trigger.
    */
-  protected void setupOffHeapMonitor(String attribute,long highThreshold,long lowThreshold) {
+  private void setupOffHeapMonitor(final String attribute, final long highThreshold, final long lowThreshold) throws JMException {
     ObjectName memberMBeanObjectName = MBeanJMXAdapter.getMemberMBeanName(InternalDistributedSystem.getConnectedInstance().getDistributedMember());
     assertNotNull(memberMBeanObjectName);
-    
-    try {
-      ObjectName offHeapMonitorName = new ObjectName("monitors:type=Gauge,attr=" + attribute);      
-      mbeanServer.createMBean("javax.management.monitor.GaugeMonitor", offHeapMonitorName);
-      
-      AttributeList al = new AttributeList();
-      al.add(new Attribute("ObservedObject", memberMBeanObjectName));
-      al.add(new Attribute("GranularityPeriod", 500));
-      al.add(new Attribute("ObservedAttribute", attribute));
-      al.add(new Attribute("Notify", true));
-      al.add(new Attribute("NotifyHigh", true));
-      al.add(new Attribute("NotifyLow", true));
-      al.add(new Attribute("HighTheshold",highThreshold));
-      al.add(new Attribute("LowThreshold",lowThreshold));
-      
-      mbeanServer.setAttributes(offHeapMonitorName, al);      
-      mbeanServer.addNotificationListener(offHeapMonitorName, notificationListener, null, null);
-      mbeanServer.invoke(offHeapMonitorName, "start", new Object[]{}, new String[]{});
-    } catch (Exception e) {
-      fail(e.getMessage());
-    }        
+
+    ObjectName offHeapMonitorName = new ObjectName("monitors:type=Gauge,attr=" + attribute);
+    mbeanServer.createMBean("javax.management.monitor.GaugeMonitor", offHeapMonitorName);
+
+    AttributeList al = new AttributeList();
+    al.add(new Attribute("ObservedObject", memberMBeanObjectName));
+    al.add(new Attribute("GranularityPeriod", 500));
+    al.add(new Attribute("ObservedAttribute", attribute));
+    al.add(new Attribute("Notify", true));
+    al.add(new Attribute("NotifyHigh", true));
+    al.add(new Attribute("NotifyLow", true));
+    al.add(new Attribute("HighTheshold", highThreshold));
+    al.add(new Attribute("LowThreshold", lowThreshold));
+
+    mbeanServer.setAttributes(offHeapMonitorName, al);
+    mbeanServer.addNotificationListener(offHeapMonitorName, notificationListener, null, null);
+    mbeanServer.invoke(offHeapMonitorName, "start", new Object[] {}, new String[] {});
   }
 
   /**
    * Waits to receive MBean notifications.
+   *
    * @param vm a virtual machine.
    * @param wait how long to wait for in millis.
    * @param interval the polling interval to check for notifications.
    * @param throwOnTimeout throws an exception on timeout if true.
    */
-  protected void waitForNotificationListenerOnVm(VM vm, final long wait,final long interval,final boolean throwOnTimeout) {
-    vm.invoke(new SerializableRunnable() {
-      @Override
-      public void run() {
-        Wait.waitForCriterion(new WaitCriterion() {          
-          @Override
-          public boolean done() {
-            return (notificationListener.getNotificationSize() > 0);
-          }
-          
-          @Override
-          public String description() {
-            return "Awaiting Notification Listener";
-          }
-        }, wait, interval, throwOnTimeout);
-      }      
-    });
+  private void waitForNotificationListenerOnVm(final VM vm,
+                                               final long wait,
+                                               final long interval,
+                                               final boolean throwOnTimeout) {
+    vm.invoke(() -> await("Awaiting Notification Listener").atMost(wait, TimeUnit.MILLISECONDS).until(() -> assertTrue(notificationListener.getNotificationSize() > 0)));
   }
-  
+
   /**
    * Clears received notifications.
+   *
    * @param vm a virtual machine.
    */
-  protected void clearNotificationListenerOnVm(VM vm) {
-    vm.invoke(new SerializableRunnable() {
-      @Override
-      public void run() {
-        notificationListener.clear();
-      }      
-    });
-  }
-}
-/**
- * Collects MBean Notifications.
- */
-class OffHeapNotificationListener implements NotificationListener {
-  List<Notification> notificationList = Collections.synchronizedList(new ArrayList<Notification>());
-  
-  @Override
-  public void handleNotification(Notification notification, Object handback) {
-    this.notificationList.add(notification);
-  }    
-  
-  public void clear() {
-    this.notificationList.clear();
+  private void clearNotificationListenerOnVm(final VM vm) {
+    vm.invoke(() -> notificationListener.clear());
   }
-  
-  public int getNotificationSize() {
-    return this.notificationList.size();
+
+  /**
+   * Collects MBean Notifications.
+   */
+  private static class OffHeapNotificationListener implements NotificationListener {
+
+    List<Notification> notificationList = Collections.synchronizedList(new ArrayList<Notification>());
+
+    @Override
+    public void handleNotification(final Notification notification, final Object handback) {
+      this.notificationList.add(notification);
+    }
+
+    public void clear() {
+      this.notificationList.clear();
+    }
+
+    public int getNotificationSize() {
+      return this.notificationList.size();
+    }
   }
 }

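Much of the OffHeapManagementDUnitTest change above is a mechanical conversion of anonymous SerializableRunnable and SerializableCallable classes into lambdas passed to VM.invoke(). The sketch below illustrates that pattern in isolation; it assumes only the dunit Host/VM API already used in the diff, and the property name and counter it reads are illustrative stand-ins rather than code from the test.

    import org.apache.geode.test.dunit.Host;
    import org.apache.geode.test.dunit.VM;

    // Hypothetical sketch of the lambda-based invoke pattern; not part of the commit.
    public class LambdaInvokeSketch {

      void runnableAndCallableStyles() {
        VM vm = Host.getHost(0).getVM(0);

        // Runnable style: the block-bodied lambda replaces an anonymous
        // SerializableRunnable with an overridden run() method.
        vm.invoke(() -> {
          System.setProperty("example.flag", "true");
        });

        // Callable style: invoke() returns the lambda's result directly,
        // so the old "(Integer) vm.invoke(new SerializableCallable() ...)"
        // cast goes away.
        int count = vm.invoke(() -> Integer.getInteger("example.count", 0));
      }
    }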

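The same commit also drops the WaitCriterion/Wait.waitForCriterion polling idiom in favor of Awaitility, as in waitForNotificationListenerOnVm above (the ClientHealthStatsDUnitTest rewrite later in this digest makes the same switch). A self-contained sketch of that idiom follows, assuming the com.jayway.awaitility artifact the diffs import; the AtomicInteger is only a stand-in for the notification listener.

    import static com.jayway.awaitility.Awaitility.await;
    import static org.junit.Assert.assertTrue;

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    // Illustrative only; mirrors the timeout and poll-interval values used in the test.
    public class AwaitilitySketch {

      public static void main(String[] args) {
        AtomicInteger notifications = new AtomicInteger();

        // Something on another thread eventually delivers a notification.
        new Thread(notifications::incrementAndGet).start();

        // Equivalent of Wait.waitForCriterion(criterion, 5000, 500, true):
        // re-run the assertion every 500 ms and fail if it still throws after 5 seconds.
        await("Awaiting Notification Listener")
            .atMost(5000, TimeUnit.MILLISECONDS)
            .pollInterval(500, TimeUnit.MILLISECONDS)
            .until(() -> assertTrue(notifications.get() > 0));
      }
    }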

[09/50] [abbrv] incubator-geode git commit: GEODE-1952: Change Project G to Apache G; other edits

Posted by kl...@apache.org.
GEODE-1952: Change Project G to Apache G; other edits


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/131e99ee
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/131e99ee
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/131e99ee

Branch: refs/heads/feature/GEODE-1930
Commit: 131e99eeb7e2971f0aa51ffdf23624138cca5652
Parents: d8afffb
Author: Joey McAllister <jm...@pivotal.io>
Authored: Wed Oct 12 17:56:09 2016 -0700
Committer: Karen Miller <km...@pivotal.io>
Committed: Fri Oct 14 14:51:03 2016 -0700

----------------------------------------------------------------------
 geode-docs/CONTRIBUTE.md | 48 +++++++++----------------------------------
 geode-docs/README.md     |  2 +-
 2 files changed, 11 insertions(+), 39 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/131e99ee/geode-docs/CONTRIBUTE.md
----------------------------------------------------------------------
diff --git a/geode-docs/CONTRIBUTE.md b/geode-docs/CONTRIBUTE.md
index 7345cb3..979474e 100644
--- a/geode-docs/CONTRIBUTE.md
+++ b/geode-docs/CONTRIBUTE.md
@@ -1,54 +1,26 @@
-# Contributing to the Project Geode Documentation
+# Contributing to the Apache Geode Documentation
 
-Project Geode welcomes your contributions to the community's documentation efforts. You can participate by writing new content, reviewing and editing existing content, or fixing bugs. This document covers the following topics:
+Apache Geode welcomes your contributions to the community's documentation efforts. You can participate by writing new content, reviewing and editing existing content, or fixing bugs. This document covers the following topics:
 
-- [How to Contribute](#contribute)
-- [Document Source Files and Tools](#source_tools)
-- [Writing Guidelines](#guidelines)
+- [Working with Markdown Files](#working-with-markdown-files)
+- [Working with Images and Graphics](#working-with-images-and-graphics)
+- [Writing Guidelines](#writing-guidelines)
 
-[]()
-
-## How to Contribute
-
-We use the fork-and-pull collaboration method on GitHub:
-
-1. In your GitHub account, fork the `apache/incubator-geode` repository.
-2. Create a local clone of your fork.
-3. Make changes in the `geode-docs` directory and commit them in your fork.
-4. In the `apache/incubator-geode` repository, create a pull request.
-
-See [Using Pull Requests](https://help.github.com/articles/using-pull-requests/) on GitHub for more about the fork-and-pull collaboration method.
-
-[]()
-
-## Document Source Files and Tools
-
-Project Geode documentation source files are written in markdown. Image files include .gif and .png graphics and editable image files in the open-source SVG format.
-
-- [Working with Markdown Files](#markdown)
-- [Working with Images and Graphics](#images)
-
-[]()
-
-### Working with Markdown Files
+## Working with Markdown Files
 
 You can edit markdown files in any text editor. For more, read [Daring Fireball's Markdown Syntax page](https://daringfireball.net/projects/markdown/syntax).
 
-[]()
-
-### Working with Images and Graphics
+## Working with Images and Graphics
 
-Image files in .gif or .png format are in the `docs/images` directory in the Project Geode docs repo. Images in .svg format are in the `docs/images_svg` directory.
+Image files in .gif or .png format are in the `docs/images` directory in the Apache Geode docs repo. Images in .svg format are in the `docs/images_svg` directory.
 
-Most of the Project Geode image files have been converted to the open source SVG format. You can insert SVG images directly into an XML topic and modify images using a SVG editor.
+Most of the Apache Geode image files have been converted to the open source SVG format. You can insert SVG images directly into an XML topic and modify images using an SVG editor.
 
 The Wikipedia page [Comparison of Vector Graphics Editors](http://en.wikipedia.org/wiki/Comparison_of_vector_graphics_editors) provides a list and comparison of commercial and free vector graphics editors. Note, however, that not all of these programs support the SVG format.
 
-[]()
-
 ## Writing Guidelines
 
-The most important advice we can provide for working with the Project Geode docs is to spend some time becoming familiar with the existing source files and the structure of the project directory. In particular, note the following conventions and tips:
+The most important advice we can provide for working with the Apache Geode docs is to spend some time becoming familiar with the existing source files and the structure of the project directory. In particular, note the following conventions and tips:
 
 - Top-level subdirectories organize topics into "books": basic_config, configuring, developing, etc.
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/131e99ee/geode-docs/README.md
----------------------------------------------------------------------
diff --git a/geode-docs/README.md b/geode-docs/README.md
index 9f83260..9fb5870 100644
--- a/geode-docs/README.md
+++ b/geode-docs/README.md
@@ -1,4 +1,4 @@
-# Project Geode End-User Documentation
+# Apache Geode End-User Documentation
 
 Apache Geode provides the full source for end-user documentation in markdown format. The latest check-ins to `incubator-geode/geode-docs` are regularly built and published to http://geode.incubator.apache.org/docs/. Users can build the markdown into an HTML user guide using [Bookbinder](https://github.com/pivotal-cf/bookbinder) and the instructions below.
 


[49/50] [abbrv] incubator-geode git commit: Convert from ManagementTestCase to ManagementTestRule

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/management/ClientHealthStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/ClientHealthStatsDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/ClientHealthStatsDUnitTest.java
index 74e0850..4ccb716 100644
--- a/geode-core/src/test/java/org/apache/geode/management/ClientHealthStatsDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/ClientHealthStatsDUnitTest.java
@@ -16,13 +16,25 @@
  */
 package org.apache.geode.management;
 
+import static java.util.concurrent.TimeUnit.*;
 import static org.apache.geode.distributed.ConfigurationProperties.*;
-import static org.apache.geode.test.dunit.Assert.*;
+import static org.apache.geode.test.dunit.Host.*;
+import static org.apache.geode.test.dunit.IgnoredException.*;
+import static org.apache.geode.test.dunit.Invoke.*;
+import static org.apache.geode.test.dunit.NetworkUtils.*;
+import static org.assertj.core.api.Assertions.*;
 
+import java.io.Serializable;
 import java.util.Collection;
-import java.util.Iterator;
 import java.util.Properties;
 
+import javax.management.ObjectName;
+
+import com.jayway.awaitility.Awaitility;
+import com.jayway.awaitility.core.ConditionFactory;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -31,22 +43,17 @@ import org.apache.geode.cache.EntryEvent;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionFactory;
 import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache.client.ClientCache;
 import org.apache.geode.cache.client.ClientCacheFactory;
 import org.apache.geode.cache.client.ClientRegionFactory;
 import org.apache.geode.cache.client.ClientRegionShortcut;
 import org.apache.geode.cache.server.CacheServer;
 import org.apache.geode.cache.util.CacheListenerAdapter;
 import org.apache.geode.distributed.DistributedMember;
-import org.apache.geode.internal.cache.GemFireCacheImpl;
 import org.apache.geode.internal.cache.tier.sockets.CacheClientNotifier;
 import org.apache.geode.internal.cache.tier.sockets.CacheClientProxy;
-import org.apache.geode.internal.i18n.LocalizedStrings;
-import org.apache.geode.test.dunit.Host;
-import org.apache.geode.test.dunit.IgnoredException;
+import org.apache.geode.management.internal.SystemManagementService;
 import org.apache.geode.test.dunit.VM;
-import org.apache.geode.test.dunit.Wait;
-import org.apache.geode.test.dunit.WaitCriterion;
-import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
 import org.apache.geode.test.junit.categories.DistributedTest;
 import org.apache.geode.test.junit.categories.FlakyTest;
 
@@ -54,391 +61,343 @@ import org.apache.geode.test.junit.categories.FlakyTest;
  * Client health stats check
  */
 @Category(DistributedTest.class)
-@SuppressWarnings("serial")
-public class ClientHealthStatsDUnitTest extends JUnit4DistributedTestCase {
-
-  private static final String k1 = "k1";
-  private static final String k2 = "k2";
-  private static final String client_k1 = "client-k1";
-  private static final String client_k2 = "client-k2";
-
-  /** name of the test region */
-  private static final String REGION_NAME = "ClientHealthStatsDUnitTest_Region";
-
-  private static VM client = null;
-  private static VM client2 = null;
-  private static VM managingNode = null;
-
-  private static ManagementTestBase helper = new ManagementTestBase(){};
-  
-  private static int numOfCreates = 0;
-  private static int numOfUpdates = 0;
-  private static int numOfInvalidates = 0;
-  private static boolean lastKeyReceived = false;
-  
-  private static GemFireCacheImpl cache = null;
-
-  private VM server = null;
-
-  @Override
-  public final void postSetUp() throws Exception {
-    disconnectAllFromDS();
-
-    final Host host = Host.getHost(0);
-    managingNode = host.getVM(0);
-    server = host.getVM(1);
-    client = host.getVM(2);
-    client2 = host.getVM(3);
-
-    IgnoredException.addIgnoredException("Connection reset");
-  }
+@SuppressWarnings({ "serial", "unused" })
+public class ClientHealthStatsDUnitTest implements Serializable {
+
+  private static final int NUMBER_PUTS = 100;
+
+  private static final String KEY1 = "KEY1";
+  private static final String KEY2 = "KEY2";
+  private static final String VALUE1 = "VALUE1";
+  private static final String VALUE2 = "VALUE2";
+
+  private static final String REGION_NAME = ClientHealthStatsDUnitTest.class.getSimpleName() + "_Region";
+
+  // client1VM and client2VM VM fields
+  private static ClientCache clientCache;
+
+  // TODO: assert following values in each client VM
+  private static int numOfCreates;
+  private static int numOfUpdates;
+  private static int numOfInvalidates;
+  private static boolean lastKeyReceived;
+
+  private VM managerVM;
+  private VM serverVM;
+  private VM client1VM;
+  private VM client2VM;
+
+  private String hostName;
+
+  @Rule
+  public ManagementTestRule managementTestRule = ManagementTestRule.builder().build();
 
-  @Override
-  public final void preTearDown() throws Exception {
-    reset();
-    helper.closeCache(managingNode);
-    helper.closeCache(client);
-    helper.closeCache(client2);
-    helper.closeCache(server);
+  @Before
+  public void before() throws Exception {
+    this.hostName = getServerHostName(getHost(0));
 
-    disconnectAllFromDS();
+    this.managerVM = getHost(0).getVM(0);
+    this.serverVM = getHost(0).getVM(1);
+    this.client1VM = getHost(0).getVM(2);
+    this.client2VM = getHost(0).getVM(3);
+
+    addIgnoredException("Connection reset");
   }
 
-  private static void reset() throws Exception {
-    lastKeyReceived = false;
-    numOfCreates = 0;
-    numOfUpdates = 0;
-    numOfInvalidates = 0;
+  @After
+  public void after() throws Exception {
+    invokeInEveryVM(() -> {
+      lastKeyReceived = false;
+      numOfCreates = 0;
+      numOfUpdates = 0;
+      numOfInvalidates = 0;
+      clientCache = null;
+    });
   }
 
   @Test
   public void testClientHealthStats_SubscriptionEnabled() throws Exception {
-    helper.createManagementCache(managingNode);
-    helper.startManagingNode(managingNode);
+    this.managementTestRule.createManager(this.managerVM, false);
+    this.managementTestRule.startManager(this.managerVM);
+
+    int port = this.serverVM.invoke(() -> createServerCache());
 
-    int port = (Integer) server.invoke(() -> ClientHealthStatsDUnitTest.createServerCache());
+    this.client1VM.invoke(() -> createClientCache(this.hostName, port, 1, true));
+    this.client2VM.invoke(() -> createClientCache(this.hostName, port, 2, true));
 
-    DistributedMember serverMember = helper.getMember(server);
+    this.client1VM.invoke(() -> put());
+    this.client2VM.invoke(() -> put());
 
-    client.invoke(() -> ClientHealthStatsDUnitTest.createClientCache(server.getHost(), port, 1, true, false));
-    
-    client2.invoke(() -> ClientHealthStatsDUnitTest.createClientCache(server.getHost(), port, 2, true, false));
+    DistributedMember serverMember = this.managementTestRule.getDistributedMember(this.serverVM);
+    this.managerVM.invoke(() -> verifyClientStats(serverMember, port, 2));
 
-    client.invoke(() -> ClientHealthStatsDUnitTest.put());
-    client2.invoke(() -> ClientHealthStatsDUnitTest.put());
-    
-    managingNode.invoke(() -> ClientHealthStatsDUnitTest.verifyClientStats(serverMember, port, 2));
-    helper.stopManagingNode(managingNode);
+    this.managementTestRule.stopManager(this.managerVM);
   }
-  
+
   @Test
   public void testClientHealthStats_SubscriptionDisabled() throws Exception {
-    helper.createManagementCache(managingNode);
-    helper.startManagingNode(managingNode);
+    this.managementTestRule.createManager(this.managerVM, false);
+    this.managementTestRule.startManager(this.managerVM);
 
-    int port = (Integer) server.invoke(() -> ClientHealthStatsDUnitTest.createServerCache());
+    int port = this.serverVM.invoke(() -> createServerCache());
 
-    DistributedMember serverMember = helper.getMember(server);
+    this.client1VM.invoke(() -> createClientCache(this.hostName, port, 1, false));
+    this.client2VM.invoke(() -> createClientCache(this.hostName, port, 2, false));
 
-    client.invoke(() -> ClientHealthStatsDUnitTest.createClientCache(server.getHost(), port, 1, false, false));
-    
-    client2.invoke(() -> ClientHealthStatsDUnitTest.createClientCache(server.getHost(), port, 2, false, false));
+    this.client1VM.invoke(() -> put());
+    this.client2VM.invoke(() -> put());
 
-    client.invoke(() -> ClientHealthStatsDUnitTest.put());
-    client2.invoke(() -> ClientHealthStatsDUnitTest.put());
-    
-    managingNode.invoke(() -> ClientHealthStatsDUnitTest.verifyClientStats(serverMember, port, 0));
-    helper.stopManagingNode(managingNode);
+    DistributedMember serverMember = this.managementTestRule.getDistributedMember(this.serverVM);
+    this.managerVM.invoke(() -> verifyClientStats(serverMember, port, 0));
+    this.managementTestRule.stopManager(this.managerVM);
   }
-  
+
   @Test
   public void testClientHealthStats_DurableClient() throws Exception {
-    helper.createManagementCache(managingNode);
-    helper.startManagingNode(managingNode);
-
-    int port = (Integer) server.invoke(() -> ClientHealthStatsDUnitTest.createServerCache());
-
-    DistributedMember serverMember = helper.getMember(server);
-
-    client.invoke(() -> ClientHealthStatsDUnitTest.createClientCache(server.getHost(), port, 1, true, true));
-    
-    client2.invoke(() -> ClientHealthStatsDUnitTest.createClientCache(server.getHost(), port, 2, true, true));
-
-    client.invoke(() -> ClientHealthStatsDUnitTest.put());
-    client2.invoke(() -> ClientHealthStatsDUnitTest.put());
-    
-    client.invoke(() -> ClientHealthStatsDUnitTest.closeClientCache());
-    
-    client2.invoke(() -> ClientHealthStatsDUnitTest.closeClientCache());
-    
-    managingNode.invoke(() -> ClientHealthStatsDUnitTest.verifyClientStats(serverMember, port, 2));
-    helper.stopManagingNode(managingNode);
+    this.managementTestRule.createManager(this.managerVM, false);
+    this.managementTestRule.startManager(this.managerVM);
+
+    int port = this.serverVM.invoke(() -> createServerCache());
+
+    this.client1VM.invoke(() -> createClientCache(this.hostName, port, 1, true));
+    this.client2VM.invoke(() -> createClientCache(this.hostName, port, 2, true));
+
+    this.client1VM.invoke(() -> put());
+    this.client2VM.invoke(() -> put());
+
+    this.client1VM.invoke(() -> clientCache.close(true));
+    this.client2VM.invoke(() -> clientCache.close(true));
+
+    DistributedMember serverMember = this.managementTestRule.getDistributedMember(this.serverVM);
+    this.managerVM.invoke(() -> verifyClientStats(serverMember, port, 2));
+    this.managementTestRule.stopManager(this.managerVM);
   }
-  
-  @Category(FlakyTest.class) // GEODE-337
+
   @Test
   public void testStatsMatchWithSize() throws Exception {
-    // start a server
-    int port = (Integer) server.invoke(() -> ClientHealthStatsDUnitTest.createServerCache());
-    // create durable client, with durable RI
-    client.invoke(() -> ClientHealthStatsDUnitTest.createClientCache(server.getHost(), port, 1, true, false));
-    // do puts on server from three different threads, pause after 500 puts each.
-    server.invoke(() -> ClientHealthStatsDUnitTest.doPuts());
-    // close durable client
-    client.invoke(() -> ClientHealthStatsDUnitTest.closeClientCache());
-    
-    server.invoke("verifyProxyHasBeenPaused", () -> verifyProxyHasBeenPaused() );
-    // resume puts on server, add another 100.
-    server.invokeAsync(() -> ClientHealthStatsDUnitTest.resumePuts());
-    // start durable client
-    client.invoke(() -> ClientHealthStatsDUnitTest.createClientCache(server.getHost(), port, 1, true, false));
-    // wait for full queue dispatch
-    client.invoke(() -> ClientHealthStatsDUnitTest.waitForLastKey());
-    // verify the stats
-    server.invoke(() -> ClientHealthStatsDUnitTest.verifyStats(port));
-  }
-  
-  private static void verifyProxyHasBeenPaused() {	  
-	  
-	  WaitCriterion criterion = new WaitCriterion() {
-      
-      @Override
-      public boolean done() {
-        CacheClientNotifier ccn = CacheClientNotifier.getInstance();
-        Collection<CacheClientProxy> ccProxies = ccn.getClientProxies();
-        
-        Iterator<CacheClientProxy> itr =  ccProxies.iterator();
-        
-        while(itr.hasNext()) {
-          CacheClientProxy ccp = itr.next(); 
-          System.out.println("proxy status " + ccp.getState());
-          if(ccp.isPaused())
-            return true;
-        }
-        return false;
-      }
-      
-      @Override
-      public String description() {
-        return "Proxy has not paused yet";
-      }
-    };
-    
-    Wait.waitForCriterion(criterion, 15 * 1000, 200, true);	  
+    int port = this.serverVM.invoke(() -> createServerCache()); // start a serverVM
+
+    this.client1VM.invoke(() -> createClientCache(this.hostName, port, 1, true)); // create durable client1VM, with durable RI
+
+    this.serverVM.invoke(() -> doPuts()); // do puts on serverVM from three different threads, pause after 500 puts each.
+
+    this.client1VM.invoke(() -> clientCache.close(true)); // close durable client1VM
+
+    this.serverVM.invoke(() -> await().atMost(2, MINUTES).until(() -> cacheClientProxyHasBeenPause()));
+
+    this.serverVM.invoke(() -> resumePuts()); // resume puts on serverVM, add another 100.
+
+    this.client1VM.invoke(() -> createClientCache(this.hostName, port, 1, true)); // start durable client1VM
+
+    this.client1VM.invoke(() -> await().atMost(1, MINUTES).until(() -> lastKeyReceived)); // wait for full queue dispatch
+
+    this.serverVM.invoke(() -> verifyStats(port)); // verify the stats
   }
 
-  private static int createServerCache() throws Exception {
-    Cache cache = helper.createCache(false);
+  /**
+   * Invoked in serverVM
+   */
+  private boolean cacheClientProxyHasBeenPause() {
+    CacheClientNotifier clientNotifier = CacheClientNotifier.getInstance(); // TODO
+    //CacheClientNotifier clientNotifier = ((CacheServerImpl)this.managementTestRule.getCache().getCacheServers().get(0)).getAcceptor().getCacheClientNotifier();
 
-    RegionFactory<String, String> rf = cache.createRegionFactory(RegionShortcut.REPLICATE);
-    rf.setConcurrencyChecksEnabled(false);
-    rf.create(REGION_NAME);
+    Collection<CacheClientProxy> clientProxies = clientNotifier.getClientProxies();
 
-    CacheServer server1 = cache.addCacheServer();
-    server1.setPort(0);
-    server1.start();
-    return server1.getPort();
+    for (CacheClientProxy clientProxy: clientProxies) {
+      if (clientProxy.isPaused()) {
+        return true;
+      }
+    }
+    return false;
   }
 
-  private static void closeClientCache() throws Exception {
-    cache.close(true);
+  /**
+   * Invoked in serverVM
+   */
+  private int createServerCache() throws Exception {
+    Cache cache = this.managementTestRule.getCache();
+
+    RegionFactory<String, String> regionFactory = cache.createRegionFactory(RegionShortcut.REPLICATE);
+    regionFactory.setConcurrencyChecksEnabled(false);
+    regionFactory.create(REGION_NAME);
+
+    CacheServer cacheServer = cache.addCacheServer();
+    cacheServer.setPort(0);
+    cacheServer.start();
+    return cacheServer.getPort();
   }
 
-  private static void createClientCache(Host host, Integer port, int clientNum, boolean subscriptionEnabled, boolean durable) throws Exception {
+  /**
+   * Invoked in client1VM and client2VM
+   */
+  private void createClientCache(final String hostName,
+                                 final Integer port,
+                                 final int clientNum,
+                                 final boolean subscriptionEnabled) throws Exception {
     Properties props = new Properties();
-    props.setProperty(DURABLE_CLIENT_ID, "durable-"+clientNum);
-    props.setProperty(DURABLE_CLIENT_TIMEOUT, "300000");
-    props.setProperty(LOG_LEVEL, "info");
-    props.setProperty(STATISTIC_ARCHIVE_FILE, getTestMethodName() + "_client_" + clientNum + ".gfs");
     props.setProperty(STATISTIC_SAMPLING_ENABLED, "true");
 
-    ClientCacheFactory ccf = new ClientCacheFactory(props);
-    if(subscriptionEnabled){
-      ccf.setPoolSubscriptionEnabled(true);
-      ccf.setPoolSubscriptionAckInterval(50);
-      ccf.setPoolSubscriptionRedundancy(0);
-    }
-    
-    if(durable){
-      ccf.set(DURABLE_CLIENT_ID, "DurableClientId_" + clientNum);
-      ccf.set(DURABLE_CLIENT_TIMEOUT, "" + 300);
+    ClientCacheFactory cacheFactory = new ClientCacheFactory(props);
+    if (subscriptionEnabled) {
+      cacheFactory.setPoolSubscriptionEnabled(true);
+      cacheFactory.setPoolSubscriptionAckInterval(50);
+      cacheFactory.setPoolSubscriptionRedundancy(0);
     }
 
-    ccf.addPoolServer(host.getHostName(), port);
-    cache = (GemFireCacheImpl) ccf.create();
+    cacheFactory.set(DURABLE_CLIENT_ID, "DurableClientId_" + clientNum);
+    cacheFactory.set(DURABLE_CLIENT_TIMEOUT, "" + 30000);
+
+    cacheFactory.addPoolServer(hostName, port);
+    clientCache = cacheFactory.create();
 
-    ClientRegionFactory<String, String> crf = cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY);
-    crf.setConcurrencyChecksEnabled(false);
+    ClientRegionFactory<String, String> regionFactory = clientCache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY);
+    regionFactory.setConcurrencyChecksEnabled(false);
 
-    crf.addCacheListener(new CacheListenerAdapter<String, String>() {
-      public void afterInvalidate(EntryEvent<String, String> event) {
-        cache.getLoggerI18n().fine(
-            "Invalidate Event: " + event.getKey() + ", " + event.getNewValue());
+    regionFactory.addCacheListener(new CacheListenerAdapter<String, String>() {
+      @Override
+      public void afterInvalidate(final EntryEvent<String, String> event) {
         numOfInvalidates++;
       }
-      public void afterCreate(EntryEvent<String, String> event) {
-        if (((String) event.getKey()).equals("last_key")) {
+
+      @Override
+      public void afterCreate(final EntryEvent<String, String> event) {
+        if ("last_key".equals(event.getKey())) {
           lastKeyReceived = true;
         }
-        cache.getLoggerI18n().fine(
-            "Create Event: " + event.getKey() + ", " + event.getNewValue());
         numOfCreates++;
       }
-      public void afterUpdate(EntryEvent<String, String> event) {
-        cache.getLoggerI18n().fine(
-            "Update Event: " + event.getKey() + ", " + event.getNewValue());
+
+      @Override
+      public void afterUpdate(final EntryEvent<String, String> event) {
         numOfUpdates++;
       }
     });
 
-    Region<String, String> r = crf.create(REGION_NAME);
-    if(subscriptionEnabled){
-      r.registerInterest("ALL_KEYS", true);
-      cache.readyForEvents();
+    Region<String, String> region = regionFactory.create(REGION_NAME);
+    if (subscriptionEnabled) {
+      region.registerInterest("ALL_KEYS", true);
+      clientCache.readyForEvents();
     }
   }
 
-  private static void doPuts() throws Exception {
-    Cache cache = GemFireCacheImpl.getInstance();
-    final Region<String, String> r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
-    Thread t1 = new Thread(new Runnable() {
-      public void run() {
-        for (int i = 0; i < 500; i++) {
-          r.put("T1_KEY_"+i, "VALUE_"+i);
-        }
+  /**
+   * Invoked in serverVM
+   */
+  private void doPuts() throws Exception {
+    Cache cache = this.managementTestRule.getCache();
+    Region<String, String> region = cache.getRegion(Region.SEPARATOR + REGION_NAME);
+
+    Thread thread1 = new Thread(() -> {
+      for (int i = 0; i < NUMBER_PUTS; i++) {
+        region.put("T1_KEY_" + i, "VALUE_" + i);
       }
     });
-    Thread t2 = new Thread(new Runnable() {
-      public void run() {
-        for (int i = 0; i < 500; i++) {
-          r.put("T2_KEY_"+i, "VALUE_"+i);
-        }
+    Thread thread2 = new Thread(() -> {
+      for (int i = 0; i < NUMBER_PUTS; i++) {
+        region.put("T2_KEY_" + i, "VALUE_" + i);
       }
     });
-    Thread t3 = new Thread(new Runnable() {
-      public void run() {
-        for (int i = 0; i < 500; i++) {
-          r.put("T3_KEY_"+i, "VALUE_"+i);
-        }
+    Thread thread3 = new Thread(() -> {
+      for (int i = 0; i < NUMBER_PUTS; i++) {
+        region.put("T3_KEY_" + i, "VALUE_" + i);
       }
     });
 
-    t1.start();
-    t2.start();
-    t3.start();
+    thread1.start();
+    thread2.start();
+    thread3.start();
 
-    t1.join();
-    t2.join();
-    t3.join();
+    thread1.join();
+    thread2.join();
+    thread3.join();
   }
 
-  private static void resumePuts() {
-    Cache cache = GemFireCacheImpl.getInstance();
-    Region<String, String> r = cache.getRegion(Region.SEPARATOR + REGION_NAME);
-    for (int i = 0; i < 100; i++) {
-      r.put("NEWKEY_"+i, "NEWVALUE_"+i);
+  /**
+   * Invoked in serverVM
+   */
+  private void resumePuts() {
+    Cache cache = this.managementTestRule.getCache();
+    Region<String, String> region = cache.getRegion(Region.SEPARATOR + REGION_NAME);
+
+    for (int i = 0; i < NUMBER_PUTS; i++) {
+      region.put("NEWKEY_" + i, "NEWVALUE_" + i);
     }
-    r.put("last_key", "last_value");
+    region.put("last_key", "last_value");
   }
 
-  private static void waitForLastKey() {
-    WaitCriterion wc = new WaitCriterion() {
-      @Override
-      public boolean done() {
-        return lastKeyReceived;
-      }
-      @Override
-      public String description() {
-        return "Did not receive last key.";
-      }
-    };
-    Wait.waitForCriterion(wc, 60*1000, 500, true);
-  }
+  /**
+   * Invoked in managerVM
+   */
+  private void verifyClientStats(final DistributedMember serverMember, final int serverPort, final int numSubscriptions) throws Exception {
+    ManagementService service = this.managementTestRule.getManagementService();
+    CacheServerMXBean cacheServerMXBean = awaitCacheServerMXBean(serverMember, serverPort);
 
-  private static DistributedMember getMember() throws Exception {
-    GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-    return cache.getDistributedSystem().getDistributedMember();
-  }
+    String[] clientIds = cacheServerMXBean.getClientIds();
+    assertThat(clientIds).hasSize(2);
 
-  private static void verifyClientStats(DistributedMember serverMember, int serverPort, int numSubscriptions) {
-    GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-    try {
-      ManagementService service = ManagementService.getExistingManagementService(cache);
-      CacheServerMXBean bean = MBeanUtil.getCacheServerMbeanProxy(serverMember, serverPort);
-
-      String[] clientIds = bean.getClientIds();
-      assertTrue(clientIds.length == 2);
-      System.out.println("<ExpectedString> ClientId-1 of the Server is  " + clientIds[0] + "</ExpectedString> ");
-      System.out.println("<ExpectedString> ClientId-2 of the Server is  " + clientIds[1] + "</ExpectedString> ");
-      
-      ClientHealthStatus[] clientStatuses = bean.showAllClientStats();
-
-      ClientHealthStatus clientStatus1 = bean.showClientStats(clientIds[0]);
-      ClientHealthStatus clientStatus2 = bean.showClientStats(clientIds[1]);
-      assertNotNull(clientStatus1);
-      assertNotNull(clientStatus2);
-      System.out.println("<ExpectedString> ClientStats-1 of the Server is  " + clientStatus1 + "</ExpectedString> ");
-      System.out.println("<ExpectedString> ClientStats-2 of the Server is  " + clientStatus2 + "</ExpectedString> ");
-
-      System.out.println("<ExpectedString> clientStatuses " + clientStatuses + "</ExpectedString> ");
-      assertNotNull(clientStatuses);
-      
-      assertTrue(clientStatuses.length == 2);
-      for (ClientHealthStatus status : clientStatuses) {
-        System.out.println("<ExpectedString> ClientStats of the Server is  " + status + "</ExpectedString> ");
-      }
+    ClientHealthStatus[] clientStatuses = cacheServerMXBean.showAllClientStats();
 
-      DistributedSystemMXBean dsBean = service.getDistributedSystemMXBean();
-      assertEquals(2, dsBean.getNumClients());
-      assertEquals(numSubscriptions, dsBean.getNumSubscriptions());
+    ClientHealthStatus clientStatus1 = cacheServerMXBean.showClientStats(clientIds[0]);
+    ClientHealthStatus clientStatus2 = cacheServerMXBean.showClientStats(clientIds[1]);
+    assertThat(clientStatus1).isNotNull();
+    assertThat(clientStatus2).isNotNull();
 
-    } catch (Exception e) {
-      fail("Error while verifying cache server from remote member", e);
-    }
+    assertThat(clientStatuses).isNotNull().hasSize(2);
+
+    DistributedSystemMXBean dsBean = service.getDistributedSystemMXBean();
+    assertThat(dsBean.getNumClients()).isEqualTo(2);
+    assertThat(dsBean.getNumSubscriptions()).isEqualTo(numSubscriptions);
   }
 
-  private static void put() {
-    Cache cache = GemFireCacheImpl.getInstance();
-    Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME);
-    assertNotNull(r1);
-
-    r1.put(k1, client_k1);
-    assertEquals(r1.getEntry(k1).getValue(), client_k1);
-    r1.put(k2, client_k2);
-    assertEquals(r1.getEntry(k2).getValue(), client_k2);
-    try {
-      Thread.sleep(10000);
-    } catch (Exception e) {
-      // sleep
-    }
-    r1.clear();
-
-    r1.put(k1, client_k1);
-    assertEquals(r1.getEntry(k1).getValue(), client_k1);
-    r1.put(k2, client_k2);
-    assertEquals(r1.getEntry(k2).getValue(), client_k2);
-    r1.clear();
-    try {
-      Thread.sleep(10000);
-    } catch (Exception e) {
-      // sleep
-    }
+  /**
+   * Invoked in client1VM and client2VM
+   */
+  private void put() throws Exception {
+    Cache cache = (Cache)clientCache;
+    Region<String, String> region = cache.getRegion(Region.SEPARATOR + REGION_NAME);
+
+    region.put(KEY1, VALUE1);
+    assertThat(region.getEntry(KEY1).getValue()).isEqualTo(VALUE1);
+
+    region.put(KEY2, VALUE2);
+    assertThat(region.getEntry(KEY2).getValue()).isEqualTo(VALUE2);
+
+    region.clear();
+
+    region.put(KEY1, VALUE1);
+    assertThat(region.getEntry(KEY1).getValue()).isEqualTo(VALUE1);
+
+    region.put(KEY2, VALUE2);
+    assertThat(region.getEntry(KEY2).getValue()).isEqualTo(VALUE2);
+
+    region.clear();
   }
 
-  private static void verifyStats(int serverPort) throws Exception {
-    Cache cache = GemFireCacheImpl.getInstance();
-    ManagementService service = ManagementService.getExistingManagementService(cache);
+  /**
+   * Invoked in serverVM
+   */
+  private void verifyStats(final int serverPort) throws Exception {
+    ManagementService service = this.managementTestRule.getManagementService();
     CacheServerMXBean serverBean = service.getLocalCacheServerMXBean(serverPort);
-    CacheClientNotifier ccn = CacheClientNotifier.getInstance();
-    CacheClientProxy ccp = ccn.getClientProxies().iterator().next();
-    cache.getLoggerI18n().info(LocalizedStrings.DEBUG, "getQueueSize() " + ccp.getQueueSize());
-    cache.getLoggerI18n().info(LocalizedStrings.DEBUG, "getQueueSizeStat() " + ccp.getQueueSizeStat());
-    cache.getLoggerI18n().info(LocalizedStrings.DEBUG, "getEventsEnqued() " + ccp.getHARegionQueue().getStatistics().getEventsEnqued());
-    cache.getLoggerI18n().info(LocalizedStrings.DEBUG, "getEventsDispatched() " + ccp.getHARegionQueue().getStatistics().getEventsDispatched());
-    cache.getLoggerI18n().info(LocalizedStrings.DEBUG, "getEventsRemoved() " + ccp.getHARegionQueue().getStatistics().getEventsRemoved());
-    cache.getLoggerI18n().info(LocalizedStrings.DEBUG, "getNumVoidRemovals() " + ccp.getHARegionQueue().getStatistics().getNumVoidRemovals());
-    assertEquals(ccp.getQueueSize(), ccp.getQueueSizeStat());
+
+    CacheClientNotifier clientNotifier = CacheClientNotifier.getInstance();
+    CacheClientProxy clientProxy = clientNotifier.getClientProxies().iterator().next();
+    assertThat(clientProxy.getQueueSizeStat()).isEqualTo(clientProxy.getQueueSize());
+
     ClientQueueDetail queueDetails = serverBean.showClientQueueDetails()[0];
-    assertEquals(queueDetails.getQueueSize(), ccp.getQueueSizeStat());
+    assertThat(clientProxy.getQueueSizeStat()).isEqualTo((int)queueDetails.getQueueSize());
+  }
+
+  private CacheServerMXBean awaitCacheServerMXBean(final DistributedMember serverMember, final int port) {
+    SystemManagementService service = this.managementTestRule.getSystemManagementService();
+    ObjectName objectName = service.getCacheServerMBeanName(port, serverMember);
+
+    await().until(() -> assertThat(service.getMBeanProxy(objectName, CacheServerMXBean.class)).isNotNull());
+
+    return service.getMBeanProxy(objectName, CacheServerMXBean.class);
+  }
+
+  private ConditionFactory await() {
+    return Awaitility.await().atMost(2, MINUTES);
   }
 }
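
The rewritten test above drops GemFire's WaitCriterion/Wait.waitForCriterion polling in favor of Awaitility, wrapped in a small await() helper so the timeout is configured in one place. A minimal, self-contained sketch of that pattern, assuming the same com.jayway.awaitility dependency the test imports (the polled condition below is purely illustrative):

    import static java.util.concurrent.TimeUnit.MINUTES;

    import com.jayway.awaitility.Awaitility;
    import com.jayway.awaitility.core.ConditionFactory;

    public class AwaitilityPollingSketch {

      // Central helper mirroring the test: one place to change the default timeout.
      private static ConditionFactory await() {
        return Awaitility.await().atMost(2, MINUTES);
      }

      public static void main(String[] args) {
        long readyAt = System.currentTimeMillis() + 100;

        // Polls the Callable until it returns true, failing after 2 minutes.
        await().until(() -> System.currentTimeMillis() >= readyAt);
      }
    }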

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/management/CompositeTypeTestDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/CompositeTypeTestDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/CompositeTypeTestDUnitTest.java
index 914aef0..683fefd 100644
--- a/geode-core/src/test/java/org/apache/geode/management/CompositeTypeTestDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/CompositeTypeTestDUnitTest.java
@@ -16,162 +16,86 @@
  */
 package org.apache.geode.management;
 
-import org.junit.experimental.categories.Category;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
+import static java.util.concurrent.TimeUnit.*;
+import static org.assertj.core.api.Assertions.*;
 
-import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
-import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
-import org.apache.geode.test.junit.categories.DistributedTest;
-import org.apache.geode.test.junit.categories.FlakyTest;
+import java.io.Serializable;
 
-import javax.management.MalformedObjectNameException;
 import javax.management.ObjectName;
 
-import org.apache.geode.internal.cache.GemFireCacheImpl;
+import com.jayway.awaitility.Awaitility;
+import com.jayway.awaitility.core.ConditionFactory;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
 import org.apache.geode.management.internal.MBeanJMXAdapter;
-import org.apache.geode.management.internal.ManagementConstants;
 import org.apache.geode.management.internal.SystemManagementService;
-import org.apache.geode.test.dunit.SerializableRunnable;
 import org.apache.geode.test.dunit.VM;
-import org.apache.geode.test.dunit.Wait;
-import org.apache.geode.test.dunit.WaitCriterion;
+import org.apache.geode.test.junit.categories.DistributedTest;
 
 @Category(DistributedTest.class)
-public class CompositeTypeTestDUnitTest extends ManagementTestBase {
+@SuppressWarnings({ "serial", "unused" })
+public class CompositeTypeTestDUnitTest implements Serializable {
 
-  public CompositeTypeTestDUnitTest() {
-    super();
-    // TODO Auto-generated constructor stub
-  }
+  @Manager
+  private VM managerVM;
 
-  /**
-   * 
-   */
-  private static final long serialVersionUID = 1L;
-  
-  private static ObjectName objectName;
+  @Member
+  private VM memberVM;
+
+  @Rule
+  public ManagementTestRule managementTestRule = ManagementTestRule.builder().start(true).build();
 
-  @Category(FlakyTest.class) // GEODE-1492
   @Test
   public void testCompositeTypeGetters() throws Exception{
-    
-    initManagement(false);
-    String member = getMemberId(managedNode1);
-    member = MBeanJMXAdapter.makeCompliantName(member);
-    
-    registerMBeanWithCompositeTypeGetters(managedNode1,member);
-
-    
-    checkMBeanWithCompositeTypeGetters(managingNode,member);
-    
+    registerMBeanWithCompositeTypeGetters(this.memberVM);
+
+    String memberName = MBeanJMXAdapter.makeCompliantName(getMemberId(this.memberVM));
+    verifyMBeanWithCompositeTypeGetters(this.managerVM, memberName);
   }
 
-  
-  /**
-   * Creates a Local region
-   *
-   * @param vm
-   *          reference to VM
-   */
-  protected void registerMBeanWithCompositeTypeGetters(VM vm,final String memberID)
-      throws Exception {
-    SerializableRunnable regMBean = new SerializableRunnable(
-        "Register CustomMBean with composite Type") {
-      public void run() {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        SystemManagementService service = (SystemManagementService) getManagementService();
- 
-        try {
-          ObjectName objectName = new ObjectName("GemFire:service=custom,type=composite");
-          CompositeTestMXBean mbean =  new CompositeTestMBean();
-          objectName = service.registerMBean(mbean, objectName);
-          service.federate(objectName, CompositeTestMXBean.class, false);
-        } catch (MalformedObjectNameException e) {
-          // TODO Auto-generated catch block
-          e.printStackTrace();
-        } catch (NullPointerException e) {
-          // TODO Auto-generated catch block
-          e.printStackTrace();
-        }
-
-        
-
-      }
-    };
-    vm.invoke(regMBean);
+  private void registerMBeanWithCompositeTypeGetters(final VM memberVM) throws Exception {
+    memberVM.invoke("registerMBeanWithCompositeTypeGetters", () -> {
+      SystemManagementService service = this.managementTestRule.getSystemManagementService();
+
+      ObjectName objectName = new ObjectName("GemFire:service=custom,type=composite");
+      CompositeTestMXBean compositeTestMXBean = new CompositeTestMBean();
+
+      objectName = service.registerMBean(compositeTestMXBean, objectName);
+      service.federate(objectName, CompositeTestMXBean.class, false);
+    });
   }
-  
-  
-  /**
-   * Creates a Local region
-   *
-   * @param vm
-   *          reference to VM
-   */
-  protected void checkMBeanWithCompositeTypeGetters(VM vm,final String memberID)
-      throws Exception {
-    SerializableRunnable checkMBean = new SerializableRunnable(
-        "Check CustomMBean with composite Type") {
-      public void run() {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        final SystemManagementService service = (SystemManagementService) getManagementService();
-
-        try {
-          final ObjectName objectName = new ObjectName("GemFire:service=custom,type=composite,member="+memberID);
-          
-          Wait.waitForCriterion(new WaitCriterion() {
-            public String description() {
-              return "Waiting for Composite Type MBean";
-            }
-
-            public boolean done() {
-              CompositeTestMXBean bean = service.getMBeanInstance(objectName, CompositeTestMXBean.class);
-              boolean done = (bean != null);
-              return done;
-            }
-
-          },  ManagementConstants.REFRESH_TIME*4, 500, true);
-
-          
-          CompositeTestMXBean bean = service.getMBeanInstance(objectName, CompositeTestMXBean.class);
-          
-          CompositeStats listData = bean.listCompositeStats();
-          
-          System.out.println("connectionStatsType = "+listData.getConnectionStatsType());
-          System.out.println("connectionsOpened = "+listData.getConnectionsOpened());
-          System.out.println("connectionsClosed = "+listData.getConnectionsClosed());
-          System.out.println("connectionsAttempted = "+listData.getConnectionsAttempted());
-          System.out.println("connectionsFailed = "+listData.getConnectionsFailed());
-          
-          CompositeStats getsData = bean.getCompositeStats();
-          System.out.println("connectionStatsType = "+getsData.getConnectionStatsType());
-          System.out.println("connectionsOpened = "+getsData.getConnectionsOpened());
-          System.out.println("connectionsClosed = "+getsData.getConnectionsClosed());
-          System.out.println("connectionsAttempted = "+getsData.getConnectionsAttempted());
-          System.out.println("connectionsFailed = "+getsData.getConnectionsFailed());
-          
-          CompositeStats[] arrayData = bean.getCompositeArray();
-          Integer[] intArrayData = bean.getIntegerArray();
-          Thread.sleep(2*60*1000);
-        } catch (MalformedObjectNameException e) {
-          // TODO Auto-generated catch block
-          e.printStackTrace();
-        } catch (NullPointerException e) {
-          // TODO Auto-generated catch block
-          e.printStackTrace();
-        } catch (InterruptedException e) {
-          // TODO Auto-generated catch block
-          e.printStackTrace();
-        }
-        
-        
-
-      }
-    };
-    vm.invoke(checkMBean);
+
+  private void verifyMBeanWithCompositeTypeGetters(final VM managerVM, final String memberId) throws Exception {
+    managerVM.invoke("verifyMBeanWithCompositeTypeGetters", () -> {
+      SystemManagementService service = this.managementTestRule.getSystemManagementService();
+      ObjectName objectName = new ObjectName("GemFire:service=custom,type=composite,member=" + memberId);
+
+      await().until(() -> service.getMBeanInstance(objectName, CompositeTestMXBean.class) != null);
+
+      CompositeTestMXBean compositeTestMXBean = service.getMBeanInstance(objectName, CompositeTestMXBean.class);
+      assertThat(compositeTestMXBean).isNotNull();
+
+      CompositeStats listCompositeStatsData = compositeTestMXBean.listCompositeStats();
+      assertThat(listCompositeStatsData).isNotNull();
+
+      CompositeStats getCompositeStatsData = compositeTestMXBean.getCompositeStats();
+      assertThat(getCompositeStatsData).isNotNull();
+
+      CompositeStats[] getCompositeArrayData = compositeTestMXBean.getCompositeArray();
+      assertThat(getCompositeArrayData).isNotNull().isNotEmpty();
+
+      Integer[] getIntegerArrayData = compositeTestMXBean.getIntegerArray();
+      assertThat(getIntegerArrayData).isNotNull().isNotEmpty();
+    });
   }
 
-  
+  private String getMemberId(final VM memberVM) {
+    return memberVM.invoke("getMemberId", () -> this.managementTestRule.getDistributedMember().getId());
+  }
+
+  private ConditionFactory await() {
+    return Awaitility.await().atMost(2, MINUTES);
+  }
 }
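
The new CompositeTypeTestDUnitTest above reduces the custom-MBean workflow to two calls: registerMBean publishes the bean on the local member, and federate makes it visible to manager nodes, which then build a proxy for it. A hedged sketch of that sequence outside the DUnit harness, assuming it sits alongside the test's CompositeTestMXBean/CompositeTestMBean classes in org.apache.geode.management and that a Cache is already running:

    import javax.management.ObjectName;

    import org.apache.geode.cache.Cache;
    import org.apache.geode.management.ManagementService;
    import org.apache.geode.management.internal.SystemManagementService;

    public class CustomMXBeanRegistrationSketch {

      public static ObjectName registerAndFederate(Cache cache) throws Exception {
        // The internal SystemManagementService is what exposes federate().
        SystemManagementService service =
            (SystemManagementService) ManagementService.getManagementService(cache);

        ObjectName objectName = new ObjectName("GemFire:service=custom,type=composite");
        CompositeTestMXBean bean = new CompositeTestMBean();

        // Register on this member, then federate so a manager can obtain a
        // CompositeTestMXBean proxy for this member.
        objectName = service.registerMBean(bean, objectName);
        service.federate(objectName, CompositeTestMXBean.class, false);
        return objectName;
      }
    }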

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/management/DLockManagementDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/DLockManagementDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/DLockManagementDUnitTest.java
index 890ee04..c4806fb 100644
--- a/geode-core/src/test/java/org/apache/geode/management/DLockManagementDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/DLockManagementDUnitTest.java
@@ -16,467 +16,272 @@
  */
 package org.apache.geode.management;
 
-import org.junit.experimental.categories.Category;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
-import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
-import org.apache.geode.test.junit.categories.DistributedTest;
+import static java.util.concurrent.TimeUnit.*;
+import static org.apache.geode.internal.process.ProcessUtils.*;
+import static org.assertj.core.api.Assertions.*;
 
+import java.io.Serializable;
 import java.util.Map;
 import java.util.Set;
 
 import javax.management.ObjectName;
 
+import com.jayway.awaitility.Awaitility;
+import com.jayway.awaitility.core.ConditionFactory;
+import org.junit.Rule;
+import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import org.apache.geode.distributed.DistributedLockService;
 import org.apache.geode.distributed.DistributedMember;
-import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.distributed.internal.locks.DLockService;
-import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
 import org.apache.geode.internal.cache.GemFireCacheImpl;
 import org.apache.geode.management.internal.MBeanJMXAdapter;
 import org.apache.geode.management.internal.SystemManagementService;
-import org.apache.geode.test.dunit.Assert;
-import org.apache.geode.test.dunit.LogWriterUtils;
-import org.apache.geode.test.dunit.SerializableRunnable;
 import org.apache.geode.test.dunit.VM;
-import org.apache.geode.test.dunit.Wait;
-import org.apache.geode.test.dunit.WaitCriterion;
-import org.apache.geode.test.junit.categories.FlakyTest;
+import org.apache.geode.test.junit.categories.DistributedTest;
 
 @Category(DistributedTest.class)
-public class DLockManagementDUnitTest extends ManagementTestBase {
+@SuppressWarnings({ "serial", "unused" })
+public class DLockManagementDUnitTest implements Serializable {
 
-  private static final long serialVersionUID = 1L;
+  private static final int MAX_WAIT_MILLIS = 120 * 1000; // 2 MINUTES
 
-  private static final String LOCK_SERVICE_NAME = "testLockService";
-  
-  // This must be bigger than the dunit ack-wait-threshold for the revoke
-  // tests. The command line is setting the ack-wait-threshold to be
-  // 60 seconds.
-  private static final int MAX_WAIT = 70 * 1000;
+  private static final String LOCK_SERVICE_NAME = DLockManagementDUnitTest.class.getSimpleName() + "_testLockService";
 
-  public DLockManagementDUnitTest() {
-    super();
+  @Manager
+  private VM managerVM;
 
-  }
+  @Member
+  private VM[] memberVMs;
 
-  /**
-   * Distributed Lock Service test
-   * 
-   * @throws Exception
-   */
-  @Category(FlakyTest.class) // GEODE-173: eats exceptions, HeadlessGFSH, time sensitive, waitForCriterions
-  @Test
-  public void testDLockMBean() throws Throwable {
-    
-    initManagement(false);
-    
-    VM[] managedNodes = new VM[getManagedNodeList()
-        .size()];
-    VM managingNode = getManagingNode();
-    
-    getManagedNodeList().toArray(managedNodes);
-
-    createGrantorLockService(managedNodes[0]);
-
-    createLockService(managedNodes[1]);
-
-    createLockService(managedNodes[2]);
-    
-    for (VM vm : getManagedNodeList()) {
-      verifyLockData(vm);
-    }
-    verifyLockDataRemote(managingNode);
+  @Rule
+  public ManagementTestRule managementTestRule = ManagementTestRule.builder().managersFirst(false).start(true).build();
 
-    for (VM vm : getManagedNodeList()) {
-      closeLockService(vm);
-    }
-  }
-  
-  /**
-   * Distributed Lock Service test
-   * 
-   * @throws Exception
-   */
-  @Category(FlakyTest.class) // GEODE-553: waitForCriterion, eats exceptions, HeadlessGFSH
   @Test
-  public void testDLockAggregate() throws Throwable {
-    initManagement(false);
-    VM[] managedNodes = new VM[getManagedNodeList()
-        .size()];
-    VM managingNode = getManagingNode();
-    
-    getManagedNodeList().toArray(managedNodes);
-
-    createGrantorLockService(managedNodes[0]);
-
-    createLockService(managedNodes[1]);
-
-    createLockService(managedNodes[2]);
-    
-    checkAggregate(managingNode, 3);
-    DistributedMember member = getMember(managedNodes[2]);
-    checkNavigation(managingNode, member);
-    
-    createLockService(managingNode);
-    checkAggregate(managingNode, 4);
-   
-
-    for (VM vm : getManagedNodeList()) {
-      closeLockService(vm);
+  public void testLockServiceMXBean() throws Throwable {
+    createLockServiceGrantor(this.memberVMs[0]);
+    createLockService(this.memberVMs[1]);
+    createLockService(this.memberVMs[2]);
+
+    for (VM memberVM : this.memberVMs) {
+      verifyLockServiceMXBeanInMember(memberVM);
     }
-    ensureProxyCleanup(managingNode);
-    checkAggregate(managingNode, 1);
-    closeLockService(managingNode);
-    checkAggregate(managingNode, 0);
+    verifyLockServiceMXBeanInManager(this.managerVM);
 
+    for (VM memberVM : this.memberVMs) {
+      closeLockService(memberVM);
+    }
   }
-  
-  public void ensureProxyCleanup(final VM vm) {
-
-    SerializableRunnable ensureProxyCleanup = new SerializableRunnable(
-        "Ensure Proxy cleanup") {
-      public void run() {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        Set<DistributedMember> otherMemberSet = cache.getDistributionManager()
-            .getOtherNormalDistributionManagerIds();
-        final SystemManagementService service = (SystemManagementService) getManagementService();
- 
-
-        for (final DistributedMember member : otherMemberSet) {
-          RegionMXBean bean = null;
-          try {
 
-            Wait.waitForCriterion(new WaitCriterion() {
-
-              LockServiceMXBean bean = null;
+  @Test
+  public void testDistributedLockServiceMXBean() throws Throwable {
+    createLockServiceGrantor(this.memberVMs[0]);
+    createLockService(this.memberVMs[1]);
+    createLockService(this.memberVMs[2]);
 
-              public String description() {
-                return "Waiting for the proxy to get deleted at managing node";
-              }
+    verifyDistributedLockServiceMXBean(this.managerVM, 3);
 
-              public boolean done() {
-                ObjectName objectName = service.getRegionMBeanName(member, LOCK_SERVICE_NAME);
-                bean = service.getMBeanProxy(objectName, LockServiceMXBean.class);
-                boolean done = (bean == null);
-                return done;
-              }
+    DistributedMember member = this.managementTestRule.getDistributedMember(this.memberVMs[2]);
+    verifyFetchOperations(this.managerVM, member);
 
-            }, MAX_WAIT, 500, true);
+    createLockService(this.managerVM);
+    verifyDistributedLockServiceMXBean(this.managerVM, 4);
 
-          } catch (Exception e) {
-            throw new AssertionError("could not remove proxies in required time", e);
+    for (VM memberVM : this.memberVMs) {
+      closeLockService(memberVM);
+    }
+    verifyProxyCleanupInManager(this.managerVM);
+    verifyDistributedLockServiceMXBean(this.managerVM, 1);
 
-          }
-          assertNull(bean);
+    closeLockService(this.managerVM);
+    verifyDistributedLockServiceMXBean(this.managerVM, 0);
+  }
 
-        }
+  private void verifyProxyCleanupInManager(final VM managerVM) {
+    managerVM.invoke("verifyProxyCleanupInManager", () -> {
+      Set<DistributedMember> otherMembers = this.managementTestRule.getOtherNormalMembers();
+      SystemManagementService service = this.managementTestRule.getSystemManagementService();
 
+      for (final DistributedMember member : otherMembers) {
+        ObjectName objectName = service.getRegionMBeanName(member, LOCK_SERVICE_NAME);
+        await().until(() -> assertThat(service.getMBeanProxy(objectName, LockServiceMXBean.class)).isNull());
       }
-    };
-    vm.invoke(ensureProxyCleanup);
+    });
   }
 
-  /**
-   * Creates a grantor lock service
-   * 
-   * @param vm
-   */
-  @SuppressWarnings("serial")
-  protected void createGrantorLockService(final VM vm) {
-    SerializableRunnable createGrantorLockService = new SerializableRunnable(
-        "Create Grantor LockService") {
-      public void run() {
-        GemFireCacheImpl cache  = GemFireCacheImpl.getInstance();
-        assertNull(DistributedLockService.getServiceNamed(LOCK_SERVICE_NAME));
+  private void createLockServiceGrantor(final VM memberVM) {
+    memberVM.invoke("createLockServiceGrantor", () -> {
+      assertThat(DistributedLockService.getServiceNamed(LOCK_SERVICE_NAME)).isNull();
 
-        DLockService service = (DLockService) DistributedLockService.create(
-            LOCK_SERVICE_NAME, cache.getDistributedSystem());
+      DLockService lockService = (DLockService) DistributedLockService.create(LOCK_SERVICE_NAME, this.managementTestRule.getCache().getDistributedSystem());
+      DistributedMember grantor = lockService.getLockGrantorId().getLockGrantorMember();
+      assertThat(grantor).isNotNull();
 
-        assertSame(service, DistributedLockService
-            .getServiceNamed(LOCK_SERVICE_NAME));
+      LockServiceMXBean lockServiceMXBean = awaitLockServiceMXBean(LOCK_SERVICE_NAME);
 
-        InternalDistributedMember grantor = service.getLockGrantorId()
-            .getLockGrantorMember();
+      assertThat(lockServiceMXBean).isNotNull();
+      assertThat(lockServiceMXBean.isDistributed()).isTrue();
+      assertThat(lockServiceMXBean.getName()).isEqualTo(LOCK_SERVICE_NAME);
+      assertThat(lockServiceMXBean.isLockGrantor()).isTrue();
+      assertThat(lockServiceMXBean.fetchGrantorMember()).isEqualTo(this.managementTestRule.getDistributedMember().getId());
+    });
+  }
 
-        assertNotNull(grantor);
+  private void createLockService(final VM anyVM) {
+    anyVM.invoke("createLockService", () -> {
+      assertThat(DistributedLockService.getServiceNamed(LOCK_SERVICE_NAME)).isNull();
 
-        LogWriterUtils.getLogWriter().info("In identifyLockGrantor - grantor is " + grantor);
+      DistributedLockService.create(LOCK_SERVICE_NAME, this.managementTestRule.getCache().getDistributedSystem());
 
-       
+      LockServiceMXBean lockServiceMXBean = awaitLockServiceMXBean(LOCK_SERVICE_NAME);
 
-        ManagementService mgmtService = getManagementService();
+      assertThat(lockServiceMXBean).isNotNull();
+      assertThat(lockServiceMXBean.isDistributed()).isTrue();
+      assertThat(lockServiceMXBean.isLockGrantor()).isFalse();
+    });
+  }
 
-        LockServiceMXBean bean = mgmtService
-            .getLocalLockServiceMBean(LOCK_SERVICE_NAME);
+  private void closeLockService(final VM anyVM) {
+    anyVM.invoke("closeLockService", () -> {
+      assertThat(DistributedLockService.getServiceNamed(LOCK_SERVICE_NAME)).isNotNull();
+      DistributedLockService.destroy(LOCK_SERVICE_NAME);
 
-        assertNotNull(bean);
+      awaitLockServiceMXBeanIsNull(LOCK_SERVICE_NAME);
 
-        assertTrue(bean.isDistributed());
+      ManagementService service = this.managementTestRule.getManagementService();
+      LockServiceMXBean lockServiceMXBean = service.getLocalLockServiceMBean(LOCK_SERVICE_NAME);
+      assertThat(lockServiceMXBean).isNull();
+    });
+  }
 
-        assertEquals(bean.getName(), LOCK_SERVICE_NAME);
+  private void verifyLockServiceMXBeanInMember(final VM memberVM) {
+    memberVM.invoke("verifyLockServiceMXBeanInManager", () -> {
+      DistributedLockService lockService = DistributedLockService.getServiceNamed(LOCK_SERVICE_NAME);
+      lockService.lock("lockObject_" + identifyPid(), MAX_WAIT_MILLIS, -1);
 
-        assertTrue(bean.isLockGrantor());
+      ManagementService service = this.managementTestRule.getManagementService();
+      LockServiceMXBean lockServiceMXBean = service.getLocalLockServiceMBean(LOCK_SERVICE_NAME);
+      assertThat(lockServiceMXBean).isNotNull();
 
-        assertEquals(cache.getDistributedSystem().getMemberId(), bean
-            .fetchGrantorMember());
+      String[] listHeldLock = lockServiceMXBean.listHeldLocks();
+      assertThat(listHeldLock).hasSize(1);
 
-      }
-    };
-    vm.invoke(createGrantorLockService);
+      Map<String, String> lockThreadMap = lockServiceMXBean.listThreadsHoldingLock();
+      assertThat(lockThreadMap).hasSize(1);
+    });
   }
 
   /**
-   * Creates a named lock service
-   * @param vm
+   * Verify lock data from remote Managing node
    */
-  @SuppressWarnings("serial")
-  protected void createLockService(final VM vm) {
-    SerializableRunnable createLockService = new SerializableRunnable(
-        "Create LockService") {
-      public void run() {
-        assertNull(DistributedLockService.getServiceNamed(LOCK_SERVICE_NAME));
-        GemFireCacheImpl cache  = GemFireCacheImpl.getInstance();
-        DistributedLockService service = DistributedLockService.create(
-            LOCK_SERVICE_NAME, cache.getDistributedSystem());
-
-        assertSame(service, DistributedLockService
-            .getServiceNamed(LOCK_SERVICE_NAME));
+  private void verifyLockServiceMXBeanInManager(final VM managerVM) throws Exception {
+    managerVM.invoke("verifyLockServiceMXBeanInManager", () -> {
+      Set<DistributedMember> otherMembers = this.managementTestRule.getOtherNormalMembers();
 
-        
+      for (DistributedMember member : otherMembers) {
+        LockServiceMXBean lockServiceMXBean = awaitLockServiceMXBeanProxy(member, LOCK_SERVICE_NAME);
+        assertThat(lockServiceMXBean).isNotNull();
 
-        ManagementService mgmtService = getManagementService();
+        String[] listHeldLock = lockServiceMXBean.listHeldLocks();
+        assertThat(listHeldLock).hasSize(1);
 
-        LockServiceMXBean bean = mgmtService
-            .getLocalLockServiceMBean(LOCK_SERVICE_NAME);
+        Map<String, String> lockThreadMap = lockServiceMXBean.listThreadsHoldingLock();
+        assertThat(lockThreadMap).hasSize(1);
+      }
+    });
+  }
 
-        assertNotNull(bean);
+  private void verifyFetchOperations(final VM memberVM, final DistributedMember member) {
+    memberVM.invoke("verifyFetchOperations", () -> {
+      ManagementService service = this.managementTestRule.getManagementService();
 
-        assertTrue(bean.isDistributed());
+      DistributedSystemMXBean distributedSystemMXBean = awaitDistributedSystemMXBean();
+      ObjectName distributedLockServiceMXBeanName = MBeanJMXAdapter.getDistributedLockServiceName(LOCK_SERVICE_NAME);
+      assertThat(distributedSystemMXBean.fetchDistributedLockServiceObjectName(LOCK_SERVICE_NAME)).isEqualTo(distributedLockServiceMXBeanName);
 
-        assertFalse(bean.isLockGrantor());
-      }
-    };
-    vm.invoke(createLockService);
+      ObjectName lockServiceMXBeanName = MBeanJMXAdapter.getLockServiceMBeanName(member.getId(), LOCK_SERVICE_NAME);
+      assertThat(distributedSystemMXBean.fetchLockServiceObjectName(member.getId(), LOCK_SERVICE_NAME)).isEqualTo(lockServiceMXBeanName);
+    });
   }
 
   /**
-   * Closes a named lock service
-   * @param vm
+   * Verify Aggregate MBean
    */
-  @SuppressWarnings("serial")
-  protected void closeLockService(final VM vm) {
-    SerializableRunnable closeLockService = new SerializableRunnable(
-        "Close LockService") {
-      public void run() {
+  private void verifyDistributedLockServiceMXBean(final VM managerVM, final int memberCount) {
+    managerVM.invoke("verifyDistributedLockServiceMXBean", () -> {
+      ManagementService service = this.managementTestRule.getManagementService();
 
-        DistributedLockService service = DistributedLockService
-            .getServiceNamed(LOCK_SERVICE_NAME);
-
-        DistributedLockService.destroy(LOCK_SERVICE_NAME);
-
-        ManagementService mgmtService = getManagementService();
-
-        LockServiceMXBean bean = null;
-        try {
+      if (memberCount == 0) {
+        await().until(() -> assertThat(service.getDistributedLockServiceMXBean(LOCK_SERVICE_NAME)).isNull());
+        return;
+      }
 
-          bean = mgmtService.getLocalLockServiceMBean(LOCK_SERVICE_NAME);
+      DistributedLockServiceMXBean distributedLockServiceMXBean = awaitDistributedLockServiceMXBean(LOCK_SERVICE_NAME, memberCount);
+      assertThat(distributedLockServiceMXBean).isNotNull();
+      assertThat(distributedLockServiceMXBean.getName()).isEqualTo(LOCK_SERVICE_NAME);
+    });
+  }
 
-        } catch (ManagementException mgs) {
+  private DistributedSystemMXBean awaitDistributedSystemMXBean() {
+    ManagementService service = this.managementTestRule.getManagementService();
 
-        }
-        assertNull(bean);
+    await().until(() -> assertThat(service.getDistributedSystemMXBean()).isNotNull());
 
-      }
-    };
-    vm.invoke(closeLockService);
+    return service.getDistributedSystemMXBean();
   }
 
   /**
-   * Lock data related verifications
-   * @param vm
+   * Await and return the DistributedLockServiceMXBean with the specified
+   * member count.
    */
-  @SuppressWarnings("serial")
-  protected void verifyLockData(final VM vm) {
-    SerializableRunnable verifyLockData = new SerializableRunnable(
-        "Verify LockService") {
-      public void run() {
-
-        DistributedLockService service = DistributedLockService
-            .getServiceNamed(LOCK_SERVICE_NAME);
-
-        final String LOCK_OBJECT = "lockObject_" + vm.getPid();
-
-        Wait.waitForCriterion(new WaitCriterion() {
-          DistributedLockService service = null;
-
-          public String description() {
-            return "Waiting for the lock service to be initialised";
-          }
-
-          public boolean done() {
-            DistributedLockService service = DistributedLockService
-                .getServiceNamed(LOCK_SERVICE_NAME);
-            boolean done = service != null;
-            return done;
-          }
-
-        }, MAX_WAIT, 500, true);
+  private DistributedLockServiceMXBean awaitDistributedLockServiceMXBean(final String lockServiceName, final int memberCount) {
+    ManagementService service = this.managementTestRule.getManagementService();
 
-        service.lock(LOCK_OBJECT, 1000, -1);
-        
+    await().until(() -> {
+      assertThat(service.getDistributedLockServiceMXBean(lockServiceName)).isNotNull();
+      assertThat(service.getDistributedLockServiceMXBean(lockServiceName).getMemberCount()).isEqualTo(memberCount);
+    });
 
-        ManagementService mgmtService = getManagementService();
-
-        LockServiceMXBean bean = null;
-        try {
+    return service.getDistributedLockServiceMXBean(lockServiceName);
+  }
 
-          bean = mgmtService.getLocalLockServiceMBean(LOCK_SERVICE_NAME);
+  /**
+   * Await and return a LockServiceMXBean proxy for a specific member and
+   * lockServiceName.
+   */
+  private LockServiceMXBean awaitLockServiceMXBeanProxy(final DistributedMember member, final String lockServiceName) {
+    SystemManagementService service = this.managementTestRule.getSystemManagementService();
+    ObjectName lockServiceMXBeanName = service.getLockServiceMBeanName(member, lockServiceName);
 
-        } catch (ManagementException mgs) {
+    await().until(() -> assertThat(service.getMBeanProxy(lockServiceMXBeanName, LockServiceMXBean.class)).isNotNull());
 
-        }
-        assertNotNull(bean);
-        String[] listHeldLock = bean.listHeldLocks();
-        assertEquals(listHeldLock.length, 1);
-        LogWriterUtils.getLogWriter().info("List Of Lock Object is  " + listHeldLock[0]);
-        Map<String, String> lockThreadMap = bean.listThreadsHoldingLock();
-        assertEquals(lockThreadMap.size(), 1);
-        LogWriterUtils.getLogWriter().info(
-            "List Of Lock Thread is  " + lockThreadMap.toString());
-      }
-    };
-    vm.invoke(verifyLockData);
+    return service.getMBeanProxy(lockServiceMXBeanName, LockServiceMXBean.class);
   }
 
   /**
-   * Verify lock data from remote Managing node
-   * @param vm
+   * Await creation of local LockServiceMXBean for specified lockServiceName.
    */
-  @SuppressWarnings("serial")
-  protected void verifyLockDataRemote(final VM vm) {
-    SerializableRunnable verifyLockDataRemote = new SerializableRunnable(
-        "Verify LockService Remote") {
-      public void run() {
-
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        Set<DistributedMember> otherMemberSet = cache.getDistributionManager()
-            .getOtherNormalDistributionManagerIds();
-
-        for (DistributedMember member : otherMemberSet) {
-          LockServiceMXBean bean = null;
-          try {
-            bean = MBeanUtil.getLockServiceMbeanProxy(member, LOCK_SERVICE_NAME);
-          } catch (Exception e) {
-            InternalDistributedSystem.getLoggerI18n().fine(
-                "Undesired Result , LockServiceMBean Should not be null", e);
-
-          }
-          assertNotNull(bean);
-          String[] listHeldLock = bean.listHeldLocks();
-          assertEquals(listHeldLock.length, 1);
-          LogWriterUtils.getLogWriter().info("List Of Lock Object is  " + listHeldLock[0]);
-          Map<String, String> lockThreadMap = bean.listThreadsHoldingLock();
-          assertEquals(lockThreadMap.size(), 1);
-          LogWriterUtils.getLogWriter().info(
-              "List Of Lock Thread is  " + lockThreadMap.toString());
-        }
+  private LockServiceMXBean awaitLockServiceMXBean(final String lockServiceName) {
+    SystemManagementService service = this.managementTestRule.getSystemManagementService();
 
-      }
-    };
-    vm.invoke(verifyLockDataRemote);
-  }
-  
-  protected void checkNavigation(final VM vm,
-      final DistributedMember lockServiceMember) {
-    SerializableRunnable checkNavigation = new SerializableRunnable(
-        "Check Navigation") {
-      public void run() {
-
-        final ManagementService service = getManagementService();
-
-        DistributedSystemMXBean disMBean = service.getDistributedSystemMXBean();
-        try {
-          ObjectName expected = MBeanJMXAdapter
-              .getDistributedLockServiceName(LOCK_SERVICE_NAME);
-          ObjectName actual = disMBean
-              .fetchDistributedLockServiceObjectName(LOCK_SERVICE_NAME);
-          assertEquals(expected, actual);
-        } catch (Exception e) {
-          throw new AssertionError("Lock Service Navigation Failed ", e);
-        }
-
-        try {
-          ObjectName expected = MBeanJMXAdapter.getLockServiceMBeanName(
-              lockServiceMember.getId(), LOCK_SERVICE_NAME);
-          ObjectName actual = disMBean.fetchLockServiceObjectName(
-              lockServiceMember.getId(), LOCK_SERVICE_NAME);
-          assertEquals(expected, actual);
-        } catch (Exception e) {
-          throw new AssertionError("Lock Service Navigation Failed ", e);
-        }
+    await().until(() -> assertThat(service.getLocalLockServiceMBean(lockServiceName)).isNotNull());
 
-      }
-    };
-    vm.invoke(checkNavigation);
+    return service.getLocalLockServiceMBean(lockServiceName);
   }
 
   /**
-   * Verify Aggregate MBean
-   * @param vm
+   * Await destruction of local LockServiceMXBean for specified
+   * lockServiceName.
    */
-  @SuppressWarnings("serial")
-  protected void checkAggregate(final VM vm, final int expectedMembers) {
-    SerializableRunnable checkAggregate = new SerializableRunnable(
-        "Verify Aggregate MBean") {
-      public void run() {
-        
-        final ManagementService service = getManagementService();
-        if (expectedMembers == 0) {
-          try {
-            Wait.waitForCriterion(new WaitCriterion() {
-
-              DistributedLockServiceMXBean bean = null;
-
-              public String description() {
-                return "Waiting for the proxy to get deleted at managing node";
-              }
-
-              public boolean done() {
-                bean = service
-                    .getDistributedLockServiceMXBean(LOCK_SERVICE_NAME);
-
-                boolean done = (bean == null);
-                return done;
-              }
-
-            }, MAX_WAIT, 500, true);
-
-          } catch (Exception e) {
-            throw new AssertionError("could not remove Aggregate Bean in required time", e);
-
-          }
-          return;
-        }
-
-        DistributedLockServiceMXBean bean = null;
-          try {
-            bean = MBeanUtil.getDistributedLockMbean(LOCK_SERVICE_NAME, expectedMembers);
-          } catch (Exception e) {
-            InternalDistributedSystem.getLoggerI18n().fine(
-                "Undesired Result , LockServiceMBean Should not be null", e);
-
-          }
-          assertNotNull(bean);
-          assertEquals(bean.getName(),LOCK_SERVICE_NAME);
-   
-      }
-    };
-    vm.invoke(checkAggregate);
+  private void awaitLockServiceMXBeanIsNull(final String lockServiceName) {
+    SystemManagementService service = this.managementTestRule.getSystemManagementService();
+
+    await().until(() -> assertThat(service.getLocalLockServiceMBean(lockServiceName)).isNull());
+  }
+
+  private ConditionFactory await() {
+    return Awaitility.await().atMost(MAX_WAIT_MILLIS, MILLISECONDS);
   }
 }
+
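
The grantor flags and held-lock listings that the LockServiceMXBean assertions above check come straight from the distributed lock service API exercised by this test. A minimal, hedged usage sketch (the service name, lock name, and 60-second wait are illustrative, not taken from the test):

    import org.apache.geode.distributed.DistributedLockService;
    import org.apache.geode.distributed.DistributedSystem;

    public class DLockUsageSketch {

      public static void runGuarded(DistributedSystem system, Runnable criticalSection) {
        // Create a named lock service on this member; a grantor is chosen automatically.
        DistributedLockService lockService =
            DistributedLockService.create("exampleLockService", system);

        // Wait up to 60s for the lock; a -1 lease means hold it until unlock().
        boolean locked = lockService.lock("exampleLock", 60_000, -1);
        try {
          if (locked) {
            criticalSection.run();
          }
        } finally {
          if (locked) {
            lockService.unlock("exampleLock");
          }
        }

        // Destroying the service releases any locks this member still holds.
        DistributedLockService.destroy("exampleLockService");
      }
    }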


[05/50] [abbrv] incubator-geode git commit: GEODE-2000 Now ClientMembershipListener returns the host on which the CacheServer is bound

Posted by kl...@apache.org.
GEODE-2000 Now ClientMembershipListener returns the host on which the
CacheServer is bound


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/8a080323
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/8a080323
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/8a080323

Branch: refs/heads/feature/GEODE-1930
Commit: 8a080323070dbbc1d7037612d0d8e1188dcf1507
Parents: f2c3ca4
Author: Hitesh Khamesra <hk...@pivotal.io>
Authored: Wed Oct 12 15:18:21 2016 -0700
Committer: Hitesh Khamesra <hk...@pivotal.io>
Committed: Thu Oct 13 14:38:14 2016 -0700

----------------------------------------------------------------------
 .../membership/InternalDistributedMember.java   |  7 ++++-
 .../internal/cache/tier/sockets/HandShake.java  |  8 +++---
 .../AutoConnectionSourceImplJUnitTest.java      | 27 ++++++++++++++++++++
 3 files changed, 38 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
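
In practice the change surfaces through the client membership API: a listener registered on the client should now see the host the pool actually connected to (the host the CacheServer is bound to) rather than a reverse-DNS canonical name. A hedged sketch of such a listener; the class name, logging, and registration point are illustrative only:

    import org.apache.geode.distributed.DistributedMember;
    import org.apache.geode.management.membership.ClientMembership;
    import org.apache.geode.management.membership.ClientMembershipEvent;
    import org.apache.geode.management.membership.ClientMembershipListener;

    public class ServerHostLoggingListener implements ClientMembershipListener {

      @Override
      public void memberJoined(ClientMembershipEvent event) {
        DistributedMember member = event.getMember();
        // Per this commit, getHost() on the server's member reflects the host
        // name the client used to reach the CacheServer.
        System.out.println("joined: " + member.getId() + " host=" + member.getHost());
      }

      @Override
      public void memberLeft(ClientMembershipEvent event) {
      }

      @Override
      public void memberCrashed(ClientMembershipEvent event) {
      }

      public static void register() {
        // Registers this JVM for client/server membership events.
        ClientMembership.registerClientMembershipListener(new ServerHostLoggingListener());
      }
    }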


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8a080323/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/InternalDistributedMember.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/InternalDistributedMember.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/InternalDistributedMember.java
index 775fa24..3c16bb3 100755
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/InternalDistributedMember.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/InternalDistributedMember.java
@@ -1031,6 +1031,7 @@ public class InternalDistributedMember
     int port = in.readInt();
 
     this.hostName = DataSerializer.readString(in);
+    
     this.hostName = SocketCreator.resolve_dns? SocketCreator.getCanonicalHostName(inetAddr, hostName) : inetAddr.getHostAddress();
 
     int flags = in.readUnsignedByte();
@@ -1210,7 +1211,11 @@ public class InternalDistributedMember
   }
 
   public String getHost() {
-    return this.netMbr.getInetAddress().getCanonicalHostName();
+    return this.hostName;
+  }
+  
+  public void setHost(String h) {
+    this.hostName = h;
   }
 
   public int getProcessId() {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8a080323/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/HandShake.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/HandShake.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/HandShake.java
index d63dfa0..885b61b 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/HandShake.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/HandShake.java
@@ -1320,7 +1320,7 @@ public class HandShake implements ClientHandShake
       int qSize = dis.readInt();
 
       // Read the server member
-      member = readServerMember(dis);
+      member = readServerMember(dis, location);
       serverQStatus = new ServerQueueStatus(epType, qSize,member);
 
       // Read the message (if any)
@@ -1439,7 +1439,7 @@ public class HandShake implements ClientHandShake
     return sqs;
   }
 
-  protected DistributedMember readServerMember(DataInputStream p_dis) throws IOException {
+  public static DistributedMember readServerMember(DataInputStream p_dis, ServerLocation serverLocation) throws IOException {
 
     byte[] memberBytes = DataSerializer.readByteArray(p_dis);
     ByteArrayInputStream bais = new ByteArrayInputStream(memberBytes);
@@ -1449,7 +1449,9 @@ public class HandShake implements ClientHandShake
       dis = new VersionedDataInputStream(dis, v);
     }
     try {
-      return (DistributedMember)DataSerializer.readObject(dis);
+      InternalDistributedMember ids = (InternalDistributedMember)DataSerializer.readObject(dis);
+      ids.setHost(serverLocation.getHostName());
+      return ids;
     }
     catch (EOFException e) {
       throw e;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8a080323/geode-core/src/test/java/org/apache/geode/cache/client/internal/AutoConnectionSourceImplJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/cache/client/internal/AutoConnectionSourceImplJUnitTest.java b/geode-core/src/test/java/org/apache/geode/cache/client/internal/AutoConnectionSourceImplJUnitTest.java
index fb4b970..ab6f626 100644
--- a/geode-core/src/test/java/org/apache/geode/cache/client/internal/AutoConnectionSourceImplJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/cache/client/internal/AutoConnectionSourceImplJUnitTest.java
@@ -17,6 +17,7 @@
 package org.apache.geode.cache.client.internal;
 
 import org.apache.geode.CancelCriterion;
+import org.apache.geode.DataSerializer;
 import org.apache.geode.cache.*;
 import org.apache.geode.cache.client.NoAvailableLocatorsException;
 import org.apache.geode.cache.client.SubscriptionNotEnabledException;
@@ -24,22 +25,29 @@ import org.apache.geode.cache.client.internal.locator.ClientConnectionRequest;
 import org.apache.geode.cache.client.internal.locator.ClientConnectionResponse;
 import org.apache.geode.cache.client.internal.locator.LocatorListResponse;
 import org.apache.geode.cache.query.QueryService;
+import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.distributed.internal.PoolStatHelper;
 import org.apache.geode.distributed.internal.ServerLocation;
 import org.apache.geode.distributed.internal.SharedConfiguration;
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
 import org.apache.geode.distributed.internal.tcpserver.TcpClient;
 import org.apache.geode.distributed.internal.tcpserver.TcpHandler;
 import org.apache.geode.distributed.internal.tcpserver.TcpServer;
 import org.apache.geode.internal.AvailablePortHelper;
+import org.apache.geode.internal.HeapDataOutputStream;
+import org.apache.geode.internal.Version;
 import org.apache.geode.internal.cache.PoolStats;
+import org.apache.geode.internal.cache.tier.sockets.HandShake;
 import org.apache.geode.test.junit.categories.IntegrationTest;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
 import java.io.IOException;
 import java.net.ConnectException;
 import java.net.InetAddress;
@@ -140,6 +148,25 @@ public class AutoConnectionSourceImplJUnitTest {
   }
   
   @Test
+  public void testClientMembershipListenerHostAtClient() throws IOException {
+    String fakeHost = "fake.com";
+    InternalDistributedMember member = new InternalDistributedMember("localhost", 54638);
+    ServerLocation sl = new ServerLocation(fakeHost, 420);
+    
+    HeapDataOutputStream dos = new HeapDataOutputStream( Version.CURRENT);
+    HeapDataOutputStream hdos = new HeapDataOutputStream(Version.CURRENT);
+    DataSerializer.writeObject(member, hdos);
+    DataSerializer.writeByteArray(hdos.toByteArray(), dos);
+    hdos.close();
+    
+    DataInputStream dis = new DataInputStream(new ByteArrayInputStream(dos.toByteArray()));
+    
+    DistributedMember ret = (DistributedMember)HandShake.readServerMember(dis, sl);
+    
+    assertEquals(fakeHost, ret.getHost());
+  }
+  
+  @Test
   public void testNoServers() throws Exception {
     startFakeLocator();
     handler.nextConnectionResponse = new ClientConnectionResponse(null);


[18/50] [abbrv] incubator-geode git commit: Reverting gradle.properties change that came from the release branch

Posted by kl...@apache.org.
Reverting gradle.properties change that came from the release branch

The release type should be -SNAPSHOT on the develop branch
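
For reference, a minimal sketch of the convention this revert restores, based only on the comments visible in the diff below (the surrounding properties in the file are not shown here):

    # gradle.properties on the develop branch: artifacts are development snapshots
    releaseType = -SNAPSHOT

    # on a release branch the value is left blank so artifacts are published as a release:
    # releaseType =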


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/1fb0d0a9
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/1fb0d0a9
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/1fb0d0a9

Branch: refs/heads/feature/GEODE-1930
Commit: 1fb0d0a9d7105e49593dd7366b2337ad2e88417a
Parents: 3068fb6
Author: Dan Smith <up...@apache.org>
Authored: Fri Oct 14 16:33:40 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Fri Oct 14 16:33:40 2016 -0700

----------------------------------------------------------------------
 gradle.properties | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1fb0d0a9/gradle.properties
----------------------------------------------------------------------
diff --git a/gradle.properties b/gradle.properties
index e97463e..ed2cd86 100755
--- a/gradle.properties
+++ b/gradle.properties
@@ -29,7 +29,7 @@ releaseQualifier =
 # -SNAPSHOT - development version
 # <blank>   - release
 # This is only really relevant for Maven artifacts.
-releaseType = 
+releaseType = -SNAPSHOT
 
 # Set the buildId to add build metadata that can be viewed from
 # gfsh or pulse (`gfsh version --full`). Can be set using


[33/50] [abbrv] incubator-geode git commit: GEODE-538: Add check for persistent data recovery

Posted by kl...@apache.org.
GEODE-538: Add check for persistent data recovery

PartitionedRegion.getNodeForBucketReadOrLoad can return an invalid node
if persistent data recovery is in progress and a get() targets a bucket
that hasn't been recovered yet. This can result in returning an incorrect
value (null) or throwing ConflictingPersistentDataException from a get()
or put() on the region.

This change adds a check that persistent recovery has completed
before creating the new bucket. If recovery isn't complete, the
operation on the region fails with a PartitionOfflineException.

Queries on a region while persistent recovery is in progress can also
return incorrect results, so a similar check is added to
DefaultQuery.checkQueryOnPR.

This closes #264
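
As a rough illustration of the new behavior (this sketch is not part of the commit), an operation that races with persistent recovery can now surface a PartitionOfflineException instead of a wrong result, so a caller that may start before recovery finishes might retry. The class name, retry count, and backoff below are illustrative assumptions, not Geode API.

    import org.apache.geode.cache.Region;
    import org.apache.geode.cache.persistence.PartitionOfflineException;

    public final class RecoveryAwareReader {
      /**
       * Reads a key from a persistent partitioned region, retrying while the region
       * still reports unrecovered persistent data. Retry count and backoff are
       * arbitrary example values.
       */
      public static Object getWithRetry(Region<Object, Object> region, Object key)
          throws InterruptedException {
        for (int attempt = 0; attempt < 10; attempt++) {
          try {
            return region.get(key);            // may throw while recovery is incomplete
          } catch (PartitionOfflineException offline) {
            Thread.sleep(500L);                // give persistent recovery time to finish
          }
        }
        throw new IllegalStateException("Region still offline after retries");
      }
    }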


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/11ef3ebb
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/11ef3ebb
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/11ef3ebb

Branch: refs/heads/feature/GEODE-1930
Commit: 11ef3ebbe30a8340f57776bf4063684b91ccd0a3
Parents: 7511ffa
Author: Ken Howe <kh...@pivotal.io>
Authored: Thu Oct 6 15:02:24 2016 -0700
Committer: Anil <ag...@pivotal.io>
Committed: Wed Oct 19 15:49:33 2016 -0700

----------------------------------------------------------------------
 .../org/apache/geode/cache/query/Query.java     |  12 +
 .../cache/query/internal/DefaultQuery.java      |   6 +-
 .../internal/cache/PRHARedundancyProvider.java  |   9 +-
 .../geode/internal/cache/PartitionedRegion.java |  18 +-
 .../geode/internal/i18n/LocalizedStrings.java   |   1 +
 .../partitioned/PRBasicQueryDUnitTest.java      | 221 ++++++++++
 .../query/partitioned/PRQueryDUnitHelper.java   | 185 +++++++++
 ...tentColocatedPartitionedRegionDUnitTest.java | 411 ++++++++++++++++++-
 8 files changed, 844 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/11ef3ebb/geode-core/src/main/java/org/apache/geode/cache/query/Query.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/Query.java b/geode-core/src/main/java/org/apache/geode/cache/query/Query.java
index e27687d..670b262 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/Query.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/Query.java
@@ -89,6 +89,9 @@ public interface Query {
    * @throws QueryExecutionLowMemoryException
    *         If the query gets canceled due to low memory conditions and
    *         the resource manager critical heap percentage has been set
+   * @throws PartitionOfflineException
+   *         If persistent data recovery is not complete for a partitioned
+   *         region referred to in the query.
    */
   public Object execute()
     throws FunctionDomainException, TypeMismatchException, NameResolutionException,
@@ -150,6 +153,9 @@ public interface Query {
    * @throws QueryExecutionLowMemoryException
    *         If the query gets canceled due to low memory conditions and
    *         the resource manager critical heap percentage has been set
+   * @throws PartitionOfflineException
+   *         If persistent data recovery is not complete for a partitioned
+   *         region referred to in the query.
    *         
    */
   public Object execute(Object[] params)
@@ -220,6 +226,9 @@ public interface Query {
    * @throws QueryExecutionLowMemoryException
    *         If the query gets canceled due to low memory conditions and
    *         the resource manager critical heap percentage has been set
+   * @throws PartitionOfflineException
+   *         If persistent data recovery is not complete for a partitioned
+   *         region referred to in the query.
    */
   public Object execute(RegionFunctionContext context)
     throws FunctionDomainException, TypeMismatchException, NameResolutionException,
@@ -291,6 +300,9 @@ public interface Query {
    * @throws QueryExecutionLowMemoryException
    *         If the query gets canceled due to low memory conditions and
    *         the resource manager critical heap percentage has been set
+   * @throws PartitionOfflineException
+   *         If persistent data recovery is not complete for a partitioned
+   *         region referred to in the query.
    */
   public Object execute(RegionFunctionContext context, Object[] params)
     throws FunctionDomainException, TypeMismatchException, NameResolutionException,

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/11ef3ebb/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQuery.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQuery.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQuery.java
index 58df390..8175d82 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQuery.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQuery.java
@@ -27,11 +27,14 @@ import org.apache.geode.cache.client.internal.UserAttributes;
 import org.apache.geode.cache.execute.Function;
 import org.apache.geode.cache.execute.RegionFunctionContext;
 import org.apache.geode.cache.partition.PartitionRegionHelper;
+import org.apache.geode.cache.persistence.PartitionOfflineException;
+import org.apache.geode.cache.persistence.PersistentID;
 import org.apache.geode.cache.query.*;
 import org.apache.geode.cache.query.internal.cq.InternalCqQuery;
 import org.apache.geode.distributed.internal.DistributionConfig;
 import org.apache.geode.internal.NanoTimer;
 import org.apache.geode.internal.cache.*;
+import org.apache.geode.internal.cache.partitioned.RegionAdvisor;
 import org.apache.geode.internal.i18n.LocalizedStrings;
 
 import java.util.*;
@@ -581,7 +584,7 @@ public class DefaultQuery implements Query {
   }
 
 
-  private QueryExecutor checkQueryOnPR(Object[] parameters) throws RegionNotFoundException {
+  private QueryExecutor checkQueryOnPR(Object[] parameters) throws RegionNotFoundException, PartitionOfflineException {
 
     // check for PartititionedRegions. If a PartitionedRegion is referred to in the query,
     // then the following restrictions apply:
@@ -601,6 +604,7 @@ public class DefaultQuery implements Query {
         throw new RegionNotFoundException(LocalizedStrings.DefaultQuery_REGION_NOT_FOUND_0.toLocalizedString(regionPath));
       }
       if (rgn instanceof QueryExecutor) {
+        ((PartitionedRegion)rgn).checkPROffline();
         prs.add((QueryExecutor)rgn);
       }
     }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/11ef3ebb/geode-core/src/main/java/org/apache/geode/internal/cache/PRHARedundancyProvider.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PRHARedundancyProvider.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PRHARedundancyProvider.java
index cfedb67..6245c37 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PRHARedundancyProvider.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PRHARedundancyProvider.java
@@ -24,6 +24,7 @@ import org.apache.geode.cache.PartitionedRegionStorageException;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionDestroyedException;
 import org.apache.geode.cache.persistence.PartitionOfflineException;
+import org.apache.geode.cache.persistence.PersistentID;
 import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.distributed.internal.DM;
 import org.apache.geode.distributed.internal.DistributionConfig;
@@ -495,16 +496,20 @@ public class PRHARedundancyProvider
    *           redundancy.
    * @throws PartitionedRegionException
    *           if d-lock can not be acquired to create bucket.
-   * 
+   * @throws PartitionOfflineException
+   *           if persistent data recovery is not complete for this
+   *           partitioned region.
    */
   public InternalDistributedMember
     createBucketAtomically(final int bucketId,
                            final int newBucketSize,
                            final long startTime,
                            final boolean finishIncompleteCreation, String partitionName) throws PartitionedRegionStorageException,
-                                    PartitionedRegionException
+                                    PartitionedRegionException, PartitionOfflineException
   {
     final boolean isDebugEnabled = logger.isDebugEnabled();
+
+    prRegion.checkPROffline();
     
     // If there are insufficient stores throw *before* we try acquiring the
     // (very expensive) bucket lock or the (somewhat expensive) monitor on this

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/11ef3ebb/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
index baab79f..f7ecdaf 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
@@ -28,6 +28,8 @@ import org.apache.geode.cache.client.internal.*;
 import org.apache.geode.cache.execute.*;
 import org.apache.geode.cache.partition.PartitionListener;
 import org.apache.geode.cache.partition.PartitionNotAvailableException;
+import org.apache.geode.cache.persistence.PartitionOfflineException;
+import org.apache.geode.cache.persistence.PersistentID;
 import org.apache.geode.cache.query.*;
 import org.apache.geode.cache.query.internal.*;
 import org.apache.geode.cache.query.internal.index.*;
@@ -1397,6 +1399,21 @@ public class PartitionedRegion extends LocalRegion implements
     new UpdateAttributesProcessor(this).distribute(false);
   }
 
+  /**
+   * Throw an exception if persistent data recovery from disk is not complete
+   * for this region.
+   *
+   * @throws PartitionOfflineException
+   */
+  public void checkPROffline() throws PartitionOfflineException {
+    if (getDataPolicy().withPersistence() && !recoveredFromDisk) {
+      Set<PersistentID> persistIds = new HashSet(getRegionAdvisor().advisePersistentMembers().values());
+      persistIds.removeAll(getRegionAdvisor().adviseInitializedPersistentMembers().values());
+      throw new PartitionOfflineException(persistIds, LocalizedStrings.PRHARedundancyProvider_PARTITIONED_REGION_0_OFFLINE_HAS_UNRECOVERED_PERSISTENT_DATA_1
+          .toLocalizedString(new Object[] { getFullPath(), persistIds}));
+    }
+  }
+
   public final void updatePRConfig(PartitionRegionConfig prConfig,
       boolean putOnlyIfUpdated) {
     final Set<Node> nodes = prConfig.getNodes();
@@ -3057,7 +3074,6 @@ public class PartitionedRegion extends LocalRegion implements
       final RetryTimeKeeper snoozer) {
     final boolean isDebugEnabled = logger.isDebugEnabled();
     
-//    InternalDistributedSystem ids = (InternalDistributedSystem)this.cache.getDistributedSystem();
     RetryTimeKeeper localSnoozer = snoozer;
     // Prevent early access to buckets that are not completely created/formed
     // and

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/11ef3ebb/geode-core/src/main/java/org/apache/geode/internal/i18n/LocalizedStrings.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/i18n/LocalizedStrings.java b/geode-core/src/main/java/org/apache/geode/internal/i18n/LocalizedStrings.java
index 8bfdd68..7d762b8 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/i18n/LocalizedStrings.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/i18n/LocalizedStrings.java
@@ -702,6 +702,7 @@ public class LocalizedStrings {
   public static final StringId AbstractDistributionConfig_CLIENT_CONFLATION_PROP_NAME = new StringId(1839, "Client override for server queue conflation setting");
   public static final StringId PRHARRedundancyProvider_ALLOCATE_ENOUGH_MEMBERS_TO_HOST_BUCKET = new StringId(1840, "allocate enough members to host bucket.");
   public static final StringId PRHARedundancyProvider_TIME_OUT_WAITING_0_MS_FOR_CREATION_OF_BUCKET_FOR_PARTITIONED_REGION_1_MEMBERS_REQUESTED_TO_CREATE_THE_BUCKET_ARE_2 = new StringId(1841, "Time out waiting {0} ms for creation of bucket for partitioned region {1}. Members requested to create the bucket are: {2}");
+  public static final StringId PRHARedundancyProvider_PARTITIONED_REGION_0_OFFLINE_HAS_UNRECOVERED_PERSISTENT_DATA_1 = new StringId(1842, "Partitioned Region {0} is offline due to unrecovered persistent data, {1}");
 
   public static final StringId PUT_0_FAILED_TO_PUT_ENTRY_FOR_REGION_1_KEY_2_VALUE_3 = new StringId(1843, "{0}: Failed to put entry for region {1} key {2} value {3}");
   public static final StringId PUT_0_UNEXPECTED_EXCEPTION = new StringId(1844, "{0}: Unexpected Exception");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/11ef3ebb/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRBasicQueryDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRBasicQueryDUnitTest.java b/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRBasicQueryDUnitTest.java
index 8ef907a..224a7e0 100755
--- a/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRBasicQueryDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRBasicQueryDUnitTest.java
@@ -29,6 +29,7 @@ import static org.apache.geode.cache.query.Utils.*;
 
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.Region;
+import org.apache.geode.cache.persistence.PartitionOfflineException;
 import org.apache.geode.cache.query.Index;
 import org.apache.geode.cache.query.IndexType;
 import org.apache.geode.cache.query.Query;
@@ -38,6 +39,7 @@ import org.apache.geode.cache.query.data.Portfolio;
 import org.apache.geode.cache.query.data.PortfolioData;
 import org.apache.geode.cache30.CacheSerializableRunnable;
 import org.apache.geode.internal.cache.PartitionedRegionDUnitTestCase;
+import org.apache.geode.test.dunit.AsyncInvocation;
 import org.apache.geode.test.dunit.Host;
 import org.apache.geode.test.dunit.LogWriterUtils;
 import org.apache.geode.test.dunit.VM;
@@ -67,6 +69,8 @@ public class PRBasicQueryDUnitTest extends PartitionedRegionDUnitTestCase
     }
   }
 
+  private final static int MAX_SYNC_WAIT = 30 * 1000;
+  
   PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper();
 
   final String name = "Portfolios";
@@ -153,6 +157,223 @@ public class PRBasicQueryDUnitTest extends PartitionedRegionDUnitTestCase
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
   }
   
+  /**
+   * A basic dunit test that <br>
+   * 1. Creates a PR and colocated child region Accessor and Data Store with redundantCopies = 0.
+   * 2. Populates the region with test data.
+   * 3. Fires a query on accessor VM and verifies the result. 
+   * 4. Shuts down the caches, then restarts them asynchronously
+   * 5. Attempts the query while the regions are being recovered
+   * @throws Exception
+   */
+  @Test
+  public void testColocatedPRQueryDuringRecovery() throws Exception
+  {
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0); 
+    VM vm1 = host.getVM(1);
+    setCacheInVMs(vm0, vm1);
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Querying PR Test with DACK Started");
+
+    // Creating PR's on the participating VM's
+    // Creating Accessor node on the VM0.
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Creating the Accessor node in the PR");
+
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForColocatedPRCreate(name,
+        redundancy, PortfolioData.class, true));
+    // Creating local region on vm0 to compare the results of query.
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Successfully created the Accessor node in the PR");
+
+    // Creating the Datastores Nodes in the VM1.
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest:testColocatedPRBasicQuerying ----- Creating the Datastore node in the PR");
+    vm1.invoke(PRQHelp.getCacheSerializableRunnableForColocatedPRCreate(name,
+        redundancy, PortfolioData.class, true));
+
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Successfully Created the Datastore node in the PR");
+
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Successfully Created PR's across all VM's");
+
+    // Generating portfolio object array to be populated across the PR's & Local
+    // Regions
+
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
+    // Putting the data into the PR's created
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
+        cnt, cntDest));
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRDuplicatePuts(name, portfolio,
+        cnt, cntDest));
+    
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Inserted Portfolio data across PR's");
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
+        portfolio, cnt, cntDest));
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRDuplicatePuts(localName,
+        portfolio, cnt, cntDest));
+
+    // querying the VM for data and comparing the result with query result of
+    // local region.
+    // querying the VM for data
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
+        name, localName));
+
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Querying PR's 1st pass ENDED");
+
+    // Shut everything down and then restart to test queries during recovery
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForCloseCache());
+    vm1.invoke(PRQHelp.getCacheSerializableRunnableForCloseCache());
+    
+    // Re-create the regions - only create the parent regions on the datastores
+    setCacheInVMs(vm0, vm1);
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Creating the Accessor node in the PR");
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForColocatedParentCreate(name,
+        redundancy, PortfolioData.class, true));
+
+    // Creating local region on vm0 to compare the results of query.
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Successfully created the Accessor node in the PR");
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest:testColocatedPRBasicQuerying: re-creating the Datastore node in the PR");
+    vm1.invoke(PRQHelp.getCacheSerializableRunnableForColocatedParentCreate(name,
+        redundancy, PortfolioData.class, true));
+    
+    // Now start the child regions asynchronously so queries will happen during persistent recovery
+    AsyncInvocation vm0PR = vm0.invokeAsync(PRQHelp.getCacheSerializableRunnableForColocatedChildCreate(name,
+        redundancy, PortfolioData.class, true));
+    AsyncInvocation vm1PR = vm1.invokeAsync(PRQHelp.getCacheSerializableRunnableForColocatedChildCreate(name,
+        redundancy, PortfolioData.class, true));
+
+    // delay the query to let the recovery get underway
+    Thread.sleep(100);
+    
+    try {
+      // This is a repeat of the original query from before closing and restarting the datastores. This time
+      // it should fail due to persistent recovery that has not completed.
+      vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(name, localName, true));
+      fail("Expected PartitionOfflineException when queryiong a region with offline colocated child");
+    } catch (Exception e) {
+      if (!(e.getCause() instanceof PartitionOfflineException)) {
+        e.printStackTrace();
+        throw e;
+      }
+    }
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Querying PR's 2nd pass (after restarting regions) ENDED");
+  }
+
+  /**
+   * A basic dunit test that <br>
+   * 1. Creates a PR and colocated child region Accessor and Data Store with redundantCopies = 0.
+   * 2. Populates the region with test data.
+   * 3. Fires a query on accessor VM and verifies the result. 
+   * 4. Shuts down the caches, then restarts them asynchronously, but does not restart the child region
+   * 5. Attempts the query while the region is offline because of the missing child region
+   * @throws Exception
+   */
+  @SuppressWarnings("rawtypes")
+  @Test
+  public void testColocatedPRQueryDuringRecoveryWithMissingColocatedChild() throws Exception
+  {
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0); 
+    VM vm1 = host.getVM(1);
+    setCacheInVMs(vm0, vm1);
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Querying PR Test with DACK Started");
+
+    // Creating PR's on the participating VM's
+    // Creating Accessor node on the VM0.
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Creating the Accessor node in the PR");
+
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForColocatedPRCreate(name,
+        redundancy, PortfolioData.class, true));
+    // Creating local region on vm0 to compare the results of query.
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Successfully created the Accessor node in the PR");
+
+    // Creating the Datastores Nodes in the VM1.
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest:testColocatedPRBasicQuerying ----- Creating the Datastore node in the PR");
+    vm1.invoke(PRQHelp.getCacheSerializableRunnableForColocatedPRCreate(name,
+        redundancy, PortfolioData.class, true));
+
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Successfully Created the Datastore node in the PR");
+
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Successfully Created PR's across all VM's");
+
+    // Generating portfolio object array to be populated across the PR's & Local
+    // Regions
+
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
+    // Putting the data into the PR's created
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
+        cnt, cntDest));
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRDuplicatePuts(name, portfolio,
+        cnt, cntDest));
+    
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Inserted Portfolio data across PR's");
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
+        portfolio, cnt, cntDest));
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRDuplicatePuts(localName,
+        portfolio, cnt, cntDest));
+
+    // querying the VM for data and comparing the result with query result of
+    // local region.
+    // querying the VM for data
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
+        name, localName));
+
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Querying PR's 1st pass ENDED");
+
+    // Shut everything down and then restart to test queries during recovery
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForCloseCache());
+    vm1.invoke(PRQHelp.getCacheSerializableRunnableForCloseCache());
+    
+    // Re-create the only the parent region
+    setCacheInVMs(vm0, vm1);
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Creating the Accessor node in the PR");
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForColocatedParentCreate(name,
+        redundancy, PortfolioData.class, true));
+
+    // Creating local region on vm0 to compare the results of query.
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Successfully created the Accessor node in the PR");
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest:testColocatedPRBasicQuerying ----- re-creating the Datastore node in the PR");
+    vm1.invoke(PRQHelp.getCacheSerializableRunnableForColocatedParentCreate(name,
+        redundancy, PortfolioData.class, true));
+
+    try {
+      // This is a repeat of the original query from before closing and restarting the datastores. This time
+      // it should fail due to persistent recovery that has not completed.
+      vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(name, localName, true));
+      fail("Expected PartitionOfflineException when queryiong a region with offline colocated child");
+    } catch (Exception e) {
+      if (!(e.getCause() instanceof PartitionOfflineException)) {
+        throw e;
+      }
+    }
+    LogWriterUtils.getLogWriter()
+        .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Querying PR's 2nd pass (after restarting regions) ENDED");
+  }
+ 
   @Test
   public void testPRCountStarQuery() throws Exception
   {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/11ef3ebb/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRQueryDUnitHelper.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRQueryDUnitHelper.java b/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRQueryDUnitHelper.java
index cfb4190..9dc90fd 100755
--- a/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRQueryDUnitHelper.java
+++ b/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRQueryDUnitHelper.java
@@ -39,6 +39,7 @@ import org.apache.geode.cache.CacheClosedException;
 import org.apache.geode.cache.CacheException;
 import org.apache.geode.cache.CacheFactory;
 import org.apache.geode.cache.DataPolicy;
+import org.apache.geode.cache.DiskStore;
 import org.apache.geode.cache.EntryExistsException;
 import org.apache.geode.cache.EntryNotFoundException;
 import org.apache.geode.cache.PartitionAttributes;
@@ -249,6 +250,190 @@ public class PRQueryDUnitHelper implements Serializable {
     return (CacheSerializableRunnable)createPrRegion;
   }
 
+  /**
+   * This function creates a colocated pair of PR's given the scope & the
+   * redundancy parameters for the parent *
+   *
+   * @param regionName
+   * @param redundancy
+   * @param constraint
+   * @param makePersistent
+   * @return cacheSerializable object
+   */
+  public CacheSerializableRunnable getCacheSerializableRunnableForColocatedPRCreate(
+    final String regionName, final int redundancy, final Class constraint, boolean makePersistent) {
+
+    final String childRegionName = regionName + "Child";
+    final String diskName = "disk";
+    SerializableRunnable createPrRegion;
+    createPrRegion = new CacheSerializableRunnable(regionName) {
+      @Override
+      public void run2() throws CacheException
+      {
+
+        Cache cache = getCache();
+        Region partitionedregion = null;
+        Region childRegion = null;
+        AttributesFactory attr = new AttributesFactory();
+        attr.setValueConstraint(constraint);
+        if (makePersistent) {
+          DiskStore ds = cache.findDiskStore(diskName);
+          if (ds == null) {
+            ds = cache.createDiskStoreFactory().setDiskDirs(JUnit4CacheTestCase.getDiskDirs())
+                .create(diskName);
+          }
+          attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+          attr.setDiskStoreName(diskName);
+        } else {
+          attr.setDataPolicy(DataPolicy.PARTITION);
+          attr.setDiskStoreName(null);
+        }
+
+        PartitionAttributesFactory paf = new PartitionAttributesFactory();
+        paf.setRedundantCopies(redundancy);
+        attr.setPartitionAttributes(paf.create());
+
+        // parent region
+        partitionedregion = cache.createRegion(regionName, attr.create());
+        assertNotNull(
+            "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region "
+                + regionName + " not in cache", cache.getRegion(regionName));
+        assertNotNull(
+            "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region ref null",
+            partitionedregion);
+        assertTrue(
+            "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region ref claims to be destroyed",
+            !partitionedregion.isDestroyed());
+
+        // child region
+        attr.setValueConstraint(constraint);
+        paf.setColocatedWith(regionName);
+        attr.setPartitionAttributes(paf.create());
+        childRegion = cache.createRegion(childRegionName, attr.create());
+      }
+    };
+
+    return (CacheSerializableRunnable)createPrRegion;
+  }
+
+  /**
+   * This function creates the parent region of a colocated pair of PR's given the scope & the
+   * redundancy parameters for the parent *
+   *
+   * @param regionName
+   * @param redundancy
+   * @param constraint
+   * @param makePersistent
+   * @return cacheSerializable object
+   */
+  public CacheSerializableRunnable getCacheSerializableRunnableForColocatedParentCreate(
+    final String regionName, final int redundancy, final Class constraint, boolean makePersistent) {
+
+    final String childRegionName = regionName + "Child";
+    final String diskName = "disk";
+    SerializableRunnable createPrRegion;
+    createPrRegion = new CacheSerializableRunnable(regionName + "-NoChildRegion") {
+      @Override
+      public void run2() throws CacheException
+      {
+
+        Cache cache = getCache();
+        Region partitionedregion = null;
+        Region childRegion = null;
+        AttributesFactory attr = new AttributesFactory();
+        attr.setValueConstraint(constraint);
+        if (makePersistent) {
+          DiskStore ds = cache.findDiskStore(diskName);
+          if (ds == null) {
+            ds = cache.createDiskStoreFactory().setDiskDirs(JUnit4CacheTestCase.getDiskDirs())
+                .create(diskName);
+          }
+          attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+          attr.setDiskStoreName(diskName);
+        } else {
+          attr.setDataPolicy(DataPolicy.PARTITION);
+          attr.setDiskStoreName(null);
+        }
+
+        PartitionAttributesFactory paf = new PartitionAttributesFactory();
+        paf.setRedundantCopies(redundancy);
+        attr.setPartitionAttributes(paf.create());
+
+        // parent region
+        partitionedregion = cache.createRegion(regionName, attr.create());
+        assertNotNull(
+            "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region "
+                + regionName + " not in cache", cache.getRegion(regionName));
+        assertNotNull(
+            "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region ref null",
+            partitionedregion);
+        assertTrue(
+            "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region ref claims to be destroyed",
+            !partitionedregion.isDestroyed());
+      }
+    };
+
+    return (CacheSerializableRunnable)createPrRegion;
+  }
+
+  /**
+   * This function creates the child region of a colocated pair of PR's given the scope & the
+   * redundancy parameters for the parent *
+   *
+   * @param regionName
+   * @param redundancy
+   * @param constraint
+   * @param isPersistent
+   * @return cacheSerializable object
+   */
+  public CacheSerializableRunnable getCacheSerializableRunnableForColocatedChildCreate(
+    final String regionName, final int redundancy, final Class constraint, boolean isPersistent) {
+
+    final String childRegionName = regionName + "Child";
+    final String diskName = "disk";
+    SerializableRunnable createPrRegion;
+    createPrRegion = new CacheSerializableRunnable(regionName + "-ChildRegion") {
+      @Override
+      public void run2() throws CacheException
+      {
+
+        Cache cache = getCache();
+        Region partitionedregion = null;
+        Region childRegion = null;
+        AttributesFactory attr = new AttributesFactory();
+        attr.setValueConstraint(constraint);
+        if (isPersistent) {
+          DiskStore ds = cache.findDiskStore(diskName);
+          if (ds == null) {
+//            ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs())
+            ds = cache.createDiskStoreFactory().setDiskDirs(org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase.getDiskDirs())
+                .create(diskName);
+          }
+          attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+          attr.setDiskStoreName(diskName);
+        } else {
+          attr.setDataPolicy(DataPolicy.PARTITION);
+          attr.setDiskStoreName(null);
+        }
+
+        PartitionAttributesFactory paf = new PartitionAttributesFactory();
+        paf.setRedundantCopies(redundancy);
+        attr.setPartitionAttributes(paf.create());
+
+        // skip parent region creation
+        // partitionedregion = cache.createRegion(regionName, attr.create());
+
+        // child region
+        attr.setValueConstraint(constraint);
+        paf.setColocatedWith(regionName);
+        attr.setPartitionAttributes(paf.create());
+        childRegion = cache.createRegion(childRegionName, attr.create());
+      }
+    };
+
+    return (CacheSerializableRunnable)createPrRegion;
+  }
+
   public CacheSerializableRunnable getCacheSerializableRunnableForPRCreateLimitedBuckets(
       final String regionName, final int redundancy, final int buckets) {
         

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/11ef3ebb/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
index 0a25228..c15d545 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
@@ -50,7 +50,6 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import com.jayway.awaitility.core.ConditionTimeoutException;
-import org.junit.experimental.categories.Category;
 
 import org.apache.geode.admin.internal.AdminDistributedSystemImpl;
 import org.apache.geode.cache.AttributesFactory;
@@ -64,6 +63,7 @@ import org.apache.geode.cache.Region;
 import org.apache.geode.cache.control.RebalanceOperation;
 import org.apache.geode.cache.control.RebalanceResults;
 import org.apache.geode.cache.persistence.PartitionOfflineException;
+import org.apache.geode.distributed.internal.DistributionConfig;
 import org.apache.geode.distributed.internal.DistributionManager;
 import org.apache.geode.distributed.internal.DistributionMessage;
 import org.apache.geode.distributed.internal.DistributionMessageObserver;
@@ -72,11 +72,14 @@ import org.apache.geode.internal.FileUtil;
 import org.apache.geode.internal.cache.ColocationLogger;
 import org.apache.geode.internal.cache.InitialImageOperation.RequestImageMessage;
 import org.apache.geode.internal.cache.PartitionedRegion;
+import org.apache.geode.internal.cache.PartitionedRegionHelper;
 import org.apache.geode.internal.cache.control.InternalResourceManager;
 import org.apache.geode.internal.cache.control.InternalResourceManager.ResourceObserver;
 import org.apache.geode.test.dunit.Assert;
 import org.apache.geode.test.dunit.AsyncInvocation;
 import org.apache.geode.test.dunit.IgnoredException;
+import org.apache.geode.test.dunit.LogWriterUtils;
+import org.apache.geode.test.dunit.RMIException;
 import org.apache.geode.test.dunit.Host;
 import org.apache.geode.test.dunit.SerializableCallable;
 import org.apache.geode.test.dunit.SerializableRunnable;
@@ -2088,7 +2091,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
     };
     
     //runnable to create PRs
-    SerializableRunnable createPRs = new SerializableRunnable("region1") {
+    SerializableRunnable createPRs = new SerializableRunnable("createPRs") {
       public void run() {
         Cache cache = getCache();
         
@@ -2112,7 +2115,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
     };
     
     //runnable to close the cache.
-    SerializableRunnable closeCache = new SerializableRunnable("region1") {
+    SerializableRunnable closeCache = new SerializableRunnable("closeCache") {
       public void run() {
         closeCache();
       }
@@ -2120,7 +2123,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
     
     //Runnable to do a bunch of puts handle exceptions
     //due to the fact that member is offline.
-    SerializableRunnable doABunchOfPuts = new SerializableRunnable("region1") {
+    SerializableRunnable doABunchOfPuts = new SerializableRunnable("doABunchOfPuts") {
       public void run() {
         Cache cache = getCache();
         Region region = cache.getRegion(PR_REGION_NAME);
@@ -2200,7 +2203,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
   @Category(FlakyTest.class) // GEODE-506: time sensitive, async actions with 30 sec max
   @Test
   public void testRebalanceWithOfflineChildRegion() throws Throwable {
-    SerializableRunnable createParentPR = new SerializableRunnable() {
+    SerializableRunnable createParentPR = new SerializableRunnable("createParentPR") {
       public void run() {
         Cache cache = getCache();
         
@@ -2220,7 +2223,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
       }
     };
     
-    SerializableRunnable createChildPR = new SerializableRunnable() {
+    SerializableRunnable createChildPR = new SerializableRunnable("createChildPR") {
       public void run() {
         Cache cache = getCache();
         
@@ -2325,7 +2328,6 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
     };
     
     vm1.invoke(addHook);
-//    vm1.invoke(addHook);
     AsyncInvocation async0;
     AsyncInvocation async1;
     AsyncInvocation async2;
@@ -2335,7 +2337,6 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
       async1 = vm1.invokeAsync(createPRs);
 
       vm1.invoke(waitForHook);
-//      vm1.invoke(waitForHook);
       
       //Now create the parent region on vm-2. vm-2 did not
+      //previously host the child region.
@@ -2347,7 +2348,6 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
     
     } finally {
       vm1.invoke(removeHook);
-//      vm1.invoke(removeHook);
     }
     
     async0.getResult(MAX_WAIT);
@@ -2473,6 +2473,188 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
     closeCache();
   }
 
+  @Test
+  public void testParentRegionGetWithOfflineChildRegion() throws Throwable {
+
+    SerializableRunnable createParentPR = new SerializableRunnable("createParentPR") {
+      public void run() {
+        String oldRetryTimeout = System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", "10000");
+        try {
+          Cache cache = getCache();
+          DiskStore ds = cache.findDiskStore("disk");
+          if (ds == null) {
+            ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
+          }
+          AttributesFactory af = new AttributesFactory();
+          PartitionAttributesFactory paf = new PartitionAttributesFactory();
+          paf.setRedundantCopies(0);
+          paf.setRecoveryDelay(0);
+          af.setPartitionAttributes(paf.create());
+          af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+          af.setDiskStoreName("disk");
+          cache.createRegion(PR_REGION_NAME, af.create());
+        } finally {
+          System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
+        }
+      }
+    };
+
+    SerializableRunnable createChildPR = new SerializableRunnable("createChildPR") {
+      public void run() throws InterruptedException {
+        String oldRetryTimeout = System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", "10000");
+        try {
+          Cache cache = getCache();
+          AttributesFactory af = new AttributesFactory();
+          PartitionAttributesFactory paf = new PartitionAttributesFactory();
+          paf.setRedundantCopies(0);
+          paf.setRecoveryDelay(0);
+          paf.setColocatedWith(PR_REGION_NAME);
+          af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+          af.setDiskStoreName("disk");
+          af.setPartitionAttributes(paf.create());
+          // delay child region creations to cause a delay in persistent recovery
+          Thread.sleep(100);
+          cache.createRegion("region2", af.create());
+        } finally {
+          System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
+        }
+      }
+    };
+
+    boolean caughtException = false;
+    try {
+      // Expect a get() on the un-recovered (due to offline child) parent region to fail
+      regionGetWithOfflineChild(createParentPR, createChildPR, false);
+    } catch (Exception e) {
+      caughtException = true;
+      assertTrue(e instanceof RMIException);
+      assertTrue(e.getCause() instanceof PartitionOfflineException);
+    }
+    if (!caughtException) {
+      fail("Expected TimeoutException from remote");
+    }
+  }
+
+  @Test
+  public void testParentRegionGetWithRecoveryInProgress() throws Throwable {
+    SerializableRunnable createParentPR = new SerializableRunnable("createParentPR") {
+      public void run() {
+        String oldRetryTimeout = System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", "10000");
+        try {
+          Cache cache = getCache();
+          DiskStore ds = cache.findDiskStore("disk");
+          if (ds == null) {
+            ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
+          }
+          AttributesFactory af = new AttributesFactory();
+          PartitionAttributesFactory paf = new PartitionAttributesFactory();
+          paf.setRedundantCopies(0);
+          paf.setRecoveryDelay(0);
+          af.setPartitionAttributes(paf.create());
+          af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+          af.setDiskStoreName("disk");
+          cache.createRegion(PR_REGION_NAME, af.create());
+        } finally {
+          System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
+        System.out.println("oldRetryTimeout = " + oldRetryTimeout);        }
+      }
+    };
+
+    SerializableRunnable createChildPR = new SerializableRunnable("createChildPR") {
+      public void run() throws InterruptedException {
+        String oldRetryTimeout = System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", "10000");
+        try {
+          Cache cache = getCache();
+          AttributesFactory af = new AttributesFactory();
+          PartitionAttributesFactory paf = new PartitionAttributesFactory();
+          paf.setRedundantCopies(0);
+          paf.setRecoveryDelay(0);
+          paf.setColocatedWith(PR_REGION_NAME);
+          af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+          af.setDiskStoreName("disk");
+          af.setPartitionAttributes(paf.create());
+          cache.createRegion("region2", af.create());
+        } finally {
+          System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
+        }
+      }
+    };
+
+    boolean caughtException = false;
+    try {
+      // Expect a get() on the un-recovered (due to offline child) parent region to fail
+      regionGetWithOfflineChild(createParentPR, createChildPR, false);
+    } catch (Exception e) {
+      caughtException = true;
+      assertTrue(e instanceof RMIException);
+      assertTrue(e.getCause() instanceof PartitionOfflineException);
+    }
+    if (!caughtException) {
+      fail("Expected TimeoutException from remote");
+    }
+  }
+
+  @Test
+  public void testParentRegionPutWithRecoveryInProgress() throws Throwable {
+    SerializableRunnable createParentPR = new SerializableRunnable("createParentPR") {
+      public void run() {
+        String oldRetryTimeout = System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", "10000");
+        System.out.println("oldRetryTimeout = " + oldRetryTimeout);
+        try {
+          Cache cache = getCache();
+          DiskStore ds = cache.findDiskStore("disk");
+          if (ds == null) {
+            ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
+          }
+          AttributesFactory af = new AttributesFactory();
+          PartitionAttributesFactory paf = new PartitionAttributesFactory();
+          paf.setRedundantCopies(0);
+          paf.setRecoveryDelay(0);
+          af.setPartitionAttributes(paf.create());
+          af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+          af.setDiskStoreName("disk");
+          cache.createRegion(PR_REGION_NAME, af.create());
+        } finally {
+          System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
+        }
+      }
+    };
+
+    SerializableRunnable createChildPR = new SerializableRunnable("createChildPR") {
+      public void run() throws InterruptedException {
+        String oldRetryTimeout = System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", "10000");
+        try {
+          Cache cache = getCache();
+          AttributesFactory af = new AttributesFactory();
+          PartitionAttributesFactory paf = new PartitionAttributesFactory();
+          paf.setRedundantCopies(0);
+          paf.setRecoveryDelay(0);
+          paf.setColocatedWith(PR_REGION_NAME);
+          af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+          af.setDiskStoreName("disk");
+          af.setPartitionAttributes(paf.create());
+          Thread.sleep(1000);
+          cache.createRegion("region2", af.create());
+        } finally {
+          System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
+        }
+      }
+    };
+
+    boolean caughtException = false;
+    try {
+      // Expect a get() on the un-recovered (due to offline child) parent region to fail
+      regionGetWithOfflineChild(createParentPR, createChildPR, false);
+    } catch (Exception e) {
+      caughtException = true;
+      assertTrue(e instanceof RMIException);
+      assertTrue(e.getCause() instanceof PartitionOfflineException);
+    }
+    if (!caughtException) {
+      fail("Expected TimeoutException from remote");
+    }
+  }
+
   /**
    * Create three PRs on a VM, named region1, region2, and region3.
    * The colocated with attribute describes which region region3 
@@ -2523,15 +2705,15 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
     vm1.invoke(createParentPR);
     vm0.invoke(createChildPR);
     vm1.invoke(createChildPR);
-    
+
     //Create some buckets.
     createData(vm0, 0, NUM_BUCKETS, "a");
     createData(vm0, 0, NUM_BUCKETS, "a", "region2");
-    
+
     //Close the members
     closeCache(vm1);
     closeCache(vm0);
-    
+
     //Recreate the parent region. Try to make sure that
     //the member with the latest copy of the buckets
     //is the one that decides to throw away it's copy
@@ -2540,18 +2722,17 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
     AsyncInvocation async1 = vm1.invokeAsync(createParentPR);
     async0.getResult(MAX_WAIT);
     async1.getResult(MAX_WAIT);
-    
 
     //Now create the parent region on vm-2. vm-2 did not
     //previous host the child region.
     vm2.invoke(createParentPR);
-    
+
     //Rebalance the parent region.
     //This should not move any buckets, because
     //we haven't recovered the child region
     RebalanceResults rebalanceResults = rebalance(vm2);
     assertEquals(0, rebalanceResults.getTotalBucketTransfersCompleted());
-    
+
     //Recreate the child region. 
     async1 = vm1.invokeAsync(createChildPR);
     async0 = vm0.invokeAsync(createChildPR);
@@ -2568,6 +2749,206 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
     createData(vm0, 0, NUM_BUCKETS, "c", "region2");
   }
 
+  /**
+   * Create a colocated pair of persistent regions and populate them with data. Shut down the servers and then
+   * restart them and check the data.
+   * <p>
+   * On the restart, try region operations ({@code get()}) on the parent region before or during persistent recovery.
+   * The {@code concurrentCheckData} argument determines whether the operation on the parent region occurs before,
+   * or concurrently with, the child region creation and recovery.
+   *
+   * @param createParentPR {@link SerializableRunnable} for creating the parent region on one member
+   * @param createChildPR {@link SerializableRunnable} for creating the child region on one member
+   * @param concurrentCheckData whether the {@code get()} on the parent region runs concurrently with the child region creation and recovery
+   * @throws Throwable
+   */
+  public void regionGetWithOfflineChild(
+      SerializableRunnable createParentPR,
+      SerializableRunnable createChildPR,
+      boolean concurrentCheckData) throws Throwable {
+    Host host = Host.getHost(0);
+    final VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+    VM vm2 = host.getVM(2);
+
+    //Create the PRs on two members
+    vm0.invoke(createParentPR);
+    vm1.invoke(createParentPR);
+    vm0.invoke(createChildPR);
+    vm1.invoke(createChildPR);
+
+    //Create some buckets.
+    createData(vm0, 0, NUM_BUCKETS, "a");
+    createData(vm0, 0, NUM_BUCKETS, "a", "region2");
+
+    //Close the members
+    closeCache(vm1);
+    closeCache(vm0);
+
+    SerializableRunnable checkDataOnParent = (new SerializableRunnable("checkDataOnParent") {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(PR_REGION_NAME);
+
+        for (int i = 0; i < NUM_BUCKETS; i++) {
+          assertEquals("For key " + i, "a", region.get(i));
+        }
+      }
+    });
+
+    try {
+      //Recreate the parent region. Try to make sure that
+      //the member with the latest copy of the buckets
+      //is the one that decides to throw away its copy
+      //by starting it last.
+      AsyncInvocation async0 = vm0.invokeAsync(createParentPR);
+      AsyncInvocation async1 = vm1.invokeAsync(createParentPR);
+      async0.getResult(MAX_WAIT);
+      async1.getResult(MAX_WAIT);
+      //Now create the parent region on vm-2. vm-2 did not
+      //previously host the child region.
+      vm2.invoke(createParentPR);
+
+      AsyncInvocation async2 = null;
+      AsyncInvocation asyncCheck = null;
+      if (concurrentCheckData) {
+        //Recreate the child region.
+        async1 = vm1.invokeAsync(createChildPR);
+        async0 = vm0.invokeAsync(createChildPR);
+        async2 = vm2.invokeAsync(new SerializableRunnable("delay") {
+          @Override
+          public void run() throws InterruptedException {
+            Thread.sleep(100);
+            vm2.invoke(createChildPR);
+          }
+        });
+
+        asyncCheck = vm0.invokeAsync(checkDataOnParent);
+      } else {
+        vm0.invoke(checkDataOnParent);
+      }
+      async0.getResult(MAX_WAIT);
+      async1.getResult(MAX_WAIT);
+      if (async2 != null) {
+        async2.getResult(MAX_WAIT);
+      }
+      if (asyncCheck != null) {
+        asyncCheck.getResult(MAX_WAIT);
+      }
+      //Validate the data
+      checkData(vm0, 0, NUM_BUCKETS, "a");
+      checkData(vm0, 0, NUM_BUCKETS, "a", "region2");
+      //Make sure we can actually use the buckets in the child region.
+      createData(vm0, 0, NUM_BUCKETS, "c", "region2");
+    } finally {
+      //Close the members
+      closeCache(vm1);
+      closeCache(vm0);
+      closeCache(vm2);
+    }
+  }
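
Stripped of the DUnit scaffolding, the parent/child layout that regionGetWithOfflineChild() recovers piecemeal comes down to two persistent partitioned regions tied together with setColocatedWith(), as in the createChildPR runnable earlier in this hunk. A condensed sketch only; it assumes an existing Cache and a disk store named "disk", and the region names are illustrative:

    import org.apache.geode.cache.*;

    class ColocatedPersistentPairSketch {
      static void create(Cache cache) {   // assumes a disk store named "disk" already exists
        PartitionAttributesFactory paf = new PartitionAttributesFactory();
        paf.setRedundantCopies(0);
        AttributesFactory af = new AttributesFactory();
        af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
        af.setDiskStoreName("disk");
        af.setPartitionAttributes(paf.create());
        Region parent = cache.createRegion("parent", af.create());

        PartitionAttributesFactory childPaf = new PartitionAttributesFactory();
        childPaf.setRedundantCopies(0);
        childPaf.setColocatedWith("parent");   // child buckets are hosted with the parent's buckets
        AttributesFactory childAf = new AttributesFactory();
        childAf.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
        childAf.setDiskStoreName("disk");
        childAf.setPartitionAttributes(childPaf.create());
        Region child = cache.createRegion("child", childAf.create());
      }
    }
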
+
+  /**
+   * Create a colocated pair of persistent regions and populate them with data. Shut down the servers and then
+   * restart them.
+   * <p>
+   * On the restart, try region operations ({@code put()}) on the parent region before or during persistent recovery.
+   * The {@code concurrentCreateData} argument determines whether the operation on the parent region occurs before,
+   * or concurrently with, the child region creation and recovery.
+   *
+   * @param createParentPR {@link SerializableRunnable} for creating the parent region on one member
+   * @param createChildPR {@link SerializableRunnable} for creating the child region on one member
+   * @param concurrentCreateData whether the {@code put()} on the parent region runs concurrently with the child region creation and recovery
+   * @throws Throwable
+   */
+  public void regionPutWithOfflineChild(
+      SerializableRunnable createParentPR,
+      SerializableRunnable createChildPR,
+      boolean concurrentCreateData) throws Throwable {
+    Host host = Host.getHost(0);
+    final VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+    VM vm2 = host.getVM(2);
+
+    SerializableRunnable checkDataOnParent = (new SerializableRunnable("checkDataOnParent") {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(PR_REGION_NAME);
+
+        for (int i = 0; i < NUM_BUCKETS; i++) {
+          assertEquals("For key " + i, "a", region.get(i));
+        }
+      }
+    });
+
+    SerializableRunnable createDataOnParent = new SerializableRunnable("createDataOnParent") {
+
+      public void run() {
+        Cache cache = getCache();
+        LogWriterUtils.getLogWriter().info("creating data in " + PR_REGION_NAME);
+        Region region = cache.getRegion(PR_REGION_NAME);
+
+        for (int i = 0; i < NUM_BUCKETS; i++) {
+          region.put(i, "c");
+          assertEquals("For key " + i, "c", region.get(i));
+        }
+      }
+    };
+
+    //Create the PRs on two members
+    vm0.invoke(createParentPR);
+    vm1.invoke(createParentPR);
+    vm0.invoke(createChildPR);
+    vm1.invoke(createChildPR);
+
+    //Create some buckets.
+    createData(vm0, 0, NUM_BUCKETS, "a");
+    createData(vm0, 0, NUM_BUCKETS, "a", "region2");
+
+    //Close the members
+    closeCache(vm1);
+    closeCache(vm0);
+
+    try {
+      //Recreate the parent region. Try to make sure that
+      //the member with the latest copy of the buckets
+      //is the one that decides to throw away its copy
+      //by starting it last.
+      AsyncInvocation async0 = vm0.invokeAsync(createParentPR);
+      AsyncInvocation async1 = vm1.invokeAsync(createParentPR);
+      async0.getResult(MAX_WAIT);
+      async1.getResult(MAX_WAIT);
+      //Now create the parent region on vm-2. vm-2 did not
+      //previously host the child region.
+      vm2.invoke(createParentPR);
+
+      AsyncInvocation async2 = null;
+      AsyncInvocation asyncPut = null;
+      if (concurrentCreateData) {
+        //Recreate the child region.
+        async1 = vm1.invokeAsync(createChildPR);
+        async0 = vm0.invokeAsync(createChildPR);
+        async2 = vm2.invokeAsync(createChildPR);
+
+        Thread.sleep(100);
+        asyncPut = vm0.invokeAsync(createDataOnParent);
+      } else {
+        vm0.invoke(createDataOnParent);
+      }
+      async0.getResult(MAX_WAIT);
+      async1.getResult(MAX_WAIT);
+      if (async2 != null) {
+        async2.getResult(MAX_WAIT);
+      }
+      if (asyncPut != null) {
+        asyncPut.getResult(MAX_WAIT);
+      }
+      //Validate the data
+      checkData(vm0, 0, NUM_BUCKETS, "c");
+      checkData(vm0, 0, NUM_BUCKETS, "a", "region2");
+      //Make sure we can actually use the buckets in the child region.
+      createData(vm0, 0, NUM_BUCKETS, "c", "region2");
+    } finally {
+      //Close the members
+      closeCache(vm1);
+      closeCache(vm0);
+      closeCache(vm2);
+    }
+  }
+
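
The rebalance(VM) helper defined just below runs a rebalance inside a DUnit invocation; outside the harness the equivalent call sequence is roughly the following sketch, which assumes an existing Cache and is illustrative only:

    import org.apache.geode.cache.Cache;
    import org.apache.geode.cache.control.RebalanceOperation;
    import org.apache.geode.cache.control.RebalanceResults;

    class RebalanceSketch {
      static RebalanceResults rebalance(Cache cache) throws InterruptedException {
        RebalanceOperation op = cache.getResourceManager().createRebalanceFactory().start();
        RebalanceResults results = op.getResults();   // blocks until the rebalance finishes
        // While a colocated child is still offline, the tests above expect
        // results.getTotalBucketTransfersCompleted() to be 0.
        return results;
      }
    }
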
   private RebalanceResults rebalance(VM vm) {
     return (RebalanceResults) vm.invoke(new SerializableCallable() {
       


[45/50] [abbrv] incubator-geode git commit: Convert from ManagementTestCase to ManagementTestRule

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/management/QueryDataDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/QueryDataDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/QueryDataDUnitTest.java
index f4b135e..7f93c0d 100644
--- a/geode-core/src/test/java/org/apache/geode/management/QueryDataDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/QueryDataDUnitTest.java
@@ -16,866 +16,655 @@
  */
 package org.apache.geode.management;
 
-import static org.apache.geode.cache.query.Utils.createPortfoliosAndPositions;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
+import static com.jayway.jsonpath.matchers.JsonPathMatchers.*;
+import static java.util.concurrent.TimeUnit.*;
+import static org.apache.geode.cache.FixedPartitionAttributes.*;
+import static org.apache.geode.cache.query.Utils.*;
+import static org.apache.geode.management.internal.ManagementConstants.*;
+import static org.apache.geode.management.internal.ManagementStrings.*;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.hamcrest.Matchers.*;
+import static org.junit.Assert.assertThat;
+
+import java.io.Serializable;
 import java.util.ArrayList;
+import java.util.Calendar;
 import java.util.Date;
 import java.util.List;
 import java.util.Set;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeoutException;
+
+import javax.management.ObjectName;
+
+import com.jayway.awaitility.Awaitility;
+import com.jayway.awaitility.core.ConditionFactory;
+import org.json.JSONArray;
+import org.json.JSONException;
+import org.json.JSONObject;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.CacheException;
-import org.apache.geode.cache.CacheFactory;
 import org.apache.geode.cache.DataPolicy;
+import org.apache.geode.cache.EntryOperation;
 import org.apache.geode.cache.FixedPartitionAttributes;
 import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.PartitionResolver;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionFactory;
 import org.apache.geode.cache.RegionShortcut;
-import org.apache.geode.cache.query.data.Portfolio;
-import org.apache.geode.cache.query.dunit.QueryUsingFunctionContextDUnitTest;
-import org.apache.geode.cache30.CacheSerializableRunnable;
+import org.apache.geode.cache.query.data.Portfolio; // TODO
 import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.internal.cache.BucketRegion;
-import org.apache.geode.internal.cache.GemFireCacheImpl;
 import org.apache.geode.internal.cache.PartitionedRegion;
 import org.apache.geode.internal.cache.PartitionedRegionHelper;
-import org.apache.geode.internal.cache.partitioned.fixed.SingleHopQuarterPartitionResolver;
-import org.apache.geode.management.internal.ManagementConstants;
-import org.apache.geode.management.internal.ManagementStrings;
+import org.apache.geode.internal.cache.partitioned.fixed.SingleHopQuarterPartitionResolver; // TODO
 import org.apache.geode.management.internal.SystemManagementService;
 import org.apache.geode.management.internal.beans.BeanUtilFuncs;
 import org.apache.geode.management.internal.cli.json.TypedJson;
 import org.apache.geode.pdx.PdxInstance;
 import org.apache.geode.pdx.PdxInstanceFactory;
 import org.apache.geode.pdx.internal.PdxInstanceFactoryImpl;
-import org.apache.geode.test.dunit.LogWriterUtils;
-import org.apache.geode.test.dunit.SerializableRunnable;
-import org.apache.geode.test.dunit.Wait;
-import org.apache.geode.test.dunit.WaitCriterion;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.rules.DistributedUseJacksonForJsonPathRule;
 import org.apache.geode.test.junit.categories.DistributedTest;
-import org.apache.geode.test.junit.categories.FlakyTest;
-import org.json.JSONArray;
-import org.json.JSONException;
-import org.json.JSONObject;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
+import org.apache.geode.test.junit.rules.serializable.SerializableTestName;
 
 /**
- * 
- * 
+ * Distributed tests for {@link DistributedSystemMXBean#queryData(String, String, int)}.
+ * <p>
+ * <pre>
+ * Test Basic Json Strings for Partitioned Regions
+ * Test Basic Json Strings for Replicated Regions
+ * Test for all Region Types
+ * Test for primitive types
+ * Test for Nested Objects
+ * Test for Enums
+ * Test for collections
+ * Test for huge collection
+ * Test PDX types
+ * Test different projection types, e.g. SelectResult, normal bean, etc.
+ * Test Colocated Regions
+ * Test for Limit (both row count and Depth)
+ * Test ORDER BY ordering
+ * Test all attributes are covered in a complex type
+ * </pre>
  */
-
-// 1) Test Basic Json Strings for Partitioned Regions
-// Test Basic Json Strings for Replicated Regions
-// Test for all Region Types
-// Test for primitive types
-// Test for Nested Objects
-// Test for Enums
-// Test for collections
-// Test for huge collection
-// Test PDX types
-// Test different projects type e.g. SelectResult, normal bean etc..
-// Test Colocated Regions
-// Test for Limit ( both row count and Depth)
-// ORDER by orders
-// Test all attributes are covered in an complex type
-
 @Category(DistributedTest.class)
-public class QueryDataDUnitTest extends ManagementTestBase {
-
-  private static final long serialVersionUID = 1L;
-
-  private static final int MAX_WAIT = 100 * 1000;
-
-  private static final int cntDest = 30;
-
-  private static final int cnt = 0;
-
-  // PR 5 is co-located with 4
-  static String PartitionedRegionName1 = "TestPartitionedRegion1"; // default
-                                                                   // name
-  static String PartitionedRegionName2 = "TestPartitionedRegion2"; // default
-                                                                   // name
-  static String PartitionedRegionName3 = "TestPartitionedRegion3"; // default
-                                                                   // name
-  static String PartitionedRegionName4 = "TestPartitionedRegion4"; // default
-                                                                   // name
-  static String PartitionedRegionName5 = "TestPartitionedRegion5"; // default
-                                                                   // name
-
-  
-  static String repRegionName = "TestRepRegion"; // default name
-  static String repRegionName2 = "TestRepRegion2"; // default name
-  static String repRegionName3 = "TestRepRegion3"; // default name
-  static String repRegionName4 = "TestRepRegion4"; // default name
-  static String localRegionName = "TestLocalRegion"; // default name
-
-  public static String[] queries = new String[] {
-      "select * from /" + PartitionedRegionName1 + " where ID>=0",
-      "Select * from /" + PartitionedRegionName1 + " r1, /" + PartitionedRegionName2 + " r2 where r1.ID = r2.ID",
-      "Select * from /" + PartitionedRegionName1 + " r1, /" + PartitionedRegionName2
-          + " r2 where r1.ID = r2.ID AND r1.status = r2.status",
-      "Select * from /" + PartitionedRegionName1 + " r1, /" + PartitionedRegionName2 + " r2, /"
-          + PartitionedRegionName3 + " r3 where r1.ID = r2.ID and r2.ID = r3.ID",
-      "Select * from /" + PartitionedRegionName1 + " r1, /" + PartitionedRegionName2 + " r2, /"
-          + PartitionedRegionName3 + " r3  , /" + repRegionName
-          + " r4 where r1.ID = r2.ID and r2.ID = r3.ID and r3.ID = r4.ID",
-      "Select * from /" + PartitionedRegionName4 + " r4 , /" + PartitionedRegionName5 + " r5 where r4.ID = r5.ID" };
-
-  public static String[] nonColocatedQueries = new String[] {
-      "Select * from /" + PartitionedRegionName1 + " r1, /" + PartitionedRegionName4 + " r4 where r1.ID = r4.ID",
-      "Select * from /" + PartitionedRegionName1 + " r1, /" + PartitionedRegionName4 + " r4 , /"
-          + PartitionedRegionName5 + " r5 where r1.ID = r42.ID and r4.ID = r5.ID" };
-
-  public static String[] queriesForRR = new String[] { "<trace> select * from /" + repRegionName + " where ID>=0",
-      "Select * from /" + repRegionName + " r1, /" + repRegionName2 + " r2 where r1.ID = r2.ID",
-      "select * from /" + repRegionName3 + " where ID>=0" };
-  
-  public static String[] queriesForLimit = new String[] { "select * from /" + repRegionName4 };
-
-
-  public QueryDataDUnitTest() {
-    super();
-  }
-
-  @Override
-  protected final void postSetUpManagementTestBase() throws Exception {
-    initManagement(false);
-  }
-
-  private void initCommonRegions(){
-    createRegionsInNodes();
-    fillValuesInRegions();
-  }
+@SuppressWarnings({ "serial", "unused" })
+public class QueryDataDUnitTest implements Serializable {
 
-  /**
-   * This function puts portfolio objects into the created Region (PR or Local)
-   * *
-   */
-  public CacheSerializableRunnable getCacheSerializableRunnableForPRPuts(final String regionName,
-      final Object[] portfolio, final int from, final int to) {
-    SerializableRunnable puts = new CacheSerializableRunnable("Region Puts") {
-      @Override
-      public void run2() throws CacheException {
-        Cache cache = CacheFactory.getAnyInstance();
-        Region region = cache.getRegion(regionName);
-        for (int j = from; j < to; j++)
-          region.put(new Integer(j), portfolio[j]);
-        LogWriterUtils.getLogWriter()
-            .info(
-                "PRQueryDUnitHelper#getCacheSerializableRunnableForPRPuts: Inserted Portfolio data on Region "
-                    + regionName);
-      }
-    };
-    return (CacheSerializableRunnable) puts;
-  }
+  private static final int NUM_OF_BUCKETS = 20;
 
-  /**
-   * This function puts PDX objects into the created Region (REPLICATED) *
-   */
-  public CacheSerializableRunnable getCacheSerializableRunnableForPDXPuts(final String regionName) {
-    SerializableRunnable puts = new CacheSerializableRunnable("Region Puts") {
-      @Override
-      public void run2() throws CacheException {
-        putPdxInstances(regionName);
+  // PARTITIONED_REGION_NAME5 is co-located with PARTITIONED_REGION_NAME4
+  private static final String PARTITIONED_REGION_NAME1 = "PARTITIONED_REGION_NAME1";
+  private static final String PARTITIONED_REGION_NAME2 = "PARTITIONED_REGION_NAME2";
+  private static final String PARTITIONED_REGION_NAME3 = "PARTITIONED_REGION_NAME3";
+  private static final String PARTITIONED_REGION_NAME4 = "PARTITIONED_REGION_NAME4";
+  private static final String PARTITIONED_REGION_NAME5 = "PARTITIONED_REGION_NAME5";
 
-      }
-    };
-    return (CacheSerializableRunnable) puts;
-  }
-  
-  /**
-   * This function puts big collections to created Region (REPLICATED) *
-   */
-  public CacheSerializableRunnable getCacheSerializableRunnableForBigCollPuts(final String regionName) {
-    SerializableRunnable bigPuts = new CacheSerializableRunnable("Big Coll Puts") {
-      @Override
-      public void run2() throws CacheException {
-        putBigInstances(regionName);
+  private static final String REPLICATE_REGION_NAME1 = "REPLICATE_REGION_NAME1";
+  private static final String REPLICATE_REGION_NAME2 = "REPLICATE_REGION_NAME2";
+  private static final String REPLICATE_REGION_NAME3 = "REPLICATE_REGION_NAME3";
+  private static final String REPLICATE_REGION_NAME4 = "REPLICATE_REGION_NAME4";
 
-      }
-    };
-    return (CacheSerializableRunnable) bigPuts;
-  }
+  private static final String LOCAL_REGION_NAME = "LOCAL_REGION_NAME";
 
-  public void fillValuesInRegions() {
-    // Create common Portflios and NewPortfolios
-    final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
+  private static final String BIG_COLLECTION_ELEMENT_ = "BIG_COLLECTION_ELEMENT_";
+  private static final String BIG_COLLECTION_ = "BIG_COLLECTION_";
 
-    // Fill local region
-    managedNode1.invoke(getCacheSerializableRunnableForPRPuts(localRegionName, portfolio, cnt, cntDest));
+  private static final String[] QUERIES = new String[] {
+    "SELECT * FROM /" + PARTITIONED_REGION_NAME1 + " WHERE ID >= 0",
+    "SELECT * FROM /" + PARTITIONED_REGION_NAME1 + " r1, /" + PARTITIONED_REGION_NAME2 + " r2 WHERE r1.ID = r2.ID",
+    "SELECT * FROM /" + PARTITIONED_REGION_NAME1 + " r1, /" + PARTITIONED_REGION_NAME2 + " r2 WHERE r1.ID = r2.ID AND r1.status = r2.status",
+    "SELECT * FROM /" + PARTITIONED_REGION_NAME1 + " r1, /" + PARTITIONED_REGION_NAME2 + " r2, /" + PARTITIONED_REGION_NAME3 + " r3 WHERE r1.ID = r2.ID AND r2.ID = r3.ID",
+    "SELECT * FROM /" + PARTITIONED_REGION_NAME1 + " r1, /" + PARTITIONED_REGION_NAME2 + " r2, /" + PARTITIONED_REGION_NAME3 + " r3, /" + REPLICATE_REGION_NAME1 + " r4 WHERE r1.ID = r2.ID AND r2.ID = r3.ID AND r3.ID = r4.ID",
+    "SELECT * FROM /" + PARTITIONED_REGION_NAME4 + " r4, /" + PARTITIONED_REGION_NAME5 + " r5 WHERE r4.ID = r5.ID"
+  };
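
Each of these strings ultimately goes through DistributedSystemMXBean.queryData(). A minimal illustrative sketch of calling it directly from the member hosting the DistributedSystemMXBean; per the assertions later in this file, a successful call returns JSON with a "result" key and an error returns JSON with a "message" key:

    import org.apache.geode.cache.Cache;
    import org.apache.geode.management.DistributedSystemMXBean;
    import org.apache.geode.management.ManagementService;

    class QueryDataSketch {
      static String runClusterWide(Cache cache) throws Exception {
        DistributedSystemMXBean bean =
            ManagementService.getManagementService(cache).getDistributedSystemMXBean();
        // null member list = query the whole cluster; the last argument caps the result set for this call
        return bean.queryData("SELECT * FROM /PARTITIONED_REGION_NAME1 WHERE ID >= 0", null, 10);
      }
    }
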
 
-    // Fill replicated region
-    managedNode1.invoke(getCacheSerializableRunnableForPRPuts(repRegionName, portfolio, cnt, cntDest));
-    managedNode2.invoke(getCacheSerializableRunnableForPRPuts(repRegionName2, portfolio, cnt, cntDest));
+  private static final String[] QUERIES_FOR_REPLICATED = new String[] {
+    "<TRACE> SELECT * FROM /" + REPLICATE_REGION_NAME1 + " WHERE ID >= 0",
+    "SELECT * FROM /" + REPLICATE_REGION_NAME1 + " r1, /" + REPLICATE_REGION_NAME2 + " r2 WHERE r1.ID = r2.ID",
+    "SELECT * FROM /" + REPLICATE_REGION_NAME3 + " WHERE ID >= 0"
+  };
 
-    // Fill Partition Region
-    managedNode1.invoke(getCacheSerializableRunnableForPRPuts(PartitionedRegionName1, portfolio, cnt, cntDest));
-    managedNode1.invoke(getCacheSerializableRunnableForPRPuts(PartitionedRegionName2, portfolio, cnt, cntDest));
-    managedNode1.invoke(getCacheSerializableRunnableForPRPuts(PartitionedRegionName3, portfolio, cnt, cntDest));
-    managedNode1.invoke(getCacheSerializableRunnableForPRPuts(PartitionedRegionName4, portfolio, cnt, cntDest));
-    managedNode1.invoke(getCacheSerializableRunnableForPRPuts(PartitionedRegionName5, portfolio, cnt, cntDest));
+  private static final String[] QUERIES_FOR_LIMIT = new String[] {
+    "SELECT * FROM /" + REPLICATE_REGION_NAME4
+  };
 
-    managedNode1.invoke(getCacheSerializableRunnableForPDXPuts(repRegionName3));
+  private DistributedMember member1;
+  private DistributedMember member2;
+  private DistributedMember member3;
 
-  }
+  @Manager
+  private VM managerVM;
 
-  public void putPdxInstances(String regionName) throws CacheException {
-    PdxInstanceFactory pf = PdxInstanceFactoryImpl.newCreator("Portfolio", false);
-    Region r = getCache().getRegion(regionName);
-    pf.writeInt("ID", 111);
-    pf.writeString("status", "active");
-    pf.writeString("secId", "IBM");
-    PdxInstance pi = pf.create();
-    r.put("IBM", pi);
-
-    pf = PdxInstanceFactoryImpl.newCreator("Portfolio", false);
-    pf.writeInt("ID", 222);
-    pf.writeString("status", "inactive");
-    pf.writeString("secId", "YHOO");
-    pi = pf.create();
-    r.put("YHOO", pi);
-
-    pf = PdxInstanceFactoryImpl.newCreator("Portfolio", false);
-    pf.writeInt("ID", 333);
-    pf.writeString("status", "active");
-    pf.writeString("secId", "GOOGL");
-    pi = pf.create();
-    r.put("GOOGL", pi);
-
-    pf = PdxInstanceFactoryImpl.newCreator("Portfolio", false);
-    pf.writeInt("ID", 111);
-    pf.writeString("status", "inactive");
-    pf.writeString("secId", "VMW");
-    pi = pf.create();
-    r.put("VMW", pi);
-  }
-  
-  public void putBigInstances(String regionName) throws CacheException {
-    Region r = getCache().getRegion(regionName);
-
-    for(int i = 0 ; i < 1200 ; i++){
-      List<String> bigColl1 = new ArrayList<String>();
-      for(int j = 0; j< 200 ; j++){
-        bigColl1.add("BigColl_1_ElemenNo_"+j);
-      }
-      r.put("BigColl_1_"+i, bigColl1);
-    }
-    
-  }
+  @Member
+  private VM[] memberVMs;
 
-  private void createRegionsInNodes() {
+  @Rule
+  public DistributedUseJacksonForJsonPathRule useJacksonForJsonPathRule = new DistributedUseJacksonForJsonPathRule();
 
-    // Create local Region on servers
-    managedNode1.invoke(() -> QueryUsingFunctionContextDUnitTest.createLocalRegion());
+  @Rule
+  public ManagementTestRule managementTestRule = ManagementTestRule.builder().managersFirst(false).start(true).build();
 
-    // Create ReplicatedRegion on servers
-    managedNode1.invoke(() -> QueryUsingFunctionContextDUnitTest.createReplicatedRegion());
-    managedNode2.invoke(() -> QueryUsingFunctionContextDUnitTest.createReplicatedRegion());
-    managedNode3.invoke(() -> QueryUsingFunctionContextDUnitTest.createReplicatedRegion());
-    try {
-      this.createDistributedRegion(managedNode2, repRegionName2);
-      this.createDistributedRegion(managedNode1, repRegionName3);
-      this.createDistributedRegion(managedNode1, repRegionName4);
-    } catch (Exception e1) {
-      fail("Test Failed while creating region " + e1.getMessage());
-    }
+  @Rule
+  public SerializableTestName testName = new SerializableTestName();
 
-    // Create two colocated PartitionedRegions On Servers.
-    managedNode1.invoke(() -> QueryUsingFunctionContextDUnitTest.createColocatedPR());
-    managedNode2.invoke(() -> QueryUsingFunctionContextDUnitTest.createColocatedPR());
-    managedNode3.invoke(() -> QueryUsingFunctionContextDUnitTest.createColocatedPR());
-
-    this.managingNode.invoke(new SerializableRunnable("Wait for all Region Proxies to get replicated") {
-
-      public void run() {
-        Cache cache = getCache();
-        SystemManagementService service = (SystemManagementService) getManagementService();
-        DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
-
-        try {
-          MBeanUtil.getDistributedRegionMbean("/" + PartitionedRegionName1, 3);
-          MBeanUtil.getDistributedRegionMbean("/" + PartitionedRegionName2, 3);
-          MBeanUtil.getDistributedRegionMbean("/" + PartitionedRegionName3, 3);
-          MBeanUtil.getDistributedRegionMbean("/" + PartitionedRegionName4, 3);
-          MBeanUtil.getDistributedRegionMbean("/" + PartitionedRegionName5, 3);
-          MBeanUtil.getDistributedRegionMbean("/" + repRegionName, 3);
-          MBeanUtil.getDistributedRegionMbean("/" + repRegionName2, 1);
-          MBeanUtil.getDistributedRegionMbean("/" + repRegionName3, 1);
-          MBeanUtil.getDistributedRegionMbean("/" + repRegionName4, 1);
-        } catch (Exception e) {
-          fail("Region proxies not replicated in time");
-        }
-      }
-    });
+  @Before
+  public void before() throws Exception {
+    this.member1 = this.managementTestRule.getDistributedMember(this.memberVMs[0]);
+    this.member2 = this.managementTestRule.getDistributedMember(this.memberVMs[1]);
+    this.member3 = this.managementTestRule.getDistributedMember(this.memberVMs[2]);
 
+    createRegionsInNodes();
+    generateValuesInRegions();
   }
 
-  // disabled for bug 49698, serialization problems introduced by r44615
   @Test
   public void testQueryOnPartitionedRegion() throws Exception {
-
-    final DistributedMember member1 = getMember(managedNode1);
-    final DistributedMember member2 = getMember(managedNode2);
-    final DistributedMember member3 = getMember(managedNode3);
-    
-    initCommonRegions();
-    
-    
-    this.managingNode.invoke(new SerializableRunnable("testQueryOnPartitionedRegion") {
-
-      public void run() {
-        Cache cache = getCache();
-        SystemManagementService service = (SystemManagementService) getManagementService();
-        DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
-
-        assertNotNull(bean);
-
-        try {
-          for (int i = 0; i < queries.length; i++) {
-            String jsonString = null;
-            if (i == 0) {
-              jsonString = bean.queryData(queries[i], null, 10);
-              if (jsonString.contains("result") && !jsonString.contains("No Data Found")) {
-               
-                //getLogWriter().info("testQueryOnPartitionedRegion" + queries[i] + " is = " + jsonString);
-                JSONObject jsonObj = new JSONObject(jsonString);  
-              } else {
-                fail("Query On Cluster should have result");
-              }
-            } else {
-              jsonString = bean.queryData(queries[i], member1.getId(), 10);
-              if (jsonString.contains("member")) {
-                JSONObject jsonObj = new JSONObject(jsonString);
-                //getLogWriter().info("testQueryOnPartitionedRegion" + queries[i] + " is = " + jsonString);
-              } else {
-                fail("Query On Member should have member");
-              }
-            }
-
-            
-
-          }
-        } catch (JSONException e) {
-          e.printStackTrace();
-          fail(e.getMessage());
-        } catch (Exception e) {
-          e.printStackTrace();
-          fail(e.getMessage());
-        }
+    this.managerVM.invoke(this.testName.getMethodName(), () -> {
+      DistributedSystemMXBean distributedSystemMXBean = this.managementTestRule.getSystemManagementService().getDistributedSystemMXBean();
+
+      String jsonString = distributedSystemMXBean.queryData(QUERIES[0], null, 10);
+      assertThat(jsonString).contains("result").doesNotContain("No Data Found");
+
+      for (int i = 0; i < QUERIES.length; i++) {
+        jsonString = distributedSystemMXBean.queryData(QUERIES[i], member1.getId(), 10);
+        assertThat(jsonString).contains("result");
+        assertThat(jsonString).contains("member");
+        assertThat("QUERIES[" + i + "]", jsonString, isJson(withJsonPath("$..result", anything())));
+
+        // TODO: better assertions
+        // assertThat("QUERIES[" + i + "]", result,
+        //            isJson(withJsonPath("$..member",
+        //                                equalTo(JsonPath.compile(result)))));
+        //                                //equalTo(new JSONObject().put(String.class.getName(), member1.getId())))));
+        //System.out.println(JsonPath.read(jsonString, "$.result.*"));
+        //System.out.println(JsonPath.read(jsonString, "$['result']['member']"));
+
+        verifyJsonIsValid(jsonString);
       }
     });
   }
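
The hamcrest-style assertions above come from the json-path-assert matchers imported at the top of the file (JsonPathMatchers). A standalone illustration, not part of this commit, against a hypothetical payload:

    import static com.jayway.jsonpath.matchers.JsonPathMatchers.isJson;
    import static com.jayway.jsonpath.matchers.JsonPathMatchers.withJsonPath;
    import static org.hamcrest.Matchers.anything;
    import static org.hamcrest.Matchers.equalTo;
    import static org.junit.Assert.assertThat;

    class JsonPathMatcherSketch {
      static void check() {
        String json = "{\"member\":\"server1\",\"result\":[{\"ID\":1}]}";
        assertThat(json, isJson(withJsonPath("$.member", equalTo("server1"))));
        assertThat(json, isJson(withJsonPath("$.result", anything())));
      }
    }
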
 
   @Test
   public void testQueryOnReplicatedRegion() throws Exception {
+    this.managerVM.invoke(this.testName.getMethodName(), () -> {
+      DistributedSystemMXBean distributedSystemMXBean = this.managementTestRule.getSystemManagementService().getDistributedSystemMXBean();
 
-    
-    initCommonRegions();
-    
-    
-    this.managingNode.invoke(new SerializableRunnable("Query Test For REPL1") {
-
-      
-      public void run() {
-        Cache cache = getCache();
-        SystemManagementService service = (SystemManagementService) getManagementService();
-        DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
-        assertNotNull(bean);
-
-        try {
-          for (int i = 0; i < queriesForRR.length; i++) {
-            String jsonString1 = null;
-            if (i == 0) {
-              jsonString1 = bean.queryData(queriesForRR[i], null, 10);
-              if (jsonString1.contains("result") && !jsonString1.contains("No Data Found")) {
-                JSONObject jsonObj = new JSONObject(jsonString1);
-              } else {
-                fail("Query On Cluster should have result");
-              }
-            } else {
-              jsonString1 = bean.queryData(queriesForRR[i], null, 10);
-              if (jsonString1.contains("result")) {
-                JSONObject jsonObj = new JSONObject(jsonString1);
-              } else {
-                LogWriterUtils.getLogWriter().info("Failed Test String" + queriesForRR[i] + " is = " + jsonString1);
-                fail("Join on Replicated did not work.");
-              }
-            }
-          }
-
-        } catch (JSONException e) {
-          fail(e.getMessage());
-        } catch (IOException e) {
-          fail(e.getMessage());
-        } catch (Exception e) {
-          fail(e.getMessage());
-        }
+      String jsonString = distributedSystemMXBean.queryData(QUERIES_FOR_REPLICATED[0], null, 10);
+      assertThat(jsonString).contains("result").doesNotContain("No Data Found");
+
+      for (int i = 0; i < QUERIES_FOR_REPLICATED.length; i++) {
+        assertThat(jsonString).contains("result");
+        verifyJsonIsValid(jsonString);
       }
     });
   }
-  
-  @Category(FlakyTest.class) // GEODE-1539
+
   @Test
   public void testMemberWise() throws Exception {
+    this.managerVM.invoke(this.testName.getMethodName(), () -> {
+      DistributedSystemMXBean distributedSystemMXBean = this.managementTestRule.getSystemManagementService().getDistributedSystemMXBean();
 
-    final DistributedMember member1 = getMember(managedNode1);
-    final DistributedMember member2 = getMember(managedNode2);
-    
-    
-    initCommonRegions();
-    
-    
-    this.managingNode.invoke(new SerializableRunnable("testMemberWise") {
-
-      public void run() {
-        Cache cache = getCache();
-        SystemManagementService service = (SystemManagementService) getManagementService();
-        DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
-        assertNotNull(bean);
-
-        try {
-          byte[] bytes = bean.queryDataForCompressedResult(queriesForRR[0], member1.getId() + "," + member2.getId(), 2);
-          String jsonString = BeanUtilFuncs.decompress(bytes);
-          JSONObject jsonObj = new JSONObject(jsonString);
-          //String memberID = (String)jsonObj.get("member");
-          
-          //getLogWriter().info("testMemberWise " + queriesForRR[2] + " is = " + jsonString);
-
-        } catch (JSONException e) {
-          fail(e.getMessage());
-        } catch (IOException e) {
-          fail(e.getMessage());
-        } catch (Exception e) {
-          fail(e.getMessage());
-        }
-      }
+      byte[] bytes = distributedSystemMXBean.queryDataForCompressedResult(QUERIES_FOR_REPLICATED[0], member1.getId() + "," + member2.getId(), 2);
+      String jsonString = BeanUtilFuncs.decompress(bytes);
+
+      verifyJsonIsValid(jsonString);
     });
   }
 
-  
- 
   @Test
   public void testLimitForQuery() throws Exception {
-    
-    initCommonRegions();
-    managedNode1.invoke(getCacheSerializableRunnableForBigCollPuts(repRegionName4));
-    
-    managingNode.invoke(new SerializableRunnable("testLimitForQuery") {
-      public void run() {
-        SystemManagementService service = (SystemManagementService) getManagementService();
-        DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
-        assertNotNull(bean);
-
-        try {
-
-          // Query With Default values
-          assertEquals(TypedJson.DEFAULT_COLLECTION_ELEMENT_LIMIT, bean.getQueryCollectionsDepth());
-          assertEquals(ManagementConstants.DEFAULT_QUERY_LIMIT, bean.getQueryResultSetLimit());
-
-          String jsonString1 = bean.queryData(queriesForLimit[0], null, 0);
-          if (jsonString1.contains("result") && !jsonString1.contains("No Data Found")) {
-            JSONObject jsonObj = new JSONObject(jsonString1);
-            assertTrue(jsonString1.contains("BigColl_1_ElemenNo_"));
-            JSONArray arr = jsonObj.getJSONArray("result");
-            assertEquals(ManagementConstants.DEFAULT_QUERY_LIMIT, arr.length());
-            // Get the first element
-
-            JSONArray array1 = (JSONArray) arr.getJSONArray(0);
-            // Get the ObjectValue
-
-            JSONObject collectionObject = (JSONObject) array1.get(1);
-            assertEquals(100, collectionObject.length());
-
-          } else {
-            fail("Query On Cluster should have result");
-          }
-
-          // Query With Ovverride Values
-          
-          int newQueryCollectionDepth = 150;
-          int newQueryResultSetLimit = 500;
-          bean.setQueryCollectionsDepth(newQueryCollectionDepth);
-          bean.setQueryResultSetLimit(newQueryResultSetLimit);
-          
-          assertEquals(newQueryCollectionDepth, bean.getQueryCollectionsDepth());
-          assertEquals(newQueryResultSetLimit, bean.getQueryResultSetLimit());
-
-          jsonString1 = bean.queryData(queriesForLimit[0], null, 0);
-          if (jsonString1.contains("result") && !jsonString1.contains("No Data Found")) {
-            JSONObject jsonObj = new JSONObject(jsonString1);
-            assertTrue(jsonString1.contains("BigColl_1_ElemenNo_"));
-            JSONArray arr = jsonObj.getJSONArray("result");
-            assertEquals(newQueryResultSetLimit, arr.length());
-            // Get the first element
-
-            JSONArray array1 = (JSONArray) arr.getJSONArray(0);
-            // Get the ObjectValue
-
-            JSONObject collectionObject = (JSONObject) array1.get(1);
-            assertEquals(newQueryCollectionDepth, collectionObject.length());
-
-          } else {
-            fail("Query On Cluster should have result");
-          }
-
-        } catch (JSONException e) {
-          fail(e.getMessage());
-        } catch (IOException e) {
-          fail(e.getMessage());
-        } catch (Exception e) {
-          fail(e.getMessage());
-        }
+    this.memberVMs[0].invoke("putBigInstances", () -> putBigInstances(REPLICATE_REGION_NAME4));
 
-      }
+    this.managerVM.invoke(this.testName.getMethodName(), () -> {
+      DistributedSystemMXBean distributedSystemMXBean = this.managementTestRule.getSystemManagementService().getDistributedSystemMXBean();
+
+      // Query With Default values
+      assertThat(distributedSystemMXBean.getQueryCollectionsDepth()).isEqualTo(TypedJson.DEFAULT_COLLECTION_ELEMENT_LIMIT);
+      assertThat(distributedSystemMXBean.getQueryResultSetLimit()).isEqualTo(DEFAULT_QUERY_LIMIT);
+
+      String jsonString = distributedSystemMXBean.queryData(QUERIES_FOR_LIMIT[0], null, 0);
+
+      verifyJsonIsValid(jsonString);
+      assertThat(jsonString).contains("result").doesNotContain("No Data Found");
+      assertThat(jsonString).contains(BIG_COLLECTION_ELEMENT_);
+
+      JSONObject jsonObject = new JSONObject(jsonString);
+      JSONArray jsonArray = jsonObject.getJSONArray("result");
+      assertThat(jsonArray.length()).isEqualTo(DEFAULT_QUERY_LIMIT);
+
+      // Get the first element
+      JSONArray jsonArray1 = jsonArray.getJSONArray(0);
+
+      // Get the ObjectValue
+      JSONObject collectionObject = (JSONObject) jsonArray1.get(1);
+      assertThat(collectionObject.length()).isEqualTo(100);
+
+      // Query With Override Values
+      int newQueryCollectionDepth = 150;
+      int newQueryResultSetLimit = 500;
+
+      distributedSystemMXBean.setQueryCollectionsDepth(newQueryCollectionDepth);
+      distributedSystemMXBean.setQueryResultSetLimit(newQueryResultSetLimit);
+
+      assertThat(distributedSystemMXBean.getQueryCollectionsDepth()).isEqualTo(newQueryCollectionDepth);
+      assertThat(distributedSystemMXBean.getQueryResultSetLimit()).isEqualTo(newQueryResultSetLimit);
+
+      jsonString = distributedSystemMXBean.queryData(QUERIES_FOR_LIMIT[0], null, 0);
+
+      verifyJsonIsValid(jsonString);
+      assertThat(jsonString).contains("result").doesNotContain("No Data Found");
+
+      jsonObject = new JSONObject(jsonString);
+      assertThat(jsonString).contains(BIG_COLLECTION_ELEMENT_);
+
+      jsonArray = jsonObject.getJSONArray("result");
+      assertThat(jsonArray.length()).isEqualTo(newQueryResultSetLimit);
+
+      // Get the first element
+      jsonArray1 = jsonArray.getJSONArray(0);
+
+      // Get the ObjectValue
+      collectionObject = (JSONObject) jsonArray1.get(1);
+      assertThat(collectionObject.length()).isEqualTo(newQueryCollectionDepth);
     });
   }
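
The two knobs exercised above are independent: setQueryResultSetLimit() caps how many rows appear in "result", while setQueryCollectionsDepth() caps how many elements of each collection value are serialized. A condensed sketch under the same assumptions as the test (the bean is obtained as in the earlier queryData sketch, and per the assertions above a per-call limit of 0 falls back to the configured result set limit):

    import org.apache.geode.management.DistributedSystemMXBean;

    class QueryLimitSketch {
      static String queryWithLimits(DistributedSystemMXBean bean) throws Exception {
        bean.setQueryResultSetLimit(500);     // at most 500 rows in the "result" array
        bean.setQueryCollectionsDepth(150);   // at most 150 elements serialized per collection value
        // A per-call limit of 0 defers to the configured result set limit above.
        return bean.queryData("SELECT * FROM /REPLICATE_REGION_NAME4", null, 0);
      }
    }
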
 
   @Test
-  public void testErrors() throws Exception{
-    
-    final DistributedMember member1 = getMember(managedNode1);
-    final DistributedMember member2 = getMember(managedNode2);
-    final DistributedMember member3 = getMember(managedNode3);
-    
-    initCommonRegions();
-    
-    this.managingNode.invoke(new SerializableRunnable("Test Error") {
-      public void run() {
-        SystemManagementService service = (SystemManagementService) getManagementService();
-        DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
-        assertNotNull(bean);
-
-        try {
-          Cache cache = getCache();
-          try {
-            String message = bean.queryData("Select * from TestPartitionedRegion1", null, 2); 
-            
-            JSONObject jsonObject = new JSONObject();
-            jsonObject.put("message", ManagementStrings.QUERY__MSG__INVALID_QUERY.toLocalizedString("Region mentioned in query probably missing /"));
-            String expectedMessage = jsonObject.toString();
-            assertEquals(expectedMessage,message);
-            
-          } catch (Exception e) {
-            fail(e.getLocalizedMessage());
-          }
-          
-          try {
-            String query = "Select * from /PartitionedRegionName9 r1, PartitionedRegionName2 r2 where r1.ID = r2.ID";
-            String message = bean.queryData(query, null, 2);
-            JSONObject jsonObject = new JSONObject();
-            jsonObject.put("message", ManagementStrings.QUERY__MSG__REGIONS_NOT_FOUND.toLocalizedString("/PartitionedRegionName9"));
-            String expectedMessage = jsonObject.toString();
-            assertEquals(expectedMessage,message);
-          } catch (Exception e) {
-            fail(e.getLocalizedMessage());
-          
-          }
-          
-          final String testTemp = "testTemp";
-          try {
-            RegionFactory rf = cache.createRegionFactory(RegionShortcut.REPLICATE);
-            
-            rf.create(testTemp);
-            String query = "Select * from /"+testTemp;
-            
-            String message = bean.queryData(query, member1.getId(), 2);
-            
-            JSONObject jsonObject = new JSONObject();
-            jsonObject.put("message", ManagementStrings.QUERY__MSG__REGIONS_NOT_FOUND_ON_MEMBERS.toLocalizedString("/"+testTemp));
-            String expectedMessage = jsonObject.toString();
-            assertEquals(expectedMessage,message);
-          } catch (Exception e) {
-            fail(e.getLocalizedMessage());
-          }
-          
-          try {
-            String query = queries[1];            
-            String message = bean.queryData(query,null, 2);
-            
-            JSONObject jsonObject = new JSONObject();
-            jsonObject.put("message", ManagementStrings.QUERY__MSG__JOIN_OP_EX.toLocalizedString());
-            String expectedMessage = jsonObject.toString();
-            
-            assertEquals(expectedMessage,message);
-          } catch (Exception e) {
-            fail(e.getLocalizedMessage());
-          }
-
-        } catch (Exception e) {
-          fail(e.getMessage());
-        }
+  public void testErrors() throws Exception {
+    this.managerVM.invoke(this.testName.getMethodName(), () -> {
+      DistributedSystemMXBean distributedSystemMXBean = this.managementTestRule.getSystemManagementService().getDistributedSystemMXBean();
 
-      }
+      String invalidQuery = "SELECT * FROM " + PARTITIONED_REGION_NAME1;
+      String invalidQueryResult = distributedSystemMXBean.queryData(invalidQuery, null, 2);
+      assertThat(invalidQueryResult, isJson(withJsonPath("$.message", equalTo(QUERY__MSG__INVALID_QUERY.toLocalizedString("Region mentioned in query probably missing /")))));
+
+      String nonexistentRegionName = this.testName.getMethodName() + "_NONEXISTENT_REGION";
+      String regionsNotFoundQuery = "SELECT * FROM /" + nonexistentRegionName + " r1, PARTITIONED_REGION_NAME2 r2 WHERE r1.ID = r2.ID";
+      String regionsNotFoundResult = distributedSystemMXBean.queryData(regionsNotFoundQuery, null, 2);
+      assertThat(regionsNotFoundResult, isJson(withJsonPath("$.message", equalTo(QUERY__MSG__REGIONS_NOT_FOUND.toLocalizedString("/" + nonexistentRegionName)))));
+
+      String regionName = this.testName.getMethodName() + "_REGION";
+      String regionsNotFoundOnMembersQuery = "SELECT * FROM /" + regionName;
+
+      RegionFactory regionFactory = this.managementTestRule.getCache().createRegionFactory(RegionShortcut.REPLICATE);
+      regionFactory.create(regionName);
+
+      String regionsNotFoundOnMembersResult = distributedSystemMXBean.queryData(regionsNotFoundOnMembersQuery, member1.getId(), 2);
+      assertThat(regionsNotFoundOnMembersResult, isJson(withJsonPath("$.message", equalTo(QUERY__MSG__REGIONS_NOT_FOUND_ON_MEMBERS.toLocalizedString("/" + regionName)))));
+
+      String joinMissingMembersQuery = QUERIES[1];
+      String joinMissingMembersResult = distributedSystemMXBean.queryData(joinMissingMembersQuery, null, 2);
+      assertThat(joinMissingMembersResult, isJson(withJsonPath("$.message", equalTo(QUERY__MSG__JOIN_OP_EX.toLocalizedString()))));
     });
   }
-  
+
   @Test
-  public void testNormalRegions() throws Exception{
-    
-    final DistributedMember member1 = getMember(managedNode1);
-    final DistributedMember member2 = getMember(managedNode2);
-    final DistributedMember member3 = getMember(managedNode3);
-    initCommonRegions();
-    
-    this.managingNode.invoke(new SerializableRunnable("Test Error") {
-      public void run() {
-        SystemManagementService service = (SystemManagementService) getManagementService();
-        DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
-        assertNotNull(bean);
-        final String testNormal = "testNormal";
-        final String testTemp = "testTemp";
-        
-        final String testSNormal = "testSNormal"; // to Reverse order of regions while getting Random region in QueryDataFunction
-        final String testATemp = "testATemp";
-        
-        try {
-          Cache cache = getCache();
-          RegionFactory rf = cache.createRegionFactory(RegionShortcut.LOCAL_HEAP_LRU);          
-          rf.create(testNormal);
-          rf.create(testSNormal);
-          
-          
-          Region region = cache.getRegion("/"+testNormal);
-          assertTrue(region.getAttributes().getDataPolicy() == DataPolicy.NORMAL);
-          
-          RegionFactory rf1 = cache.createRegionFactory(RegionShortcut.REPLICATE);
-          rf1.create(testTemp);
-          rf1.create(testATemp);
-          String query1 = "Select * from /testTemp r1,/testNormal r2 where r1.ID = r2.ID";
-          String query2 = "Select * from /testSNormal r1,/testATemp r2 where r1.ID = r2.ID";
-          String query3 = "Select * from /testSNormal";
-          
-          try {
-           
-            bean.queryDataForCompressedResult(query1,null, 2);
-            bean.queryDataForCompressedResult(query2,null, 2);
-            bean.queryDataForCompressedResult(query3,null, 2);
-          } catch (Exception e) {
-            e.printStackTrace();
-          }
-
-        } catch (Exception e) {
-          fail(e.getMessage());
-        }
+  public void testNormalRegions() throws Exception {
+    this.managerVM.invoke(this.testName.getMethodName(), () -> {
+      DistributedSystemMXBean distributedSystemMXBean = this.managementTestRule.getSystemManagementService().getDistributedSystemMXBean();
 
-      }
+      String normalRegionName1 = this.testName.getMethodName() + "_NORMAL_REGION_1";
+      String tempRegionName1 = this.testName.getMethodName() + "_TEMP_REGION_1";
+
+      String normalRegionName2 = this.testName.getMethodName() + "_NORMAL_REGION_2"; // to reverse the order in which QueryDataFunction picks a random region [?]
+      String tempRegionName2 = this.testName.getMethodName() + "_TEMP_REGION_2";
+
+      Cache cache = this.managementTestRule.getCache();
+
+      RegionFactory regionFactory = cache.createRegionFactory(RegionShortcut.LOCAL_HEAP_LRU);
+      regionFactory.create(normalRegionName1);
+      regionFactory.create(normalRegionName2);
+
+      Region region = cache.getRegion("/" + normalRegionName1);
+      assertThat(region.getAttributes().getDataPolicy()).isEqualTo(DataPolicy.NORMAL);
+
+      RegionFactory regionFactory1 = cache.createRegionFactory(RegionShortcut.REPLICATE);
+      regionFactory1.create(tempRegionName1);
+      regionFactory1.create(tempRegionName2);
+
+      String query1 = "SELECT * FROM /" + tempRegionName1 + " r1, /" + normalRegionName1 + " r2 WHERE r1.ID = r2.ID";
+      String query2 = "SELECT * FROM /" + normalRegionName2 + " r1, /" + tempRegionName2 + " r2 WHERE r1.ID = r2.ID";
+      String query3 = "SELECT * FROM /" + normalRegionName2;
+
+      distributedSystemMXBean.queryDataForCompressedResult(query1, null, 2);
+      distributedSystemMXBean.queryDataForCompressedResult(query2, null, 2);
+      distributedSystemMXBean.queryDataForCompressedResult(query3, null, 2);
+
+      // TODO: assert results of queryDataForCompressedResult?
     });
   }
- 
+
   @Test
   public void testRegionsLocalDataSet() throws Exception {
+    String partitionedRegionName = this.testName.getMethodName() + "_PARTITIONED_REGION";
+
+    String[] values1 = new String[] { "val1", "val2", "val3" };
+    String[] values2 = new String[] { "val4", "val5", "val6" };
+
+    this.memberVMs[0].invoke(this.testName.getMethodName() + " Create Region", () -> {
+      PartitionAttributesFactory partitionAttributesFactory = new PartitionAttributesFactory();
+      partitionAttributesFactory.setRedundantCopies(2).setTotalNumBuckets(12);
+
+      List<FixedPartitionAttributes> fixedPartitionAttributesList = createFixedPartitionList(1);
+      for (FixedPartitionAttributes fixedPartitionAttributes : fixedPartitionAttributesList) {
+        partitionAttributesFactory.addFixedPartitionAttributes(fixedPartitionAttributes);
+      }
+      partitionAttributesFactory.setPartitionResolver(new SingleHopQuarterPartitionResolver());
 
-    final DistributedMember member1 = getMember(managedNode1);
-    final DistributedMember member2 = getMember(managedNode2);
-    final DistributedMember member3 = getMember(managedNode3);
-
-    final String PartitionedRegionName6 = "LocalDataSetTest";
-
-    final String[] valArray1 = new String[] { "val1", "val2", "val3" };
-    final String[] valArray2 = new String[] { "val4", "val5", "val6" };
-    this.managedNode1.invoke(new SerializableRunnable("testRegionsLocalDataSet:Create Region") {
-      public void run() {
-        try {
-    
-          Cache cache = getCache();
-          PartitionAttributesFactory paf = new PartitionAttributesFactory();
-
-          paf.setRedundantCopies(2).setTotalNumBuckets(12);
-          
-          List<FixedPartitionAttributes> fpaList = createFixedPartitionList(1);
-          for (FixedPartitionAttributes fpa : fpaList) {
-            paf.addFixedPartitionAttributes(fpa);
-          }
-          paf.setPartitionResolver(new SingleHopQuarterPartitionResolver());
-          
-          RegionFactory rf = cache.createRegionFactory(RegionShortcut.PARTITION).setPartitionAttributes(paf.create());
-              
-          Region r = rf.create(PartitionedRegionName6);
-
-          for (int i = 0; i < valArray1.length; i++) {
-            r.put(new Date(2013,1,i+5), valArray1[i]);
-          }
-        } catch (Exception e) {
-          e.printStackTrace();
-          fail(e.getMessage());
-        }
+      RegionFactory regionFactory = this.managementTestRule.getCache().createRegionFactory(RegionShortcut.PARTITION).setPartitionAttributes(partitionAttributesFactory.create());
+      Region region = regionFactory.create(partitionedRegionName);
 
+      for (int i = 0; i < values1.length; i++) {
+        region.put(getDate(2013, 1, i + 5), values1[i]);
       }
     });
 
-    this.managedNode2.invoke(new SerializableRunnable("testRegionsLocalDataSet: Create Region") {
-      public void run() {
-        try {
-
-          Cache cache = getCache();
-          PartitionAttributesFactory paf = new PartitionAttributesFactory();
-
-          paf.setRedundantCopies(2).setTotalNumBuckets(12);
-          
-          List<FixedPartitionAttributes> fpaList = createFixedPartitionList(2);
-          for (FixedPartitionAttributes fpa : fpaList) {
-            paf.addFixedPartitionAttributes(fpa);
-          }
-          paf.setPartitionResolver(new SingleHopQuarterPartitionResolver());
-          
-          RegionFactory rf = cache.createRegionFactory(RegionShortcut.PARTITION).setPartitionAttributes(paf.create());
-              
-          Region r = rf.create(PartitionedRegionName6);
-          
-          for (int i = 0; i < valArray2.length; i++) {
-            r.put(new Date(2013,5,i+5), valArray2[i]);
-          }
-          
-        } catch (Exception e) {
-          fail(e.getMessage());
-        }
+    this.memberVMs[1].invoke(this.testName.getMethodName() + " Create Region", () -> {
+      PartitionAttributesFactory partitionAttributesFactory = new PartitionAttributesFactory();
+      partitionAttributesFactory.setRedundantCopies(2).setTotalNumBuckets(12);
 
+      List<FixedPartitionAttributes> fixedPartitionAttributesList = createFixedPartitionList(2);
+      for (FixedPartitionAttributes fixedPartitionAttributes : fixedPartitionAttributesList) {
+        partitionAttributesFactory.addFixedPartitionAttributes(fixedPartitionAttributes);
+      }
+      partitionAttributesFactory.setPartitionResolver(new SingleHopQuarterPartitionResolver());
+
+      RegionFactory regionFactory = this.managementTestRule.getCache().createRegionFactory(RegionShortcut.PARTITION).setPartitionAttributes(partitionAttributesFactory.create());
+      Region region = regionFactory.create(partitionedRegionName);
+
+      for (int i = 0; i < values2.length; i++) {
+        region.put(getDate(2013, 5, i + 5), values2[i]);
       }
     });
 
-    this.managedNode3.invoke(new SerializableRunnable("testRegionsLocalDataSet: Create Region") {
-      public void run() {
-        try {
-
-          Cache cache = getCache();
-          PartitionAttributesFactory paf = new PartitionAttributesFactory();
-
-          paf.setRedundantCopies(2).setTotalNumBuckets(12);
-          
-          List<FixedPartitionAttributes> fpaList = createFixedPartitionList(3);
-          for (FixedPartitionAttributes fpa : fpaList) {
-            paf.addFixedPartitionAttributes(fpa);
-          }
-          paf.setPartitionResolver(new SingleHopQuarterPartitionResolver());
-          
-          RegionFactory rf = cache.createRegionFactory(RegionShortcut.PARTITION).setPartitionAttributes(paf.create());
-              
-          Region r = rf.create(PartitionedRegionName6);
-          
-
-          
-        } catch (Exception e) {
-          fail(e.getMessage());
-        }
+    this.memberVMs[2].invoke(this.testName.getMethodName() + " Create Region", () -> {
+      PartitionAttributesFactory partitionAttributesFactory = new PartitionAttributesFactory();
+      partitionAttributesFactory.setRedundantCopies(2).setTotalNumBuckets(12);
 
+      List<FixedPartitionAttributes> fixedPartitionAttributesList = createFixedPartitionList(3);
+      for (FixedPartitionAttributes fixedPartitionAttributes : fixedPartitionAttributesList) {
+        partitionAttributesFactory.addFixedPartitionAttributes(fixedPartitionAttributes);
       }
+      partitionAttributesFactory.setPartitionResolver(new SingleHopQuarterPartitionResolver());
+
+      RegionFactory regionFactory = this.managementTestRule.getCache().createRegionFactory(RegionShortcut.PARTITION).setPartitionAttributes(partitionAttributesFactory.create());
+      regionFactory.create(partitionedRegionName);
     });
 
-    final List<String> member1RealData = (List<String>)managedNode1.invoke(() -> QueryDataDUnitTest.getLocalDataSet( PartitionedRegionName6 ));
-   
-    final List<String> member2RealData = (List<String>) managedNode2.invoke(() -> QueryDataDUnitTest.getLocalDataSet( PartitionedRegionName6 ));
-    
-    final List<String> member3RealData = (List<String>) managedNode3.invoke(() -> QueryDataDUnitTest.getLocalDataSet( PartitionedRegionName6 ));
-    
-
-    
-    this.managingNode.invoke(new SerializableRunnable("testRegionsLocalDataSet") {
-      public void run() {
-        SystemManagementService service = (SystemManagementService) getManagementService();
-        DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
-        assertNotNull(bean);
-
-        try {
-          String query = "Select * from /" + PartitionedRegionName6;
-
-          try {
-            final DistributedRegionMXBean regionMBean = MBeanUtil.getDistributedRegionMbean("/"
-                + PartitionedRegionName6, 3);
-
-            Wait.waitForCriterion(new WaitCriterion() {
-
-              public String description() {
-                return "Waiting for all entries to get reflected at managing node";
-              }
-
-              public boolean done() {
-
-                boolean done = (regionMBean.getSystemRegionEntryCount() == (valArray1.length + valArray2.length));
-                return done;
-              }
-
-            }, MAX_WAIT, 1000, true);
-
-            LogWriterUtils.getLogWriter().info("member1RealData  is = " + member1RealData);
-            LogWriterUtils.getLogWriter().info("member2RealData  is = " + member2RealData);
-            LogWriterUtils.getLogWriter().info("member3RealData  is = " + member3RealData);
-            
-            String member1Result = bean.queryData(query, member1.getId(), 0);
-            LogWriterUtils.getLogWriter().info("member1Result " + query + " is = " + member1Result);
-
-
-            String member2Result = bean.queryData(query, member2.getId(), 0);
-            LogWriterUtils.getLogWriter().info("member2Result " + query + " is = " + member2Result);
-            
-            String member3Result = bean.queryData(query, member3.getId(), 0);
-            LogWriterUtils.getLogWriter().info("member3Result " + query + " is = " + member3Result);
-            
-            for (String val : member1RealData) {
-              assertTrue(member1Result.contains(val));
-             }
-            
-            for (String val : member2RealData) {
-              assertTrue(member2Result.contains(val));
-            }
-
-            assertTrue(member3Result.contains("No Data Found"));
-          } catch (Exception e) {
-            fail(e.getMessage());
-          }
-
-        } catch (Exception e) {
-          fail(e.getMessage());
-        }
+    final List<String> member1RealData = this.memberVMs[0].invoke(() -> getLocalDataSet(partitionedRegionName));
+    final List<String> member2RealData = this.memberVMs[1].invoke(() -> getLocalDataSet(partitionedRegionName));
+    final List<String> member3RealData = this.memberVMs[2].invoke(() -> getLocalDataSet(partitionedRegionName));
+
+    this.managerVM.invoke(this.testName.getMethodName(), () -> {
+      DistributedSystemMXBean distributedSystemMXBean = this.managementTestRule.getSystemManagementService().getDistributedSystemMXBean();
+      DistributedRegionMXBean distributedRegionMXBean = awaitDistributedRegionMXBean("/" + partitionedRegionName, 3);
+
+      String alias = "Waiting for all entries to get reflected at managing node";
+      int expectedEntryCount = values1.length + values2.length;
+      await(alias).until(() -> assertThat(distributedRegionMXBean.getSystemRegionEntryCount()).isEqualTo(expectedEntryCount));
 
+      String query = "Select * from /" + partitionedRegionName;
+
+      String member1Result = distributedSystemMXBean.queryData(query, member1.getId(), 0);
+      verifyJsonIsValid(member1Result);
+
+      String member2Result = distributedSystemMXBean.queryData(query, member2.getId(), 0);
+      verifyJsonIsValid(member2Result);
+
+      String member3Result = distributedSystemMXBean.queryData(query, member3.getId(), 0);
+      verifyJsonIsValid(member3Result);
+
+      for (String val : member1RealData) {
+        assertThat(member1Result).contains(val);
       }
+
+      for (String val : member2RealData) {
+        assertThat(member2Result).contains(val);
+      }
+
+      assertThat(member3Result).contains("No Data Found");
     });
   }
-  
-  
-  private static List<String> getLocalDataSet(String region){
-    PartitionedRegion parRegion = PartitionedRegionHelper.getPartitionedRegion(region, GemFireCacheImpl.getExisting());
-    Set<BucketRegion> localPrimaryBucketRegions = parRegion.getDataStore().getAllLocalPrimaryBucketRegions();
-    List<String> allPrimaryVals = new ArrayList<String>();
-    for(BucketRegion brRegion : localPrimaryBucketRegions){
-      for(Object obj : brRegion.values()){
-        allPrimaryVals.add((String)obj);
+
+  private Date getDate(final int year, final int month, final int date) {
+    Calendar calendar = Calendar.getInstance();
+    calendar.set(year, month, date);
+    return calendar.getTime();
+  }
+
+  private void verifyJsonIsValid(final String jsonString) throws JSONException {
+    assertThat(jsonString, isJson());
+    assertThat(jsonString, hasJsonPath("$.result"));
+    assertThat(new JSONObject(jsonString)).isNotNull();
+  }
+
+  private void putDataInRegion(final String regionName, final Object[] portfolio, final int from, final int to) {
+    Region region = this.managementTestRule.getCache().getRegion(regionName);
+    for (int i = from; i < to; i++) {
+      region.put(new Integer(i), portfolio[i]);
+    }
+  }
+
+  private void generateValuesInRegions() {
+    int COUNT_DESTINATION = 30;
+    int COUNT_FROM = 0;
+
+    // Create common Portfolios and NewPortfolios
+    final Portfolio[] portfolio = createPortfoliosAndPositions(COUNT_DESTINATION);
+
+    // Fill local region
+    this.memberVMs[0].invoke(() -> putDataInRegion(LOCAL_REGION_NAME, portfolio, COUNT_FROM, COUNT_DESTINATION));
+
+    // Fill replicated region
+    this.memberVMs[0].invoke(() -> putDataInRegion(REPLICATE_REGION_NAME1, portfolio, COUNT_FROM, COUNT_DESTINATION));
+    this.memberVMs[1].invoke(() -> putDataInRegion(REPLICATE_REGION_NAME2, portfolio, COUNT_FROM, COUNT_DESTINATION));
+
+    // Fill Partition Region
+    this.memberVMs[0].invoke(() -> putDataInRegion(PARTITIONED_REGION_NAME1, portfolio, COUNT_FROM, COUNT_DESTINATION));
+    this.memberVMs[0].invoke(() -> putDataInRegion(PARTITIONED_REGION_NAME2, portfolio, COUNT_FROM, COUNT_DESTINATION));
+    this.memberVMs[0].invoke(() -> putDataInRegion(PARTITIONED_REGION_NAME3, portfolio, COUNT_FROM, COUNT_DESTINATION));
+    this.memberVMs[0].invoke(() -> putDataInRegion(PARTITIONED_REGION_NAME4, portfolio, COUNT_FROM, COUNT_DESTINATION));
+    this.memberVMs[0].invoke(() -> putDataInRegion(PARTITIONED_REGION_NAME5, portfolio, COUNT_FROM, COUNT_DESTINATION));
+
+    this.memberVMs[0].invoke(() -> putPdxInstances(REPLICATE_REGION_NAME3));
+  }
+
+  private void putPdxInstances(final String regionName) throws CacheException {
+    Region region = this.managementTestRule.getCache().getRegion(regionName);
+
+    PdxInstanceFactory pdxInstanceFactory = PdxInstanceFactoryImpl.newCreator("Portfolio", false);
+    pdxInstanceFactory.writeInt("ID", 111);
+    pdxInstanceFactory.writeString("status", "active");
+    pdxInstanceFactory.writeString("secId", "IBM");
+    PdxInstance pdxInstance = pdxInstanceFactory.create();
+    region.put("IBM", pdxInstance);
+
+    pdxInstanceFactory = PdxInstanceFactoryImpl.newCreator("Portfolio", false);
+    pdxInstanceFactory.writeInt("ID", 222);
+    pdxInstanceFactory.writeString("status", "inactive");
+    pdxInstanceFactory.writeString("secId", "YHOO");
+    pdxInstance = pdxInstanceFactory.create();
+    region.put("YHOO", pdxInstance);
+
+    pdxInstanceFactory = PdxInstanceFactoryImpl.newCreator("Portfolio", false);
+    pdxInstanceFactory.writeInt("ID", 333);
+    pdxInstanceFactory.writeString("status", "active");
+    pdxInstanceFactory.writeString("secId", "GOOGL");
+    pdxInstance = pdxInstanceFactory.create();
+    region.put("GOOGL", pdxInstance);
+
+    pdxInstanceFactory = PdxInstanceFactoryImpl.newCreator("Portfolio", false);
+    pdxInstanceFactory.writeInt("ID", 111);
+    pdxInstanceFactory.writeString("status", "inactive");
+    pdxInstanceFactory.writeString("secId", "VMW");
+    pdxInstance = pdxInstanceFactory.create();
+    region.put("VMW", pdxInstance);
+  }
+
+  private void putBigInstances(final String regionName) {
+    Region region = this.managementTestRule.getCache().getRegion(regionName);
+
+    for (int i = 0; i < 1200; i++) {
+      List<String> bigCollection = new ArrayList<>();
+      for (int j = 0; j < 200; j++) {
+        bigCollection.add(BIG_COLLECTION_ELEMENT_ + j);
       }
-      
+      region.put(BIG_COLLECTION_ + i, bigCollection);
     }
-    
-   return allPrimaryVals;
   }
 
-  /**
-   * creates a Fixed Partition List to be used for Fixed Partition Region
-   * 
-   * @param primaryIndex
-   *          index for each fixed partition
-   */
-  private static List<FixedPartitionAttributes> createFixedPartitionList(int primaryIndex) {
-    List<FixedPartitionAttributes> fpaList = new ArrayList<FixedPartitionAttributes>();
+  private void createLocalRegion() {
+    this.managementTestRule.getCache().createRegionFactory(RegionShortcut.LOCAL).create(LOCAL_REGION_NAME);
+  }
+
+  private void createReplicatedRegion() {
+    this.managementTestRule.getCache().createRegionFactory(RegionShortcut.REPLICATE).create(REPLICATE_REGION_NAME1);
+  }
+
+  private void createColocatedPR() {
+    PartitionResolver testKeyBasedResolver = new TestPartitionResolver();
+    this.managementTestRule.getCache().createRegionFactory(RegionShortcut.PARTITION).setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(NUM_OF_BUCKETS).setPartitionResolver(testKeyBasedResolver).create()).create(PARTITIONED_REGION_NAME1);
+    this.managementTestRule.getCache().createRegionFactory(RegionShortcut.PARTITION).setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(NUM_OF_BUCKETS).setPartitionResolver(testKeyBasedResolver).setColocatedWith(PARTITIONED_REGION_NAME1).create()).create(PARTITIONED_REGION_NAME2);
+    this.managementTestRule.getCache().createRegionFactory(RegionShortcut.PARTITION).setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(NUM_OF_BUCKETS).setPartitionResolver(testKeyBasedResolver).setColocatedWith(PARTITIONED_REGION_NAME2).create()).create(PARTITIONED_REGION_NAME3);
+    this.managementTestRule.getCache().createRegionFactory(RegionShortcut.PARTITION).setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(NUM_OF_BUCKETS).setPartitionResolver(testKeyBasedResolver).create()).create(PARTITIONED_REGION_NAME4); // not collocated
+    this.managementTestRule.getCache().createRegionFactory(RegionShortcut.PARTITION).setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(NUM_OF_BUCKETS).setPartitionResolver(testKeyBasedResolver).setColocatedWith(PARTITIONED_REGION_NAME4).create()).create(PARTITIONED_REGION_NAME5); // collocated with 4
+  }
+
+  private void createDistributedRegion(final String regionName) {
+    this.managementTestRule.getCache().createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
+  }
+
+  private void createRegionsInNodes() throws InterruptedException, TimeoutException, ExecutionException {
+    // Create local Region on servers
+    this.memberVMs[0].invoke(() -> createLocalRegion());
+
+    // Create ReplicatedRegion on servers
+    this.memberVMs[0].invoke(() -> createReplicatedRegion());
+    this.memberVMs[1].invoke(() -> createReplicatedRegion());
+    this.memberVMs[2].invoke(() -> createReplicatedRegion());
+
+    this.memberVMs[1].invoke(() -> createDistributedRegion(REPLICATE_REGION_NAME2));
+    this.memberVMs[0].invoke(() -> createDistributedRegion(REPLICATE_REGION_NAME3));
+    this.memberVMs[0].invoke(() -> createDistributedRegion(REPLICATE_REGION_NAME4));
+
+    // Create two co-located PartitionedRegions On Servers.
+    this.memberVMs[0].invoke(() -> createColocatedPR());
+    this.memberVMs[1].invoke(() -> createColocatedPR());
+    this.memberVMs[2].invoke(() -> createColocatedPR());
+
+    this.managerVM.invoke("Wait for all Region Proxies to get replicated", () -> {
+      awaitDistributedRegionMXBean("/" + PARTITIONED_REGION_NAME1, 3);
+      awaitDistributedRegionMXBean("/" + PARTITIONED_REGION_NAME2, 3);
+      awaitDistributedRegionMXBean("/" + PARTITIONED_REGION_NAME3, 3);
+      awaitDistributedRegionMXBean("/" + PARTITIONED_REGION_NAME4, 3);
+      awaitDistributedRegionMXBean("/" + PARTITIONED_REGION_NAME5, 3);
+      awaitDistributedRegionMXBean("/" + REPLICATE_REGION_NAME1, 3);
+      awaitDistributedRegionMXBean("/" + REPLICATE_REGION_NAME2, 1);
+      awaitDistributedRegionMXBean("/" + REPLICATE_REGION_NAME3, 1);
+      awaitDistributedRegionMXBean("/" + REPLICATE_REGION_NAME4, 1);
+    });
+  }
+
+  private List<String> getLocalDataSet(final String region) {
+    PartitionedRegion partitionedRegion = PartitionedRegionHelper.getPartitionedRegion(region, this.managementTestRule.getCache());
+    Set<BucketRegion> localPrimaryBucketRegions = partitionedRegion.getDataStore().getAllLocalPrimaryBucketRegions();
+
+    List<String> allPrimaryValues = new ArrayList<>();
+
+    for (BucketRegion bucketRegion : localPrimaryBucketRegions) {
+      for (Object value : bucketRegion.values()) {
+        allPrimaryValues.add((String) value);
+      }
+    }
+
+    return allPrimaryValues;
+  }
+
+  private List<FixedPartitionAttributes> createFixedPartitionList(final int primaryIndex) {
+    List<FixedPartitionAttributes> fixedPartitionAttributesList = new ArrayList<>();
     if (primaryIndex == 1) {
-      fpaList.add(FixedPartitionAttributes.createFixedPartition("Q1", true, 3));
-      fpaList.add(FixedPartitionAttributes.createFixedPartition("Q2", 3));
-      fpaList.add(FixedPartitionAttributes.createFixedPartition("Q3", 3));
+      fixedPartitionAttributesList.add(createFixedPartition("Q1", true, 3));
+      fixedPartitionAttributesList.add(createFixedPartition("Q2", 3));
+      fixedPartitionAttributesList.add(createFixedPartition("Q3", 3));
     }
     if (primaryIndex == 2) {
-      fpaList.add(FixedPartitionAttributes.createFixedPartition("Q1", 3));
-      fpaList.add(FixedPartitionAttributes.createFixedPartition("Q2", true, 3));
-      fpaList.add(FixedPartitionAttributes.createFixedPartition("Q3", 3));
+      fixedPartitionAttributesList.add(createFixedPartition("Q1", 3));
+      fixedPartitionAttributesList.add(createFixedPartition("Q2", true, 3));
+      fixedPartitionAttributesList.add(createFixedPartition("Q3", 3));
     }
     if (primaryIndex == 3) {
-      fpaList.add(FixedPartitionAttributes.createFixedPartition("Q1", 3));
-      fpaList.add(FixedPartitionAttributes.createFixedPartition("Q2", 3));
-      fpaList.add(FixedPartitionAttributes.createFixedPartition("Q3", true, 3));
+      fixedPartitionAttributesList.add(createFixedPartition("Q1", 3));
+      fixedPartitionAttributesList.add(createFixedPartition("Q2", 3));
+      fixedPartitionAttributesList.add(createFixedPartition("Q3", true, 3));
+    }
+    return fixedPartitionAttributesList;
+  }
+
+  private MemberMXBean awaitMemberMXBeanProxy(final DistributedMember member) {
+    SystemManagementService service = this.managementTestRule.getSystemManagementService();
+    ObjectName objectName = service.getMemberMBeanName(member);
+    String alias = "awaiting MemberMXBean proxy for " + member;
+
+    await(alias).until(() -> assertThat(service.getMBeanProxy(objectName, MemberMXBean.class)).isNotNull());
+
+    return service.getMBeanProxy(objectName, MemberMXBean.class);
+  }
+
+  private DistributedSystemMXBean awaitDistributedSystemMXBean() {
+    SystemManagementService service = this.managementTestRule.getSystemManagementService();
+
+    await().until(() -> assertThat(service.getDistributedSystemMXBean()).isNotNull());
+
+    return service.getDistributedSystemMXBean();
+  }
+
+  private DistributedRegionMXBean awaitDistributedRegionMXBean(final String name) {
+    SystemManagementService service = this.managementTestRule.getSystemManagementService();
+
+    await().until(() -> assertThat(service.getDistributedRegionMXBean(name)).isNotNull());
+
+    return service.getDistributedRegionMXBean(name);
+  }
+
+  private DistributedRegionMXBean awaitDistributedRegionMXBean(final String name, final int memberCount) {
+    SystemManagementService service = this.managementTestRule.getSystemManagementService();
+
+    await().until(() -> assertThat(service.getDistributedRegionMXBean(name)).isNotNull());
+    await().until(() -> assertThat(service.getDistributedRegionMXBean(name).getMemberCount()).isEqualTo(memberCount));
+
+    return service.getDistributedRegionMXBean(name);
+  }
+
+  private ConditionFactory await() {
+    return Awaitility.await().atMost(2, MINUTES);
+  }
+
+  private ConditionFactory await(final String alias) {
+    return Awaitility.await(alias).atMost(2, MINUTES);
+  }
+
+  private static class TestPartitionResolver implements PartitionResolver {
+
+    @Override
+    public void close() {
+    }
+
+    @Override
+    public Serializable getRoutingObject(EntryOperation opDetails) {
+      return (((Integer)opDetails.getKey()).intValue() % NUM_OF_BUCKETS);
+    }
+
+    @Override
+    public String getName() {
+      return getClass().getName();
     }
-   return fpaList;
   }
 }


[15/50] [abbrv] incubator-geode git commit: GEODE-1993: refactor tests to use rules rather than abstract classes

Posted by kl...@apache.org.
GEODE-1993: refactor tests to use rules rather than abstract classes

* created ServerStarter and LocatorStarter in the rule package
* refactored LocatorServerConfigurationRule
* refactored tests to use these rules (a usage sketch of the new ServerStarter rule follows below)
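For illustration, here is a minimal sketch of what a rule-based test looks like after this change. It follows the ServerStarter usage shown in RestSecurityIntegrationTest further down in this diff (the constructor taking a Properties object, the startServer() call, and the public cache field all appear in that test); the empty properties and the region name used here are placeholders for illustration only, not part of the commit.

import static org.junit.Assert.assertNotNull;

import java.util.Properties;

import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;

import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.test.dunit.rules.ServerStarter;

public class ExampleRuleBasedTest {

  // Placeholder configuration; a real test would set security and REST properties here.
  private static Properties properties = new Properties();

  // Declared as a @ClassRule, as in RestSecurityIntegrationTest below,
  // so the test no longer needs to extend an abstract base class.
  @ClassRule
  public static ServerStarter serverStarter = new ServerStarter(properties);

  @BeforeClass
  public static void before() throws Exception {
    serverStarter.startServer();
    serverStarter.cache.createRegionFactory(RegionShortcut.REPLICATE).create("ExampleRegion");
  }

  @Test
  public void regionIsCreated() {
    assertNotNull(serverStarter.cache.getRegion("ExampleRegion"));
  }
}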


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/de621597
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/de621597
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/de621597

Branch: refs/heads/feature/GEODE-1930
Commit: de62159780496c3362a1b6ac840e87bba019bc62
Parents: b065993
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Wed Oct 12 09:30:15 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Fri Oct 14 14:59:26 2016 -0700

----------------------------------------------------------------------
 .../internal/web/RestSecurityDUnitTest.java     | 557 -------------------
 .../web/RestSecurityIntegrationTest.java        | 497 +++++++++++++++++
 .../security/IntegratedSecurityService.java     |   2 +-
 .../security/AccessControlMBeanJUnitTest.java   |   7 +-
 ...CacheServerMBeanAuthenticationJUnitTest.java |   7 +-
 .../CacheServerMBeanAuthorizationJUnitTest.java |  13 +-
 .../CacheServerMBeanShiroJUnitTest.java         |  30 +-
 .../security/CacheServerStartupRule.java        |  74 +++
 .../security/CliCommandsSecurityTest.java       |   9 +-
 .../security/DataCommandsSecurityTest.java      |  11 +-
 .../DiskStoreMXBeanSecurityJUnitTest.java       |   9 +-
 .../GatewayReceiverMBeanSecurityTest.java       |   9 +-
 .../GatewaySenderMBeanSecurityTest.java         |   9 +-
 .../security/GfshCommandsPostProcessorTest.java |  30 +-
 .../security/GfshCommandsSecurityTest.java      |  48 +-
 .../security/GfshShellConnectionRule.java       |   5 +-
 .../security/JMXConnectionConfiguration.java    |  33 --
 .../security/JavaRmiServerNameTest.java         |  28 +-
 .../JsonAuthorizationCacheStartRule.java        |  86 ---
 .../LockServiceMBeanAuthorizationJUnitTest.java |  11 +-
 .../security/MBeanSecurityJUnitTest.java        |  10 +-
 .../security/MBeanServerConnectionRule.java     | 130 -----
 .../ManagerMBeanAuthorizationJUnitTest.java     |   9 +-
 .../security/MemberMBeanSecurityJUnitTest.java  |  13 +-
 .../security/ResourcePermissionTest.java        |  25 +-
 .../internal/security/ShiroCacheStartRule.java  |  64 ---
 .../security/AbstractSecureServerDUnitTest.java | 104 +---
 .../ClusterConfigWithoutSecurityDUnitTest.java  |  26 +-
 .../security/IntegratedClientAuthDUnitTest.java |  19 +-
 .../NoShowValue1PostProcessorDUnitTest.java     |   8 +-
 .../security/PDXPostProcessorDUnitTest.java     |  18 +-
 .../geode/security/PostProcessorDUnitTest.java  |  10 +-
 .../SecurityClusterConfigDUnitTest.java         |  46 +-
 .../SecurityWithoutClusterConfigDUnitTest.java  |  16 +-
 .../security/StartServerAuthorizationTest.java  |  17 +-
 .../dunit/rules/ConnectionConfiguration.java    |  34 ++
 .../rules/LocatorServerConfigurationRule.java   | 148 -----
 .../dunit/rules/LocatorServerStartupRule.java   | 133 +++++
 .../geode/test/dunit/rules/LocatorStarter.java  |  74 +++
 .../dunit/rules/MBeanServerConnectionRule.java  | 132 +++++
 .../geode/test/dunit/rules/ServerStarter.java   |  99 ++++
 .../geode/security/CQClientAuthDunitTest.java   |   6 +-
 .../security/CQPDXPostProcessorDUnitTest.java   |  19 +-
 .../security/CQPostProcessorDunitTest.java      |  10 +-
 .../LuceneClusterConfigurationDUnitTest.java    |  25 +-
 .../web/controllers/CommonCrudController.java   |   4 +-
 .../web/controllers/PdxBasedCrudController.java |   4 +-
 .../web/security/RestSecurityService.java       |   2 +-
 48 files changed, 1382 insertions(+), 1298 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityDUnitTest.java b/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityDUnitTest.java
deleted file mode 100644
index 59e00c8..0000000
--- a/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityDUnitTest.java
+++ /dev/null
@@ -1,557 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.geode.rest.internal.web;
-
-import static org.junit.Assert.*;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.net.MalformedURLException;
-import java.nio.charset.StandardCharsets;
-
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpHost;
-import org.apache.http.HttpResponse;
-import org.apache.http.auth.AuthScope;
-import org.apache.http.auth.UsernamePasswordCredentials;
-import org.apache.http.client.AuthCache;
-import org.apache.http.client.ClientProtocolException;
-import org.apache.http.client.CredentialsProvider;
-import org.apache.http.client.methods.HttpDelete;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.client.methods.HttpHead;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.client.methods.HttpPut;
-import org.apache.http.client.methods.HttpRequestBase;
-import org.apache.http.client.protocol.HttpClientContext;
-import org.apache.http.entity.StringEntity;
-import org.apache.http.impl.auth.BasicScheme;
-import org.apache.http.impl.client.BasicAuthCache;
-import org.apache.http.impl.client.BasicCredentialsProvider;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClients;
-import org.json.JSONArray;
-import org.json.JSONObject;
-import org.json.JSONTokener;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.geode.internal.AvailablePortHelper;
-import org.apache.geode.security.AbstractSecureServerDUnitTest;
-import org.apache.geode.test.junit.categories.DistributedTest;
-import org.apache.geode.test.junit.categories.SecurityTest;
-
-
-@Category({ DistributedTest.class, SecurityTest.class })
-public class RestSecurityDUnitTest extends AbstractSecureServerDUnitTest {
-
-  public final static String PROTOCOL = "http";
-  public final static String HOSTNAME = "localhost";
-  public final static String CONTEXT = "/geode/v1";
-
-  public RestSecurityDUnitTest() throws MalformedURLException {
-    int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
-    this.jmxPort = ports[0];
-    this.restPort = ports[1];
-  }
-
-  @Test
-  public void testFunctions() {
-    client1.invoke(() -> {
-      String json = "{\"@type\":\"double\",\"@value\":210}";
-
-      HttpResponse response = doGet("/functions", "unknown-user", "1234567");
-      assertEquals(401, getCode(response));
-      response = doGet("/functions", "stranger", "1234567");
-      assertEquals(403, getCode(response));
-      response = doGet("/functions", "dataReader", "1234567");
-      assertTrue(isOK(response));
-
-      response = doPost("/functions/AddFreeItemsToOrder", "unknown-user", "1234567", json);
-      assertEquals(401, getCode(response));
-      response = doPost("/functions/AddFreeItemsToOrder", "dataReader", "1234567", json);
-      assertEquals(403, getCode(response));
-      response = doPost("/functions/AddFreeItemsToOrder?onRegion=" + REGION_NAME, "dataWriter", "1234567", json);
-      // because we're only testing the security of the endpoint, not the endpoint functionality, a 500 is acceptable
-      assertEquals(500, getCode(response));
-    });
-  }
-
-  @Test
-  public void testQueries() {
-    client1.invoke(() -> {
-      HttpResponse response = doGet("/queries", "unknown-user", "1234567");
-      assertEquals(401, getCode(response));
-      response = doGet("/queries", "stranger", "1234567");
-      assertEquals(403, getCode(response));
-      response = doGet("/queries", "dataReader", "1234567");
-      assertEquals(200, getCode(response));
-    });
-  }
-
-  @Test
-  public void testAdhocQuery() {
-    client1.invoke(() -> {
-      HttpResponse response = doGet("/queries/adhoc?q=", "unknown-user", "1234567");
-      assertEquals(401, getCode(response));
-      response = doGet("/queries/adhoc?q=", "stranger", "1234567");
-      assertEquals(403, getCode(response));
-      response = doGet("/queries/adhoc?q=", "dataReader", "1234567");
-      // because we're only testing the security of the endpoint, not the endpoint functionality, a 500 is acceptable
-      assertEquals(500, getCode(response));
-    });
-  }
-
-  @Test
-  public void testPostQuery() {
-    client1.invoke(() -> {
-      HttpResponse response = doPost("/queries?id=0&q=", "unknown-user", "1234567", "");
-      assertEquals(401, getCode(response));
-      response = doPost("/queries?id=0&q=", "stranger", "1234567", "");
-      assertEquals(403, getCode(response));
-      response = doPost("/queries?id=0&q=", "dataWriter", "1234567", "");
-      // because we're only testing the security of the endpoint, not the endpoint functionality, a 500 is acceptable
-      assertEquals(500, getCode(response));
-    });
-  }
-
-  @Test
-  public void testPostQuery2() {
-    client1.invoke(() -> {
-      HttpResponse response = doPost("/queries/id", "unknown-user", "1234567", "{\"id\" : \"foo\"}");
-      assertEquals(401, getCode(response));
-      response = doPost("/queries/id", "stranger", "1234567", "{\"id\" : \"foo\"}");
-      assertEquals(403, getCode(response));
-      response = doPost("/queries/id", "dataWriter", "1234567", "{\"id\" : \"foo\"}");
-      // because we're only testing the security of the endpoint, not the endpoint functionality, a 500 is acceptable
-      assertEquals(500, getCode(response));
-    });
-  }
-
-  @Test
-  public void testPutQuery() {
-    client1.invoke(() -> {
-      HttpResponse response = doPut("/queries/id", "unknown-user", "1234567", "{\"id\" : \"foo\"}");
-      assertEquals(401, getCode(response));
-      response = doPut("/queries/id", "stranger", "1234567", "{\"id\" : \"foo\"}");
-      assertEquals(403, getCode(response));
-      response = doPut("/queries/id", "dataWriter", "1234567", "{\"id\" : \"foo\"}");
-      // We should get a 404 because we're trying to update a query that doesn't exist
-      assertEquals(404, getCode(response));
-    });
-  }
-
-  @Test
-  public void testDeleteQuery() {
-    client1.invoke(() -> {
-      HttpResponse response = doDelete("/queries/id", "unknown-user", "1234567");
-      assertEquals(401, getCode(response));
-      response = doDelete("/queries/id", "stranger", "1234567");
-      assertEquals(403, getCode(response));
-      response = doDelete("/queries/id", "dataWriter", "1234567");
-      // We should get a 404 because we're trying to delete a query that doesn't exist
-      assertEquals(404, getCode(response));
-    });
-  }
-
-  @Test
-  public void testServers() {
-    client1.invoke(() -> {
-      HttpResponse response = doGet("/servers", "unknown-user", "1234567");
-      assertEquals(401, getCode(response));
-      response = doGet("/servers", "stranger", "1234567");
-      assertEquals(403, getCode(response));
-      response = doGet("/servers", "super-user", "1234567");
-      assertTrue(isOK(response));
-    });
-  }
-
-  /**
-   * This test should always return an OK, whether the user is known or unknown.  A phishing script should not be
-   * able to determine whether a user/password combination is good
-   */
-  @Test
-  public void testPing() {
-    client1.invoke(() -> {
-      HttpResponse response = doHEAD("/ping", "stranger", "1234567");
-      assertTrue(isOK(response));
-      response = doGet("/ping", "stranger", "1234567");
-      assertTrue(isOK(response));
-
-      response = doHEAD("/ping", "super-user", "1234567");
-      assertTrue(isOK(response));
-      response = doGet("/ping", "super-user", "1234567");
-      assertTrue(isOK(response));
-
-      // TODO - invalid username/password should still respond, but doesn't
-      //      response = doHEAD("/ping", "unknown-user", "badpassword");
-      //      assertTrue(isOK(response));
-      //      response = doGet("/ping", "unknown-user", "badpassword");
-      //      assertTrue(isOK(response));
-
-      // TODO - credentials are currently required and shouldn't be for this endpoint
-      //      response = doHEAD("/ping", null, null);
-      //      assertTrue(isOK(response));
-      //      response = doGet("/ping", null, null);
-      //      assertTrue(isOK(response));
-    });
-  }
-
-  /**
-   * Test permissions on retrieving a list of regions.
-   */
-  @Test
-  public void getRegions() {
-    client1.invoke(() -> {
-      HttpResponse response = doGet("", "dataReader", "1234567");
-      assertEquals("A '200 - OK' was expected", 200, getCode(response));
-
-      assertTrue(isOK(response));
-      JSONObject jsonObject = new JSONObject(getResponseBody(response));
-      JSONArray regions = jsonObject.getJSONArray("regions");
-      assertNotNull(regions);
-      assertTrue(regions.length() > 0);
-      JSONObject region = regions.getJSONObject(0);
-      assertEquals("AuthRegion", region.get("name"));
-      assertEquals("REPLICATE", region.get("type"));
-    });
-
-    // List regions with an unknown user - 401
-    client1.invoke(() -> {
-      HttpResponse response = doGet("", "unknown-user", "badpassword");
-      assertEquals(401, getCode(response));
-    });
-
-    // list regions with insufficent rights - 403
-    client1.invoke(() -> {
-      HttpResponse response = doGet("", "authRegionReader", "1234567");
-      assertEquals(403, getCode(response));
-    });
-  }
-
-  /**
-   * Test permissions on getting a region
-   */
-  @Test
-  public void getRegion() {
-    // Test an unknown user - 401 error
-    client1.invoke(() -> {
-      HttpResponse response = doGet("/" + REGION_NAME, "unknown-user", "1234567");
-      assertEquals(401, getCode(response));
-    });
-
-    // Test a user with insufficient rights - 403
-    client1.invoke(() -> {
-      HttpResponse response = doGet("/" + REGION_NAME, "stranger", "1234567");
-      assertEquals(403, getCode(response));
-    });
-
-    // Test an authorized user - 200
-    client1.invoke(() -> {
-      HttpResponse response = doGet("/" + REGION_NAME, "super-user", "1234567");
-      assertTrue(isOK(response));
-    });
-  }
-
-  /**
-   * Test permissions on HEAD region
-   */
-  @Test
-  public void headRegion() {
-    // Test an unknown user - 401 error
-    client1.invoke(() -> {
-      HttpResponse response = doHEAD("/" + REGION_NAME, "unknown-user", "1234567");
-      assertEquals(401, getCode(response));
-    });
-
-    // Test a user with insufficient rights - 403
-    client1.invoke(() -> {
-      HttpResponse response = doHEAD("/" + REGION_NAME, "stranger", "1234567");
-      assertEquals(403, getCode(response));
-    });
-
-    // Test an authorized user - 200
-    client1.invoke(() -> {
-      HttpResponse response = doHEAD("/" + REGION_NAME, "super-user", "1234567");
-      assertTrue(isOK(response));
-    });
-  }
-
-  /**
-   * Test permissions on deleting a region
-   */
-  @Test
-  public void deleteRegion() {
-    // Test an unknown user - 401 error
-    client1.invoke(() -> {
-      HttpResponse response = doDelete("/" + REGION_NAME, "unknown-user", "1234567");
-      assertEquals(401, getCode(response));
-    });
-
-    // Test a user with insufficient rights - 403
-    client1.invoke(() -> {
-      HttpResponse response = doDelete("/" + REGION_NAME, "dataReader", "1234567");
-      assertEquals(403, getCode(response));
-    });
-
-    // Test an authorized user - 200
-    client1.invoke(() -> {
-      HttpResponse response = doDelete("/" + REGION_NAME, "super-user", "1234567");
-      assertTrue(isOK(response));
-    });
-  }
-
-  /**
-   * Test permissions on getting a region's keys
-   */
-  @Test
-  public void getRegionKeys() {
-    // Test an authorized user
-    client1.invoke(() -> {
-      HttpResponse response = doGet("/" + REGION_NAME + "/keys", "super-user", "1234567");
-      assertTrue(isOK(response));
-    });
-    // Test an unauthorized user
-    client1.invoke(() -> {
-      HttpResponse response = doGet("/" + REGION_NAME + "/keys", "dataWriter", "1234567");
-      assertEquals(403, getCode(response));
-    });
-  }
-
-  /**
-   * Test permissions on retrieving a key from a region
-   */
-  @Test
-  public void getRegionKey() {
-    // Test an authorized user
-    client1.invoke(() -> {
-      HttpResponse response = doGet("/" + REGION_NAME + "/key1", "key1User", "1234567");
-      assertTrue(isOK(response));
-    });
-    // Test an unauthorized user
-    client1.invoke(() -> {
-      HttpResponse response = doGet("/" + REGION_NAME + "/key1", "dataWriter", "1234567");
-      assertEquals(403, getCode(response));
-    });
-  }
-
-  /**
-   * Test permissions on deleting a region's key(s)
-   */
-  @Test
-  public void deleteRegionKey() {
-    // Test an unknown user - 401 error
-    client1.invoke(() -> {
-      HttpResponse response = doDelete("/" + REGION_NAME + "/key1", "unknown-user", "1234567");
-      assertEquals(401, getCode(response));
-    });
-
-    // Test a user with insufficient rights - 403
-    client1.invoke(() -> {
-      HttpResponse response = doDelete("/" + REGION_NAME + "/key1", "dataReader", "1234567");
-      assertEquals(403, getCode(response));
-    });
-
-    // Test an authorized user - 200
-    client1.invoke(() -> {
-      HttpResponse response = doDelete("/" + REGION_NAME + "/key1", "key1User", "1234567");
-      assertTrue(isOK(response));
-    });
-  }
-
-  /**
-   * Test permissions on deleting a region's key(s)
-   */
-  @Test
-  public void postRegionKey() {
-    // Test an unknown user - 401 error
-    client1.invoke(() -> {
-      HttpResponse response = doPost("/" + REGION_NAME + "?key9", "unknown", "1234567", "{ \"key9\" : \"foo\" }");
-      assertEquals(401, getCode(response));
-    });
-
-    // Test a user with insufficient rights - 403
-    client1.invoke(() -> {
-      HttpResponse response = doPost("/" + REGION_NAME + "?key9", "dataReader", "1234567", "{ \"key9\" : \"foo\" }");
-      assertEquals(403, getCode(response));
-    });
-
-    // Test an authorized user - 200
-    client1.invoke(() -> {
-      HttpResponse response = doPost("/" + REGION_NAME + "?key9", "dataWriter", "1234567", "{ \"key9\" : \"foo\" }");
-      assertEquals(201, getCode(response));
-      assertTrue(isOK(response));
-    });
-  }
-
-  /**
-   * Test permissions on deleting a region's key(s)
-   */
-  @Test
-  public void putRegionKey() {
-
-    String json = "{\"@type\":\"com.gemstone.gemfire.web.rest.domain.Order\",\"purchaseOrderNo\":1121,\"customerId\":1012,\"description\":\"Order for  XYZ Corp\",\"orderDate\":\"02/10/2014\",\"deliveryDate\":\"02/20/2014\",\"contact\":\"Jelly Bean\",\"email\":\"jelly.bean@example.com\",\"phone\":\"01-2048096\",\"items\":[{\"itemNo\":1,\"description\":\"Product-100\",\"quantity\":12,\"unitPrice\":5,\"totalPrice\":60}],\"totalPrice\":225}";
-    String casJSON = "{\"@old\":{\"@type\":\"com.gemstone.gemfire.web.rest.domain.Order\",\"purchaseOrderNo\":1121,\"customerId\":1012,\"description\":\"Order for  XYZ Corp\",\"orderDate\":\"02/10/2014\",\"deliveryDate\":\"02/20/2014\",\"contact\":\"Jelly Bean\",\"email\":\"jelly.bean@example.com\",\"phone\":\"01-2048096\",\"items\":[{\"itemNo\":1,\"description\":\"Product-100\",\"quantity\":12,\"unitPrice\":5,\"totalPrice\":60}],\"totalPrice\":225},\"@new \":{\"@type\":\"com.gemstone.gemfire.web.rest.domain.Order\",\"purchaseOrderNo\":1121,\"customerId\":1013,\"description\":\"Order for  New Corp\",\"orderDate\":\"02/10/2014\",\"deliveryDate\":\"02/25/2014\",\"contact\":\"Vanilla Bean\",\"email\":\"vanillabean@example.com\",\"phone\":\"01-2048096\",\"items\":[{\"itemNo\":12345,\"description\":\"part 123\",\"quantity\":12,\"unitPrice\":29.99,\"totalPrice\":149.95}],\"totalPrice\":149.95}}";
-    // Test an unknown user - 401 error
-    client1.invoke(() -> {
-      HttpResponse response = doPut("/" + REGION_NAME + "/key1?op=PUT", "unknown-user", "1234567", "{ \"key9\" : \"foo\" }");
-      assertEquals(401, getCode(response));
-    });
-
-    client1.invoke(() -> {
-      HttpResponse response = doPut("/" + REGION_NAME + "/key1?op=CAS", "unknown-user", "1234567", "{ \"key9\" : \"foo\" }");
-      assertEquals(401, getCode(response));
-    });
-
-    client1.invoke(() -> {
-      HttpResponse response = doPut("/" + REGION_NAME + "/key1?op=REPLACE", "unknown-user", "1234567", "{ \"@old\" : \"value1\", \"@new\" : \"CASvalue\" }");
-      assertEquals(401, getCode(response));
-    });
-
-    // Test a user with insufficient rights - 403
-    client1.invoke(() -> {
-      HttpResponse response = doPut("/" + REGION_NAME + "/key1?op=PUT", "dataReader", "1234567", "{ \"key1\" : \"foo\" }");
-      assertEquals(403, getCode(response));
-    });
-    client1.invoke(() -> {
-      HttpResponse response = doPut("/" + REGION_NAME + "/key1?op=REPLACE", "dataReader", "1234567", "{ \"key1\" : \"foo\" }");
-      assertEquals(403, getCode(response));
-    });
-    client1.invoke(() -> {
-      HttpResponse response = doPut("/" + REGION_NAME + "/key1?op=CAS", "dataReader", "1234567", casJSON);
-      assertEquals(403, getCode(response));
-    });
-
-    // Test an authorized user - 200
-    client1.invoke(() -> {
-      HttpResponse response = doPut("/" + REGION_NAME + "/key1?op=PUT", "key1User", "1234567", "{ \"key1\" : \"foo\" }");
-      assertEquals(200, getCode(response));
-      assertTrue(isOK(response));
-    });
-    client1.invoke(() -> {
-      HttpResponse response = doPut("/" + REGION_NAME + "/key1?op=REPLACE", "key1User", "1234567", json);
-      assertEquals(200, getCode(response));
-      assertTrue(isOK(response));
-    });
-  }
-
-  protected HttpResponse doHEAD(String query, String username, String password) throws MalformedURLException {
-    HttpHead httpHead = new HttpHead(CONTEXT + query);
-    return doRequest(httpHead, username, password);
-  }
-
-
-  protected HttpResponse doPost(String query, String username, String password, String body) throws MalformedURLException {
-    HttpPost httpPost = new HttpPost(CONTEXT + query);
-    httpPost.addHeader("content-type", "application/json");
-    httpPost.setEntity(new StringEntity(body, StandardCharsets.UTF_8));
-    return doRequest(httpPost, username, password);
-  }
-
-
-  protected HttpResponse doPut(String query, String username, String password, String body) throws MalformedURLException {
-    HttpPut httpPut = new HttpPut(CONTEXT + query);
-    httpPut.addHeader("content-type", "application/json");
-    httpPut.setEntity(new StringEntity(body, StandardCharsets.UTF_8));
-    return doRequest(httpPut, username, password);
-  }
-
-  protected HttpResponse doGet(String uri, String username, String password) throws MalformedURLException {
-    HttpGet getRequest = new HttpGet(CONTEXT + uri);
-    return doRequest(getRequest, username, password);
-  }
-
-  protected HttpResponse doDelete(String uri, String username, String password) throws MalformedURLException {
-    HttpDelete httpDelete = new HttpDelete(CONTEXT + uri);
-    return doRequest(httpDelete, username, password);
-  }
-
-  /**
-   * Check the HTTP status of the response and return if it's within the OK range
-   * @param response The HttpResponse message received from the server
-   *
-   * @return true if the status code is a 2XX-type code (200-299), otherwise false
-   */
-  protected boolean isOK(HttpResponse response) {
-    int returnCode = response.getStatusLine().getStatusCode();
-    return (returnCode < 300 && returnCode >= 200);
-  }
-
-  /**
-   * Check the HTTP status of the response and return true if a 401
-   * @param response The HttpResponse message received from the server
-   *
-   * @return true if the status code is 401, otherwise false
-   */
-  protected boolean isUnauthorized(HttpResponse response) {
-    int returnCode = response.getStatusLine().getStatusCode();
-    return returnCode == 401;
-  }
-
-  /**
-   * Retrieve the status code of the HttpResponse
-   * @param response The HttpResponse message received from the server
-   *
-   * @return a numeric value
-   */
-  protected int getCode(HttpResponse response) {
-    return response.getStatusLine().getStatusCode();
-  }
-
-  protected JSONTokener getResponseBody(HttpResponse response) throws IOException {
-    HttpEntity entity = response.getEntity();
-    InputStream content = entity.getContent();
-    BufferedReader reader = new BufferedReader(new InputStreamReader(
-      content));
-    String line;
-    StringBuilder str = new StringBuilder();
-    while ((line = reader.readLine()) != null) {
-      str.append(line);
-    }
-    return new JSONTokener(str.toString());
-  }
-
-  private HttpResponse doRequest(HttpRequestBase request, String username, String password) throws MalformedURLException {
-    HttpHost targetHost = new HttpHost(HOSTNAME, this.restPort, PROTOCOL);
-    CloseableHttpClient httpclient = HttpClients.custom().build();
-    HttpClientContext clientContext = HttpClientContext.create();
-    // if username is null, do not put in authentication
-    if (username != null) {
-      CredentialsProvider credsProvider = new BasicCredentialsProvider();
-      credsProvider.setCredentials(new AuthScope(targetHost.getHostName(), targetHost.getPort()), new UsernamePasswordCredentials(username, password));
-      httpclient = HttpClients.custom().setDefaultCredentialsProvider(credsProvider).build();
-      AuthCache authCache = new BasicAuthCache();
-      BasicScheme basicAuth = new BasicScheme();
-      authCache.put(targetHost, basicAuth);
-      clientContext.setCredentialsProvider(credsProvider);
-      clientContext.setAuthCache(authCache);
-    }
-
-    try {
-      return httpclient.execute(targetHost, request, clientContext);
-    } catch (ClientProtocolException e) {
-      e.printStackTrace();
-      fail("Rest GET should not have thrown ClientProtocolException!");
-    } catch (IOException e) {
-      e.printStackTrace();
-      fail("Rest GET Request should not have thrown IOException!");
-    }
-    return null;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityIntegrationTest.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityIntegrationTest.java b/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityIntegrationTest.java
new file mode 100644
index 0000000..ef019a4
--- /dev/null
+++ b/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityIntegrationTest.java
@@ -0,0 +1,497 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.rest.internal.web;
+
+import static org.apache.geode.distributed.ConfigurationProperties.*;
+import static org.junit.Assert.*;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.MalformedURLException;
+import java.nio.charset.StandardCharsets;
+import java.util.Properties;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpHost;
+import org.apache.http.HttpResponse;
+import org.apache.http.auth.AuthScope;
+import org.apache.http.auth.UsernamePasswordCredentials;
+import org.apache.http.client.AuthCache;
+import org.apache.http.client.ClientProtocolException;
+import org.apache.http.client.CredentialsProvider;
+import org.apache.http.client.methods.HttpDelete;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpHead;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+import org.apache.http.client.methods.HttpRequestBase;
+import org.apache.http.client.protocol.HttpClientContext;
+import org.apache.http.entity.StringEntity;
+import org.apache.http.impl.auth.BasicScheme;
+import org.apache.http.impl.client.BasicAuthCache;
+import org.apache.http.impl.client.BasicCredentialsProvider;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClients;
+import org.json.JSONArray;
+import org.json.JSONObject;
+import org.json.JSONTokener;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.internal.AvailablePortHelper;
+import org.apache.geode.security.templates.SampleSecurityManager;
+import org.apache.geode.test.dunit.rules.ServerStarter;
+import org.apache.geode.test.junit.categories.IntegrationTest;
+import org.apache.geode.test.junit.categories.SecurityTest;
+
+
+@Category({ IntegrationTest.class, SecurityTest.class })
+public class RestSecurityIntegrationTest {
+
+  protected static final String REGION_NAME = "AuthRegion";
+
+  public final static String PROTOCOL = "http";
+  public final static String HOSTNAME = "localhost";
+  public final static String CONTEXT = "/geode/v1";
+
+  private static int restPort = AvailablePortHelper.getRandomAvailableTCPPort();
+  static Properties properties = new Properties() {{
+    setProperty(SampleSecurityManager.SECURITY_JSON, "org/apache/geode/management/internal/security/clientServer.json");
+    setProperty(SECURITY_MANAGER, SampleSecurityManager.class.getName());
+    setProperty(START_DEV_REST_API, "true");
+    setProperty(HTTP_SERVICE_BIND_ADDRESS, "localhost");
+    setProperty(HTTP_SERVICE_PORT, restPort + "");
+  }};
+
+  @ClassRule
+  public static ServerStarter serverStarter = new ServerStarter(properties);
+
+  @BeforeClass
+  public static void before() throws Exception {
+    serverStarter.startServer();
+    serverStarter.cache.createRegionFactory(RegionShortcut.REPLICATE).create(REGION_NAME);
+  }
+
+  @Test
+  public void testFunctions() throws Exception {
+    String json = "{\"@type\":\"double\",\"@value\":210}";
+
+    HttpResponse response = doGet("/functions", "unknown-user", "1234567");
+    assertEquals(401, getCode(response));
+    response = doGet("/functions", "stranger", "1234567");
+    assertEquals(403, getCode(response));
+    response = doGet("/functions", "dataReader", "1234567");
+    assertTrue(isOK(response));
+
+    response = doPost("/functions/AddFreeItemsToOrder", "unknown-user", "1234567", json);
+    assertEquals(401, getCode(response));
+    response = doPost("/functions/AddFreeItemsToOrder", "dataReader", "1234567", json);
+    assertEquals(403, getCode(response));
+    response = doPost("/functions/AddFreeItemsToOrder?onRegion=" + REGION_NAME, "dataWriter", "1234567", json);
+    // because we're only testing the security of the endpoint, not the endpoint functionality, a 500 is acceptable
+    assertEquals(500, getCode(response));
+  }
+
+  @Test
+  public void testQueries() throws Exception {
+    HttpResponse response = doGet("/queries", "unknown-user", "1234567");
+    assertEquals(401, getCode(response));
+    response = doGet("/queries", "stranger", "1234567");
+    assertEquals(403, getCode(response));
+    response = doGet("/queries", "dataReader", "1234567");
+    assertEquals(200, getCode(response));
+  }
+
+  @Test
+  public void testAdhocQuery() throws Exception {
+    HttpResponse response = doGet("/queries/adhoc?q=", "unknown-user", "1234567");
+    assertEquals(401, getCode(response));
+    response = doGet("/queries/adhoc?q=", "stranger", "1234567");
+    assertEquals(403, getCode(response));
+    response = doGet("/queries/adhoc?q=", "dataReader", "1234567");
+    // because we're only testing the security of the endpoint, not the endpoint functionality, a 500 is acceptable
+    assertEquals(500, getCode(response));
+  }
+
+  @Test
+  public void testPostQuery() throws Exception {
+    HttpResponse response = doPost("/queries?id=0&q=", "unknown-user", "1234567", "");
+    assertEquals(401, getCode(response));
+    response = doPost("/queries?id=0&q=", "stranger", "1234567", "");
+    assertEquals(403, getCode(response));
+    response = doPost("/queries?id=0&q=", "dataWriter", "1234567", "");
+    // because we're only testing the security of the endpoint, not the endpoint functionality, a 500 is acceptable
+    assertEquals(500, getCode(response));
+  }
+
+  @Test
+  public void testPostQuery2() throws Exception {
+    HttpResponse response = doPost("/queries/id", "unknown-user", "1234567", "{\"id\" : \"foo\"}");
+    assertEquals(401, getCode(response));
+    response = doPost("/queries/id", "stranger", "1234567", "{\"id\" : \"foo\"}");
+    assertEquals(403, getCode(response));
+    response = doPost("/queries/id", "dataWriter", "1234567", "{\"id\" : \"foo\"}");
+    // because we're only testing the security of the endpoint, not the endpoint functionality, a 500 is acceptable
+    assertEquals(500, getCode(response));
+  }
+
+  @Test
+  public void testPutQuery() throws Exception {
+    HttpResponse response = doPut("/queries/id", "unknown-user", "1234567", "{\"id\" : \"foo\"}");
+    assertEquals(401, getCode(response));
+    response = doPut("/queries/id", "stranger", "1234567", "{\"id\" : \"foo\"}");
+    assertEquals(403, getCode(response));
+    response = doPut("/queries/id", "dataWriter", "1234567", "{\"id\" : \"foo\"}");
+    // We should get a 404 because we're trying to update a query that doesn't exist
+    assertEquals(404, getCode(response));
+  }
+
+  @Test
+  public void testDeleteQuery() throws Exception {
+    HttpResponse response = doDelete("/queries/id", "unknown-user", "1234567");
+    assertEquals(401, getCode(response));
+    response = doDelete("/queries/id", "stranger", "1234567");
+    assertEquals(403, getCode(response));
+    response = doDelete("/queries/id", "dataWriter", "1234567");
+    // We should get a 404 because we're trying to delete a query that doesn't exist
+    assertEquals(404, getCode(response));
+  }
+
+  @Test
+  public void testServers() throws Exception {
+    HttpResponse response = doGet("/servers", "unknown-user", "1234567");
+    assertEquals(401, getCode(response));
+    response = doGet("/servers", "stranger", "1234567");
+    assertEquals(403, getCode(response));
+    response = doGet("/servers", "super-user", "1234567");
+    assertTrue(isOK(response));
+  }
+
+  /**
+   * This test should always return an OK, whether the user is known or unknown.  A phishing script should not be
+   * able to determine whether a user/password combination is good
+   */
+  @Test
+  public void testPing() throws Exception {
+    HttpResponse response = doHEAD("/ping", "stranger", "1234567");
+    assertTrue(isOK(response));
+    response = doGet("/ping", "stranger", "1234567");
+    assertTrue(isOK(response));
+
+    response = doHEAD("/ping", "super-user", "1234567");
+    assertTrue(isOK(response));
+    response = doGet("/ping", "super-user", "1234567");
+    assertTrue(isOK(response));
+
+    // TODO - invalid username/password should still respond, but doesn't
+    //      response = doHEAD("/ping", "unknown-user", "badpassword");
+    //      assertTrue(isOK(response));
+    //      response = doGet("/ping", "unknown-user", "badpassword");
+    //      assertTrue(isOK(response));
+
+    // TODO - credentials are currently required and shouldn't be for this endpoint
+    //      response = doHEAD("/ping", null, null);
+    //      assertTrue(isOK(response));
+    //      response = doGet("/ping", null, null);
+    //      assertTrue(isOK(response));
+  }
+
+  /**
+   * Test permissions on retrieving a list of regions.
+   */
+  @Test
+  public void getRegions() throws Exception {
+    HttpResponse response = doGet("", "dataReader", "1234567");
+    assertEquals("A '200 - OK' was expected", 200, getCode(response));
+
+    assertTrue(isOK(response));
+    JSONObject jsonObject = new JSONObject(getResponseBody(response));
+    JSONArray regions = jsonObject.getJSONArray("regions");
+    assertNotNull(regions);
+    assertTrue(regions.length() > 0);
+    JSONObject region = regions.getJSONObject(0);
+    assertEquals("AuthRegion", region.get("name"));
+    assertEquals("REPLICATE", region.get("type"));
+
+    // List regions with an unknown user - 401
+    response = doGet("", "unknown-user", "badpassword");
+    assertEquals(401, getCode(response));
+
+    // list regions with insufficient rights - 403
+    response = doGet("", "authRegionReader", "1234567");
+    assertEquals(403, getCode(response));
+  }
+
+  /**
+   * Test permissions on getting a region
+   */
+  @Test
+  public void getRegion() throws Exception {
+    // Test an unknown user - 401 error
+    HttpResponse response = doGet("/" + REGION_NAME, "unknown-user", "1234567");
+    assertEquals(401, getCode(response));
+
+    // Test a user with insufficient rights - 403
+    response = doGet("/" + REGION_NAME, "stranger", "1234567");
+    assertEquals(403, getCode(response));
+
+    // Test an authorized user - 200
+    response = doGet("/" + REGION_NAME, "super-user", "1234567");
+    assertTrue(isOK(response));
+  }
+
+  /**
+   * Test permissions on HEAD region
+   */
+  @Test
+  public void headRegion() throws Exception {
+    // Test an unknown user - 401 error
+    HttpResponse response = doHEAD("/" + REGION_NAME, "unknown-user", "1234567");
+    assertEquals(401, getCode(response));
+
+    // Test a user with insufficient rights - 403
+    response = doHEAD("/" + REGION_NAME, "stranger", "1234567");
+    assertEquals(403, getCode(response));
+
+    // Test an authorized user - 200
+    response = doHEAD("/" + REGION_NAME, "super-user", "1234567");
+    assertTrue(isOK(response));
+  }
+
+  /**
+   * Test permissions on deleting a region
+   */
+  @Test
+  public void deleteRegion() throws Exception {
+    // Test an unknown user - 401 error
+    HttpResponse response = doDelete("/" + REGION_NAME, "unknown-user", "1234567");
+    assertEquals(401, getCode(response));
+
+    // Test a user with insufficient rights - 403
+    response = doDelete("/" + REGION_NAME, "dataReader", "1234567");
+    assertEquals(403, getCode(response));
+  }
+
+  /**
+   * Test permissions on getting a region's keys
+   */
+  @Test
+  public void getRegionKeys() throws Exception {
+    // Test an authorized user
+    HttpResponse response = doGet("/" + REGION_NAME + "/keys", "super-user", "1234567");
+    assertTrue(isOK(response));
+    // Test an unauthorized user
+    response = doGet("/" + REGION_NAME + "/keys", "dataWriter", "1234567");
+    assertEquals(403, getCode(response));
+  }
+
+  /**
+   * Test permissions on retrieving a key from a region
+   */
+  @Test
+  public void getRegionKey() throws Exception {
+    // Test an authorized user
+    HttpResponse response = doGet("/" + REGION_NAME + "/key1", "key1User", "1234567");
+    assertTrue(isOK(response));
+    // Test an unauthorized user
+    response = doGet("/" + REGION_NAME + "/key1", "dataWriter", "1234567");
+    assertEquals(403, getCode(response));
+  }
+
+  /**
+   * Test permissions on deleting a region's key(s)
+   */
+  @Test
+  public void deleteRegionKey() throws Exception {
+    // Test an unknown user - 401 error
+    HttpResponse response = doDelete("/" + REGION_NAME + "/key1", "unknown-user", "1234567");
+    assertEquals(401, getCode(response));
+
+    // Test a user with insufficient rights - 403
+    response = doDelete("/" + REGION_NAME + "/key1", "dataReader", "1234567");
+    assertEquals(403, getCode(response));
+
+    // Test an authorized user - 200
+    response = doDelete("/" + REGION_NAME + "/key1", "key1User", "1234567");
+    assertTrue(isOK(response));
+  }
+
+  /**
+   * Test permissions on creating a region entry via POST
+   */
+  @Test
+  public void postRegionKey() throws Exception {
+    // Test an unknown user - 401 error
+    HttpResponse response = doPost("/" + REGION_NAME + "?key9", "unknown", "1234567", "{ \"key9\" : \"foo\" }");
+    assertEquals(401, getCode(response));
+
+    // Test a user with insufficient rights - 403
+    response = doPost("/" + REGION_NAME + "?key9", "dataReader", "1234567", "{ \"key9\" : \"foo\" }");
+    assertEquals(403, getCode(response));
+
+    // Test an authorized user - 200
+    response = doPost("/" + REGION_NAME + "?key9", "dataWriter", "1234567", "{ \"key9\" : \"foo\" }");
+    assertEquals(201, getCode(response));
+    assertTrue(isOK(response));
+  }
+
+  /**
+   * Test permissions on updating a region entry via PUT
+   */
+  @Test
+  public void putRegionKey() throws Exception {
+
+    String json = "{\"@type\":\"com.gemstone.gemfire.web.rest.domain.Order\",\"purchaseOrderNo\":1121,\"customerId\":1012,\"description\":\"Order for  XYZ Corp\",\"orderDate\":\"02/10/2014\",\"deliveryDate\":\"02/20/2014\",\"contact\":\"Jelly Bean\",\"email\":\"jelly.bean@example.com\",\"phone\":\"01-2048096\",\"items\":[{\"itemNo\":1,\"description\":\"Product-100\",\"quantity\":12,\"unitPrice\":5,\"totalPrice\":60}],\"totalPrice\":225}";
+    String casJSON = "{\"@old\":{\"@type\":\"com.gemstone.gemfire.web.rest.domain.Order\",\"purchaseOrderNo\":1121,\"customerId\":1012,\"description\":\"Order for  XYZ Corp\",\"orderDate\":\"02/10/2014\",\"deliveryDate\":\"02/20/2014\",\"contact\":\"Jelly Bean\",\"email\":\"jelly.bean@example.com\",\"phone\":\"01-2048096\",\"items\":[{\"itemNo\":1,\"description\":\"Product-100\",\"quantity\":12,\"unitPrice\":5,\"totalPrice\":60}],\"totalPrice\":225},\"@new \":{\"@type\":\"com.gemstone.gemfire.web.rest.domain.Order\",\"purchaseOrderNo\":1121,\"customerId\":1013,\"description\":\"Order for  New Corp\",\"orderDate\":\"02/10/2014\",\"deliveryDate\":\"02/25/2014\",\"contact\":\"Vanilla Bean\",\"email\":\"vanillabean@example.com\",\"phone\":\"01-2048096\",\"items\":[{\"itemNo\":12345,\"description\":\"part 123\",\"quantity\":12,\"unitPrice\":29.99,\"totalPrice\":149.95}],\"totalPrice\":149.95}}";
+    // Test an unknown user - 401 error
+    HttpResponse response = doPut("/" + REGION_NAME + "/key1?op=PUT", "unknown-user", "1234567", "{ \"key9\" : \"foo\" }");
+    assertEquals(401, getCode(response));
+
+    response = doPut("/" + REGION_NAME + "/key1?op=CAS", "unknown-user", "1234567", "{ \"key9\" : \"foo\" }");
+    assertEquals(401, getCode(response));
+    response = doPut("/" + REGION_NAME + "/key1?op=REPLACE", "unknown-user", "1234567", "{ \"@old\" : \"value1\", \"@new\" : \"CASvalue\" }");
+    assertEquals(401, getCode(response));
+
+    response = doPut("/" + REGION_NAME + "/key1?op=PUT", "dataReader", "1234567", "{ \"key1\" : \"foo\" }");
+    assertEquals(403, getCode(response));
+
+    response = doPut("/" + REGION_NAME + "/key1?op=REPLACE", "dataReader", "1234567", "{ \"key1\" : \"foo\" }");
+    assertEquals(403, getCode(response));
+
+    response = doPut("/" + REGION_NAME + "/key1?op=CAS", "dataReader", "1234567", casJSON);
+    assertEquals(403, getCode(response));
+
+    response = doPut("/" + REGION_NAME + "/key1?op=PUT", "key1User", "1234567", "{ \"key1\" : \"foo\" }");
+    assertEquals(200, getCode(response));
+    assertTrue(isOK(response));
+
+    response = doPut("/" + REGION_NAME + "/key1?op=REPLACE", "key1User", "1234567", json);
+    assertEquals(200, getCode(response));
+    assertTrue(isOK(response));
+  }
+
+  protected HttpResponse doHEAD(String query, String username, String password) throws MalformedURLException {
+    HttpHead httpHead = new HttpHead(CONTEXT + query);
+    return doRequest(httpHead, username, password);
+  }
+
+
+  protected HttpResponse doPost(String query, String username, String password, String body) throws MalformedURLException {
+    HttpPost httpPost = new HttpPost(CONTEXT + query);
+    httpPost.addHeader("content-type", "application/json");
+    httpPost.setEntity(new StringEntity(body, StandardCharsets.UTF_8));
+    return doRequest(httpPost, username, password);
+  }
+
+
+  protected HttpResponse doPut(String query, String username, String password, String body) throws MalformedURLException {
+    HttpPut httpPut = new HttpPut(CONTEXT + query);
+    httpPut.addHeader("content-type", "application/json");
+    httpPut.setEntity(new StringEntity(body, StandardCharsets.UTF_8));
+    return doRequest(httpPut, username, password);
+  }
+
+  protected HttpResponse doGet(String uri, String username, String password) throws MalformedURLException {
+    HttpGet getRequest = new HttpGet(CONTEXT + uri);
+    return doRequest(getRequest, username, password);
+  }
+
+  protected HttpResponse doDelete(String uri, String username, String password) throws MalformedURLException {
+    HttpDelete httpDelete = new HttpDelete(CONTEXT + uri);
+    return doRequest(httpDelete, username, password);
+  }
+
+  /**
+   * Check the HTTP status of the response and return whether it is within the OK range
+   *
+   * @param response The HttpResponse message received from the server
+   *
+   * @return true if the status code is a 2XX-type code (200-299), otherwise false
+   */
+  protected boolean isOK(HttpResponse response) {
+    int returnCode = response.getStatusLine().getStatusCode();
+    return (returnCode < 300 && returnCode >= 200);
+  }
+
+  /**
+   * Check the HTTP status of the response and return true if a 401
+   *
+   * @param response The HttpResponse message received from the server
+   *
+   * @return true if the status code is 401, otherwise false
+   */
+  protected boolean isUnauthorized(HttpResponse response) {
+    int returnCode = response.getStatusLine().getStatusCode();
+    return returnCode == 401;
+  }
+
+  /**
+   * Retrieve the status code of the HttpResponse
+   *
+   * @param response The HttpResponse message received from the server
+   *
+   * @return a numeric value
+   */
+  protected int getCode(HttpResponse response) {
+    return response.getStatusLine().getStatusCode();
+  }
+
+  protected JSONTokener getResponseBody(HttpResponse response) throws IOException {
+    HttpEntity entity = response.getEntity();
+    InputStream content = entity.getContent();
+    BufferedReader reader = new BufferedReader(new InputStreamReader(content));
+    String line;
+    StringBuilder str = new StringBuilder();
+    while ((line = reader.readLine()) != null) {
+      str.append(line);
+    }
+    return new JSONTokener(str.toString());
+  }
+
+  private HttpResponse doRequest(HttpRequestBase request, String username, String password) throws MalformedURLException {
+    HttpHost targetHost = new HttpHost(HOSTNAME, this.restPort, PROTOCOL);
+    CloseableHttpClient httpclient = HttpClients.custom().build();
+    HttpClientContext clientContext = HttpClientContext.create();
+    // if username is null, do not put in authentication
+    if (username != null) {
+      CredentialsProvider credsProvider = new BasicCredentialsProvider();
+      credsProvider.setCredentials(new AuthScope(targetHost.getHostName(), targetHost.getPort()), new UsernamePasswordCredentials(username, password));
+      httpclient = HttpClients.custom().setDefaultCredentialsProvider(credsProvider).build();
+      AuthCache authCache = new BasicAuthCache();
+      BasicScheme basicAuth = new BasicScheme();
+      authCache.put(targetHost, basicAuth);
+      clientContext.setCredentialsProvider(credsProvider);
+      clientContext.setAuthCache(authCache);
+    }
+
+    try {
+      return httpclient.execute(targetHost, request, clientContext);
+    } catch (ClientProtocolException e) {
+      e.printStackTrace();
+      fail("Rest GET should not have thrown ClientProtocolException!");
+    } catch (IOException e) {
+      e.printStackTrace();
+      fail("Rest GET Request should not have thrown IOException!");
+    }
+    return null;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/main/java/org/apache/geode/internal/security/IntegratedSecurityService.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/security/IntegratedSecurityService.java b/geode-core/src/main/java/org/apache/geode/internal/security/IntegratedSecurityService.java
index 79b70f8..ac1be0d 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/security/IntegratedSecurityService.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/security/IntegratedSecurityService.java
@@ -107,7 +107,7 @@ public class IntegratedSecurityService implements SecurityService{
       }
     }
 
-    // in other cases like admin rest call or pulse authorization
+    // in other cases like rest call, client operations, we get it from the current thread
     currentUser = SecurityUtils.getSubject();
 
     if (currentUser == null || currentUser.getPrincipal() == null) {
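
Note on the comment change above: SecurityUtils.getSubject() resolves
the Subject from state bound to the calling thread (assuming a Shiro
SecurityManager has been configured), which is why REST calls and
client operations that bound a Subject earlier in the request get it
back at this point. A minimal, Geode-independent sketch of that
thread-binding pattern (the permission string is just an example):

    import org.apache.shiro.SecurityUtils;
    import org.apache.shiro.subject.Subject;
    import org.apache.shiro.util.ThreadContext;

    public class ThreadBoundSubjectSketch {
      public static void run(Subject authenticatedSubject) {
        // Typically done by a servlet filter or message interceptor.
        ThreadContext.bind(authenticatedSubject);
        try {
          // Returns the Subject bound to this thread.
          Subject current = SecurityUtils.getSubject();
          // Throws an AuthorizationException if the subject lacks the permission.
          current.checkPermission("DATA:READ");
        } finally {
          // Always clear the thread-local state.
          ThreadContext.unbindSubject();
        }
      }
    }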

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/AccessControlMBeanJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/AccessControlMBeanJUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/AccessControlMBeanJUnitTest.java
index c22fff3..db4767e 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/security/AccessControlMBeanJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/security/AccessControlMBeanJUnitTest.java
@@ -25,6 +25,8 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import org.apache.geode.internal.AvailablePort;
+import org.apache.geode.test.dunit.rules.ConnectionConfiguration;
+import org.apache.geode.test.dunit.rules.MBeanServerConnectionRule;
 import org.apache.geode.test.junit.categories.IntegrationTest;
 import org.apache.geode.test.junit.categories.SecurityTest;
 
@@ -36,8 +38,7 @@ public class AccessControlMBeanJUnitTest {
   private AccessControlMXBean bean;
 
   @ClassRule
-  public static JsonAuthorizationCacheStartRule serverRule = new JsonAuthorizationCacheStartRule(
-      jmxManagerPort, "org/apache/geode/management/internal/security/cacheServer.json");
+  public static CacheServerStartupRule serverRule = CacheServerStartupRule.withDefaultSecurityJson(jmxManagerPort);
 
   @Rule
   public MBeanServerConnectionRule connectionRule = new MBeanServerConnectionRule(jmxManagerPort);
@@ -51,7 +52,7 @@ public class AccessControlMBeanJUnitTest {
    * Test that any authenticated user can access this method
    */
   @Test
-  @JMXConnectionConfiguration(user = "stranger", password = "1234567")
+  @ConnectionConfiguration(user = "stranger", password = "1234567")
   public void testAnyAccess() throws Exception {
     assertThat(bean.authorize("DATA", "READ")).isEqualTo(false);
     assertThat(bean.authorize("CLUSTER", "READ")).isEqualTo(false);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/CacheServerMBeanAuthenticationJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/CacheServerMBeanAuthenticationJUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/CacheServerMBeanAuthenticationJUnitTest.java
index 3880948..0dd512d 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/security/CacheServerMBeanAuthenticationJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/security/CacheServerMBeanAuthenticationJUnitTest.java
@@ -24,6 +24,8 @@ import org.junit.experimental.categories.Category;
 
 import org.apache.geode.internal.AvailablePort;
 import org.apache.geode.management.CacheServerMXBean;
+import org.apache.geode.test.dunit.rules.ConnectionConfiguration;
+import org.apache.geode.test.dunit.rules.MBeanServerConnectionRule;
 import org.apache.geode.test.junit.categories.IntegrationTest;
 
 @Category(IntegrationTest.class)
@@ -34,8 +36,7 @@ public class CacheServerMBeanAuthenticationJUnitTest {
   private CacheServerMXBean bean;
 
   @ClassRule
-  public static JsonAuthorizationCacheStartRule serverRule = new JsonAuthorizationCacheStartRule(
-      jmxManagerPort, "org/apache/geode/management/internal/security/cacheServer.json");
+  public static CacheServerStartupRule serverRule = CacheServerStartupRule.withDefaultSecurityJson(jmxManagerPort);
 
   @Rule
   public MBeanServerConnectionRule connectionRule = new MBeanServerConnectionRule(jmxManagerPort);
@@ -46,7 +47,7 @@ public class CacheServerMBeanAuthenticationJUnitTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "data-admin", password = "1234567")
+  @ConnectionConfiguration(user = "data-admin", password = "1234567")
   public void testAllAccess() throws Exception {
     bean.removeIndex("foo");
     bean.fetchLoadProbe();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/CacheServerMBeanAuthorizationJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/CacheServerMBeanAuthorizationJUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/CacheServerMBeanAuthorizationJUnitTest.java
index 7bbfbcc..03b41a9 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/security/CacheServerMBeanAuthorizationJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/security/CacheServerMBeanAuthorizationJUnitTest.java
@@ -26,6 +26,8 @@ import org.junit.experimental.categories.Category;
 
 import org.apache.geode.internal.AvailablePort;
 import org.apache.geode.management.CacheServerMXBean;
+import org.apache.geode.test.dunit.rules.ConnectionConfiguration;
+import org.apache.geode.test.dunit.rules.MBeanServerConnectionRule;
 import org.apache.geode.test.junit.categories.FlakyTest;
 import org.apache.geode.test.junit.categories.IntegrationTest;
 import org.apache.geode.test.junit.categories.SecurityTest;
@@ -38,8 +40,7 @@ public class CacheServerMBeanAuthorizationJUnitTest {
   private CacheServerMXBean bean;
 
   @ClassRule
-  public static JsonAuthorizationCacheStartRule serverRule = new JsonAuthorizationCacheStartRule(
-      jmxManagerPort, "org/apache/geode/management/internal/security/cacheServer.json");
+  public static CacheServerStartupRule serverRule = CacheServerStartupRule.withDefaultSecurityJson(jmxManagerPort);
 
   @Rule
   public MBeanServerConnectionRule connectionRule = new MBeanServerConnectionRule(jmxManagerPort);
@@ -50,7 +51,7 @@ public class CacheServerMBeanAuthorizationJUnitTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "data-admin", password = "1234567")
+  @ConnectionConfiguration(user = "data-admin", password = "1234567")
   public void testDataAdmin() throws Exception {
     bean.removeIndex("foo");
     assertThatThrownBy(() -> bean.executeContinuousQuery("bar")).hasMessageContaining(TestCommand.dataRead.toString());
@@ -63,7 +64,7 @@ public class CacheServerMBeanAuthorizationJUnitTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "cluster-admin", password = "1234567")
+  @ConnectionConfiguration(user = "cluster-admin", password = "1234567")
   public void testClusterAdmin() throws Exception {
     assertThatThrownBy(() -> bean.removeIndex("foo")).hasMessageContaining(TestCommand.dataManage.toString());
     assertThatThrownBy(() -> bean.executeContinuousQuery("bar")).hasMessageContaining(TestCommand.dataRead.toString());
@@ -72,7 +73,7 @@ public class CacheServerMBeanAuthorizationJUnitTest {
 
 
   @Test
-  @JMXConnectionConfiguration(user = "data-user", password = "1234567")
+  @ConnectionConfiguration(user = "data-user", password = "1234567")
   public void testDataUser() throws Exception {
     assertThatThrownBy(() -> bean.removeIndex("foo")).hasMessageContaining(TestCommand.dataManage.toString());
     bean.executeContinuousQuery("bar");
@@ -80,7 +81,7 @@ public class CacheServerMBeanAuthorizationJUnitTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "stranger", password = "1234567")
+  @ConnectionConfiguration(user = "stranger", password = "1234567")
   public void testNoAccess() throws Exception {
     assertThatThrownBy(() -> bean.removeIndex("foo")).hasMessageContaining(TestCommand.dataManage.toString());
     assertThatThrownBy(() -> bean.executeContinuousQuery("bar")).hasMessageContaining(TestCommand.dataRead.toString());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/CacheServerMBeanShiroJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/CacheServerMBeanShiroJUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/CacheServerMBeanShiroJUnitTest.java
index 721a431..d0df150 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/security/CacheServerMBeanShiroJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/security/CacheServerMBeanShiroJUnitTest.java
@@ -16,28 +16,40 @@
  */
 package org.apache.geode.management.internal.security;
 
+import static org.apache.geode.distributed.ConfigurationProperties.*;
 import static org.assertj.core.api.Assertions.*;
 
+import java.util.Properties;
+
 import org.junit.Before;
-import org.junit.ClassRule;
+import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import org.apache.geode.internal.AvailablePort;
 import org.apache.geode.management.CacheServerMXBean;
+import org.apache.geode.test.dunit.rules.ConnectionConfiguration;
+import org.apache.geode.test.dunit.rules.MBeanServerConnectionRule;
+import org.apache.geode.test.dunit.rules.ServerStarter;
 import org.apache.geode.test.junit.categories.IntegrationTest;
 import org.apache.geode.test.junit.categories.SecurityTest;
 
 @Category({ IntegrationTest.class, SecurityTest.class })
 public class CacheServerMBeanShiroJUnitTest {
-
-  private static int jmxManagerPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
+  static int jmxManagerPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
+  static Properties properties = new Properties(){{
+    setProperty(JMX_MANAGER_PORT, jmxManagerPort+"");
+    setProperty(SECURITY_SHIRO_INIT, "shiro.ini");
+  }};
 
   private CacheServerMXBean bean;
 
-  @ClassRule
-  public static ShiroCacheStartRule serverRule = new ShiroCacheStartRule(jmxManagerPort, "shiro.ini");
+  @BeforeClass
+  public static void before() throws Exception {
+    ServerStarter serverStarter = new ServerStarter(properties);
+    serverStarter.startServer();
+  }
 
   @Rule
   public MBeanServerConnectionRule connectionRule = new MBeanServerConnectionRule(jmxManagerPort);
@@ -48,7 +60,7 @@ public class CacheServerMBeanShiroJUnitTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "root", password = "secret")
+  @ConnectionConfiguration(user = "root", password = "secret")
   public void testAllAccess() throws Exception {
     bean.removeIndex("foo");
     bean.executeContinuousQuery("bar");
@@ -61,7 +73,7 @@ public class CacheServerMBeanShiroJUnitTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "guest", password = "guest")
+  @ConnectionConfiguration(user = "guest", password = "guest")
   public void testNoAccess() throws Exception {
     assertThatThrownBy(() -> bean.removeIndex("foo")).hasMessageContaining(TestCommand.dataManage.toString());
     assertThatThrownBy(() -> bean.executeContinuousQuery("bar")).hasMessageContaining(TestCommand.dataRead.toString());
@@ -74,7 +86,7 @@ public class CacheServerMBeanShiroJUnitTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "regionAReader", password = "password")
+  @ConnectionConfiguration(user = "regionAReader", password = "password")
   public void testRegionAccess() throws Exception{
     assertThatThrownBy(() -> bean.removeIndex("foo")).hasMessageContaining(TestCommand.dataManage.toString());
     assertThatThrownBy(() -> bean.fetchLoadProbe()).hasMessageContaining(TestCommand.clusterRead.toString());
@@ -84,7 +96,7 @@ public class CacheServerMBeanShiroJUnitTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "dataReader", password = "12345")
+  @ConnectionConfiguration(user = "dataReader", password = "12345")
   public void testDataRead() throws Exception{
     assertThatThrownBy(() -> bean.removeIndex("foo")).hasMessageContaining(TestCommand.dataManage.toString());
     assertThatThrownBy(() -> bean.fetchLoadProbe()).hasMessageContaining(TestCommand.clusterRead.toString());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/CacheServerStartupRule.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/CacheServerStartupRule.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/CacheServerStartupRule.java
new file mode 100644
index 0000000..b99d8e1
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/security/CacheServerStartupRule.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.geode.management.internal.security;
+
+import static org.apache.geode.distributed.ConfigurationProperties.*;
+
+import java.io.Serializable;
+import java.util.Properties;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.rules.ExternalResource;
+
+import org.apache.geode.cache.Cache;
+import org.apache.geode.security.templates.SampleSecurityManager;
+import org.apache.geode.test.dunit.rules.ServerStarter;
+
+/**
+ * This rule starts a cache server with the given properties in the current VM
+ */
+public class CacheServerStartupRule extends ExternalResource implements Serializable {
+
+  private ServerStarter serverStarter;
+
+  public static CacheServerStartupRule withDefaultSecurityJson(int jmxManagerPort) {
+    return new CacheServerStartupRule(jmxManagerPort, "org/apache/geode/management/internal/security/cacheServer.json");
+  }
+
+  public CacheServerStartupRule(int jmxManagerPort, String jsonFile) {
+    Properties properties = new Properties();
+    if(jmxManagerPort>0){
+      properties.put(JMX_MANAGER_PORT, String.valueOf(jmxManagerPort));
+    }
+    if(jsonFile!=null){
+      properties.put(SECURITY_MANAGER, SampleSecurityManager.class.getName());
+      properties.put(SampleSecurityManager.SECURITY_JSON, jsonFile);
+    }
+    serverStarter = new ServerStarter(properties);
+  }
+
+  @Before
+  public void before() throws Throwable {
+    serverStarter.startServer();
+    serverStarter.cache.createRegionFactory().create("region1");
+  }
+
+  @After
+  public void after(){
+    serverStarter.after();
+  }
+
+  public Cache getCache() {
+    return serverStarter.cache;
+  }
+
+  public int getServerPort(){
+    return serverStarter.server.getPort();
+  }
+}
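
A sketch of how a test can wire the new rule up, mirroring the tests
updated in this commit (the class name and the assertion are
illustrative only; the credentials come from the default security
JSON used by the surrounding tests):

    import static org.junit.Assert.assertNotNull;

    import org.junit.ClassRule;
    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.experimental.categories.Category;

    import org.apache.geode.internal.AvailablePort;
    import org.apache.geode.test.dunit.rules.ConnectionConfiguration;
    import org.apache.geode.test.dunit.rules.MBeanServerConnectionRule;
    import org.apache.geode.test.junit.categories.IntegrationTest;
    import org.apache.geode.test.junit.categories.SecurityTest;

    @Category({ IntegrationTest.class, SecurityTest.class })
    public class ExampleCacheServerSecurityTest {
      private static int jmxManagerPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);

      @ClassRule
      public static CacheServerStartupRule serverRule =
          CacheServerStartupRule.withDefaultSecurityJson(jmxManagerPort);

      @Rule
      public MBeanServerConnectionRule connectionRule = new MBeanServerConnectionRule(jmxManagerPort);

      @Test
      @ConnectionConfiguration(user = "data-admin", password = "1234567")
      public void serverStartsWithSecuredRegion() {
        // region1 is created by CacheServerStartupRule.before()
        assertNotNull(serverRule.getCache().getRegion("region1"));
      }
    }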

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/CliCommandsSecurityTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/CliCommandsSecurityTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/CliCommandsSecurityTest.java
index 84155a9..403e2ab 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/security/CliCommandsSecurityTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/security/CliCommandsSecurityTest.java
@@ -30,6 +30,8 @@ import org.apache.geode.internal.AvailablePort;
 import org.apache.geode.internal.logging.LogService;
 import org.apache.geode.management.MemberMXBean;
 import org.apache.geode.security.NotAuthorizedException;
+import org.apache.geode.test.dunit.rules.ConnectionConfiguration;
+import org.apache.geode.test.dunit.rules.MBeanServerConnectionRule;
 import org.apache.geode.test.junit.categories.IntegrationTest;
 import org.apache.geode.test.junit.categories.SecurityTest;
 
@@ -43,8 +45,7 @@ public class CliCommandsSecurityTest {
   private List<TestCommand> commands = TestCommand.getCommands();
 
   @ClassRule
-  public static JsonAuthorizationCacheStartRule serverRule = new JsonAuthorizationCacheStartRule(
-      jmxManagerPort, "org/apache/geode/management/internal/security/cacheServer.json");
+  public static CacheServerStartupRule serverRule = CacheServerStartupRule.withDefaultSecurityJson(jmxManagerPort);
 
   @Rule
   public MBeanServerConnectionRule connectionRule = new MBeanServerConnectionRule(jmxManagerPort);
@@ -55,7 +56,7 @@ public class CliCommandsSecurityTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "stranger", password = "1234567")
+  @ConnectionConfiguration(user = "stranger", password = "1234567")
   public void testNoAccess(){
    for (TestCommand command:commands) {
      // skip query commands since query commands are only available in client shell
@@ -77,7 +78,7 @@ public class CliCommandsSecurityTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "super-user", password = "1234567")
+  @ConnectionConfiguration(user = "super-user", password = "1234567")
   public void testAdminUser() throws Exception {
     for (TestCommand command:commands) {
       LogService.getLogger().info("processing: "+command.getCommand());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/DataCommandsSecurityTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/DataCommandsSecurityTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/DataCommandsSecurityTest.java
index 0084cb8..1b2a7ce 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/security/DataCommandsSecurityTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/security/DataCommandsSecurityTest.java
@@ -27,6 +27,8 @@ import org.junit.experimental.categories.Category;
 import org.apache.geode.internal.AvailablePort;
 import org.apache.geode.management.MemberMXBean;
 import org.apache.geode.security.GemFireSecurityException;
+import org.apache.geode.test.dunit.rules.ConnectionConfiguration;
+import org.apache.geode.test.dunit.rules.MBeanServerConnectionRule;
 import org.apache.geode.test.junit.categories.IntegrationTest;
 import org.apache.geode.test.junit.categories.SecurityTest;
 
@@ -38,8 +40,7 @@ public class DataCommandsSecurityTest {
   private MemberMXBean bean;
 
   @ClassRule
-  public static JsonAuthorizationCacheStartRule serverRule = new JsonAuthorizationCacheStartRule(
-      jmxManagerPort, "org/apache/geode/management/internal/security/cacheServer.json");
+  public static CacheServerStartupRule serverRule = CacheServerStartupRule.withDefaultSecurityJson(jmxManagerPort);
 
   @Rule
   public MBeanServerConnectionRule connectionRule = new MBeanServerConnectionRule(jmxManagerPort);
@@ -50,7 +51,7 @@ public class DataCommandsSecurityTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "region1-user", password = "1234567")
+  @ConnectionConfiguration(user = "region1-user", password = "1234567")
   public void testDataUser() throws Exception {
     bean.processCommand("locate entry --key=k1 --region=region1");
 
@@ -58,7 +59,7 @@ public class DataCommandsSecurityTest {
     assertThatThrownBy(() -> bean.processCommand("locate entry --key=k1 --region=secureRegion")).isInstanceOf(GemFireSecurityException.class);
   }
 
-  @JMXConnectionConfiguration(user = "secure-user", password = "1234567")
+  @ConnectionConfiguration(user = "secure-user", password = "1234567")
   @Test
   public void testSecureDataUser(){
     // can do all these on both regions
@@ -67,7 +68,7 @@ public class DataCommandsSecurityTest {
   }
 
   // dataUser has all the permissions granted, but not to region2 (only to region1)
-  @JMXConnectionConfiguration(user = "region1-user", password = "1234567")
+  @ConnectionConfiguration(user = "region1-user", password = "1234567")
   @Test
   public void testRegionAccess(){
     assertThatThrownBy(() -> bean.processCommand("rebalance --include-region=region2")).isInstanceOf(GemFireSecurityException.class)

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/DiskStoreMXBeanSecurityJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/DiskStoreMXBeanSecurityJUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/DiskStoreMXBeanSecurityJUnitTest.java
index 750ce2a..8e556a7 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/security/DiskStoreMXBeanSecurityJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/security/DiskStoreMXBeanSecurityJUnitTest.java
@@ -27,6 +27,8 @@ import org.junit.experimental.categories.Category;
 
 import org.apache.geode.internal.AvailablePort;
 import org.apache.geode.management.DiskStoreMXBean;
+import org.apache.geode.test.dunit.rules.ConnectionConfiguration;
+import org.apache.geode.test.dunit.rules.MBeanServerConnectionRule;
 import org.apache.geode.test.junit.categories.IntegrationTest;
 import org.apache.geode.test.junit.categories.SecurityTest;
 
@@ -38,8 +40,7 @@ public class DiskStoreMXBeanSecurityJUnitTest {
   private DiskStoreMXBean bean;
 
   @ClassRule
-  public static JsonAuthorizationCacheStartRule serverRule = new JsonAuthorizationCacheStartRule(
-      jmxManagerPort, "org/apache/geode/management/internal/security/cacheServer.json");
+  public static CacheServerStartupRule serverRule = CacheServerStartupRule.withDefaultSecurityJson(jmxManagerPort);
 
   @Rule
   public MBeanServerConnectionRule connectionRule = new MBeanServerConnectionRule(jmxManagerPort);
@@ -55,7 +56,7 @@ public class DiskStoreMXBeanSecurityJUnitTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "data-admin", password = "1234567")
+  @ConnectionConfiguration(user = "data-admin", password = "1234567")
   public void testAllAccess() throws Exception {
     bean.flush();
     bean.forceCompaction();
@@ -70,7 +71,7 @@ public class DiskStoreMXBeanSecurityJUnitTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "data-user", password = "1234567")
+  @ConnectionConfiguration(user = "data-user", password = "1234567")
   public void testNoAccess() throws Exception {
     assertThatThrownBy(() -> bean.flush()).hasMessageContaining(TestCommand.dataManage.toString());
     assertThatThrownBy(() -> bean.forceCompaction()).hasMessageContaining(TestCommand.dataManage.toString());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/GatewayReceiverMBeanSecurityTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/GatewayReceiverMBeanSecurityTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/GatewayReceiverMBeanSecurityTest.java
index b64a6f7..b799cbd 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/security/GatewayReceiverMBeanSecurityTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/security/GatewayReceiverMBeanSecurityTest.java
@@ -32,6 +32,8 @@ import org.junit.experimental.categories.Category;
 import org.apache.geode.internal.AvailablePort;
 import org.apache.geode.management.GatewayReceiverMXBean;
 import org.apache.geode.management.ManagementService;
+import org.apache.geode.test.dunit.rules.ConnectionConfiguration;
+import org.apache.geode.test.dunit.rules.MBeanServerConnectionRule;
 import org.apache.geode.test.junit.categories.IntegrationTest;
 import org.apache.geode.test.junit.categories.SecurityTest;
 
@@ -47,8 +49,7 @@ public class GatewayReceiverMBeanSecurityTest {
   private GatewayReceiverMXBean bean;
 
   @ClassRule
-  public static JsonAuthorizationCacheStartRule serverRule = new JsonAuthorizationCacheStartRule(
-      jmxManagerPort, "org/apache/geode/management/internal/security/cacheServer.json");
+  public static CacheServerStartupRule serverRule = CacheServerStartupRule.withDefaultSecurityJson(jmxManagerPort);
 
   @Rule
   public MBeanServerConnectionRule connectionRule = new MBeanServerConnectionRule(jmxManagerPort);
@@ -72,7 +73,7 @@ public class GatewayReceiverMBeanSecurityTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "data-admin", password = "1234567")
+  @ConnectionConfiguration(user = "data-admin", password = "1234567")
   public void testAllAccess() throws Exception {
     bean.getAverageBatchProcessingTime();
     bean.getBindAddress();
@@ -83,7 +84,7 @@ public class GatewayReceiverMBeanSecurityTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "data-user", password = "1234567")
+  @ConnectionConfiguration(user = "data-user", password = "1234567")
   public void testNoAccess() throws Exception {
     assertThatThrownBy(() -> bean.getTotalConnectionsTimedOut()).hasMessageContaining(TestCommand.clusterRead.toString());
     assertThatThrownBy(() -> bean.start()).hasMessageContaining(TestCommand.dataManage.toString());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/GatewaySenderMBeanSecurityTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/GatewaySenderMBeanSecurityTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/GatewaySenderMBeanSecurityTest.java
index 9acf8db..a800064 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/security/GatewaySenderMBeanSecurityTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/security/GatewaySenderMBeanSecurityTest.java
@@ -33,6 +33,8 @@ import org.apache.geode.internal.AvailablePort;
 import org.apache.geode.management.GatewaySenderMXBean;
 import org.apache.geode.management.ManagementService;
 import org.apache.geode.management.internal.beans.GatewaySenderMBean;
+import org.apache.geode.test.dunit.rules.ConnectionConfiguration;
+import org.apache.geode.test.dunit.rules.MBeanServerConnectionRule;
 import org.apache.geode.test.junit.categories.IntegrationTest;
 import org.apache.geode.test.junit.categories.SecurityTest;
 
@@ -48,8 +50,7 @@ public class GatewaySenderMBeanSecurityTest {
   private GatewaySenderMXBean bean;
 
   @ClassRule
-  public static JsonAuthorizationCacheStartRule serverRule = new JsonAuthorizationCacheStartRule(
-      jmxManagerPort, "org/apache/geode/management/internal/security/cacheServer.json");
+  public static CacheServerStartupRule serverRule = CacheServerStartupRule.withDefaultSecurityJson(jmxManagerPort);
 
   @Rule
   public MBeanServerConnectionRule connectionRule = new MBeanServerConnectionRule(jmxManagerPort);
@@ -73,7 +74,7 @@ public class GatewaySenderMBeanSecurityTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "data-admin", password = "1234567")
+  @ConnectionConfiguration(user = "data-admin", password = "1234567")
   public void testAllAccess() throws Exception {
     bean.getAlertThreshold();
     bean.getAverageDistributionTimePerBatch();
@@ -90,7 +91,7 @@ public class GatewaySenderMBeanSecurityTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "stranger", password = "1234567")
+  @ConnectionConfiguration(user = "stranger", password = "1234567")
   public void testNoAccess() throws Exception {
     assertThatThrownBy(() -> bean.getAlertThreshold()).hasMessageContaining(TestCommand.clusterRead.toString());
     assertThatThrownBy(() -> bean.getAverageDistributionTimePerBatch()).hasMessageContaining(TestCommand.clusterRead.toString());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/GfshCommandsPostProcessorTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/GfshCommandsPostProcessorTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/GfshCommandsPostProcessorTest.java
index 34fd5a9..6eba91c 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/security/GfshCommandsPostProcessorTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/security/GfshCommandsPostProcessorTest.java
@@ -16,30 +16,46 @@
  */
 package org.apache.geode.management.internal.security;
 
+import static org.apache.geode.distributed.ConfigurationProperties.*;
 import static org.apache.geode.internal.Assert.*;
 
-import org.apache.geode.security.templates.SamplePostProcessor;
+import java.util.Properties;
+
 import org.junit.Before;
-import org.junit.ClassRule;
+import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import org.apache.geode.cache.RegionShortcut;
 import org.apache.geode.internal.AvailablePortHelper;
 import org.apache.geode.management.internal.cli.HeadlessGfsh;
+import org.apache.geode.security.templates.SamplePostProcessor;
+import org.apache.geode.security.templates.SampleSecurityManager;
+import org.apache.geode.test.dunit.rules.ConnectionConfiguration;
+import org.apache.geode.test.dunit.rules.ServerStarter;
 import org.apache.geode.test.junit.categories.IntegrationTest;
 import org.apache.geode.test.junit.categories.SecurityTest;
 
 @Category({ IntegrationTest.class, SecurityTest.class })
 public class GfshCommandsPostProcessorTest {
 
-  protected static int jmxPort = AvailablePortHelper.getRandomAvailableTCPPort();
+  protected static int jmxPort  = AvailablePortHelper.getRandomAvailableTCPPort();
+  static Properties properties = new Properties(){{
+    setProperty(JMX_MANAGER_PORT, jmxPort+"");
+    setProperty(SECURITY_POST_PROCESSOR, SamplePostProcessor.class.getName());
+    setProperty(SECURITY_MANAGER, SampleSecurityManager.class.getName());
+    setProperty("security-json", "org/apache/geode/management/internal/security/cacheServer.json");
+  }};
 
   private HeadlessGfsh gfsh = null;
 
-  @ClassRule
-  public static JsonAuthorizationCacheStartRule serverRule = new JsonAuthorizationCacheStartRule(
-      jmxPort, "org/apache/geode/management/internal/security/cacheServer.json", SamplePostProcessor.class);
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    ServerStarter serverStarter = new ServerStarter(properties);
+    serverStarter.startServer();
+    serverStarter.cache.createRegionFactory(RegionShortcut.REPLICATE).create("region1");
+  }
 
   @Rule
   public GfshShellConnectionRule gfshConnection = new GfshShellConnectionRule(jmxPort);
@@ -50,7 +66,7 @@ public class GfshCommandsPostProcessorTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "data-user", password = "1234567")
+  @ConnectionConfiguration(user = "data-user", password = "1234567")
   public void testGetPostProcess() throws Exception {
     gfsh.executeCommand("put --region=region1 --key=key1 --value=value1");
     gfsh.executeCommand("put --region=region1 --key=key2 --value=value2");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/de621597/geode-core/src/test/java/org/apache/geode/management/internal/security/GfshCommandsSecurityTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/security/GfshCommandsSecurityTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/security/GfshCommandsSecurityTest.java
index def1792..960d13d 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/security/GfshCommandsSecurityTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/security/GfshCommandsSecurityTest.java
@@ -16,17 +16,20 @@
  */
 package org.apache.geode.management.internal.security;
 
+import static org.apache.geode.distributed.ConfigurationProperties.*;
 import static org.junit.Assert.*;
 
 import java.util.List;
+import java.util.Properties;
 
 import org.apache.shiro.authz.permission.WildcardPermission;
 import org.junit.Before;
-import org.junit.ClassRule;
+import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import org.apache.geode.cache.RegionShortcut;
 import org.apache.geode.internal.AvailablePortHelper;
 import org.apache.geode.internal.logging.LogService;
 import org.apache.geode.management.cli.Result;
@@ -34,6 +37,9 @@ import org.apache.geode.management.internal.cli.HeadlessGfsh;
 import org.apache.geode.management.internal.cli.result.CommandResult;
 import org.apache.geode.management.internal.cli.result.ErrorResultData;
 import org.apache.geode.management.internal.cli.result.ResultBuilder;
+import org.apache.geode.security.templates.SampleSecurityManager;
+import org.apache.geode.test.dunit.rules.ConnectionConfiguration;
+import org.apache.geode.test.dunit.rules.ServerStarter;
 import org.apache.geode.test.junit.categories.IntegrationTest;
 import org.apache.geode.test.junit.categories.SecurityTest;
 
@@ -44,11 +50,21 @@ public class GfshCommandsSecurityTest {
   protected static int jmxPort = ports[0];
   protected static int httpPort = ports[1];
 
+  static Properties properties = new Properties(){{
+    setProperty(JMX_MANAGER_PORT, jmxPort+"");
+    setProperty(HTTP_SERVICE_PORT, httpPort+"");
+    setProperty(SECURITY_MANAGER, SampleSecurityManager.class.getName());
+    setProperty("security-json", "org/apache/geode/management/internal/security/cacheServer.json");
+  }};
+
   private HeadlessGfsh gfsh = null;
 
-  @ClassRule
-  public static JsonAuthorizationCacheStartRule serverRule = new JsonAuthorizationCacheStartRule(
-      jmxPort, httpPort, "org/apache/geode/management/internal/security/cacheServer.json");
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    ServerStarter serverStarter = new ServerStarter(properties);
+    serverStarter.startServer();
+    serverStarter.cache.createRegionFactory(RegionShortcut.REPLICATE).create("region1");
+  }
 
   @Rule
   public GfshShellConnectionRule gfshConnection = new GfshShellConnectionRule(jmxPort, httpPort, false);
@@ -59,67 +75,67 @@ public class GfshCommandsSecurityTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "data-admin", password = "wrongPwd")
+  @ConnectionConfiguration(user = "data-admin", password = "wrongPwd")
   public void testInvalidCredentials() throws Exception {
     assertFalse(gfshConnection.isAuthenticated());
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "data-admin", password = "1234567")
+  @ConnectionConfiguration(user = "data-admin", password = "1234567")
   public void testValidCredentials() throws Exception{
     assertTrue(gfshConnection.isAuthenticated());
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "cluster-reader", password = "1234567")
+  @ConnectionConfiguration(user = "cluster-reader", password = "1234567")
   public void testClusterReader() throws Exception{
     runCommandsWithAndWithout("CLUSTER:READ");
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "cluster-writer", password = "1234567")
+  @ConnectionConfiguration(user = "cluster-writer", password = "1234567")
   public void testClusterWriter() throws Exception{
     runCommandsWithAndWithout("CLUSTER:WRITE");
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "cluster-manager", password = "1234567")
+  @ConnectionConfiguration(user = "cluster-manager", password = "1234567")
   public void testClusterManager() throws Exception{
     runCommandsWithAndWithout("CLUSTER:MANAGE");
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "data-reader", password = "1234567")
+  @ConnectionConfiguration(user = "data-reader", password = "1234567")
   public void testDataReader() throws Exception{
     runCommandsWithAndWithout("DATA:READ");
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "data-writer", password = "1234567")
+  @ConnectionConfiguration(user = "data-writer", password = "1234567")
   public void testDataWriter() throws Exception{
     runCommandsWithAndWithout("DATA:WRITE");
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "data-manager", password = "1234567")
+  @ConnectionConfiguration(user = "data-manager", password = "1234567")
   public void testDataManager() throws Exception{
     runCommandsWithAndWithout("DATA:MANAGE");
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "regionA-reader", password = "1234567")
+  @ConnectionConfiguration(user = "regionA-reader", password = "1234567")
   public void testRegionAReader() throws Exception{
     runCommandsWithAndWithout("DATA:READ:RegionA");
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "regionA-writer", password = "1234567")
+  @ConnectionConfiguration(user = "regionA-writer", password = "1234567")
   public void testRegionAWriter() throws Exception{
     runCommandsWithAndWithout("DATA:WRITE:RegionA");
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "regionA-manager", password = "1234567")
+  @ConnectionConfiguration(user = "regionA-manager", password = "1234567")
   public void testRegionAManager() throws Exception{
     runCommandsWithAndWithout("DATA:MANAGE:RegionA");
   }
@@ -168,7 +184,7 @@ public class GfshCommandsSecurityTest {
   }
 
   @Test
-  @JMXConnectionConfiguration(user = "data-user", password = "1234567")
+  @ConnectionConfiguration(user = "data-user", password = "1234567")
   public void testGetPostProcess() throws Exception {
     gfsh.executeCommand("put --region=region1 --key=key2 --value=value2");
     gfsh.executeCommand("put --region=region1 --key=key2 --value=value2");



[39/50] [abbrv] incubator-geode git commit: Merge remote-tracking branch 'origin/develop' into feature/GEODE-2019

Posted by kl...@apache.org.
Merge remote-tracking branch 'origin/develop' into feature/GEODE-2019


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/bc060f95
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/bc060f95
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/bc060f95

Branch: refs/heads/feature/GEODE-1930
Commit: bc060f95c1f889fe5c6261006fe8a2591bfe6187
Parents: 139c0a3 7e659b2
Author: Karen Miller <km...@pivotal.io>
Authored: Thu Oct 20 16:37:25 2016 -0700
Committer: Karen Miller <km...@pivotal.io>
Committed: Thu Oct 20 16:37:25 2016 -0700

----------------------------------------------------------------------
 .../LauncherLifecycleCommandsDUnitTest.java     | 22 ++++++
 .../geode/distributed/ServerLauncher.java       | 78 ++++++++++++++------
 .../membership/gms/membership/GMSJoinLeave.java |  7 +-
 .../cli/commands/LauncherLifecycleCommands.java | 29 +++++++-
 .../internal/cli/commands/ShellCommands.java    | 48 +++---------
 .../internal/cli/i18n/CliStrings.java           | 13 +++-
 .../management/internal/cli/shell/Gfsh.java     | 41 ++++++----
 .../geode.apache.org/schema/cache/cache-1.0.xsd |  4 +
 .../PRColocatedEquiJoinDUnitTest.java           |  2 +
 .../cli/commands/golden-help-offline.properties |  9 +++
 10 files changed, 172 insertions(+), 81 deletions(-)
----------------------------------------------------------------------



[22/50] [abbrv] incubator-geode git commit: GEODE-706 Fixed race condition between expiry thread and put thread.

Posted by kl...@apache.org.
GEODE-706 Fixed race condition between expiry thread and put thread.

There was a possibility of the expiry thread destroying an entry
while another thread was updating the same key. In that case the
expiry thread cancels the existing task and the update thread adds a
new expiry task, but both tasks are referenced through the same
regionEntry. If the expiry thread cancels its task after the update
thread has registered the new one, the entry will never expire.
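
For readers of the patch below: the fix hinges on the two-argument
remove(key, expectedTask) call in LocalRegion, which only unregisters
the task the expiry thread actually ran. A minimal, illustrative
sketch of that pattern (the class and field names here are made up
for the example, not Geode's real types):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    // Illustrative sketch: cancelling "my" task must not clobber a newer one.
    class ExpiryBookkeeping {
      private final ConcurrentMap<Object, Runnable> expiryTasks = new ConcurrentHashMap<>();

      // Update path: installs a fresh task for the entry.
      void scheduleNewTask(Object regionEntry, Runnable newTask) {
        expiryTasks.put(regionEntry, newTask);
      }

      // Expiry path: remove(key, value) is a no-op if the update thread
      // already installed a newer task, so that task stays registered and
      // the entry remains eligible for expiration.
      void cancelMyTask(Object regionEntry, Runnable myTask) {
        expiryTasks.remove(regionEntry, myTask);
      }
    }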


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/a3bd2566
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/a3bd2566
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/a3bd2566

Branch: refs/heads/feature/GEODE-1930
Commit: a3bd256648e30b0bf04c565a8f21d00868c29806
Parents: f1df6fc
Author: Hitesh Khamesra <hk...@pivotal.io>
Authored: Fri Oct 14 14:00:25 2016 -0700
Committer: Hitesh Khamesra <hk...@pivotal.io>
Committed: Mon Oct 17 14:30:46 2016 -0700

----------------------------------------------------------------------
 .../geode/internal/cache/AbstractRegionMap.java  |  4 ++--
 .../geode/internal/cache/EntryEventImpl.java     |  9 +++++++++
 .../geode/internal/cache/EntryExpiryTask.java    |  3 ++-
 .../apache/geode/internal/cache/LocalRegion.java | 19 +++++++++++++++----
 4 files changed, 28 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a3bd2566/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java
index 5861e9a..e02c7e1 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java
@@ -1513,9 +1513,9 @@ public abstract class AbstractRegionMap implements RegionMap {
           } finally {
             if (opCompleted) {
               if (re != null) {
-                owner.cancelExpiryTask(re);
+                owner.cancelExpiryTask(re, event.getExpiryTask());
               } else if (tombstone != null) {
-                owner.cancelExpiryTask(tombstone);
+                owner.cancelExpiryTask(tombstone, event.getExpiryTask());
               }
             }
           }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a3bd2566/geode-core/src/main/java/org/apache/geode/internal/cache/EntryEventImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/EntryEventImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/EntryEventImpl.java
index 6a964c0..d059aab 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/EntryEventImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/EntryEventImpl.java
@@ -73,6 +73,7 @@ public class EntryEventImpl
   // PACKAGE FIELDS //
   public transient LocalRegion region;
   private transient RegionEntry re;
+  private transient ExpiryTask expiryTask;
 
   protected KeyInfo keyInfo;
 
@@ -2853,4 +2854,12 @@ public class EntryEventImpl
   public boolean isOldValueOffHeap() {
     return isOffHeapReference(this.oldValue);
   }
+
+  public ExpiryTask getExpiryTask() {
+    return expiryTask;
+  }
+
+  public void setExpiryTask(ExpiryTask expiryTask) {
+    this.expiryTask = expiryTask;
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a3bd2566/geode-core/src/main/java/org/apache/geode/internal/cache/EntryExpiryTask.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/EntryExpiryTask.java b/geode-core/src/main/java/org/apache/geode/internal/cache/EntryExpiryTask.java
index 816f32f..0c20d32 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/EntryExpiryTask.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/EntryExpiryTask.java
@@ -117,6 +117,7 @@ public class EntryExpiryTask extends ExpiryTask {
     @Released EntryEventImpl event = EntryEventImpl.create(
         lr, Operation.EXPIRE_DESTROY, key, null,
         createExpireEntryCallback(lr, key), false, lr.getMyId());
+    event.setExpiryTask(this);
     try {
     event.setPendingSecondaryExpireDestroy(isPending);
     if (lr.generateEventID()) {
@@ -229,7 +230,7 @@ public class EntryExpiryTask extends ExpiryTask {
     // so the next call to addExpiryTaskIfAbsent will
     // add a new task instead of doing nothing, which would
     // erroneously cancel expiration for this key.
-    getLocalRegion().cancelExpiryTask(this.re);
+    getLocalRegion().cancelExpiryTask(this.re, null);
     getLocalRegion().performExpiryTimeout(this);
   }
   

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a3bd2566/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
index a6951de..ac4c705 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
@@ -8717,14 +8717,25 @@ public class LocalRegion extends AbstractRegion
       }
     }
   }
+  
+  void cancelExpiryTask(RegionEntry re) {
+    cancelExpiryTask(re, null);
+  }
 
-  void cancelExpiryTask(RegionEntry re)
+  void cancelExpiryTask(RegionEntry re, ExpiryTask expiryTask)
   {
-    EntryExpiryTask oldTask = this.entryExpiryTasks.remove(re);
-    if (oldTask != null) {
-      if (oldTask.cancel()) {
+    if (expiryTask != null) {
+      this.entryExpiryTasks.remove(re, expiryTask);
+      if (expiryTask.cancel()) {
         this.cache.getExpirationScheduler().incCancels();
       }
+    } else {
+      EntryExpiryTask oldTask = this.entryExpiryTasks.remove(re);
+      if (oldTask != null) {
+        if (oldTask.cancel()) {
+          this.cache.getExpirationScheduler().incCancels();
+        }
+      }
     }
   }
 


[12/50] [abbrv] incubator-geode git commit: GEODE-1952 Move docs build README; edit build instructions

Posted by kl...@apache.org.
GEODE-1952 Move docs build README; edit build instructions


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/a0de4c93
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/a0de4c93
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/a0de4c93

Branch: refs/heads/feature/GEODE-1930
Commit: a0de4c93f53dc51b1b1ca3489c66f3f8e0a62fa3
Parents: d573de2
Author: Joey McAllister <jm...@pivotal.io>
Authored: Fri Oct 14 12:13:15 2016 -0700
Committer: Karen Miller <km...@pivotal.io>
Committed: Fri Oct 14 14:51:04 2016 -0700

----------------------------------------------------------------------
 geode-book/README.md     | 61 ++++++++++++++++++++++++++++++++++++++++++-
 geode-docs/CONTRIBUTE.md |  8 +++---
 geode-docs/README.md     | 53 -------------------------------------
 3 files changed, 63 insertions(+), 59 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a0de4c93/geode-book/README.md
----------------------------------------------------------------------
diff --git a/geode-book/README.md b/geode-book/README.md
index a1c1602..198becc 100644
--- a/geode-book/README.md
+++ b/geode-book/README.md
@@ -1 +1,60 @@
-For information about writing, editing, building, and publishing the Apache Geode documentation, please view the `README.md` and `CONTRIBUTING.md` files in the `geode-docs` directory.
\ No newline at end of file
+# Apache Geode End-User Documentation
+
+Apache Geode provides the full source for end-user documentation in markdown format (see `../geode-docs/CONTRIBUTE.md`). The latest check-ins to `incubator-geode/geode-docs` are regularly built and published to http://geode.incubator.apache.org/docs/. Users can build the markdown into an HTML user guide using [Bookbinder](https://github.com/pivotal-cf/bookbinder) and the instructions below.
+
+Bookbinder is a Ruby gem that binds a unified documentation web application from markdown, HTML, and/or DITA source material. The source material for Bookbinder must be stored either in local directories or in GitHub repositories. Bookbinder runs [middleman](http://middlemanapp.com/) to produce a Rackup app that can be deployed locally or as a Web application.
+
+This document contains instructions for building and viewing the Geode documentation locally.
+
+- [Prerequisites](#prerequisites)
+- [Bookbinder Usage](#bookbinder-usage)
+- [Building the Documentation](#building-the-documentation)
+
+## Prerequisites
+
+Bookbinder requires Ruby version 2.0.0-p195 or higher.
+
+Follow the instructions below to install Bookbinder:
+
+1. Add gem "bookbindery" to your Gemfile.
+2. Run `bundle install` to install the dependencies specified in your Gemfile.
+
+## Bookbinder Usage
+
+Bookbinder is meant to be used from within a project called a **book**. The book includes a configuration file that describes which documentation repositories to use as source materials. Bookbinder provides a set of scripts to aggregate those repositories and publish them to various locations.
+
+For Geode, a preconfigured **book** is provided in the directory `geode-book`, which gathers content from the directory `geode-docs`. You can use this configuration to build HTML for Geode on your local system.
+
+The installed `config.yml` file configures the Geode book for building locally. The file configures the local directory for the markdown source files.
+
+## Building the Documentation
+
+1. The `Gemfile` in the `geode-book` directory already defines the `gem "bookbindery"` dependency. Make sure you are in the `geode-book` directory and enter:
+
+  ```
+  $ bundle install
+  ```
+
+   Note: You will not have to run `bundle install` on subsequent builds.
+
+2. To build the documentation locally using the installed `config.yml` file, enter:
+
+  ```
+  $ bundle exec bookbinder bind local
+  ```
+   Bookbinder converts the markdown source into HTML, which it puts in the `final_app` directory.
+
+3. Navigate to the `geode-book/final_app/` directory and enter:
+
+  ```
+  $ bundle install
+  ```
+   Note: You will not have to run `bundle install` on subsequent builds.
+
+4. To start the website locally, enter:
+
+  ```
+  $ rackup
+  ```
+
+   You can now view the local documentation at <http://localhost:9292>. 
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a0de4c93/geode-docs/CONTRIBUTE.md
----------------------------------------------------------------------
diff --git a/geode-docs/CONTRIBUTE.md b/geode-docs/CONTRIBUTE.md
index 979474e..8ff2502 100644
--- a/geode-docs/CONTRIBUTE.md
+++ b/geode-docs/CONTRIBUTE.md
@@ -6,6 +6,8 @@ Apache Geode welcomes your contributions to the community's documentation effort
 - [Working with Images and Graphics](#working-with-images-and-graphics)
 - [Writing Guidelines](#writing-guidelines)
 
+For instructions on building the documentation locally, see `../geode-book/README.md`.
+
 ## Working with Markdown Files
 
 You can edit markdown files in any text editor. For more, read [Daring Fireball's Markdown Syntax page](https://daringfireball.net/projects/markdown/syntax).
@@ -23,13 +25,9 @@ The Wikipedia page [Comparison of Vector Graphics Editors](http://en.wikipedia.o
 The most important advice we can provide for working with the Apache Geode docs is to spend some time becoming familiar with the existing source files and the structure of the project directory. In particular, note the following conventions and tips:
 
 - Top-level subdirectories organize topics into "books": basic_config, configuring, developing, etc.
-
 - Use lowercase characters for all file and directory names. Separate words in filenames with an underscore (`_`) character.
-
 - Use the `.md` file extension for topic files.
-
 - Add new topics to the existing directories by subject type. Only create a new directory if you are starting a new subject or a new book.
-
 - To start a new topic, you can make a copy of an existing file with similar content and edit it.
-
 - Use the appropriate document type for the content you are writing. Create multiple topics if you are writing overview, procedural, and reference content.
+- To edit elements in the navigation pane (the "subnav") that appears on the left side of the documentation, navigate to `../geode-book/master_middleman/source/subnavs/geode-subnav.erb`.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a0de4c93/geode-docs/README.md
----------------------------------------------------------------------
diff --git a/geode-docs/README.md b/geode-docs/README.md
deleted file mode 100644
index 9fb5870..0000000
--- a/geode-docs/README.md
+++ /dev/null
@@ -1,53 +0,0 @@
-# Apache Geode End-User Documentation
-
-Apache Geode provides the full source for end-user documentation in markdown format. The latest check-ins to `incubator-geode/geode-docs` are regularly built and published to http://geode.incubator.apache.org/docs/. Users can build the markdown into an HTML user guide using [Bookbinder](https://github.com/pivotal-cf/bookbinder) and the instructions below.
-
-Bookbinder is a Ruby gem that binds  a unified documentation web application from markdown, html, and/or DITA source material. The source material for bookbinder must be stored either in local directories or in GitHub repositories. Bookbinder runs [middleman](http://middlemanapp.com/) to produce a Rackup app that can be deployed locally or as a Web application.
-
-This document contains instructions for building and viewing the Geode documentation locally.
-
-- [Prerequisites](#prerequisites)
-- [Bookbinder Usage](#bookbinder-usage)
-- [Building the Documentation](#building-the-documentation)
-
-## Prerequisites
-
-Bookbinder requires Ruby version 2.0.0-p195 or higher.
-
-Follow the instructions below to install Bookbinder:
-
-1. Add gem "bookbindery" to your Gemfile.
-2. Run `bundle install` to install the dependencies specified in your Gemfile.
-
-## Bookbinder Usage
-
-Bookbinder is meant to be used from within a project called a **book**. The book includes a configuration file that describes which documentation repositories to use as source materials. Bookbinder provides a set of scripts to aggregate those repositories and publish them to various locations.
-
-For Geode, a preconfigured **book** is provided in the directory `geode-book`, which gathers content from the directory `geode-docs`. You can use this configuration to build HTML for Geode on your local system.
-
-The installed `config.yml` file configures the Geode book for building locally. The file configures the local directory for the markdown source files.
-
-## Building the Documentation
-
-1. The GemFile in the `geode-book` directory already defines the `gem "bookbindery"` dependency. Make sure you are in the `geode-book` directory and enter:
-
-  ```
-  $ bundle install
-  ```
-
-2. To build the files locally using the installed `config.yml` file, enter:
-
-  ```
-  $ bundle exec bookbinder bind local
-  ```
-
-  Bookbinder converts the markdown source into HTML, which it puts in the `final_app` directory.
-
-3. To view the local documentation, do the following:
-
-  ```
-  $ cd final_app
-  $ rackup
-  ```
-
-  You can now view the local documentation at <http://localhost:9292>.


[35/50] [abbrv] incubator-geode git commit: GEODE-2019 Add automated rebalance documentation

Posted by kl...@apache.org.
GEODE-2019  Add automated rebalance documentation


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/f1be596a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/f1be596a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/f1be596a

Branch: refs/heads/feature/GEODE-1930
Commit: f1be596a322911525908524663cc0e5cad17a2bb
Parents: 11ef3eb
Author: Karen Miller <km...@pivotal.io>
Authored: Thu Oct 20 10:23:12 2016 -0700
Committer: Karen Miller <km...@pivotal.io>
Committed: Thu Oct 20 10:23:12 2016 -0700

----------------------------------------------------------------------
 .../source/subnavs/geode-subnav.erb             |  3 +
 .../automated_rebalance.html.md.erb             | 66 ++++++++++++++++++++
 .../chapter_overview.html.md.erb                |  5 ++
 3 files changed, 74 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f1be596a/geode-book/master_middleman/source/subnavs/geode-subnav.erb
----------------------------------------------------------------------
diff --git a/geode-book/master_middleman/source/subnavs/geode-subnav.erb b/geode-book/master_middleman/source/subnavs/geode-subnav.erb
index 53e9118..2373f4b 100644
--- a/geode-book/master_middleman/source/subnavs/geode-subnav.erb
+++ b/geode-book/master_middleman/source/subnavs/geode-subnav.erb
@@ -959,6 +959,9 @@ limitations under the License.
                                 <a href="/docs/developing/partitioned_regions/rebalancing_pr_data.html">Rebalancing Partitioned Region Data</a>
                             </li>
                             <li>
+                                <a href="/docs/developing/partitioned_regions/automated_rebalance.html">Automated Rebalancing of Partitioned Region Data</a>
+                            </li>
+                            <li>
                                 <a href="/docs/developing/partitioned_regions/checking_region_redundancy.html">Checking Redundancy in Partitioned Regions</a>
                             </li>
                             <li>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f1be596a/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb b/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
new file mode 100644
index 0000000..d4ca2a6
--- /dev/null
+++ b/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
@@ -0,0 +1,66 @@
+---
+title:  Automated Rebalance
+---
+
+Automated rebalance triggers a rebalance
+(see [Rebalancing Partitioned Region Data](rebalancing_pr_data.html))
+operation based on a
+time schedule.
+At the scheduled intervals, the balance of the partitioned regions
+is evaluated based on configured criteria.
+One criterion is a minimum threshold for the number of bytes that
+would be transferred if the rebalance were to take place.
+The other criterion uses the ratio of the number of bytes
+that would be transferred to the total number of bytes in the
+regions.
+If the evaluation indicates the system is out of balance,
+the rebalance transfer is initiated.
+
+To enable automated rebalance, specify the rebalance manager
+in the `<initializer>` element within the `<cache>` configuration
+of the `cache.xml` file:
+
+``` pre
+<class-name> org.apache.geode.cache.util.AutoBalancer </class-name>
+```
+
+The time schedule that triggers an evaluation and possible rebalance
+uses a cron-based specification in 
+the `<initializer>` element within the `<cache>` configuration
+of the `cache.xml` file.
+This scheduling specification is required.
+Specify the cron expression in the Spring format.
+This example specification triggers each Saturday at 3am:
+
+``` pre
+<parameter name="schedule"> 0 0 3 ? * SAT </parameter>
+```
+
+This example specification triggers once each day at 4am:
+
+``` pre
+<parameter name="schedule"> 0 0 4 * * ?</parameter>
+```
+
+The automated rebalance parameters that set the criteria for
+triggering the rebalance are optional and have reasonable default values.
+
+One criterion is a minimum number of bytes that would be transferred
+if the rebalance were to take place.
+The value is specified in bytes; here is the specification
+for the default value of 100MB:
+
+``` pre
+<parameter name="minimum-size"> 104857600</parameter>
+```
+
+The other criterion is the ratio of bytes that would be transferred
+to the total number of bytes in the partitioned regions,
+expressed as an integer percentage.
+The default is 10 percent.
+This example specifies 15 percent:
+ 
+``` pre
+<parameter name="size-threshold-percent"> 15 </parameter>
+```
+

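Putting those fragments together, the class name and the parameters all belong inside a single `<initializer>` element. The sketch below simply assembles the snippets shown above; the exact nesting required by `cache-1.0.xsd` (for example, whether parameter values need a `<string>` wrapper) is not confirmed here and should be checked against the schema before use.

``` pre
<cache>
  <!-- Sketch only: assembles the snippets above; verify against cache-1.0.xsd. -->
  <initializer>
    <class-name>org.apache.geode.cache.util.AutoBalancer</class-name>
    <parameter name="schedule"> 0 0 3 ? * SAT </parameter>
    <parameter name="minimum-size"> 104857600 </parameter>
    <parameter name="size-threshold-percent"> 15 </parameter>
  </initializer>
</cache>
```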
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f1be596a/geode-docs/developing/partitioned_regions/chapter_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/developing/partitioned_regions/chapter_overview.html.md.erb b/geode-docs/developing/partitioned_regions/chapter_overview.html.md.erb
index c92921b..e450ee5 100644
--- a/geode-docs/developing/partitioned_regions/chapter_overview.html.md.erb
+++ b/geode-docs/developing/partitioned_regions/chapter_overview.html.md.erb
@@ -49,6 +49,11 @@ In addition to basic region management, partitioned regions include options for
 
     In a distributed system with minimal contention to the concurrent threads reading or updating from the members, you can use rebalancing to dynamically increase or decrease your data and processing capacity.
 
+- **[Automated Rebalancing of Partitioned Region Data](../../developing/partitioned_regions/automated_rebalance.html)**
+
+    The automated rebalance feature triggers a rebalance operation
+based on a time schedule.
+
 -   **[Checking Redundancy in Partitioned Regions](../../developing/partitioned_regions/checking_region_redundancy.html)**
 
     Under some circumstances, it can be important to verify that your partitioned region data is redundant and that upon member restart, redundancy has been recovered properly across partitioned region members.


[38/50] [abbrv] incubator-geode git commit: GEODE-2019 Add automated rebalance documentation

Posted by kl...@apache.org.
GEODE-2019 Add automated rebalance documentation

Revise content header to match the header in the subnav.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/139c0a36
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/139c0a36
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/139c0a36

Branch: refs/heads/feature/GEODE-1930
Commit: 139c0a36ff0aba3c5f8f9b964d0109d18a10ca6e
Parents: f1be596
Author: Karen Miller <km...@pivotal.io>
Authored: Thu Oct 20 14:18:27 2016 -0700
Committer: Karen Miller <km...@pivotal.io>
Committed: Thu Oct 20 14:18:27 2016 -0700

----------------------------------------------------------------------
 .../developing/partitioned_regions/automated_rebalance.html.md.erb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/139c0a36/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb b/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
index d4ca2a6..387275d 100644
--- a/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
+++ b/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
@@ -1,5 +1,5 @@
 ---
-title:  Automated Rebalance
+title:  Automated Rebalancing of Partitioned Region Data
 ---
 
 Automated rebalance triggers a rebalance


[10/50] [abbrv] incubator-geode git commit: Fix README numbering, anchors

Posted by kl...@apache.org.
Fix README numbering, anchors


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/d8afffb5
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/d8afffb5
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/d8afffb5

Branch: refs/heads/feature/GEODE-1930
Commit: d8afffb508856d29a2a68c8555259d961e67d77f
Parents: 952e7e3
Author: Joey McAllister <jm...@pivotal.io>
Authored: Wed Oct 12 14:49:46 2016 -0700
Committer: Karen Miller <km...@pivotal.io>
Committed: Fri Oct 14 14:51:03 2016 -0700

----------------------------------------------------------------------
 geode-docs/README.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d8afffb5/geode-docs/README.md
----------------------------------------------------------------------
diff --git a/geode-docs/README.md b/geode-docs/README.md
index 9564079..9f83260 100644
--- a/geode-docs/README.md
+++ b/geode-docs/README.md
@@ -6,9 +6,9 @@ Bookbinder is a Ruby gem that binds  a unified documentation web application fro
 
 This document contains instructions for building and viewing the Geode documentation locally.
 
-- [Prerequisites](#prereq)
-- [Bookbinder Usage](#usage)
-- [Building the Documentation](#building)
+- [Prerequisites](#prerequisites)
+- [Bookbinder Usage](#bookbinder-usage)
+- [Building the Documentation](#building-the-documentation)
 
 ## Prerequisites
 
@@ -43,7 +43,7 @@ The installed `config.yml` file configures the Geode book for building locally.
 
   Bookbinder converts the markdown source into HTML, which it puts in the `final_app` directory.
 
-5. To view the local documentation, do the following:
+3. To view the local documentation, do the following:
 
   ```
   $ cd final_app


[40/50] [abbrv] incubator-geode git commit: GEODE-2019 Adding missing apache license header

Posted by kl...@apache.org.
GEODE-2019 Adding missing apache license header


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/af55d929
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/af55d929
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/af55d929

Branch: refs/heads/feature/GEODE-1930
Commit: af55d9292c2eacf9db43909b9c68c32b927b21de
Parents: bc060f9
Author: Dan Smith <up...@apache.org>
Authored: Thu Oct 20 17:11:25 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Thu Oct 20 17:11:25 2016 -0700

----------------------------------------------------------------------
 .../automated_rebalance.html.md.erb                 | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/af55d929/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb b/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
index 387275d..37b7dce 100644
--- a/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
+++ b/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
@@ -1,6 +1,22 @@
 ---
 title:  Automated Rebalancing of Partitioned Region Data
 ---
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
 
 Automated rebalance triggers a rebalance
 (see [Rebalancing Partitioned Region Data](rebalancing_pr_data.html))


[37/50] [abbrv] incubator-geode git commit: GEODE-388: Marking dynamic-region-factory as deprecated in the xml.

Posted by kl...@apache.org.
GEODE-388: Marking dynamic-region-factory as deprecated in the xml.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/59df3d93
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/59df3d93
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/59df3d93

Branch: refs/heads/feature/GEODE-1930
Commit: 59df3d93e7c51e43685356de82b074531966015f
Parents: b2e7768
Author: Dan Smith <up...@apache.org>
Authored: Wed Oct 19 10:49:07 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Thu Oct 20 13:09:12 2016 -0700

----------------------------------------------------------------------
 .../META-INF/schemas/geode.apache.org/schema/cache/cache-1.0.xsd | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/59df3d93/geode-core/src/main/resources/META-INF/schemas/geode.apache.org/schema/cache/cache-1.0.xsd
----------------------------------------------------------------------
diff --git a/geode-core/src/main/resources/META-INF/schemas/geode.apache.org/schema/cache/cache-1.0.xsd b/geode-core/src/main/resources/META-INF/schemas/geode.apache.org/schema/cache/cache-1.0.xsd
index adf734c..d3d83f1 100755
--- a/geode-core/src/main/resources/META-INF/schemas/geode.apache.org/schema/cache/cache-1.0.xsd
+++ b/geode-core/src/main/resources/META-INF/schemas/geode.apache.org/schema/cache/cache-1.0.xsd
@@ -1245,7 +1245,11 @@ As of 6.5 disk-dirs is deprecated on region-attributes. Use disk-store-name inst
   </xsd:complexType>
   <xsd:complexType name="dynamic-region-factory-type">
     <xsd:annotation>
+      <xsd:appinfo>deprecated</xsd:appinfo>
       <xsd:documentation>
+        dynamic-region-factory is deprecated. Use functions to create regions dynamically
+        instead.
+
         A "dynamic-region-factory" element configures a dynamic region factory for
         this cache. If this optional element is missing then the cache does not
         support dynamic regions.

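The replacement the deprecation note recommends is programmatic region creation. A hedged sketch using the public `RegionFactory` API follows; in practice this code would typically run inside a function executed on each member, which is omitted here for brevity.

```java
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

// Sketch of creating a region at runtime through the public RegionFactory API,
// the kind of programmatic creation the deprecation note points to.
public class CreateRegionAtRuntime {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();
    Region<String, String> region =
        cache.<String, String>createRegionFactory(RegionShortcut.PARTITION)
            .create("dynamicRegion");
    region.put("key", "value");
    cache.close();
  }
}
```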

[02/50] [abbrv] incubator-geode git commit: GEODE-1999: Fix offheap memory leak when exception is thrown during basicDestroy call to remove GatewaySenderEventImpl from the sender queue

Posted by kl...@apache.org.
GEODE-1999: Fix offheap memory leak when exception is thrown during basicDestroy call to remove GatewaySenderEventImpl from the sender queue

Use try/finally to make sure the off-heap reference is always released.
Make similar changes for the parallel WAN queue as well.
Also release off-heap memory if a virtualPut fails to put the GatewaySenderEvent into the sender queue.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/08adacd2
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/08adacd2
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/08adacd2

Branch: refs/heads/feature/GEODE-1930
Commit: 08adacd2cfb93533ec016a82a0f71d7110e1819d
Parents: 582694d
Author: eshu <es...@pivotal.io>
Authored: Thu Oct 13 10:44:53 2016 -0700
Committer: eshu <es...@pivotal.io>
Committed: Thu Oct 13 10:44:53 2016 -0700

----------------------------------------------------------------------
 .../cache/AbstractBucketRegionQueue.java        | 34 +++++------
 .../geode/internal/cache/BucketRegionQueue.java | 59 +++++++++++---------
 .../wan/serial/SerialGatewaySenderQueue.java    | 26 ++++++---
 3 files changed, 68 insertions(+), 51 deletions(-)
----------------------------------------------------------------------

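The shape of the change is the same in the three classes listed above: release the old value in a `finally` block so that an exception cannot leak the off-heap reference, and release the new value whenever the put reports failure. Below is a minimal, self-contained sketch of that shape, using stand-in names rather than the real Geode types.

```java
// Illustrative sketch only; Event and release() mimic the shape of the change
// but are not the real Geode types.
class ReleaseOnExitSketch {
  static class Event {
    final Object rawOldValue = new Object();
    final Object rawNewValue = new Object();
  }

  // In Geode this would decrement an off-heap reference count; here it is a no-op.
  static void release(Object offHeapReference) {}

  boolean putAndAlwaysRelease(Event event) {
    try {
      boolean success = doPut(event); // may throw
      if (!success) {
        // The new value never made it into the queue, so release it here.
        release(event.rawNewValue);
      }
      return success;
    } finally {
      // Released whether doPut succeeded, reported failure, or threw.
      release(event.rawOldValue);
    }
  }

  private boolean doPut(Event event) {
    return true;
  }

  public static void main(String[] args) {
    System.out.println(new ReleaseOnExitSketch().putAndAlwaysRelease(new Event()));
  }
}
```

The old value is released on every exit path, while the new value is released only when the put completes but reports failure, mirroring the diffs that follow.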

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/08adacd2/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractBucketRegionQueue.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractBucketRegionQueue.java b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractBucketRegionQueue.java
index 8fa8597..7ae1249 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractBucketRegionQueue.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractBucketRegionQueue.java
@@ -357,31 +357,31 @@ public abstract class AbstractBucketRegionQueue extends BucketRegion {
       boolean ifOld, Object expectedOldValue, boolean requireOldValue,
       long lastModified, boolean overwriteDestroyed) throws TimeoutException,
       CacheWriterException {
-    boolean success = super.virtualPut(event, ifNew, ifOld, expectedOldValue,
-        requireOldValue, lastModified, overwriteDestroyed);
-    if (success) {
-      if (logger.isDebugEnabled()) {
-        logger.debug("Key : ----> {}", event.getKey());
+    try {
+      boolean success = super.virtualPut(event, ifNew, ifOld, expectedOldValue,
+          requireOldValue, lastModified, overwriteDestroyed);
+      if (success) {
+        if (logger.isDebugEnabled()) {
+          logger.debug("Key : ----> {}", event.getKey());
+        }      
+      } else {
+        GatewaySenderEventImpl.release(event.getRawNewValue());
       }
-      //@Unretained Object ov = event.getRawOldValue();
-      //if (ov instanceof GatewaySenderEventImpl) {
-      //  ((GatewaySenderEventImpl)ov).release();
-      //}
-       GatewaySenderEventImpl.release(event.getRawOldValue());
+      return success;
+    } finally {
+      GatewaySenderEventImpl.release(event.getRawOldValue());
     }
-    return success;
     
   }
   @Override
   protected void basicDestroy(final EntryEventImpl event,
       final boolean cacheWrite, Object expectedOldValue)
       throws EntryNotFoundException, CacheWriterException, TimeoutException {
-    super.basicDestroy(event, cacheWrite, expectedOldValue);
-    //@Unretained Object rov = event.getRawOldValue();
-    //if (rov instanceof GatewaySenderEventImpl) {
-    //  ((GatewaySenderEventImpl) rov).release();
-    //}
-	GatewaySenderEventImpl.release(event.getRawOldValue());
+    try {
+      super.basicDestroy(event, cacheWrite, expectedOldValue);
+    } finally {
+      GatewaySenderEventImpl.release(event.getRawOldValue());
+    }
   }
 
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/08adacd2/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegionQueue.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegionQueue.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegionQueue.java
index 294b616..ecc659a 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegionQueue.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegionQueue.java
@@ -257,34 +257,38 @@ public class BucketRegionQueue extends AbstractBucketRegionQueue {
       boolean ifOld, Object expectedOldValue, boolean requireOldValue,
       long lastModified, boolean overwriteDestroyed) throws TimeoutException,
       CacheWriterException {
-    boolean success = super.virtualPut(event, ifNew, ifOld, expectedOldValue,
-        requireOldValue, lastModified, overwriteDestroyed);
-
-    if (success) {
-      GatewaySenderEventImpl.release(event.getRawOldValue());
-
-      if (getPartitionedRegion().getColocatedWith() == null) {
-        return success;
-      }
-
-      if (getPartitionedRegion().isConflationEnabled() && this.getBucketAdvisor().isPrimary()) {
-        Object object = event.getNewValue();
-        Long key = (Long)event.getKey();
-        if (object instanceof Conflatable) {
-          if (logger.isDebugEnabled()) {
-            logger.debug("Key :{} , Object : {} is conflatable", key, object);
-          }
-          // TODO: TO optimize by destroying on primary and secondary separately
-          // in case of conflation
-          conflateOldEntry((Conflatable)object, key);
-        } else {
-          if (logger.isDebugEnabled()) {
-            logger.debug("Object : {} is not conflatable", object);
+    try {
+      boolean success = super.virtualPut(event, ifNew, ifOld, expectedOldValue,
+          requireOldValue, lastModified, overwriteDestroyed);
+  
+      if (success) {
+        if (getPartitionedRegion().getColocatedWith() == null) {
+          return success;
+        }
+  
+        if (getPartitionedRegion().isConflationEnabled() && this.getBucketAdvisor().isPrimary()) {
+          Object object = event.getNewValue();
+          Long key = (Long)event.getKey();
+          if (object instanceof Conflatable) {
+            if (logger.isDebugEnabled()) {
+              logger.debug("Key :{} , Object : {} is conflatable", key, object);
+            }
+            // TODO: TO optimize by destroying on primary and secondary separately
+            // in case of conflation
+            conflateOldEntry((Conflatable)object, key);
+          } else {
+            if (logger.isDebugEnabled()) {
+              logger.debug("Object : {} is not conflatable", object);
+            }
           }
         }
+      } else {
+        GatewaySenderEventImpl.release(event.getRawNewValue());
       }
+      return success;
+    } finally {
+      GatewaySenderEventImpl.release(event.getRawOldValue());
     }
-    return success;
   }
 
   private void conflateOldEntry(Conflatable object, Long tailKey) {
@@ -357,9 +361,12 @@ public class BucketRegionQueue extends AbstractBucketRegionQueue {
     if (getPartitionedRegion().isConflationEnabled()) {
       removeIndex((Long)event.getKey());
     }
-    super.basicDestroy(event, cacheWrite, expectedOldValue);
+    try {
+      super.basicDestroy(event, cacheWrite, expectedOldValue);
+    } finally {
+      GatewaySenderEventImpl.release(event.getRawOldValue());
+    }
 
-    GatewaySenderEventImpl.release(event.getRawOldValue());
     // Primary buckets should already remove the key while peeking
     if (!this.getBucketAdvisor().isPrimary()) {
       if (logger.isDebugEnabled()) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/08adacd2/geode-core/src/main/java/org/apache/geode/internal/cache/wan/serial/SerialGatewaySenderQueue.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/serial/SerialGatewaySenderQueue.java b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/serial/SerialGatewaySenderQueue.java
index 79b9d86..a22666c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/serial/SerialGatewaySenderQueue.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/serial/SerialGatewaySenderQueue.java
@@ -1301,22 +1301,32 @@ public class SerialGatewaySenderQueue implements RegionQueue {
     protected void basicDestroy(final EntryEventImpl event,
         final boolean cacheWrite, Object expectedOldValue)
         throws EntryNotFoundException, CacheWriterException, TimeoutException {
-
-      super.basicDestroy(event, cacheWrite, expectedOldValue);
-      GatewaySenderEventImpl.release(event.getRawOldValue());
+      try {
+        super.basicDestroy(event, cacheWrite, expectedOldValue);
+      } finally {
+        GatewaySenderEventImpl.release(event.getRawOldValue());
+      }
     }
     @Override
     protected boolean virtualPut(EntryEventImpl event, boolean ifNew,
         boolean ifOld, Object expectedOldValue, boolean requireOldValue,
         long lastModified, boolean overwriteDestroyed) throws TimeoutException,
         CacheWriterException {
-      boolean success = super.virtualPut(event, ifNew, ifOld, expectedOldValue,
-          requireOldValue, lastModified, overwriteDestroyed);
-
-      if (success) {
+      try {
+        boolean success = super.virtualPut(event, ifNew, ifOld, expectedOldValue,
+            requireOldValue, lastModified, overwriteDestroyed);
+        if (!success) {
+          //release offheap reference if GatewaySenderEventImpl is not put into 
+          //the region queue
+          GatewaySenderEventImpl.release(event.getRawNewValue());
+        }
+        return success;
+      } finally {
+        // GatewaySenderQueue probably only adds new events into the queue.
+        // Add the finally block just in case there actually is an update
+        // in the sender queue now or in the future.
         GatewaySenderEventImpl.release(event.getRawOldValue());
       }
-      return success;
     }
   }
 }


[27/50] [abbrv] incubator-geode git commit: GEODE-2011: add FlakyTest to category to testNonPersistentServerRestartAutoSerializer

Posted by kl...@apache.org.
GEODE-2011: add FlakyTest to category to testNonPersistentServerRestartAutoSerializer


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/5c50954d
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/5c50954d
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/5c50954d

Branch: refs/heads/feature/GEODE-1930
Commit: 5c50954d12644b95739ee0e754852ea0f88524b8
Parents: a53c4b1
Author: Kirk Lund <kl...@apache.org>
Authored: Mon Oct 17 13:56:30 2016 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Mon Oct 17 16:30:20 2016 -0700

----------------------------------------------------------------------
 .../test/java/org/apache/geode/pdx/PdxClientServerDUnitTest.java   | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5c50954d/geode-core/src/test/java/org/apache/geode/pdx/PdxClientServerDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/pdx/PdxClientServerDUnitTest.java b/geode-core/src/test/java/org/apache/geode/pdx/PdxClientServerDUnitTest.java
index 1afb1ad..22c6571 100644
--- a/geode-core/src/test/java/org/apache/geode/pdx/PdxClientServerDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/pdx/PdxClientServerDUnitTest.java
@@ -54,6 +54,7 @@ import org.apache.geode.test.dunit.SerializableRunnable;
 import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
 import org.apache.geode.test.junit.categories.DistributedTest;
+import org.apache.geode.test.junit.categories.FlakyTest;
 
 @Category(DistributedTest.class)
 public class PdxClientServerDUnitTest extends JUnit4CacheTestCase {
@@ -187,6 +188,7 @@ public class PdxClientServerDUnitTest extends JUnit4CacheTestCase {
    * Test of bug 47338 - what happens to the client type
    * registry if the server is restarted.
    */
+  @Category(FlakyTest.class) // GEODE-2011
   @Test
   public void testNonPersistentServerRestartAutoSerializer() {
     Host host = Host.getHost(0);


[04/50] [abbrv] incubator-geode git commit: GEODE-1991: Removing sleeps from HARegionQueueJUnitTest

Posted by kl...@apache.org.
GEODE-1991: Removing sleeps from HARegionQueueJUnitTest

Get rid of the sleeps in HARegionQueueJUnitTest to fix a number of tests
with race conditions. Tests of expiration were sleeping for short amounts
of time and then asserting that expiration either happened or didn't.
These sleeps are replaced with Awaitility.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/c2ddc96c
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/c2ddc96c
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/c2ddc96c

Branch: refs/heads/feature/GEODE-1930
Commit: c2ddc96c9e1bbbfaec156e1a9985979bda7b3e36
Parents: 08adacd
Author: Dan Smith <up...@apache.org>
Authored: Tue Oct 11 16:46:18 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Thu Oct 13 11:13:18 2016 -0700

----------------------------------------------------------------------
 .../ha/BlockingHARegionQueueJUnitTest.java      | 182 ++++++--------
 .../cache/ha/HARegionQueueJUnitTest.java        | 252 +++++++------------
 2 files changed, 172 insertions(+), 262 deletions(-)
----------------------------------------------------------------------

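Each fixed `Thread.sleep()` followed by an assertion becomes an `Awaitility.await()...until(...)` call in the diffs below. Here is a small, self-contained sketch of the idiom, using the same `com.jayway.awaitility` package the tests import; the `AtomicBoolean` is just a stand-in for whatever condition a test asserts.

```java
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

import com.jayway.awaitility.Awaitility;

// Polls a condition instead of sleeping a fixed time and hoping the race is over.
public class AwaitInsteadOfSleepExample {
  public static void main(String[] args) {
    AtomicBoolean done = new AtomicBoolean(false);
    new Thread(() -> done.set(true)).start();

    // Fails only if the condition is still false after the timeout.
    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> done.get());
    System.out.println("condition reached without a fixed sleep");
  }
}
```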

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c2ddc96c/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionQueueJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionQueueJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionQueueJUnitTest.java
index 48fb3a2..436cc0c 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionQueueJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionQueueJUnitTest.java
@@ -22,6 +22,9 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+import com.jayway.awaitility.Awaitility;
 
 import org.junit.Ignore;
 import org.junit.Test;
@@ -82,39 +85,30 @@ public class BlockingHARegionQueueJUnitTest extends HARegionQueueJUnitTest
    * 
    */
   @Test
-  public void testBlockingPutAndTake()
+  public void testBlockingPutAndTake() throws InterruptedException, IOException, ClassNotFoundException
   {
-    try {
-      HARegionQueueAttributes hrqa = new HARegionQueueAttributes();
-      hrqa.setBlockingQueueCapacity(1);
-      final HARegionQueue hrq = this.createHARegionQueue("testBlockingPutAndTake",
-          hrqa);
-      hrq.setPrimary(true);//fix for 40314 - capacity constraint is checked for primary only.
-      EventID id1 = new EventID(new byte[] { 1 }, 1, 1);
-      hrq.put(new ConflatableObject("key1", "val1", id1, false, "testing"));
-      Thread t1 = new Thread(new Runnable() {
-        public void run() {
-          try{
-          EventID id2 = new EventID(new byte[] { 1 }, 1, 2);
-          hrq.put(new ConflatableObject("key1", "val2", id2, false, "testing"));
-          }catch(Exception e) {
-            encounteredException=true;
-          }
+    HARegionQueueAttributes hrqa = new HARegionQueueAttributes();
+    hrqa.setBlockingQueueCapacity(1);
+    final HARegionQueue hrq = this.createHARegionQueue("testBlockingPutAndTake",
+        hrqa);
+    hrq.setPrimary(true);//fix for 40314 - capacity constraint is checked for primary only.
+    EventID id1 = new EventID(new byte[] { 1 }, 1, 1);
+    hrq.put(new ConflatableObject("key1", "val1", id1, false, "testing"));
+    Thread t1 = new Thread(new Runnable() {
+      public void run() {
+        try{
+        EventID id2 = new EventID(new byte[] { 1 }, 1, 2);
+        hrq.put(new ConflatableObject("key1", "val2", id2, false, "testing"));
+        }catch(Exception e) {
+          encounteredException=true;
         }
-      });
-      t1.start();
-      Thread.sleep(4000);
-      assertTrue(t1.isAlive());
-      Conflatable conf = (Conflatable)hrq.take();
-      assertNotNull(conf);
-      Thread.sleep(2000);
-      assertTrue(!t1.isAlive());      
-
-    }
-    catch (Exception e) {
-      e.printStackTrace();
-      fail("Test failed because of exception " + e);
-    }
+      }
+    });
+    t1.start();
+    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> t1.isAlive());
+    Conflatable conf = (Conflatable)hrq.take();
+    assertNotNull(conf);
+    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> !t1.isAlive());
   }
 
   /**
@@ -123,45 +117,37 @@ public class BlockingHARegionQueueJUnitTest extends HARegionQueueJUnitTest
    * 
    */
   @Test
-  public void testBlockingPutAndPeekRemove()
+  public void testBlockingPutAndPeekRemove() throws InterruptedException, IOException, ClassNotFoundException
   {
-    try {
-      HARegionQueueAttributes hrqa = new HARegionQueueAttributes();
-      hrqa.setBlockingQueueCapacity(1);
-      final HARegionQueue hrq = this.createHARegionQueue(
-          "testBlockingPutAndPeekRemove", hrqa);
-      hrq.setPrimary(true);//fix for 40314 - capacity constraint is checked for primary only.
-      EventID id1 = new EventID(new byte[] { 1 }, 1, 1);
-      hrq.put(new ConflatableObject("key1", "val1", id1, false, "testing"));
-      Thread t1 = new Thread(new Runnable() {
-        public void run()
-        {
-          try {
-            EventID id2 = new EventID(new byte[] { 1 }, 1, 2);
-            hrq
-                .put(new ConflatableObject("key1", "val2", id2, false,
-                    "testing"));
-          }
-          catch (Exception e) {
-            encounteredException = true;
-          }
+    HARegionQueueAttributes hrqa = new HARegionQueueAttributes();
+    hrqa.setBlockingQueueCapacity(1);
+    final HARegionQueue hrq = this.createHARegionQueue(
+        "testBlockingPutAndPeekRemove", hrqa);
+    hrq.setPrimary(true);//fix for 40314 - capacity constraint is checked for primary only.
+    EventID id1 = new EventID(new byte[] { 1 }, 1, 1);
+    hrq.put(new ConflatableObject("key1", "val1", id1, false, "testing"));
+    Thread t1 = new Thread(new Runnable() {
+      public void run()
+      {
+        try {
+          EventID id2 = new EventID(new byte[] { 1 }, 1, 2);
+          hrq
+              .put(new ConflatableObject("key1", "val2", id2, false,
+                  "testing"));
         }
-      });
-      t1.start();
-      Thread.sleep(4000);
-      assertTrue("put-thread expected to blocked, but was not ", t1.isAlive());
-      Conflatable conf = (Conflatable)hrq.peek();
-      assertNotNull(conf);
-      hrq.remove();
-      Thread.sleep(2000);
-      assertFalse("Put-thread blocked unexpectedly", t1.isAlive());
-      assertFalse("Exception occured in put-thread", encounteredException);
+        catch (Exception e) {
+          encounteredException = true;
+        }
+      }
+    });
+    t1.start();
+    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> t1.isAlive());
+    Conflatable conf = (Conflatable)hrq.peek();
+    assertNotNull(conf);
+    hrq.remove();
+    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> !t1.isAlive());
+    assertFalse("Exception occured in put-thread", encounteredException);
 
-    }
-    catch (Exception e) {
-      e.printStackTrace();
-      fail("Test failed because of exception " + e);
-    }
   }
 
   /**
@@ -173,42 +159,36 @@ public class BlockingHARegionQueueJUnitTest extends HARegionQueueJUnitTest
   //expiry is not applicable on primary so marking this test as invalid.
   @Ignore
   @Test
-  public void testBlockingPutAndExpiry()
+  public void testBlockingPutAndExpiry() throws InterruptedException, IOException, ClassNotFoundException
   {
-    try {
-      HARegionQueueAttributes hrqa = new HARegionQueueAttributes();
-      hrqa.setBlockingQueueCapacity(1);
-      hrqa.setExpiryTime(4);
-      final HARegionQueue hrq = this.createHARegionQueue(
-          "testBlockingPutAndExpiry", hrqa);
-      
-      EventID id1 = new EventID(new byte[] { 1 }, 1, 1);
-      hrq.put(new ConflatableObject("key1", "val1", id1, false, "testing"));
-      Thread t1 = new Thread(new Runnable() {
-        public void run()
-        {
-          try {
-            EventID id2 = new EventID(new byte[] { 1 }, 1, 2);
-            hrq
-                .put(new ConflatableObject("key1", "val2", id2, false,
-                    "testing"));
-          }
-          catch (Exception e) {
-            encounteredException = true;
-          }
+    HARegionQueueAttributes hrqa = new HARegionQueueAttributes();
+    hrqa.setBlockingQueueCapacity(1);
+    hrqa.setExpiryTime(1);
+    final HARegionQueue hrq = this.createHARegionQueue(
+        "testBlockingPutAndExpiry", hrqa);
+
+    EventID id1 = new EventID(new byte[] { 1 }, 1, 1);
+    long start = System.currentTimeMillis();
+    hrq.put(new ConflatableObject("key1", "val1", id1, false, "testing"));
+    Thread t1 = new Thread(new Runnable() {
+      public void run()
+      {
+        try {
+          EventID id2 = new EventID(new byte[] { 1 }, 1, 2);
+          hrq
+              .put(new ConflatableObject("key1", "val2", id2, false,
+                  "testing"));
         }
-      });
-      t1.start();
-      Thread.sleep(2000);
-      assertTrue("put-thread expected to blocked, but was not ", t1.isAlive());
-      Thread.sleep(2500);
+        catch (Exception e) {
+          encounteredException = true;
+        }
+      }
+    });
+    t1.start();
+    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> t1.isAlive());
+    waitAtLeast(1000, start, () -> {
       assertFalse("Put-thread blocked unexpectedly", t1.isAlive());
-      assertFalse("Exception occured in put-thread", encounteredException);
-
-    }
-    catch (Exception e) {
-      e.printStackTrace();
-      fail("Test failed because of exception " + e);
-    }
+    });
+    assertFalse("Exception occured in put-thread", encounteredException);
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c2ddc96c/geode-core/src/test/java/org/apache/geode/internal/cache/ha/HARegionQueueJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/ha/HARegionQueueJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/ha/HARegionQueueJUnitTest.java
index 3704758..a161b12 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/ha/HARegionQueueJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/ha/HARegionQueueJUnitTest.java
@@ -20,6 +20,7 @@ import static org.apache.geode.distributed.ConfigurationProperties.*;
 import static org.junit.Assert.*;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -29,6 +30,9 @@ import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.TimeUnit;
+
+import com.jayway.awaitility.Awaitility;
 
 import org.junit.After;
 import org.junit.Before;
@@ -436,49 +440,31 @@ public class HARegionQueueJUnitTest {
    * tests whether expiry of entry in the regin queue occurs as expected
    */
   @Test
-  public void testExpiryPositive() {
-    try {     
-      HARegionQueueAttributes haa = new HARegionQueueAttributes();
-      haa.setExpiryTime(1);
-      //HARegionQueue regionqueue = new HARegionQueue("testing", cache, haa);      
-      HARegionQueue regionqueue = createHARegionQueue("testing",haa);
-      regionqueue.put(new ConflatableObject("key", "value", new EventID(
-          new byte[] { 1 }, 1, 1), true, "testing"));
-      Map map = (Map)regionqueue.getConflationMapForTesting().get("testing");
-      assertTrue(!map.isEmpty());
-      Thread.sleep(3000);
-      assertTrue(" Expected region size to be zero since expiry time has been exceeded but it is  "
-                  + regionqueue.getRegion().keys().size(), regionqueue
-                  .getRegion().keys().size() == 0);
-
-      assertTrue(map.isEmpty());      
-    }
-    catch (Exception e) {
-      throw new AssertionError(" test failed due to ", e);
-    }
+  public void testExpiryPositive() throws InterruptedException, IOException, ClassNotFoundException {
+    HARegionQueueAttributes haa = new HARegionQueueAttributes();
+    haa.setExpiryTime(1);
+    HARegionQueue regionqueue = createHARegionQueue("testing", haa);
+    long start = System.currentTimeMillis();
+    regionqueue.put(new ConflatableObject("key", "value", new EventID(
+      new byte[] { 1 }, 1, 1), true, "testing"));
+    Map map = (Map) regionqueue.getConflationMapForTesting().get("testing");
+    waitAtLeast(1000, start, () -> {
+      assertEquals(Collections.EMPTY_MAP, map);
+      assertEquals(Collections.EMPTY_SET, regionqueue.getRegion().keys());
+    });
   }
 
   /**
-   * tests whether things are not deleted before expiry
+   * Wait until a given runnable stops throwing exceptions. It should take
+   * at least minimumElapsedTime after the supplied start time to happen.
+   *
+   * This is useful for validating that an entry doesn't expire until
+   * a certain amount of time has passed
    */
-  @Test
-  public void testExpiryNegative() {
-    try {
-      HARegionQueueAttributes haa = new HARegionQueueAttributes();
-      haa.setExpiryTime(100);
-      //RegionQueue regionqueue = new HARegionQueue("testing", cache, haa);
-      HARegionQueue regionqueue = createHARegionQueue("testing",haa);
-      regionqueue.put(new ConflatableObject("key", "value", new EventID(
-          new byte[] { 1 }, 1, 1), false, "testing"));
-      Thread.sleep(1200);
-      assertTrue(" Expected region size to be 2, since expiry time has not been exceeded but it is : "
-                  + regionqueue.getRegion().keys().size(), regionqueue
-                  .getRegion().keys().size() == 2);
-
-    }
-    catch (Exception e) {
-      throw new AssertionError(" test failed due to ", e);
-    }
+  protected void waitAtLeast(final int minimumElapsedTime, final long start, final Runnable runnable) {
+    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(runnable);
+    long elapsed = System.currentTimeMillis() - start;
+    assertTrue(elapsed >= minimumElapsedTime);
   }
 
   /**
@@ -486,82 +472,34 @@ public class HARegionQueueJUnitTest {
    * expected
    */
   @Test
-  public void testExpiryPositiveWithConflation() {
-    try {
-      HARegionQueueAttributes haa = new HARegionQueueAttributes();
-      haa.setExpiryTime(2);
-      //HARegionQueue regionqueue = new HARegionQueue("testing", cache, haa);
-      HARegionQueue regionqueue = createHARegionQueue("testing",haa);
-      regionqueue.put(new ConflatableObject("key", "value", new EventID(
-          new byte[] { 1 }, 1, 1), true, "testing"));
-      regionqueue.put(new ConflatableObject("key", "newValue", new EventID(
-          new byte[] { 1 }, 1, 2), true, "testing"));
-      assertTrue(" Expected region size not to be zero since expiry time has not been exceeded but it is not so ",
-              !(regionqueue.size() == 0));
-      assertTrue(" Expected the available id's size not  to be zero since expiry time has not  been exceeded but it is not so ",
-              !(regionqueue.getAvalaibleIds().size() == 0));
-      assertTrue(" Expected conflation map size not  to be zero since expiry time has not been exceeded but it is not so "
-                  + ((((Map)(regionqueue.getConflationMapForTesting()
-                      .get("testing"))).get("key"))),
-              !((((Map)(regionqueue.getConflationMapForTesting().get("testing")))
-                  .get("key")) == null));
-      assertTrue(" Expected eventID map size not to be zero since expiry time has not been exceeded but it is not so ",
-              !(regionqueue.getEventsMapForTesting().size() == 0));
-      Thread.sleep(5000);
-
-      ThreadIdentifier tid = new ThreadIdentifier(new byte[] { 1 }, 1);
-      System.out.println(" it still contains thread id : "
-          + regionqueue.getRegion().containsKey(tid));
-      assertTrue(" Expected region size to be zero since expiry time has been exceeded but it is not so ",
-              regionqueue.getRegion().keys().size() == 0);
-      assertTrue(" Expected the available id's size to be zero since expiry time has been exceeded but it is not so ",
-              regionqueue.getAvalaibleIds().size() == 0);
-      System.out.println((((Map)(regionqueue.getConflationMapForTesting()
-          .get("testing"))).get("key")));
-      assertTrue(" Expected conflation map size to be zero since expiry time has been exceeded but it is not so ",
-              ((((Map)(regionqueue.getConflationMapForTesting().get("testing")))
-                  .get("key")) == null));
-      assertTrue(" Expected eventID to be zero since expiry time has been exceeded but it is not so ",
-              (regionqueue.getEventsMapForTesting().size() == 0));
-    }
-    catch (Exception e) {
-      throw new AssertionError("test failed due to ", e);
-    }
-  }
-
-  /**
-   * test no expiry of events or data if expiry time not exceeded
-   */
-  @Test
-  public void testExpiryNegativeWithConflation() {
-    try {
-      HARegionQueueAttributes haa = new HARegionQueueAttributes();
-      haa.setExpiryTime(100);
-      //RegionQueue regionqueue = new HARegionQueue("testing", cache, haa);
-      HARegionQueue regionqueue = createHARegionQueue("testing",haa);
-      regionqueue.put(new ConflatableObject("key", "value", new EventID(
-          new byte[] { 1 }, 1, 1), true, "testing"));
-      regionqueue.put(new ConflatableObject("key", "newValue", new EventID(
-          new byte[] { 1 }, 1, 2), true, "testing"));
-      Thread.sleep(1200);
-      assertTrue(
-              " Expected region size not to be zero since expiry time has not been exceeded but it is not so ",
-              !(regionqueue.size() == 0));
-      assertTrue(
-              " Expected the available id's size not  to be zero since expiry time has not  been exceeded but it is not so ",
-              !(regionqueue.getAvalaibleIds().size() == 0));
-      assertTrue(
-              " Expected conflation map size not  to be zero since expiry time has not been exceeded but it is not so ",
-              !(((Map)(regionqueue
-                  .getConflationMapForTesting().get("testing"))).size() == 0));
-      assertTrue(
-              " Expected eventID map size not to be zero since expiry time has not been exceeded but it is not so ",
-              !(regionqueue.getEventsMapForTesting().size() == 0));
-
-    }
-    catch (Exception e) {
-      throw new AssertionError("test failed due to ", e);
-    }
+  public void testExpiryPositiveWithConflation() throws InterruptedException, IOException, ClassNotFoundException {
+    HARegionQueueAttributes haa = new HARegionQueueAttributes();
+    haa.setExpiryTime(1);
+    HARegionQueue regionqueue = createHARegionQueue("testing", haa);
+    long start = System.currentTimeMillis();
+    regionqueue.put(new ConflatableObject("key", "value", new EventID(
+      new byte[] { 1 }, 1, 1), true, "testing"));
+    regionqueue.put(new ConflatableObject("key", "newValue", new EventID(
+      new byte[] { 1 }, 1, 2), true, "testing"));
+    assertTrue(" Expected region size not to be zero since expiry time has not been exceeded but it is not so ",
+      !(regionqueue.size() == 0));
+    assertTrue(
+      " Expected the available id's size not  to be zero since expiry time has not  been exceeded but it is not so ",
+      !(regionqueue.getAvalaibleIds().size() == 0));
+    assertTrue(" Expected conflation map size not  to be zero since expiry time has not been exceeded but it is not so "
+        + ((((Map) (regionqueue.getConflationMapForTesting()
+        .get("testing"))).get("key"))),
+      !((((Map) (regionqueue.getConflationMapForTesting().get("testing")))
+        .get("key")) == null));
+    assertTrue(" Expected eventID map size not to be zero since expiry time has not been exceeded but it is not so ",
+      !(regionqueue.getEventsMapForTesting().size() == 0));
+
+    waitAtLeast(1000, start, () -> {
+      assertEquals(Collections.EMPTY_SET, regionqueue.getRegion().keys());
+      assertEquals(Collections.EMPTY_SET, regionqueue.getAvalaibleIds());
+      assertEquals(Collections.EMPTY_MAP, regionqueue.getConflationMapForTesting().get("testing"));
+      assertEquals(Collections.EMPTY_MAP, regionqueue.getEventsMapForTesting());
+    });
   }
 
   /**
@@ -571,7 +509,7 @@ public class HARegionQueueJUnitTest {
   public void testNoExpiryOfThreadId() {
     try {
       HARegionQueueAttributes haa = new HARegionQueueAttributes();
-      haa.setExpiryTime(3);
+      haa.setExpiryTime(45);
       //RegionQueue regionqueue = new HARegionQueue("testing", cache, haa);
       HARegionQueue regionqueue = createHARegionQueue("testing",haa);
       EventID ev1 = new EventID(new byte[] { 1 }, 1, 1);
@@ -581,9 +519,11 @@ public class HARegionQueueJUnitTest {
       Conflatable cf2 = new ConflatableObject("key", "value2", ev2, true,
           "testing");
       regionqueue.put(cf1);
-      Thread.sleep(2000);
+      final long tailKey = regionqueue.tailKey.get();
       regionqueue.put(cf2);
-      Thread.sleep(1500);
+      //Invalidate will trigger the expiration of the entry
+      //See HARegionQueue.createCacheListenerForHARegion
+      regionqueue.getRegion().invalidate(tailKey);
       assertTrue(
               " Expected region size not to be zero since expiry time has not been exceeded but it is not so ",
               !(regionqueue.size() == 0));
@@ -637,27 +577,22 @@ public class HARegionQueueJUnitTest {
    * corresponding put comes
    */
   @Test
-  public void testOnlyQRMComing() {
-    try {
-      HARegionQueueAttributes harqAttr = new HARegionQueueAttributes();
-      harqAttr.setExpiryTime(1);
-      //RegionQueue regionqueue = new HARegionQueue("testing", cache, harqAttr);
-      HARegionQueue regionqueue = createHARegionQueue("testing",harqAttr);
-      EventID id = new EventID(new byte[] { 1 }, 1, 1);
-      regionqueue.removeDispatchedEvents(id);
-      assertTrue(
-          " Expected testingID to be present since only QRM achieved ",
-          regionqueue.getRegion().containsKey(
-              new ThreadIdentifier(new byte[] { 1 }, 1)));
-      Thread.sleep(2500);
-      assertTrue(
-              " Expected testingID not to be present since it should have expired after 2.5 seconds",
-              !regionqueue.getRegion().containsKey(
-                  new ThreadIdentifier(new byte[] { 1 }, 1)));
-    }
-    catch (Exception e) {
-      throw new AssertionError("test failed due to ", e);
-    }
+  public void testOnlyQRMComing() throws InterruptedException, IOException, ClassNotFoundException {
+    HARegionQueueAttributes harqAttr = new HARegionQueueAttributes();
+    harqAttr.setExpiryTime(1);
+    //RegionQueue regionqueue = new HARegionQueue("testing", cache, harqAttr);
+    HARegionQueue regionqueue = createHARegionQueue("testing",harqAttr);
+    EventID id = new EventID(new byte[] { 1 }, 1, 1);
+    long start = System.currentTimeMillis();
+    regionqueue.removeDispatchedEvents(id);
+    assertTrue(
+        " Expected testingID to be present since only QRM achieved ",
+        regionqueue.getRegion().containsKey(
+            new ThreadIdentifier(new byte[] { 1 }, 1)));
+    waitAtLeast(1000, start, () ->
+    assertTrue(" Expected testingID not to be present since it should have expired after 2.5 seconds",
+            !regionqueue.getRegion().containsKey(
+                new ThreadIdentifier(new byte[] { 1 }, 1))));
   }
 
   /**
@@ -1821,29 +1756,26 @@ public class HARegionQueueJUnitTest {
    * system property to set expiry
    */
   @Test
-  public void testExpiryUsingSystemProperty() {
-    try {      
-      System.setProperty(HARegionQueue.REGION_ENTRY_EXPIRY_TIME,"1");      
-      
-      HARegionQueueAttributes haa = new HARegionQueueAttributes();            
-      HARegionQueue regionqueue = createHARegionQueue("testing",haa);
+  public void testExpiryUsingSystemProperty() throws InterruptedException, IOException, ClassNotFoundException {
+    try {
+      System.setProperty(HARegionQueue.REGION_ENTRY_EXPIRY_TIME, "1");
+
+      HARegionQueueAttributes haa = new HARegionQueueAttributes();
+      HARegionQueue regionqueue = createHARegionQueue("testing", haa);
+      long start = System.currentTimeMillis();
       regionqueue.put(new ConflatableObject("key", "value", new EventID(
-          new byte[] { 1 }, 1, 1), true, "testing"));
-      Map map = (Map)regionqueue.getConflationMapForTesting().get("testing");
+        new byte[] { 1 }, 1, 1), true, "testing"));
+      Map map = (Map) regionqueue.getConflationMapForTesting().get("testing");
       assertTrue(!map.isEmpty());
-      Thread.sleep(3000);
-      assertTrue(
-              " Expected region size to be zero since expiry time has been exceeded but it is  "
-                  + regionqueue.getRegion().keys().size(), regionqueue
-                  .getRegion().keys().size() == 0);
 
-      assertTrue(map.isEmpty());      
+      waitAtLeast(1000, start, () -> {
+        assertEquals(Collections.EMPTY_MAP, map);
+        assertEquals(Collections.EMPTY_SET, regionqueue.getRegion().keys());
+      });
+    } finally {
       // [yogi]system property set to null, to avoid using it in the subsequent tests   
       System.setProperty(HARegionQueue.REGION_ENTRY_EXPIRY_TIME,"");
     }
-    catch (Exception e) {
-      throw new AssertionError(" test failed due to ", e);
-    }
   }
 
   /**
@@ -1862,11 +1794,9 @@ public class HARegionQueueJUnitTest {
     int updatedMessageSyncInterval = 10;
     cache.setMessageSyncInterval(updatedMessageSyncInterval);
 
-    // sleep for a time just more the intial messageSyncInterval1 , so that
-    // the value is updated in QRM run loop.
-    Thread.sleep((initialMessageSyncInterval + 1) * 1000);
 
-    assertEquals("messageSyncInterval not updated.",
-        updatedMessageSyncInterval, HARegionQueue.getMessageSyncInterval());
+    Awaitility.await().atMost(1, TimeUnit.MINUTES).until( () ->
+      assertEquals("messageSyncInterval not updated.",
+        updatedMessageSyncInterval, HARegionQueue.getMessageSyncInterval()));
   }
 }
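
The refactoring above replaces fixed Thread.sleep() calls with condition polling (the waitAtLeast helper and Awaitility). A minimal, self-contained sketch of the same polling idiom is shown below; the test class and the counter it polls are hypothetical, and the import assumes the Awaitility 1.x coordinates (com.jayway.awaitility) in use around this time, while newer releases publish the same API under org.awaitility.

import static org.junit.Assert.assertEquals;

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import com.jayway.awaitility.Awaitility;
import org.junit.Test;

public class AwaitilityPollingSketchTest {

  @Test
  public void passesAsSoonAsTheConditionHolds() {
    AtomicInteger counter = new AtomicInteger();

    // Simulate asynchronous work, e.g. an expiry task or a QRM thread.
    new Thread(counter::incrementAndGet).start();

    // Poll the condition up to a generous bound instead of sleeping for a
    // fixed time: the test finishes as soon as the condition is true, and
    // only a genuinely broken run pays the full timeout.
    Awaitility.await()
        .atMost(1, TimeUnit.MINUTES)
        .until(() -> counter.get() == 1);

    assertEquals(1, counter.get());
  }
}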


[36/50] [abbrv] incubator-geode git commit: GEODE-2022: Marking testRRPRLocalQueryingWithHetroIndexes as flaky

Posted by kl...@apache.org.
GEODE-2022: Marking testRRPRLocalQueryingWithHetroIndexes as flaky


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/7e659b23
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/7e659b23
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/7e659b23

Branch: refs/heads/feature/GEODE-1930
Commit: 7e659b2381eae3847d1c7e29bb1b37df518dde35
Parents: 59df3d9
Author: Dan Smith <up...@apache.org>
Authored: Thu Oct 20 13:07:35 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Thu Oct 20 13:09:12 2016 -0700

----------------------------------------------------------------------
 .../cache/query/partitioned/PRColocatedEquiJoinDUnitTest.java      | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7e659b23/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRColocatedEquiJoinDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRColocatedEquiJoinDUnitTest.java b/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRColocatedEquiJoinDUnitTest.java
index c9e5084..3e8250b 100644
--- a/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRColocatedEquiJoinDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRColocatedEquiJoinDUnitTest.java
@@ -19,6 +19,7 @@
  */
 package org.apache.geode.cache.query.partitioned;
 
+import org.apache.geode.test.junit.categories.FlakyTest;
 import org.junit.experimental.categories.Category;
 import org.junit.Test;
 
@@ -1341,6 +1342,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
 
 
   @Test
+  @Category(FlakyTest.class) // GEODE-2022
   public void testRRPRLocalQueryingWithHetroIndexes() throws Exception {
 
     Host host = Host.getHost(0);
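
The only functional change in this commit is the @Category(FlakyTest.class) tag, which lets the build filter the test by JUnit category. As a hedged illustration, a plain JUnit 4 suite that runs the class while skipping anything tagged FlakyTest could look like the sketch below; the suite class name is hypothetical, and Geode's own build may wire the category filtering differently.

import org.junit.experimental.categories.Categories;
import org.junit.experimental.categories.Categories.ExcludeCategory;
import org.junit.runner.RunWith;
import org.junit.runners.Suite.SuiteClasses;

import org.apache.geode.cache.query.partitioned.PRColocatedEquiJoinDUnitTest;
import org.apache.geode.test.junit.categories.FlakyTest;

// Runs the listed test classes but skips any test method or class that is
// tagged with @Category(FlakyTest.class).
@RunWith(Categories.class)
@ExcludeCategory(FlakyTest.class)
@SuiteClasses({ PRColocatedEquiJoinDUnitTest.class })
public class StableQuerySuite {
}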


[06/50] [abbrv] incubator-geode git commit: GEODE-1466 : Added TemporaryFileRule JUnit rule for tests that need to create files in a particular directory.

Posted by kl...@apache.org.
GEODE-1466 : Added TemporaryFileRule JUnit rule for tests that need to create files in a particular directory.

* This closes #260


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/b0659935
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/b0659935
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/b0659935

Branch: refs/heads/feature/GEODE-1930
Commit: b06599353d40ae4b54282c71aba9df57411e2704
Parents: 8a08032
Author: Jared Stewart <js...@pivotal.io>
Authored: Thu Oct 13 16:50:35 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Fri Oct 14 12:00:03 2016 -0700

----------------------------------------------------------------------
 geode-junit/build.gradle                        |   5 +-
 .../test/junit/rules/TemporaryFileRule.java     | 111 ++++++++++++++++
 .../test/junit/rules/TemporaryFileRuleTest.java | 130 +++++++++++++++++++
 3 files changed, 245 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b0659935/geode-junit/build.gradle
----------------------------------------------------------------------
diff --git a/geode-junit/build.gradle b/geode-junit/build.gradle
index 3e4eb22..f7e5e46 100755
--- a/geode-junit/build.gradle
+++ b/geode-junit/build.gradle
@@ -17,7 +17,10 @@
 
 dependencies {
   testCompile 'commons-lang:commons-lang:' + project.'commons-lang.version'
-  compile ('junit:junit:' + project.'junit.version') {
+  testCompile 'com.google.guava:guava:' + project.'guava.version'
+  testCompile 'org.assertj:assertj-core:' + project.'assertj-core.version'
+
+  compile('junit:junit:' + project.'junit.version') {
     exclude module: 'hamcrest-core'
   }
   compile 'org.hamcrest:hamcrest-all:' + project.'hamcrest-all.version'

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b0659935/geode-junit/src/main/java/org/apache/geode/test/junit/rules/TemporaryFileRule.java
----------------------------------------------------------------------
diff --git a/geode-junit/src/main/java/org/apache/geode/test/junit/rules/TemporaryFileRule.java b/geode-junit/src/main/java/org/apache/geode/test/junit/rules/TemporaryFileRule.java
new file mode 100644
index 0000000..bd2cac2
--- /dev/null
+++ b/geode-junit/src/main/java/org/apache/geode/test/junit/rules/TemporaryFileRule.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.geode.test.junit.rules;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Objects;
+import java.util.Set;
+
+import org.junit.rules.ExternalResource;
+
+
+/**
+ * A {@link org.junit.rules.TestRule} to create temporary files in a given directory that should be
+ * deleted when the test method finishes.  This is useful in place of {@link org.junit.rules.TemporaryFolder} when a test needs
+ * to create files in a particular directory, for example user.home or user.dir.
+ *
+ * <p>Example of usage:
+ * <pre>
+ * public static class HasTemporaryFile {
+ *  &#064;Rule
+ *  public TemporaryFileRule temporaryFileRule = TemporaryFileRule.inUserHome();
+ *
+ *  &#064;Test
+ *  public void testUsingTempFolder() throws IOException {
+ *      File createdFile= temporaryFileRule.newFile(&quot;myfile.txt&quot;);
+ *      File createdFile= temporaryFileRule.newFile(&quot;myfile2.txt&quot;);
+ *      // ...
+ *     }
+ * }
+ * </pre>
+ */
+public class TemporaryFileRule extends ExternalResource {
+
+  private final String directory;
+
+  private Set<File> files;
+
+  private TemporaryFileRule(String parentDirectory) {
+    this.directory = parentDirectory;
+  }
+
+  public static TemporaryFileRule inUserHome() {
+    return new TemporaryFileRule(System.getProperty("user.home"));
+  }
+
+  public static TemporaryFileRule inCurrentDir() {
+    return new TemporaryFileRule(System.getProperty("user.dir"));
+  }
+
+  public static TemporaryFileRule inDirectory(String directory) {
+    return new TemporaryFileRule(directory);
+  }
+
+  @Override
+  public void before() {
+    files = new HashSet<>();
+  }
+
+  @Override
+  public void after() {
+    files.stream().filter(Objects::nonNull).filter(File::exists).forEach(File::delete);
+  }
+
+  /**
+   * Creates a new file with the given name in the specified {@link #directory}.
+   *
+   * @param fileName the name of the file to create.
+   *
+   * @return the file that was created.
+   *
+   * @throws IllegalStateException if the file already exists.
+   * @throws IllegalStateException if there is an {@link IOException} while creating the file.
+   */
+  public File newFile(String fileName) {
+    return createFile(directory, fileName);
+  }
+
+
+  private File createFile(String directory, String fileName) {
+    File file = new File(directory, fileName);
+    try {
+      if (!file.createNewFile()) {
+        throw new IllegalStateException("The specified file " + file.getAbsolutePath() + " already exists.");
+      }
+    } catch (IOException e) {
+      throw new IllegalStateException("IOException attempting to create file " + file.getAbsolutePath() + ".", e);
+    }
+
+    file.deleteOnExit();
+    files.add(file);
+    return file;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b0659935/geode-junit/src/test/java/org/apache/geode/test/junit/rules/TemporaryFileRuleTest.java
----------------------------------------------------------------------
diff --git a/geode-junit/src/test/java/org/apache/geode/test/junit/rules/TemporaryFileRuleTest.java b/geode-junit/src/test/java/org/apache/geode/test/junit/rules/TemporaryFileRuleTest.java
new file mode 100644
index 0000000..4deb1ee
--- /dev/null
+++ b/geode-junit/src/test/java/org/apache/geode/test/junit/rules/TemporaryFileRuleTest.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.geode.test.junit.rules;
+
+import static org.assertj.core.api.Assertions.*;
+
+import java.io.File;
+
+import com.google.common.io.Files;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.Result;
+
+import org.apache.geode.test.junit.categories.IntegrationTest;
+
+@Category(IntegrationTest.class)
+public class TemporaryFileRuleTest {
+
+  @Test
+  public void exceptionIsThrownIfFileAlreadyExists() {
+    Result result = TestRunner.runTest(TemporaryFileRuleTest.ExceptionIsThrownIfFileAlreadyExists.class);
+
+    assertThat(result.wasSuccessful()).isTrue();
+  }
+
+  @Test
+  public void fileGetsCreatedProperly() {
+    Result result = TestRunner.runTest(TemporaryFileRuleTest.FileGetsCreatedProperly.class);
+
+    assertThat(result.wasSuccessful()).isTrue();
+  }
+
+
+  @Test
+  public void filesGetCleanedUpAfterTestMethod() {
+    Result result = TestRunner.runTest(TemporaryFileRuleTest.FilesGetCleanedUpAfterTestMethod.class);
+
+    assertThat(result.wasSuccessful()).isTrue();
+  }
+
+  /**
+   * Used by test {@link #exceptionIsThrownIfFileAlreadyExists()}
+   */
+  public static class ExceptionIsThrownIfFileAlreadyExists {
+
+    static File tempDirectory = Files.createTempDir();
+
+    @Rule
+    public TemporaryFileRule temporaryFileRule = TemporaryFileRule.inDirectory(tempDirectory.getAbsolutePath());
+
+    @Test
+    public void doTest() throws Exception {
+      String fileName = "fileThatAlreadyExists.txt";
+      File tempFile = new File(tempDirectory, fileName);
+      assertThat(tempFile.createNewFile()).isTrue();
+      assertThatThrownBy(() -> temporaryFileRule.newFile(fileName)).isInstanceOf(IllegalStateException.class);
+    }
+  }
+
+
+  /**
+   * Used by test {@link #fileGetsCreatedProperly()}
+   */
+  public static class FileGetsCreatedProperly {
+
+    static File tempDirectory = Files.createTempDir();
+
+    @Rule
+    public TemporaryFileRule temporaryFileRule = TemporaryFileRule.inDirectory(tempDirectory.getAbsolutePath());
+
+    @Test
+    public void doTest() throws Exception {
+      String fileName = "expectedFile.txt";
+      File expectedFile = new File(tempDirectory, fileName);
+      File actualFile = temporaryFileRule.newFile(fileName);
+
+      assertThat(actualFile).isEqualTo(expectedFile);
+    }
+  }
+
+  /**
+   * Used by test {@link #filesGetCleanedUpAfterTestMethod()}
+   *
+   * This test ensures that {@link TemporaryFileRule} cleans up the files it created in between each test method.
+   */
+  public static class FilesGetCleanedUpAfterTestMethod {
+
+    private static String fileName1 = "test1.txt";
+    private static String fileName2 = "test2.txt";
+
+    static File tempDirectory = Files.createTempDir();
+
+    @Rule
+    public TemporaryFileRule temporaryFileRule = TemporaryFileRule.inDirectory(tempDirectory.getAbsolutePath());
+
+    @Test
+    public void test1() throws Exception {
+      temporaryFileRule.newFile(fileName1);
+
+      assertThat(new File(tempDirectory, fileName1)).exists();
+      assertThat(new File(tempDirectory, fileName2)).doesNotExist();
+    }
+
+    @Test
+    public void test2() throws Exception {
+      temporaryFileRule.newFile(fileName2);
+
+      assertThat(new File(tempDirectory, fileName1)).doesNotExist();
+      assertThat(new File(tempDirectory, fileName2)).exists();
+    }
+  }
+
+}


[26/50] [abbrv] incubator-geode git commit: GEODE-2005: fix javadoc warning and format

Posted by kl...@apache.org.
GEODE-2005: fix javadoc warning and format


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/3d173b18
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/3d173b18
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/3d173b18

Branch: refs/heads/feature/GEODE-1930
Commit: 3d173b1852facd50bc6396521bf862dd442bfa46
Parents: a3bd256
Author: Kirk Lund <kl...@apache.org>
Authored: Mon Oct 17 10:43:02 2016 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Mon Oct 17 16:30:19 2016 -0700

----------------------------------------------------------------------
 .../web/controllers/CommonCrudController.java   | 23 ++++++++++----------
 1 file changed, 12 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/3d173b18/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java
----------------------------------------------------------------------
diff --git a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java
index 2bcb31b..30c8b3a 100644
--- a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java
+++ b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java
@@ -14,7 +14,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.geode.rest.internal.web.controllers;
 
 import java.util.ArrayList;
@@ -48,21 +47,20 @@ import org.apache.geode.rest.internal.web.exception.GemfireRestException;
 import org.apache.geode.rest.internal.web.util.ArrayUtils;
 import org.apache.geode.rest.internal.web.util.JSONUtils;
 
-
 /**
  * The CommonCrudController serves REST Requests related to listing regions, 
  * listing keys in region, delete keys or delete all data in region.
- * <p/>
+ *
  * @since GemFire 8.0
  */
-
 @SuppressWarnings("unused")
 public abstract class CommonCrudController extends AbstractBaseController {
   
   private static final Logger logger = LogService.getLogger();
   
   /**
-   * list all available resources (Regions) in the GemFire cluster
+   * List all available resources (Regions) in the GemFire cluster
+   *
    * @return JSON document containing result
    */
   @RequestMapping(method = RequestMethod.GET, produces = { MediaType.APPLICATION_JSON_VALUE, MediaType.APPLICATION_JSON_VALUE })
@@ -89,7 +87,8 @@ public abstract class CommonCrudController extends AbstractBaseController {
   
   /**
    * List all keys for the given region in the GemFire cluster
-   * @param region gemfire region
+   *
+   * @param  region gemfire region
    * @return JSON document containing result
    */
   @RequestMapping(method = RequestMethod.GET, value = "/{region}/keys",
@@ -122,8 +121,9 @@ public abstract class CommonCrudController extends AbstractBaseController {
   
   /**
    * Delete data for single key or specific keys in region
-   * @param region gemfire region
-   * @param keys for which data is requested
+   *
+   * @param  region gemfire region
+   * @param  keys for which data is requested
    * @return JSON document containing result
    */
   @RequestMapping(method = RequestMethod.DELETE, value = "/{region}/{keys}",
@@ -153,7 +153,8 @@ public abstract class CommonCrudController extends AbstractBaseController {
 
   /**
    * Delete all data in region
-   * @param region gemfire region
+   *
+   * @param  region gemfire region
    * @return JSON document containing result
    */
   @RequestMapping(method = RequestMethod.DELETE, value = "/{region}")
@@ -180,8 +181,8 @@ public abstract class CommonCrudController extends AbstractBaseController {
   }
 
   /**
-   * Ping is not secured so that it may not be used to determine a valid username/password
-   * @return
+   * Ping is not secured so that it may not be used to determine a valid
+   * username/password
    */
   @RequestMapping(method = { RequestMethod.GET, RequestMethod.HEAD }, value = "/ping")
   @ApiOperation(


[11/50] [abbrv] incubator-geode git commit: GEODE-1952 Add output, final_app dirs to rat exclusions

Posted by kl...@apache.org.
GEODE-1952 Add output, final_app dirs to rat exclusions


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/d573de2b
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/d573de2b
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/d573de2b

Branch: refs/heads/feature/GEODE-1930
Commit: d573de2ba5bc98c452699a6654861bcf748745cf
Parents: e9669d6
Author: Joey McAllister <jm...@pivotal.io>
Authored: Thu Oct 13 20:47:50 2016 -0700
Committer: Karen Miller <km...@pivotal.io>
Committed: Fri Oct 14 14:51:04 2016 -0700

----------------------------------------------------------------------
 gradle/rat.gradle | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d573de2b/gradle/rat.gradle
----------------------------------------------------------------------
diff --git a/gradle/rat.gradle b/gradle/rat.gradle
index 0b01d81..27366a4 100644
--- a/gradle/rat.gradle
+++ b/gradle/rat.gradle
@@ -94,11 +94,15 @@ rat {
     '**/publickeyfile',
     '**/*.dat',
 
+    // Geode docs
+    'geode-book/Gemfile.lock',
+    'geode-book/output/**',
+    'geode-book/final_app/**',
+
     // other text files
     'geode-spark-connector/project/plugins.sbt',
     'geode-spark-connector/project/build.properties',
     '**/log4j*.xml',
-    'geode-book/Gemfile.lock',
 
     // modules
     'extensions/**/log4j.properties',


[19/50] [abbrv] incubator-geode git commit: GEODE-1993: allow LocatorServerStartupRule to save server's ports as well.

Posted by kl...@apache.org.
GEODE-1993: allow LocatorServerStartupRule to save server's ports as well.

* added more tests


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/5abe957c
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/5abe957c
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/5abe957c

Branch: refs/heads/feature/GEODE-1930
Commit: 5abe957ca1cd42eeaf82549c275711e6e94dddec
Parents: 1fb0d0a
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Mon Oct 17 08:08:39 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Mon Oct 17 08:08:39 2016 -0700

----------------------------------------------------------------------
 .../ClusterConfigWithoutSecurityDUnitTest.java  |   4 +-
 .../security/PeerAuthenticatorDUnitTest.java    |  63 ++++--------
 ...eerSecurityWithEmbeddedLocatorDUnitTest.java | 102 +++++++++++++++++++
 .../SecurityClusterConfigDUnitTest.java         |  10 +-
 .../SecurityWithoutClusterConfigDUnitTest.java  |   2 +-
 .../security/StartServerAuthorizationTest.java  |   4 +-
 .../dunit/rules/LocatorServerStartupRule.java   |  12 ++-
 .../LuceneClusterConfigurationDUnitTest.java    |   2 +-
 8 files changed, 140 insertions(+), 59 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5abe957c/geode-core/src/test/java/org/apache/geode/security/ClusterConfigWithoutSecurityDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/security/ClusterConfigWithoutSecurityDUnitTest.java b/geode-core/src/test/java/org/apache/geode/security/ClusterConfigWithoutSecurityDUnitTest.java
index 72dbd1a..1bbfa0f 100644
--- a/geode-core/src/test/java/org/apache/geode/security/ClusterConfigWithoutSecurityDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/security/ClusterConfigWithoutSecurityDUnitTest.java
@@ -71,7 +71,7 @@ public class ClusterConfigWithoutSecurityDUnitTest extends JUnit4DistributedTest
 
     // initial security properties should only contain initial set of values
     ServerStarter serverStarter = new ServerStarter(props);
-    serverStarter.startServer(lsRule.getLocatorPort(0));
+    serverStarter.startServer(lsRule.getPort(0));
     DistributedSystem ds = serverStarter.cache.getDistributedSystem();
 
     // after cache is created, the configuration won't chagne
@@ -92,7 +92,7 @@ public class ClusterConfigWithoutSecurityDUnitTest extends JUnit4DistributedTest
 
     ServerStarter serverStarter = new ServerStarter(props);
 
-    assertThatThrownBy(() -> serverStarter.startServer(lsRule.getLocatorPort(0)))
+    assertThatThrownBy(() -> serverStarter.startServer(lsRule.getPort(0)))
       .isInstanceOf(GemFireConfigException.class)
       .hasMessage(LocalizedStrings.GEMFIRE_CACHE_SECURITY_MISCONFIGURATION.toLocalizedString());
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5abe957c/geode-core/src/test/java/org/apache/geode/security/PeerAuthenticatorDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/security/PeerAuthenticatorDUnitTest.java b/geode-core/src/test/java/org/apache/geode/security/PeerAuthenticatorDUnitTest.java
index bb147c7..b12ea43 100644
--- a/geode-core/src/test/java/org/apache/geode/security/PeerAuthenticatorDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/security/PeerAuthenticatorDUnitTest.java
@@ -20,73 +20,50 @@ package org.apache.geode.security;
 import static org.apache.geode.distributed.ConfigurationProperties.*;
 import static org.assertj.core.api.Assertions.assertThatThrownBy;
 
-import java.io.File;
 import java.util.Properties;
 
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import org.apache.geode.distributed.Locator;
-import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.security.templates.DummyAuthenticator;
-import org.apache.geode.test.dunit.Host;
 import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
+import org.apache.geode.test.dunit.rules.LocatorServerStartupRule;
+import org.apache.geode.test.dunit.rules.ServerStarter;
 import org.apache.geode.test.junit.categories.DistributedTest;
 import org.apache.geode.test.junit.categories.SecurityTest;
 
 @Category({ DistributedTest.class, SecurityTest.class })
 public class PeerAuthenticatorDUnitTest extends JUnit4DistributedTestCase {
-  protected VM locator = null;
-  protected VM server = null;
-  protected VM server1 = null;
+  @Rule
+  public LocatorServerStartupRule lsRule = new LocatorServerStartupRule();
 
   @Before
   public void before() throws Exception {
-    final Host host = Host.getHost(0);
-    this.locator = host.getVM(0);
-    this.server = host.getVM(1);
-    this.server1 = host.getVM(2);
+    Properties props = new Properties();
+    props.setProperty(SECURITY_PEER_AUTHENTICATOR, DummyAuthenticator.class.getName());
+    lsRule.getLocatorVM(0, props);
   }
-
   @Test
   public void testPeerAuthenticator() throws Exception{
-    int locatorPort = locator.invoke(()->{
-      Properties props = new Properties();
-      props.setProperty(SECURITY_PEER_AUTHENTICATOR, DummyAuthenticator.class.getName());
-      props.setProperty(MCAST_PORT, "0");
-      props.put(JMX_MANAGER, "true");
-      props.put(JMX_MANAGER_START, "true");
-      props.put(JMX_MANAGER_PORT, "0");
-      Locator locatorObj = Locator.startLocatorAndDS(0, new File("locator.log"), props);
-      return locatorObj.getPort();
-    });
 
-    // set up server with security
-    String locators = "localhost[" + locatorPort + "]";
-    server.invoke(()->{
-      Properties props = new Properties();
-      props.setProperty(MCAST_PORT, "0");
-      props.setProperty(LOCATORS, locators);
-
-      // the following are needed for peer-to-peer authentication
-      props.setProperty("security-username", "user");
-      props.setProperty("security-password", "user");
-      // this should execute without exception
-      InternalDistributedSystem ds = getSystem(props);
-    });
+    int locatorPort = lsRule.getPort(0);
+    Properties server1Props = new Properties();
+    server1Props.setProperty("security-username", "user");
+    server1Props.setProperty("security-password", "user");
+    lsRule.getServerVM(1, server1Props, locatorPort);
 
-    server1.invoke(()->{
-      Properties props = new Properties();
-      props.setProperty(MCAST_PORT, "0");
-      props.setProperty(LOCATORS, locators);
 
-      // the following are needed for peer-to-peer authentication
-      props.setProperty("security-username", "bogus");
-      props.setProperty("security-password", "user");
+    Properties server2Props = new Properties();
+    server2Props.setProperty("security-username", "bogus");
+    server2Props.setProperty("security-password", "user");
+    VM server2 = lsRule.getNodeVM(2);
 
-      assertThatThrownBy(()->getSystem(props)).isInstanceOf(GemFireSecurityException.class);
+    server2.invoke(()->{
+      ServerStarter serverStarter = new ServerStarter(server2Props);
+      assertThatThrownBy(()->serverStarter.startServer(locatorPort)).isInstanceOf(GemFireSecurityException.class).hasMessageContaining("Invalid user name");
     });
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5abe957c/geode-core/src/test/java/org/apache/geode/security/PeerSecurityWithEmbeddedLocatorDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/security/PeerSecurityWithEmbeddedLocatorDUnitTest.java b/geode-core/src/test/java/org/apache/geode/security/PeerSecurityWithEmbeddedLocatorDUnitTest.java
new file mode 100644
index 0000000..a42f6db
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/security/PeerSecurityWithEmbeddedLocatorDUnitTest.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.geode.security;
+
+import static org.apache.geode.distributed.ConfigurationProperties.*;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import java.util.Properties;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.geode.internal.AvailablePortHelper;
+import org.apache.geode.security.templates.DummyAuthenticator;
+import org.apache.geode.security.templates.SimpleSecurityManager;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
+import org.apache.geode.test.dunit.rules.LocatorServerStartupRule;
+import org.apache.geode.test.dunit.rules.ServerStarter;
+import org.apache.geode.test.junit.categories.DistributedTest;
+import org.apache.geode.test.junit.categories.SecurityTest;
+
+@Category({ DistributedTest.class, SecurityTest.class })
+public class PeerSecurityWithEmbeddedLocatorDUnitTest extends JUnit4DistributedTestCase {
+
+  @Rule
+  public LocatorServerStartupRule lsRule = new LocatorServerStartupRule();
+
+
+  @Test
+  public void testPeerSecurityManager() throws Exception{
+    int locatorPort = AvailablePortHelper.getRandomAvailableTCPPort();
+
+    Properties server0Props = new Properties();
+    server0Props.setProperty(SECURITY_MANAGER, SimpleSecurityManager.class.getName());
+    server0Props.setProperty("start-locator", "localhost["+locatorPort+"]");
+    lsRule.getServerVM(0, server0Props);
+
+
+    Properties server1Props = new Properties();
+    server1Props.setProperty("security-username", "cluster");
+    server1Props.setProperty("security-password", "cluster");
+    lsRule.getServerVM(1, server1Props, locatorPort);
+
+    Properties server2Props = new Properties();
+    server2Props.setProperty("security-username", "user");
+    server2Props.setProperty("security-password", "wrongPwd");
+
+    VM server2 = lsRule.getNodeVM(2);
+    server2.invoke(()->{
+      ServerStarter serverStarter = new ServerStarter(server2Props);
+      assertThatThrownBy(()->serverStarter.startServer(locatorPort))
+        .isInstanceOf(GemFireSecurityException.class)
+        .hasMessageContaining("Security check failed. Authentication error");
+    });
+  }
+
+  @Test
+  public void testPeerAuthenticator() throws Exception{
+    int locatorPort = AvailablePortHelper.getRandomAvailableTCPPort();
+
+    Properties server0Props = new Properties();
+    server0Props.setProperty(SECURITY_PEER_AUTHENTICATOR, DummyAuthenticator.class.getName());
+    server0Props.setProperty("start-locator", "localhost["+locatorPort+"]");
+    lsRule.getServerVM(0, server0Props);
+
+
+    Properties server1Props = new Properties();
+    server1Props.setProperty("security-username", "user");
+    server1Props.setProperty("security-password", "user");
+    lsRule.getServerVM(1, server1Props, locatorPort);
+
+    Properties server2Props = new Properties();
+    server2Props.setProperty("security-username", "bogus");
+    server2Props.setProperty("security-password", "user");
+
+    VM server2 = lsRule.getNodeVM(2);
+    server2.invoke(()->{
+      ServerStarter serverStarter = new ServerStarter(server2Props);
+      assertThatThrownBy(()->serverStarter.startServer(locatorPort))
+        .isInstanceOf(GemFireSecurityException.class)
+        .hasMessageContaining("Invalid user name");
+    });
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5abe957c/geode-core/src/test/java/org/apache/geode/security/SecurityClusterConfigDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/security/SecurityClusterConfigDUnitTest.java b/geode-core/src/test/java/org/apache/geode/security/SecurityClusterConfigDUnitTest.java
index 5364c91..07ac8be 100644
--- a/geode-core/src/test/java/org/apache/geode/security/SecurityClusterConfigDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/security/SecurityClusterConfigDUnitTest.java
@@ -68,7 +68,7 @@ public class SecurityClusterConfigDUnitTest extends JUnit4DistributedTestCase {
 
     // initial security properties should only contain initial set of values
     ServerStarter serverStarter = new ServerStarter(props);
-    serverStarter.startServer(lsRule.getLocatorPort(0));
+    serverStarter.startServer(lsRule.getPort(0));
     DistributedSystem ds = serverStarter.cache.getDistributedSystem();
 
     // after cache is created, we got the security props passed in by cluster config
@@ -90,7 +90,7 @@ public class SecurityClusterConfigDUnitTest extends JUnit4DistributedTestCase {
 
     // initial security properties should only contain initial set of values
     ServerStarter serverStarter = new ServerStarter(props);
-    serverStarter.startServer(lsRule.getLocatorPort(0));
+    serverStarter.startServer(lsRule.getPort(0));
     DistributedSystem ds = serverStarter.cache.getDistributedSystem();
 
     // after cache is created, we got the security props passed in by cluster config
@@ -112,7 +112,7 @@ public class SecurityClusterConfigDUnitTest extends JUnit4DistributedTestCase {
     // initial security properties should only contain initial set of values
     ServerStarter serverStarter = new ServerStarter(props);
 
-    assertThatThrownBy(() -> serverStarter.startServer(lsRule.getLocatorPort(0)))
+    assertThatThrownBy(() -> serverStarter.startServer(lsRule.getPort(0)))
       .isInstanceOf(GemFireConfigException.class)
       .hasMessage(LocalizedStrings.GEMFIRE_CACHE_SECURITY_MISCONFIGURATION
         .toLocalizedString());
@@ -132,7 +132,7 @@ public class SecurityClusterConfigDUnitTest extends JUnit4DistributedTestCase {
     // initial security properties should only contain initial set of values
     ServerStarter serverStarter = new ServerStarter(props);
 
-    assertThatThrownBy(() -> serverStarter.startServer(lsRule.getLocatorPort(0)))
+    assertThatThrownBy(() -> serverStarter.startServer(lsRule.getPort(0)))
       .isInstanceOf(GemFireConfigException.class)
       .hasMessage(LocalizedStrings.GEMFIRE_CACHE_SECURITY_MISCONFIGURATION
                                                        .toLocalizedString());
@@ -151,7 +151,7 @@ public class SecurityClusterConfigDUnitTest extends JUnit4DistributedTestCase {
 
     ServerStarter serverStarter = new ServerStarter(props);
 
-    assertThatThrownBy(() -> serverStarter.startServer(lsRule.getLocatorPort(0)))
+    assertThatThrownBy(() -> serverStarter.startServer(lsRule.getPort(0)))
       .isInstanceOf(GemFireConfigException.class)
       .hasMessage(LocalizedStrings.GEMFIRE_CACHE_SECURITY_MISCONFIGURATION_2
                                                        .toLocalizedString());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5abe957c/geode-core/src/test/java/org/apache/geode/security/SecurityWithoutClusterConfigDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/security/SecurityWithoutClusterConfigDUnitTest.java b/geode-core/src/test/java/org/apache/geode/security/SecurityWithoutClusterConfigDUnitTest.java
index d3ed823..f715cb2 100644
--- a/geode-core/src/test/java/org/apache/geode/security/SecurityWithoutClusterConfigDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/security/SecurityWithoutClusterConfigDUnitTest.java
@@ -70,7 +70,7 @@ public class SecurityWithoutClusterConfigDUnitTest extends JUnit4DistributedTest
 
     // initial security properties should only contain initial set of values
     ServerStarter serverStarter = new ServerStarter(props);
-    serverStarter.startServer(lsRule.getLocatorPort(0));
+    serverStarter.startServer(lsRule.getPort(0));
     DistributedSystem ds = serverStarter.cache.getDistributedSystem();
     assertEquals(3, ds.getSecurityProperties().size());
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5abe957c/geode-core/src/test/java/org/apache/geode/security/StartServerAuthorizationTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/security/StartServerAuthorizationTest.java b/geode-core/src/test/java/org/apache/geode/security/StartServerAuthorizationTest.java
index f6928bf..a001493 100644
--- a/geode-core/src/test/java/org/apache/geode/security/StartServerAuthorizationTest.java
+++ b/geode-core/src/test/java/org/apache/geode/security/StartServerAuthorizationTest.java
@@ -58,7 +58,7 @@ public class StartServerAuthorizationTest extends JUnit4DistributedTestCase {
     VM server = lsRule.getNodeVM(1);
     server.invoke(()->{
       ServerStarter serverStarter = new ServerStarter(props);
-      assertThatThrownBy(()->serverStarter.startServer(lsRule.getLocatorPort(0))).isInstanceOf(GemFireSecurityException.class).hasMessageContaining("Security check failed. Authentication error. Please check your credentials");
+      assertThatThrownBy(()->serverStarter.startServer(lsRule.getPort(0))).isInstanceOf(GemFireSecurityException.class).hasMessageContaining("Security check failed. Authentication error. Please check your credentials");
     });
   }
 
@@ -73,7 +73,7 @@ public class StartServerAuthorizationTest extends JUnit4DistributedTestCase {
     VM server = lsRule.getNodeVM(1);
     server.invoke(()->{
       ServerStarter serverStarter = new ServerStarter(props);
-      assertThatThrownBy(()->serverStarter.startServer(lsRule.getLocatorPort(0))).isInstanceOf(GemFireSecurityException.class).hasMessageContaining("user not authorized for CLUSTER:MANAGE");
+      assertThatThrownBy(()->serverStarter.startServer(lsRule.getPort(0))).isInstanceOf(GemFireSecurityException.class).hasMessageContaining("user not authorized for CLUSTER:MANAGE");
     });
 
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5abe957c/geode-core/src/test/java/org/apache/geode/test/dunit/rules/LocatorServerStartupRule.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/LocatorServerStartupRule.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/LocatorServerStartupRule.java
index 71894c8..41326e0 100644
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/LocatorServerStartupRule.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/LocatorServerStartupRule.java
@@ -41,7 +41,7 @@ public class LocatorServerStartupRule extends ExternalResource implements Serial
 
   private Host host = getHost(0);
 
-  public int[] locatorPorts = new int[4];
+  public int[] ports = new int[4];
 
 
   // these are only avaialbe in each VM
@@ -75,7 +75,7 @@ public class LocatorServerStartupRule extends ExternalResource implements Serial
       locatorStarter.startLocator();
       return locatorStarter.locator.getPort();
     });
-    locatorPorts[index] = locatorPort;
+    ports[index] = locatorPort;
     return locatorVM;
   }
 
@@ -98,10 +98,12 @@ public class LocatorServerStartupRule extends ExternalResource implements Serial
   public VM getServerVM(int index, Properties properties, int locatorPort) {
     VM nodeVM = getNodeVM(index);
     properties.setProperty(NAME, "server-"+index);
-    nodeVM.invoke(() -> {
+    int port = nodeVM.invoke(() -> {
       serverStarter = new ServerStarter(properties);
       serverStarter.startServer(locatorPort);
+      return serverStarter.server.getPort();
     });
+    ports[index] = port;
     return nodeVM;
   }
 
@@ -116,8 +118,8 @@ public class LocatorServerStartupRule extends ExternalResource implements Serial
     return host.getVM(index);
   }
 
-  public int getLocatorPort(int index){
-    return locatorPorts[index];
+  public int getPort(int index){
+    return ports[index];
   }
 
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5abe957c/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/configuration/LuceneClusterConfigurationDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/configuration/LuceneClusterConfigurationDUnitTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/configuration/LuceneClusterConfigurationDUnitTest.java
index bcc5ab3..8dc5e0f 100755
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/configuration/LuceneClusterConfigurationDUnitTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/configuration/LuceneClusterConfigurationDUnitTest.java
@@ -241,7 +241,7 @@ public class LuceneClusterConfigurationDUnitTest extends CliCommandTestBase {
     if (addGroup) {
       nodeProperties.setProperty(GROUPS, groupName);
     }
-    return ls.getServerVM(vmIndex, nodeProperties, ls.getLocatorPort(0));
+    return ls.getServerVM(vmIndex, nodeProperties, ls.getPort(0));
   }
 
   private VM startLocatorWithClusterConfigurationEnabled() throws Exception {
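
Taken together, the rule changes in this commit let a DUnit test look up the recorded port of either a locator or a server by index. A compressed usage sketch assembled from the call sites above is shown below; the test class name and scenario are hypothetical.

import java.util.Properties;

import org.junit.Rule;
import org.junit.Test;

import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
import org.apache.geode.test.dunit.rules.LocatorServerStartupRule;

public class PortLookupUsageSketchDUnitTest extends JUnit4DistributedTestCase {

  @Rule
  public LocatorServerStartupRule lsRule = new LocatorServerStartupRule();

  @Test
  public void serverJoinsTheLocatorStartedByTheRule() throws Exception {
    // Start a locator in VM 0; the rule records its port at index 0.
    lsRule.getLocatorVM(0, new Properties());

    // Start a server in VM 1 pointed at that locator; after this change the
    // rule records the server's port at index 1 as well.
    lsRule.getServerVM(1, new Properties(), lsRule.getPort(0));
  }
}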


[16/50] [abbrv] incubator-geode git commit: Merge remote-tracking branch 'origin/release/1.0.0-incubating' into develop

Posted by kl...@apache.org.
Merge remote-tracking branch 'origin/release/1.0.0-incubating' into develop


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/40c19179
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/40c19179
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/40c19179

Branch: refs/heads/feature/GEODE-1930
Commit: 40c19179bce99586ab6efef21f3e836a4b628733
Parents: a7b9ac1 a0de4c9
Author: Dan Smith <up...@apache.org>
Authored: Fri Oct 14 16:17:34 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Fri Oct 14 16:17:34 2016 -0700

----------------------------------------------------------------------
 extensions/geode-modules-assembly/build.gradle  |   12 +-
 extensions/geode-modules-hibernate/build.gradle |   42 -
 .../geode/modules/hibernate/EnumType.java       |   57 -
 .../geode/modules/hibernate/GemFireCache.java   |  238 --
 .../modules/hibernate/GemFireCacheListener.java |   54 -
 .../modules/hibernate/GemFireCacheProvider.java |  200 -
 .../hibernate/GemFireQueryCacheFactory.java     |   39 -
 .../modules/hibernate/GemFireRegionFactory.java |  221 --
 .../modules/hibernate/internal/Access.java      |  257 --
 .../ClientServerRegionFactoryDelegate.java      |  201 --
 .../hibernate/internal/CollectionAccess.java    |  224 --
 .../hibernate/internal/EntityRegionWriter.java  |   87 -
 .../hibernate/internal/EntityVersion.java       |   27 -
 .../hibernate/internal/EntityVersionImpl.java   |   50 -
 .../hibernate/internal/EntityWrapper.java       |   89 -
 .../hibernate/internal/GemFireBaseRegion.java   |  166 -
 .../internal/GemFireCollectionRegion.java       |   59 -
 .../hibernate/internal/GemFireEntityRegion.java |  187 -
 .../internal/GemFireQueryResultsRegion.java     |  113 -
 .../modules/hibernate/internal/KeyWrapper.java  |   92 -
 .../internal/NonStrictReadWriteAccess.java      |   83 -
 .../hibernate/internal/ReadOnlyAccess.java      |   55 -
 .../hibernate/internal/ReadWriteAccess.java     |   36 -
 .../internal/RegionFactoryDelegate.java         |  146 -
 .../hibernate/internal/TransactionalAccess.java |   25 -
 .../java/org/apache/geode/modules/Event.java    |   67 -
 .../geode/modules/HibernateJUnitTest.java       |  416 ---
 .../java/org/apache/geode/modules/Owner.java    |  185 -
 .../java/org/apache/geode/modules/Person.java   |   72 -
 .../org/apache/geode/modules/SecondVMTest.java  |   95 -
 .../src/test/resources/log4j.properties         |   16 -
 .../org/apache/geode/modules/Event.hbm.xml      |   32 -
 .../org/apache/geode/modules/Person.hbm.xml     |   36 -
 geode-book/.gitignore                           |    2 +
 geode-book/Gemfile                              |   22 +
 geode-book/Gemfile.lock                         |  203 ++
 geode-book/README.md                            |   60 +
 geode-book/config.yml                           |   37 +
 .../master_middleman/source/images/favicon.ico  |  Bin 0 -> 1317 bytes
 .../master_middleman/source/index.html.erb      |   23 +
 .../master_middleman/source/javascripts/book.js |   31 +
 .../source/javascripts/waypoints/context.js     |  315 ++
 .../source/javascripts/waypoints/group.js       |  120 +
 .../javascripts/waypoints/noframeworkAdapter.js |  228 ++
 .../source/javascripts/waypoints/sticky.js      |   78 +
 .../source/javascripts/waypoints/waypoint.js    |  175 +
 .../source/layouts/_book-footer.erb             |   23 +
 .../master_middleman/source/layouts/_title.erb  |   21 +
 .../source/stylesheets/book-styles.css.scss     |   18 +
 .../stylesheets/partials/_book-base-values.scss |   14 +
 .../source/stylesheets/partials/_book-vars.scss |   33 +
 .../source/subnavs/geode-subnav.erb             | 3098 ++++++++++++++++
 geode-book/redirects.rb                         |   18 +
 geode-docs/.gitignore                           |    6 +
 geode-docs/CONTRIBUTE.md                        |   33 +
 geode-docs/about_geode.html.md.erb              |   26 +
 geode-docs/basic_config/book_intro.html.md.erb  |   40 +
 .../chapter_overview.html.md.erb                |   40 +
 ...uted_system_member_configuration.html.md.erb |   51 +
 .../config_concepts/local_vs_remote.html.md.erb |   29 +
 .../chapter_overview.html.md.erb                |   32 +
 .../managing_data_entries.html.md.erb           |  146 +
 .../using_custom_classes.html.md.erb            |   51 +
 .../data_regions/chapter_overview.html.md.erb   |   65 +
 .../create_a_region_with_API.html.md.erb        |   80 +
 .../create_a_region_with_cacheXML.html.md.erb   |   85 +
 .../create_a_region_with_gfsh.html.md.erb       |   55 +
 .../creating_custom_attributes.html.md.erb      |   64 +
 .../managing_data_regions.html.md.erb           |  222 ++
 .../managing_region_attributes.html.md.erb      |  113 +
 .../new_region_existing_data.html.md.erb        |   28 +
 .../data_regions/region_naming.html.md.erb      |   31 +
 .../data_regions/region_shortcuts.html.md.erb   |  115 +
 .../store_retrieve_region_shortcuts.html.md.erb |   77 +
 .../setting_distributed_properties.html.md.erb  |   81 +
 .../the_cache/chapter_overview.html.md.erb      |   48 +
 .../intro_cache_management.html.md.erb          |   96 +
 .../managing_a_client_cache.html.md.erb         |   84 +
 .../managing_a_multiuser_cache.html.md.erb      |   66 +
 .../managing_a_peer_server_cache.html.md.erb    |   81 +
 .../managing_a_secure_cache.html.md.erb         |   67 +
 .../setting_cache_initializer.html.md.erb       |   76 +
 .../setting_cache_properties.html.md.erb        |   39 +
 .../configuring/chapter_overview.html.md.erb    |   84 +
 .../deploying_application_jars.html.md.erb      |  131 +
 .../cluster_config/export-import.html.md.erb    |   56 +
 .../gfsh_config_troubleshooting.html.md.erb     |   75 +
 .../gfsh_load_from_shared_dir.html.md.erb       |   44 +
 .../cluster_config/gfsh_persist.html.md.erb     |  125 +
 .../cluster_config/gfsh_remote.html.md.erb      |   78 +
 .../persisting_configurations.html.md.erb       |  337 ++
 .../using_member_groups.html.md.erb             |   44 +
 .../running/change_file_spec.html.md.erb        |   57 +
 .../running/default_file_specs.html.md.erb      |   76 +
 .../deploy_config_files_intro.html.md.erb       |   34 +
 .../running/deploying_config_files.html.md.erb  |   45 +
 .../deploying_config_jar_files.html.md.erb      |   52 +
 .../running/firewall_ports_config.html.md.erb   |   32 +
 .../running/firewalls_connections.html.md.erb   |   35 +
 .../running/firewalls_multisite.html.md.erb     |   87 +
 .../running/firewalls_ports.html.md.erb         |  246 ++
 .../running/managing_output_files.html.md.erb   |   33 +
 .../running/running_the_cacheserver.html.md.erb |  199 +
 .../running/running_the_locator.html.md.erb     |  257 ++
 .../starting_up_shutting_down.html.md.erb       |  146 +
 geode-docs/developing/book_intro.html.md.erb    |   74 +
 .../chapter_overview.html.md.erb                |   38 +
 .../continuous_querying_whats_next.html.md.erb  |   88 +
 .../how_continuous_querying_works.html.md.erb   |   98 +
 ...implementing_continuous_querying.html.md.erb |  202 ++
 .../PDX_Serialization_Features.html.md.erb      |   40 +
 .../auto_serialization.html.md.erb              |  141 +
 ...ation_with_class_pattern_strings.html.md.erb |   85 +
 .../chapter_overview.html.md.erb                |   40 +
 .../data_serialization_options.html.md.erb      |   68 +
 .../extending_the_autoserializer.html.md.erb    |  123 +
 .../gemfire_data_serialization.html.md.erb      |   52 +
 .../gemfire_pdx_serialization.html.md.erb       |   64 +
 .../java_serialization.html.md.erb              |   29 +
 .../jsonformatter_pdxinstances.html.md.erb      |   46 +
 .../persist_pdx_metadata_to_disk.html.md.erb    |   53 +
 .../program_application_for_pdx.html.md.erb     |  107 +
 .../use_pdx_high_level_steps.html.md.erb        |   49 +
 .../use_pdx_serializable.html.md.erb            |  115 +
 .../use_pdx_serializer.html.md.erb              |  145 +
 .../using_PdxInstanceFactory.html.md.erb        |   51 +
 .../using_pdx_region_entry_keys.html.md.erb     |   31 +
 .../chapter_overview.html.md.erb                |   48 +
 .../delta_propagation_example.html.md.erb       |  130 +
 .../delta_propagation_properties.html.md.erb    |   96 +
 .../errors_in_delta_propagation.html.md.erb     |   35 +
 .../how_delta_propagation_works.html.md.erb     |   69 +
 .../implementing_delta_propagation.html.md.erb  |   41 +
 .../when_to_use_delta_prop.html.md.erb          |   34 +
 .../chapter_overview.html.md.erb                |   44 +
 .../choosing_level_of_dist.html.md.erb          |   36 +
 .../how_distribution_works.html.md.erb          |   48 +
 .../how_region_versioning_works.html.md.erb     |  127 +
 .../how_region_versioning_works_wan.html.md.erb |   42 +
 .../how_replication_works.html.md.erb           |   51 +
 .../locking_in_global_regions.html.md.erb       |  109 +
 .../managing_distributed_regions.html.md.erb    |   64 +
 .../region_entry_versions.html.md.erb           |   51 +
 .../cache_event_handler_examples.html.md.erb    |  155 +
 .../events/chapter_overview.html.md.erb         |   44 +
 ...re_client_server_event_messaging.html.md.erb |   81 +
 ...figure_multisite_event_messaging.html.md.erb |   39 +
 .../configure_p2p_event_messaging.html.md.erb   |   50 +
 ...uring_gateway_concurrency_levels.html.md.erb |  158 +
 ..._highly_available_gateway_queues.html.md.erb |  119 +
 ...iguring_highly_available_servers.html.md.erb |   55 +
 ...conflate_multisite_gateway_queue.html.md.erb |  130 +
 ...nflate_server_subscription_queue.html.md.erb |   53 +
 .../events/event_handler_overview.html.md.erb   |   40 +
 .../filtering_multisite_events.html.md.erb      |  126 +
 .../ha_event_messaging_whats_next.html.md.erb   |   95 +
 .../events/how_cache_events_work.html.md.erb    |   71 +
 ...client_server_distribution_works.html.md.erb |  137 +
 .../events/how_events_work.html.md.erb          |  111 +
 ...how_multisite_distribution_works.html.md.erb |   68 +
 ...mplementing_cache_event_handlers.html.md.erb |  153 +
 ..._durable_client_server_messaging.html.md.erb |  199 +
 ...nting_write_behind_event_handler.html.md.erb |  245 ++
 ...t_server_subscription_queue_size.html.md.erb |   74 +
 ...ist_of_event_handlers_and_events.html.md.erb |  181 +
 .../resolving_multisite_conflicts.html.md.erb   |   80 +
 ..._client_message_tracking_timeout.html.md.erb |   43 +
 ...ne_client_server_event_messaging.html.md.erb |   37 +
 ..._callbacks_that_modify_the_cache.html.md.erb |   65 +
 .../eviction/chapter_overview.html.md.erb       |   34 +
 .../configuring_data_eviction.html.md.erb       |   88 +
 .../eviction/how_eviction_works.html.md.erb     |   36 +
 .../expiration/chapter_overview.html.md.erb     |   32 +
 .../configuring_data_expiration.html.md.erb     |   83 +
 .../expiration/how_expiration_works.html.md.erb |   70 +
 .../function_exec/chapter_overview.html.md.erb  |   36 +
 .../function_execution.html.md.erb              |  254 ++
 .../how_function_execution_works.html.md.erb    |  131 +
 .../chapter_overview.html.md.erb                |   40 +
 .../chapter_overview.html.md.erb                |   34 +
 .../how_data_loaders_work.html.md.erb           |   52 +
 .../implementing_data_loaders.html.md.erb       |   88 +
 .../sync_outside_data.html.md.erb               |   36 +
 .../chapter_overview.html.md.erb                |   60 +
 .../checking_region_redundancy.html.md.erb      |   55 +
 ...locating_partitioned_region_data.html.md.erb |  128 +
 .../configure_pr_single_hop.html.md.erb         |   39 +
 .../configuring_bucket_for_pr.html.md.erb       |   70 +
 .../configuring_ha_for_pr.html.md.erb           |   58 +
 ...partitioning_and_data_colocation.html.md.erb |   58 +
 .../how_partitioning_works.html.md.erb          |   58 +
 .../how_pr_ha_works.html.md.erb                 |   61 +
 .../how_pr_single_hop_works.html.md.erb         |   48 +
 .../join_query_partitioned_regions.html.md.erb  |   97 +
 .../managing_partitioned_regions.html.md.erb    |   42 +
 .../moving_partitioned_data.html.md.erb         |   76 +
 ...partitioning_and_data_colocation.html.md.erb |   36 +
 .../overview_how_pr_ha_works.html.md.erb        |   32 +
 ...overview_how_pr_single_hop_works.html.md.erb |   32 +
 .../rebalancing_pr_data.html.md.erb             |  106 +
 .../set_crash_redundancy_recovery.html.md.erb   |   60 +
 .../set_enforce_unique_host.html.md.erb         |   34 +
 .../set_join_redundancy_recovery.html.md.erb    |   66 +
 .../set_pr_redundancy.html.md.erb               |   51 +
 .../set_redundancy_zones.html.md.erb            |   40 +
 ...using_custom_partition_resolvers.html.md.erb |  221 ++
 .../advanced_querying.html.md.erb               |   48 +
 .../case_sensitivity.html.md.erb                |   36 +
 .../query_additional/literals.html.md.erb       |   82 +
 .../query_additional/operators.html.md.erb      |   57 +
 .../order_by_on_partitioned_regions.html.md.erb |   35 +
 ...tioned_region_key_or_field_value.html.md.erb |   83 +
 ...tioned_region_query_restrictions.html.md.erb |   50 +
 .../query_debugging.html.md.erb                 |  104 +
 .../query_language_features.html.md.erb         |   41 +
 .../query_on_a_single_node.html.md.erb          |  172 +
 .../supported_keywords.html.md.erb              |   48 +
 .../using_query_bind_parameters.html.md.erb     |   65 +
 .../create_multiple_indexes.html.md.erb         |   78 +
 .../query_index/creating_an_index.html.md.erb   |  111 +
 .../creating_hash_indexes.html.md.erb           |   68 +
 .../creating_key_indexes.html.md.erb            |   66 +
 .../creating_map_indexes.html.md.erb            |   61 +
 .../query_index/index_samples.html.md.erb       |   80 +
 ...indexes_on_single_region_queries.html.md.erb |   50 +
 .../indexes_with_overflow_regions.html.md.erb   |   58 +
 .../query_index/indexing_guidelines.html.md.erb |   41 +
 .../query_index/maintaining_indexes.html.md.erb |   69 +
 .../query_index/query_index.html.md.erb         |   79 +
 .../query_index/query_index_hints.html.md.erb   |   40 +
 ...ng_indexes_with_equijoin_queries.html.md.erb |   69 +
 ...quijoin_queries_multiple_regions.html.md.erb |   79 +
 .../query_select/aggregates.html.md.erb         |  109 +
 .../query_select/the_from_clause.html.md.erb    |  103 +
 .../the_import_statement.html.md.erb            |   31 +
 .../the_select_statement.html.md.erb            |  220 ++
 .../query_select/the_where_clause.html.md.erb   |  353 ++
 .../chapter_overview.html.md.erb                |   38 +
 .../comments_in_query_strings.html.md.erb       |   30 +
 .../monitor_queries_for_low_memory.html.md.erb  |   41 +
 .../oql_compared_to_sql.html.md.erb             |   31 +
 .../performance_considerations.html.md.erb      |   33 +
 .../querying_basics/query_basics.html.md.erb    |   57 +
 ...query_grammar_and_reserved_words.html.md.erb |  163 +
 .../querying_partitioned_regions.html.md.erb    |   41 +
 .../querying_basics/reserved_words.html.md.erb  |  129 +
 ...ictions_and_unsupported_features.html.md.erb |   35 +
 .../querying_basics/running_a_query.html.md.erb |   87 +
 .../supported_character_sets.html.md.erb        |   24 +
 .../what_is_a_query_string.html.md.erb          |   50 +
 .../region_options/chapter_overview.html.md.erb |   40 +
 .../data_hosts_and_accessors.html.md.erb        |   31 +
 .../dynamic_region_creation.html.md.erb         |  197 +
 .../region_options/region_types.html.md.erb     |  146 +
 .../storage_distribution_options.html.md.erb    |   40 +
 .../chapter_overview.html.md.erb                |   41 +
 .../how_persist_overflow_work.html.md.erb       |   64 +
 .../overflow_config_examples.html.md.erb        |   53 +
 .../storing_data_on_disk.html.md.erb            |   79 +
 .../transactions/JTA_transactions.html.md.erb   |  243 ++
 .../transactions/about_transactions.html.md.erb |   47 +
 .../cache_plugins_with_jta.html.md.erb          |   28 +
 .../cache_transaction_performance.html.md.erb   |   29 +
 .../transactions/cache_transactions.html.md.erb |   51 +
 ...ache_transactions_by_region_type.html.md.erb |  156 +
 .../transactions/chapter_overview.html.md.erb   |   48 +
 .../client_server_transactions.html.md.erb      |   55 +
 ...guring_db_connections_using_JNDI.html.md.erb |  330 ++
 ...data_location_cache_transactions.html.md.erb |   32 +
 .../how_cache_transactions_work.html.md.erb     |   73 +
 .../jca_adapter_example.html.md.erb             |   51 +
 ...onitor_troubleshoot_transactions.html.md.erb |   56 +
 .../run_a_cache_transaction.html.md.erb         |   90 +
 ...che_transaction_with_external_db.html.md.erb |   54 +
 .../transaction_coding_examples.html.md.erb     |   44 +
 .../transaction_event_management.html.md.erb    |   56 +
 .../transaction_jta_gemfire_example.html.md.erb |   48 +
 .../transaction_semantics.html.md.erb           |   54 +
 ...ansaction_suspend_resume_example.html.md.erb |   38 +
 ...ctional_and_nontransactional_ops.html.md.erb |  117 +
 .../transactional_function_example.html.md.erb  |   72 +
 .../transactions_overview.html.md.erb           |   67 +
 .../transactions/turning_off_jta.html.md.erb    |   40 +
 .../working_with_transactions.html.md.erb       |  229 ++
 .../15_minute_quickstart_gfsh.html.md.erb       |  516 +++
 .../getting_started/book_intro.html.md.erb      |   40 +
 .../getting_started/geode_overview.html.md.erb  |   37 +
 .../installation/install_standalone.html.md.erb |  138 +
 .../getting_started/product_intro.html.md.erb   |  101 +
 .../querying_quick_reference.html.md.erb        |  711 ++++
 .../getting_started/setup_classpath.html.md.erb |  122 +
 .../host_machine.html.md.erb                    |   49 +
 .../uninstall_gemfire.html.md.erb               |   26 +
 .../images/ClientServerAdvancedTopics-5.gif     |  Bin 0 -> 10798 bytes
 .../images/ClientServerAdvancedTopics-6.gif     |  Bin 0 -> 12056 bytes
 .../images/ClientServerAdvancedTopics-7.gif     |  Bin 0 -> 5000 bytes
 geode-docs/images/ContinuousQuerying-1.gif      |  Bin 0 -> 6955 bytes
 geode-docs/images/ContinuousQuerying-3.gif      |  Bin 0 -> 9141 bytes
 geode-docs/images/DataManagement-9.png          |  Bin 0 -> 48188 bytes
 geode-docs/images/DeltaPropagation-1.gif        |  Bin 0 -> 7593 bytes
 geode-docs/images/DeltaPropagation-3.gif        |  Bin 0 -> 6843 bytes
 geode-docs/images/Events-2.gif                  |  Bin 0 -> 8793 bytes
 geode-docs/images/Events-3.gif                  |  Bin 0 -> 6432 bytes
 geode-docs/images/FuncExecOnMembers.png         |  Bin 0 -> 64959 bytes
 .../images/FuncExecOnRegionHAWithFilter.png     |  Bin 0 -> 80141 bytes
 .../images/FuncExecOnRegionNoMetadata.png       |  Bin 0 -> 70177 bytes
 .../images/FuncExecOnRegionPeersWithFilter.png  |  Bin 0 -> 86576 bytes
 .../images/FuncExecOnRegionWithFilter.png       |  Bin 0 -> 60773 bytes
 .../images/FuncExecOnRegionWithMetadata.png     |  Bin 0 -> 59576 bytes
 geode-docs/images/FuncExecOnServers.png         |  Bin 0 -> 57470 bytes
 geode-docs/images/Gemcached.png                 |  Bin 0 -> 87366 bytes
 geode-docs/images/JConsole.png                  |  Bin 0 -> 64272 bytes
 geode-docs/images/MultiSite-4.gif               |  Bin 0 -> 4991 bytes
 .../images/MultisiteConcurrency_WAN_Gateway.png |  Bin 0 -> 103701 bytes
 geode-docs/images/SQLite_Persistence_Mgr.png    |  Bin 0 -> 58388 bytes
 geode-docs/images/Transaction-simple.png        |  Bin 0 -> 119831 bytes
 geode-docs/images/consistent_multisite.png      |  Bin 0 -> 39442 bytes
 geode-docs/images/diskStores-1.gif              |  Bin 0 -> 7292 bytes
 geode-docs/images/diskStores-3.gif              |  Bin 0 -> 5898 bytes
 geode-docs/images/jconsole_mbeans.png           |  Bin 0 -> 211394 bytes
 geode-docs/images/jvisualvm.png                 |  Bin 0 -> 90153 bytes
 geode-docs/images/logging-1.gif                 |  Bin 0 -> 2778 bytes
 geode-docs/images/member_view_list.png          |  Bin 0 -> 107811 bytes
 .../images/multisite-topology-avoid-3.png       |  Bin 0 -> 9794 bytes
 .../images/multisite-topology-hybrid-1.png      |  Bin 0 -> 7627 bytes
 .../images/multisite-topology-hybrid-2.png      |  Bin 0 -> 7631 bytes
 .../images/multisite-topology-parallel.png      |  Bin 0 -> 7838 bytes
 geode-docs/images/multisite-topology-serial.png |  Bin 0 -> 6886 bytes
 geode-docs/images/parallel_sender.png           |  Bin 0 -> 36089 bytes
 geode-docs/images/pulse-data-browser.png        |  Bin 0 -> 99987 bytes
 geode-docs/images/pulse-region-detail.png       |  Bin 0 -> 61149 bytes
 geode-docs/images/pulse_alerts_widget.png       |  Bin 0 -> 137883 bytes
 geode-docs/images/pulse_cluster_view.png        |  Bin 0 -> 154531 bytes
 geode-docs/images/pulse_data_view.png           |  Bin 0 -> 138140 bytes
 geode-docs/images/pulse_locator.png             |  Bin 0 -> 228513 bytes
 geode-docs/images/pulse_member_view.png         |  Bin 0 -> 132309 bytes
 .../images/rest_example_java_packages.png       |  Bin 0 -> 30206 bytes
 geode-docs/images/security-1.gif                |  Bin 0 -> 8343 bytes
 geode-docs/images/security-3.gif                |  Bin 0 -> 8287 bytes
 geode-docs/images/security-4.gif                |  Bin 0 -> 7028 bytes
 geode-docs/images/security-5.gif                |  Bin 0 -> 7408 bytes
 geode-docs/images/serial_sender.png             |  Bin 0 -> 32385 bytes
 geode-docs/images/statistics-1.gif              |  Bin 0 -> 8644 bytes
 geode-docs/images/swagger_home.png              |  Bin 0 -> 187378 bytes
 geode-docs/images/swagger_post_region.png       |  Bin 0 -> 170963 bytes
 .../images/swagger_post_region_response.png     |  Bin 0 -> 176783 bytes
 geode-docs/images/swagger_v1.png                |  Bin 0 -> 149806 bytes
 geode-docs/images/swagger_v1_response.png       |  Bin 0 -> 143134 bytes
 geode-docs/images/transactions-client-1.png     |  Bin 0 -> 113816 bytes
 geode-docs/images/transactions_jca_adapter.png  |  Bin 0 -> 104775 bytes
 geode-docs/images/transactions_jta.png          |  Bin 0 -> 104780 bytes
 .../images/transactions_jta_app_server.png      |  Bin 0 -> 91885 bytes
 geode-docs/images_svg/JMX_Architecture.svg      |    3 +
 geode-docs/images_svg/MBeans.svg                |    3 +
 .../async_system_queue_conflation.svg           |    3 +
 geode-docs/images_svg/cache_data_loader.svg     |    3 +
 geode-docs/images_svg/cache_data_loader_2.svg   |    3 +
 .../images_svg/client_server_deployment.svg     |    3 +
 .../images_svg/client_server_event_dist.svg     |    3 +
 .../client_server_message_tracking.svg          |    3 +
 geode-docs/images_svg/cluster-group-config.svg  |    3 +
 .../images_svg/cluster_config_overview.svg      |    3 +
 .../colocated_partitioned_regions.svg           |    3 +
 geode-docs/images_svg/cs_connection_pool.svg    |    3 +
 geode-docs/images_svg/cs_locator_discovery.svg  |    3 +
 geode-docs/images_svg/cs_subscriptions.svg      |    3 +
 geode-docs/images_svg/cs_topology.svg           |    3 +
 geode-docs/images_svg/custom_partitioned.svg    |    3 +
 geode-docs/images_svg/developing_overflow.svg   |    3 +
 .../images_svg/developing_persistence.svg       |    3 +
 .../developing_persistence_and_overflow.svg     |    3 +
 geode-docs/images_svg/distributed_how_1.svg     |    3 +
 geode-docs/images_svg/distributed_how_2.svg     |    3 +
 geode-docs/images_svg/distributed_how_3.svg     |    3 +
 geode-docs/images_svg/distributed_preload.svg   |    3 +
 geode-docs/images_svg/distributed_replica.svg   |    3 +
 .../images_svg/distributed_replica_preload.svg  |    3 +
 geode-docs/images_svg/expiration.svg            |    3 +
 .../images_svg/how_partitioning_works_1.svg     |    3 +
 .../images_svg/how_partitioning_works_2.svg     |    3 +
 .../images_svg/http_module_cs_with_locator.svg  |    3 +
 .../images_svg/http_module_p2p_with_locator.svg |    3 +
 geode-docs/images_svg/locator_discovery.svg     |    3 +
 geode-docs/images_svg/member_severe_alert.svg   |    3 +
 .../images_svg/network_partition_scenario.svg   |    3 +
 geode-docs/images_svg/p2p_topology.svg          |    3 +
 geode-docs/images_svg/partitioned_data_HA.svg   |    3 +
 .../images_svg/partitioned_data_buckets_1.svg   |    3 +
 .../images_svg/partitioned_data_buckets_2.svg   |    3 +
 .../images_svg/region_entry_versions_1.svg      |    3 +
 .../images_svg/region_entry_versions_2.svg      |    3 +
 .../images_svg/region_entry_versions_3.svg      |    3 +
 .../images_svg/server_client_event_dist.svg     |    3 +
 geode-docs/images_svg/server_discovery.svg      |    3 +
 geode-docs/images_svg/server_grouping.svg       |    3 +
 .../images_svg/transactions_partitioned_1.svg   |    3 +
 .../images_svg/transactions_partitioned_2.svg   |    3 +
 .../images_svg/transactions_replicate_1.svg     |    3 +
 .../images_svg/transactions_replicate_2.svg     |    3 +
 .../images_svg/transactions_replicate_3.svg     |    3 +
 .../images_svg/transactions_replicate_4.svg     |    3 +
 .../transactions_replicate_local_1.svg          |    3 +
 .../transactions_replicate_no_ack_1.svg         |    3 +
 .../transactions_replicate_no_ack_2.svg         |    3 +
 .../images_svg/tune_cs_event_messaging.svg      |    3 +
 .../unbalanced_network_capacity_probs.svg       |    3 +
 .../autoreconnect/member-reconnect.html.md.erb  |   59 +
 geode-docs/managing/book_intro.html.md.erb      |   69 +
 .../chapter_overview.html.md.erb                |   51 +
 .../exporting_a_snapshot.html.md.erb            |   74 +
 .../filtering_snapshot_entries.html.md.erb      |   46 +
 .../importing_a_snapshot.html.md.erb            |   81 +
 .../read_snapshots_programmatically.html.md.erb |   43 +
 ...using_cache_and_region_snapshots.html.md.erb |   40 +
 .../backup_restore_disk_store.html.md.erb       |  189 +
 .../disk_storage/chapter_overview.html.md.erb   |   56 +
 .../compacting_disk_stores.html.md.erb          |  133 +
 .../disk_free_space_monitoring.html.md.erb      |   57 +
 .../disk_store_configuration_params.html.md.erb |  123 +
 .../file_names_and_extensions.html.md.erb       |   96 +
 .../handling_missing_disk_stores.html.md.erb    |   72 +
 .../how_disk_stores_work.html.md.erb            |   60 +
 ...eping_offline_disk_store_in_sync.html.md.erb |   65 +
 .../managing_disk_buffer_flushes.html.md.erb    |   44 +
 .../managing_disk_stores.html.md.erb            |   42 +
 .../managing_disk_stores_cmds.html.md.erb       |   62 +
 .../disk_storage/operation_logs.html.md.erb     |   69 +
 ...ize_availability_and_performance.html.md.erb |   32 +
 .../overview_using_disk_stores.html.md.erb      |   36 +
 ...starting_system_with_disk_stores.html.md.erb |  128 +
 .../disk_storage/using_disk_stores.html.md.erb  |  216 ++
 .../using_the_default_disk_store.html.md.erb    |   70 +
 .../validating_disk_store.html.md.erb           |   37 +
 .../heap_use/heap_management.html.md.erb        |  225 ++
 .../managing/heap_use/lock_memory.html.md.erb   |   52 +
 .../heap_use/off_heap_management.html.md.erb    |  209 ++
 .../logging/configuring_log4j2.html.md.erb      |   68 +
 .../logging/how_logging_works.html.md.erb       |   39 +
 .../logging/log_collection_utility.html.md.erb  |   71 +
 geode-docs/managing/logging/logging.html.md.erb |   48 +
 .../logging/logging_categories.html.md.erb      |  247 ++
 .../logging/logging_whats_next.html.md.erb      |   56 +
 .../logging/setting_up_logging.html.md.erb      |   76 +
 .../configuring_rmi_connector.html.md.erb       |   34 +
 .../gfsh_and_management_api.html.md.erb         |   69 +
 .../management/jmx_manager_node.html.md.erb     |   37 +
 .../jmx_manager_operations.html.md.erb          |  212 ++
 .../list_of_mbean_notifications.html.md.erb     |   82 +
 .../management/list_of_mbeans.html.md.erb       |   38 +
 .../management/list_of_mbeans_full.html.md.erb  |  227 ++
 .../management_and_monitoring.html.md.erb       |   52 +
 ...nagement_and_monitoring_features.html.md.erb |   41 +
 .../management_system_overview.html.md.erb      |  112 +
 .../management/mbean_architecture.html.md.erb   |   76 +
 .../management/mbean_notifications.html.md.erb  |   34 +
 .../management/mbeans_jconsole.html.md.erb      |   53 +
 .../managing/management/mm_overview.html.md.erb |   94 +
 ...tification_federation_and_alerts.html.md.erb |   54 +
 .../management/programming_example.html.md.erb  |  237 ++
 .../monitor_tune/cache_consistency.html.md.erb  |   80 +
 .../monitor_tune/chapter_overview.html.md.erb   |   60 +
 .../gemfire_performance_on_vsphere.html.md.erb  |   64 +
 ...erformance_on_vsphere_guidelines.html.md.erb |  136 +
 .../multicast_communication.html.md.erb         |   46 +
 ...ication_configuring_speed_limits.html.md.erb |   51 +
 ...unication_provisioning_bandwidth.html.md.erb |   60 +
 ...unication_runtime_considerations.html.md.erb |   47 +
 ...n_testing_multicast_speed_limits.html.md.erb |  145 +
 ...st_communication_troubleshooting.html.md.erb |   38 +
 .../performance_controls.html.md.erb            |   46 +
 ..._controls_controlling_socket_use.html.md.erb |   51 +
 ...ance_controls_data_serialization.html.md.erb |   26 +
 ...e_controls_increasing_cache_hits.html.md.erb |   28 +
 ...controls_managing_slow_receivers.html.md.erb |   73 +
 ..._controls_setting_cache_timeouts.html.md.erb |   41 +
 .../monitor_tune/slow_messages.html.md.erb      |   38 +
 .../monitor_tune/slow_receivers.html.md.erb     |   34 +
 .../slow_receivers_managing.html.md.erb         |  116 +
 ...ow_receivers_preventing_problems.html.md.erb |   45 +
 .../socket_communication.html.md.erb            |   48 +
 ...cation_ephemeral_tcp_port_limits.html.md.erb |   58 +
 ...ommunication_have_enough_sockets.html.md.erb |  185 +
 ...tion_setting_socket_buffer_sizes.html.md.erb |  144 +
 ...ion_tcpip_p2p_handshake_timeouts.html.md.erb |   38 +
 .../socket_tcp_keepalive.html.md.erb            |   31 +
 .../sockets_and_gateways.html.md.erb            |  122 +
 .../system_member_performance.html.md.erb       |   42 +
 ...mance_connection_thread_settings.html.md.erb |   32 +
 ...rmance_distributed_system_member.html.md.erb |   28 +
 ...ystem_member_performance_garbage.html.md.erb |   53 +
 ...ber_performance_jvm_mem_settings.html.md.erb |   78 +
 .../monitor_tune/udp_communication.html.md.erb  |   50 +
 .../chapter_overview.html.md.erb                |   48 +
 .../failure_detection.html.md.erb               |   62 +
 .../handling_network_partitioning.html.md.erb   |   63 +
 ...rk_partitioning_management_works.html.md.erb |   59 +
 ...ators_lead_members_and_weighting.html.md.erb |   79 +
 .../network_partitioning_scenarios.html.md.erb  |   53 +
 .../preventing_network_partitions.html.md.erb   |   28 +
 .../region_compression.html.md.erb              |  226 ++
 .../authentication_examples.html.md.erb         |   70 +
 .../authentication_overview.html.md.erb         |   43 +
 .../security/authorization_example.html.md.erb  |   70 +
 .../security/authorization_overview.html.md.erb |   34 +
 .../security/chapter_overview.html.md.erb       |   47 +
 .../security/enable_security.html.md.erb        |   73 +
 .../security/encrypting_passwords.html.md.erb   |   49 +
 .../encrypting_with_diffie_helman.html.md.erb   |   66 +
 .../implementing_authentication.html.md.erb     |  142 +
 .../implementing_authorization.html.md.erb      |  265 ++
 .../security/implementing_security.html.md.erb  |   80 +
 .../security/implementing_ssl.html.md.erb       |  226 ++
 .../security/post_processing.html.md.erb        |   67 +
 .../security/properties_file.html.md.erb        |   34 +
 .../security/security-audit.html.md.erb         |   64 +
 .../security_audit_overview.html.md.erb         |   39 +
 .../security/security_intro.html.md.erb         |   38 +
 .../managing/security/ssl_example.html.md.erb   |  105 +
 .../managing/security/ssl_overview.html.md.erb  |   45 +
 .../application_defined_statistics.html.md.erb  |   39 +
 .../statistics/chapter_overview.html.md.erb     |   42 +
 .../statistics/how_statistics_work.html.md.erb  |   34 +
 .../setting_up_statistics.html.md.erb           |  151 +
 ...ient_region_and_entry_statistics.html.md.erb |   42 +
 .../statistics/viewing_statistics.html.md.erb   |   24 +
 .../chapter_overview.html.md.erb                |   60 +
 .../diagnosing_system_probs.html.md.erb         |  437 +++
 ...ent_and_recover_disk_full_errors.html.md.erb |   45 +
 ...ducing_troubleshooting_artifacts.html.md.erb |   92 +
 ...ring_conflicting_data_exceptions.html.md.erb |   75 +
 .../recovering_from_app_crashes.html.md.erb     |   32 +
 .../recovering_from_cs_crashes.html.md.erb      |   54 +
 .../recovering_from_machine_crashes.html.md.erb |   62 +
 .../recovering_from_network_outages.html.md.erb |   73 +
 .../recovering_from_p2p_crashes.html.md.erb     |  231 ++
 .../system_failure_and_recovery.html.md.erb     |  283 ++
 geode-docs/prereq_and_install.html.md.erb       |   40 +
 geode-docs/reference/book_intro.html.md.erb     |   48 +
 .../statistics/statistics_list.html.md.erb      | 1293 +++++++
 .../topics/cache-elements-list.html.md.erb      |  185 +
 .../reference/topics/cache_xml.html.md.erb      | 3107 ++++++++++++++++
 .../chapter_overview_cache_xml.html.md.erb      |   47 +
 ...chapter_overview_regionshortcuts.html.md.erb |  107 +
 .../client-cache-elements-list.html.md.erb      |  137 +
 .../reference/topics/client-cache.html.md.erb   | 2683 ++++++++++++++
 .../reference/topics/elements_ref.html.md.erb   |  117 +
 .../topics/gemfire_properties.html.md.erb       |  640 ++++
 .../reference/topics/gfe_cache_xml.html.md.erb  | 3414 ++++++++++++++++++
 .../reference/topics/glossary.html.md.erb       |  618 ++++
 ...handling_exceptions_and_failures.html.md.erb |   32 +
 ...mory_requirements_for_cache_data.html.md.erb |  301 ++
 ...on-ascii_strings_in_config_files.html.md.erb |   43 +
 .../region_shortcuts_reference.html.md.erb      | 1499 ++++++++
 .../topics/region_shortcuts_table.html.md.erb   |  519 +++
 geode-docs/rest_apps/book_intro.html.md.erb     |   59 +
 .../rest_apps/chapter_overview.html.md.erb      |   32 +
 .../rest_apps/delete_all_data.html.md.erb       |   56 +
 .../rest_apps/delete_data_for_key.html.md.erb   |   56 +
 .../delete_data_for_multiple_keys.html.md.erb   |   56 +
 .../rest_apps/delete_named_query.html.md.erb    |   60 +
 .../rest_apps/develop_rest_apps.html.md.erb     |  683 ++++
 .../get_execute_adhoc_query.html.md.erb         |  120 +
 geode-docs/rest_apps/get_functions.html.md.erb  |   67 +
 geode-docs/rest_apps/get_queries.html.md.erb    |   72 +
 .../rest_apps/get_region_data.html.md.erb       |  132 +
 ...et_region_data_for_multiple_keys.html.md.erb |  238 ++
 .../rest_apps/get_region_key_data.html.md.erb   |   87 +
 .../rest_apps/get_region_keys.html.md.erb       |   67 +
 geode-docs/rest_apps/get_regions.html.md.erb    |   95 +
 geode-docs/rest_apps/get_servers.html.md.erb    |   64 +
 .../rest_apps/head_region_size.html.md.erb      |   62 +
 geode-docs/rest_apps/ping_service.html.md.erb   |   54 +
 .../rest_apps/post_create_query.html.md.erb     |  106 +
 .../post_execute_functions.html.md.erb          |  142 +
 .../rest_apps/post_execute_query.html.md.erb    |  172 +
 .../rest_apps/post_if_absent_data.html.md.erb   |  144 +
 .../put_multiple_values_for_keys.html.md.erb    |  103 +
 .../rest_apps/put_replace_data.html.md.erb      |   83 +
 .../rest_apps/put_update_cas_data.html.md.erb   |  215 ++
 .../rest_apps/put_update_data.html.md.erb       |   82 +
 .../rest_apps/put_update_query.html.md.erb      |   85 +
 geode-docs/rest_apps/rest_admin.html.md.erb     |   32 +
 .../rest_apps/rest_api_reference.html.md.erb    |   43 +
 geode-docs/rest_apps/rest_examples.html.md.erb  |  708 ++++
 geode-docs/rest_apps/rest_functions.html.md.erb |   32 +
 geode-docs/rest_apps/rest_prereqs.html.md.erb   |   37 +
 geode-docs/rest_apps/rest_queries.html.md.erb   |   48 +
 geode-docs/rest_apps/rest_regions.html.md.erb   |   82 +
 geode-docs/rest_apps/setup_config.html.md.erb   |  179 +
 .../rest_apps/troubleshooting.html.md.erb       |  169 +
 geode-docs/rest_apps/using_swagger.html.md.erb  |   71 +
 geode-docs/tools_modules/book_intro.html.md.erb |   47 +
 .../gemcached/about_gemcached.html.md.erb       |   46 +
 .../gemcached/advantages.html.md.erb            |   36 +
 .../gemcached/chapter_overview.html.md.erb      |   40 +
 .../gemcached/deploying_gemcached.html.md.erb   |   97 +
 .../tools_modules/gfsh/about_gfsh.html.md.erb   |   42 +
 .../gfsh/cache_xml_2_gfsh.html.md.erb           |  105 +
 .../gfsh/chapter_overview.html.md.erb           |   66 +
 .../gfsh/command-pages/alter.html.md.erb        |  520 +++
 .../gfsh/command-pages/backup.html.md.erb       |   85 +
 .../gfsh/command-pages/change.html.md.erb       |   98 +
 .../gfsh/command-pages/clear.html.md.erb        |   50 +
 .../gfsh/command-pages/close.html.md.erb        |  141 +
 .../gfsh/command-pages/compact.html.md.erb      |  114 +
 .../gfsh/command-pages/configure.html.md.erb    |   85 +
 .../gfsh/command-pages/connect.html.md.erb      |  172 +
 .../gfsh/command-pages/create.html.md.erb       |  954 +++++
 .../gfsh/command-pages/debug.html.md.erb        |   54 +
 .../gfsh/command-pages/define.html.md.erb       |   68 +
 .../gfsh/command-pages/deploy.html.md.erb       |   78 +
 .../gfsh/command-pages/describe.html.md.erb     |  409 +++
 .../gfsh/command-pages/destroy.html.md.erb      |  178 +
 .../gfsh/command-pages/disconnect.html.md.erb   |   56 +
 .../gfsh/command-pages/echo.html.md.erb         |   66 +
 .../gfsh/command-pages/encrypt.html.md.erb      |   57 +
 .../gfsh/command-pages/execute.html.md.erb      |   59 +
 .../gfsh/command-pages/exit.html.md.erb         |   40 +
 .../gfsh/command-pages/export.html.md.erb       |  271 ++
 .../gfsh/command-pages/gc.html.md.erb           |   58 +
 .../gfsh/command-pages/get.html.md.erb          |   67 +
 .../gfsh/command-pages/help.html.md.erb         |   77 +
 .../gfsh/command-pages/hint.html.md.erb         |   78 +
 .../gfsh/command-pages/history.html.md.erb      |   59 +
 .../gfsh/command-pages/import.html.md.erb       |  100 +
 .../gfsh/command-pages/list.html.md.erb         |  474 +++
 .../gfsh/command-pages/load-balance.html.md.erb |   64 +
 .../gfsh/command-pages/locate.html.md.erb       |   72 +
 .../gfsh/command-pages/netstat.html.md.erb      |  139 +
 .../gfsh/command-pages/pause.html.md.erb        |   51 +
 .../gfsh/command-pages/pdx.html.md.erb          |   90 +
 .../gfsh/command-pages/put.html.md.erb          |   78 +
 .../gfsh/command-pages/query.html.md.erb        |   69 +
 .../gfsh/command-pages/rebalance.html.md.erb    |   73 +
 .../gfsh/command-pages/remove.html.md.erb       |   63 +
 .../gfsh/command-pages/resume.html.md.erb       |   51 +
 .../gfsh/command-pages/revoke.html.md.erb       |   65 +
 .../gfsh/command-pages/run.html.md.erb          |  105 +
 .../gfsh/command-pages/set.html.md.erb          |   70 +
 .../gfsh/command-pages/sh.html.md.erb           |   63 +
 .../gfsh/command-pages/show.html.md.erb         |  302 ++
 .../gfsh/command-pages/shutdown.html.md.erb     |   62 +
 .../gfsh/command-pages/sleep.html.md.erb        |   57 +
 .../gfsh/command-pages/start.html.md.erb        |  776 ++++
 .../gfsh/command-pages/status.html.md.erb       |  298 ++
 .../gfsh/command-pages/stop.html.md.erb         |  227 ++
 .../gfsh/command-pages/undeploy.html.md.erb     |   77 +
 .../gfsh/command-pages/validate.html.md.erb     |   48 +
 .../gfsh/command-pages/version.html.md.erb      |   60 +
 .../gfsh/command_scripting.html.md.erb          |   37 +
 .../gfsh/configuring_gfsh.html.md.erb           |  129 +
 .../gfsh/getting_started_gfsh.html.md.erb       |  156 +
 .../gfsh/gfsh_command_index.html.md.erb         |  224 ++
 .../gfsh/gfsh_quick_reference.html.md.erb       |   58 +
 .../gfsh/os_command_line_execution.html.md.erb  |   50 +
 .../gfsh/quick_ref_commands_by_area.html.md.erb |  318 ++
 .../gfsh/starting_gfsh.html.md.erb              |   75 +
 .../tools_modules/gfsh/tour_of_gfsh.html.md.erb |  457 +++
 .../useful_gfsh_shell_variables.html.md.erb     |   72 +
 .../chapter_overview.html.md.erb                |   60 +
 .../common_gemfire_topologies.html.md.erb       |   36 +
 .../http_why_use_gemfire.html.md.erb            |   56 +
 .../interactive_mode_ref.html.md.erb            |  142 +
 .../http_session_mgmt/quick_start.html.md.erb   |  118 +
 .../session_mgmt_tcserver.html.md.erb           |   38 +
 .../session_mgmt_tomcat.html.md.erb             |   38 +
 .../session_mgmt_weblogic.html.md.erb           |   34 +
 .../session_state_log_files.html.md.erb         |  111 +
 .../tc_additional_info.html.md.erb              |   52 +
 .../tc_changing_gf_default_cfg.html.md.erb      |   98 +
 .../tc_installing_the_module.html.md.erb        |   38 +
 .../tc_setting_up_the_module.html.md.erb        |  139 +
 .../tomcat_changing_gf_default_cfg.html.md.erb  |  170 +
 .../tomcat_installing_the_module.html.md.erb    |   38 +
 .../tomcat_setting_up_the_module.html.md.erb    |  120 +
 ...weblogic_changing_gf_default_cfg.html.md.erb |  179 +
 ...gic_common_configuration_changes.html.md.erb |   52 +
 .../weblogic_setting_up_the_module.html.md.erb  |  213 ++
 .../pulse/chapter_overview.html.md.erb          |   49 +
 .../tools_modules/pulse/quickstart.html.md.erb  |  827 +++++
 .../pulse/system_requirements.html.md.erb       |   35 +
 .../tools_modules/redis_adapter.html.md.erb     |   90 +
 .../topologies_and_comm/book_intro.html.md.erb  |   42 +
 .../chapter_overview.html.md.erb                |   52 +
 ...nt_server_example_configurations.html.md.erb |  164 +
 .../client_server_whats_next.html.md.erb        |   56 +
 ...gure_servers_into_logical_groups.html.md.erb |   54 +
 ...etting_up_a_client_server_system.html.md.erb |   87 +
 ...tandard_client_server_deployment.html.md.erb |   35 +
 .../chapter_overview.html.md.erb                |   44 +
 .../multisite_topologies.html.md.erb            |   67 +
 .../setting_up_a_multisite_system.html.md.erb   |  381 ++
 .../chapter_overview.html.md.erb                |   36 +
 .../configuring_peer_member_groups.html.md.erb  |   60 +
 .../setting_up_a_p2p_system.html.md.erb         |   42 +
 .../setting_up_peer_communication.html.md.erb   |   64 +
 .../topology_concepts/IPv4_and_IPv6.html.md.erb |   49 +
 .../chapter_overview.html.md.erb                |   48 +
 .../how_communication_works.html.md.erb         |   62 +
 .../how_member_discovery_works.html.md.erb      |   60 +
 .../how_multisite_systems_work.html.md.erb      |   44 +
 .../how_server_discovery_works.html.md.erb      |   77 +
 ...how_the_pool_manages_connections.html.md.erb |   78 +
 .../member_communication.html.md.erb            |   46 +
 .../multisite_overview.html.md.erb              |  122 +
 .../topology_types.html.md.erb                  |   48 +
 .../using_bind_addresses.html.md.erb            |  112 +
 gradle.properties                               |    2 +-
 gradle/rat.gradle                               |    8 +-
 gradle/sonar.gradle                             |    6 -
 settings.gradle                                 |    1 -
 711 files changed, 69036 insertions(+), 3687 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/40c19179/gradle.properties
----------------------------------------------------------------------



[44/50] [abbrv] incubator-geode git commit: Convert from ManagementTestCase to ManagementTestRule

Posted by kl...@apache.org.
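The hunks below show only part of this refactor. Per the commit title, per-test setup and
teardown move away from an inherited test base class (ManagementTestCase) toward a JUnit 4
rule (ManagementTestRule). As a rough, hypothetical sketch of that general mechanism only
(the ManagementSetupRule name and its before/after bodies are placeholders, not the
project's actual ManagementTestRule API), a rule-based JUnit 4 test has this shape:

    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.ExternalResource;

    public class ExampleRuleBasedTest {

      // An ExternalResource runs before()/after() around each test method,
      // taking over work that setUp()/tearDown() in a shared base class used to do.
      static class ManagementSetupRule extends ExternalResource {
        @Override
        protected void before() {
          // placeholder: e.g. connect to the distributed system / start a manager
        }

        @Override
        protected void after() {
          // placeholder: e.g. disconnect and clean up per-test state
        }
      }

      @Rule
      public ManagementSetupRule setup = new ManagementSetupRule();

      @Test
      public void testSomething() {
        // the test body relies on the fixture the rule prepared
      }
    }

Rules compose across test classes without requiring a single inheritance chain, which is
the usual motivation for this kind of conversion.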
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/24f496df/geode-core/src/test/java/org/apache/geode/management/RegionManagementDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/RegionManagementDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/RegionManagementDUnitTest.java
index f042f2c..71359be 100644
--- a/geode-core/src/test/java/org/apache/geode/management/RegionManagementDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/RegionManagementDUnitTest.java
@@ -14,22 +14,46 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.geode.management;
 
-import static org.junit.Assert.*;
+import static java.util.concurrent.TimeUnit.MINUTES;
+import static org.apache.geode.cache.Region.*;
+import static org.apache.geode.test.dunit.Host.*;
+import static org.apache.geode.test.dunit.Invoke.invokeInEveryVM;
+import static org.assertj.core.api.Assertions.*;
 
+import java.lang.management.ManagementFactory;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
+import java.util.concurrent.atomic.AtomicReference;
 
-import javax.management.InstanceNotFoundException;
-import javax.management.MBeanServer;
-import javax.management.MalformedObjectNameException;
 import javax.management.Notification;
 import javax.management.NotificationListener;
 import javax.management.ObjectName;
 
+import com.jayway.awaitility.Awaitility;
+import com.jayway.awaitility.core.ConditionFactory;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -43,757 +67,505 @@ import org.apache.geode.cache.PartitionAttributes;
 import org.apache.geode.cache.PartitionAttributesFactory;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionAttributes;
+import org.apache.geode.cache.RegionFactory;
+import org.apache.geode.cache.RegionShortcut;
 import org.apache.geode.cache.Scope;
 import org.apache.geode.cache.query.data.Portfolio;
 import org.apache.geode.distributed.DistributedMember;
-import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.internal.DM;
+import org.apache.geode.internal.cache.AbstractRegion;
 import org.apache.geode.internal.cache.GemFireCacheImpl;
-import org.apache.geode.internal.cache.LocalRegion;
 import org.apache.geode.internal.cache.TestObjectSizerImpl;
 import org.apache.geode.internal.cache.lru.LRUStatistics;
 import org.apache.geode.internal.cache.partitioned.fixed.SingleHopQuarterPartitionResolver;
 import org.apache.geode.management.internal.MBeanJMXAdapter;
 import org.apache.geode.management.internal.SystemManagementService;
-import org.apache.geode.test.dunit.Assert;
 import org.apache.geode.test.dunit.LogWriterUtils;
-import org.apache.geode.test.dunit.SerializableRunnable;
 import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.dunit.Wait;
 import org.apache.geode.test.dunit.WaitCriterion;
 import org.apache.geode.test.junit.categories.DistributedTest;
-import org.apache.geode.test.junit.categories.FlakyTest;
 
 /**
  * This class checks and verifies various data and operations exposed through
  * RegionMXBean interface.
- * 
+ * <p>
  * Goal of the Test : RegionMBean gets created once region is created. Data like
  * Region Attributes data and stats are of proper value
- * 
- * 
  */
 @Category(DistributedTest.class)
+@SuppressWarnings({ "serial", "unused" })
 public class RegionManagementDUnitTest extends ManagementTestBase {
 
-  private static final long serialVersionUID = 1L;
-
-  private final String VERIFY_CONFIG_METHOD = "verifyConfigData";
+  private static final String REGION_NAME = "MANAGEMENT_TEST_REGION";
+  private static final String PARTITIONED_REGION_NAME = "MANAGEMENT_PAR_REGION";
+  private static final String FIXED_PR_NAME = "MANAGEMENT_FIXED_PR";
+  private static final String LOCAL_REGION_NAME = "TEST_LOCAL_REGION";
+  private static final String LOCAL_SUB_REGION_NAME = "TEST_LOCAL_SUB_REGION";
 
-  private final String VERIFY_REMOTE_CONFIG_METHOD = "verifyConfigDataRemote";
+  private static final String REGION_PATH = SEPARATOR + REGION_NAME;
+  private static final String PARTITIONED_REGION_PATH = SEPARATOR + PARTITIONED_REGION_NAME;
+  private static final String FIXED_PR_PATH = SEPARATOR + FIXED_PR_NAME;
+  private static final String LOCAL_SUB_REGION_PATH = SEPARATOR + LOCAL_REGION_NAME + SEPARATOR + LOCAL_SUB_REGION_NAME;
 
-  static final String REGION_NAME = "MANAGEMENT_TEST_REGION";
+  // field used in manager VM
+  private static Region fixedPartitionedRegion;
 
-  static final String PARTITIONED_REGION_NAME = "MANAGEMENT_PAR_REGION";
+  private static final AtomicReference<List<Notification>> MEMBER_NOTIFICATIONS_REF = new AtomicReference<>();
+  private static final AtomicReference<List<Notification>> SYSTEM_NOTIFICATIONS_REF = new AtomicReference<>();
 
-  static final String FIXED_PR_NAME = "MANAGEMENT_FIXED_PR";
-  
-  static final String REGION_PATH = "/MANAGEMENT_TEST_REGION";
+  @Manager
+  private VM managerVM;
 
-  static final String PARTITIONED_REGION_PATH = "/MANAGEMENT_PAR_REGION";
+  @Member
+  private VM[] memberVMs;
 
-  static final String FIXED_PR_PATH = "/MANAGEMENT_FIXED_PR";
-  
-  static final String LOCAL_REGION_NAME = "TEST_LOCAL_REGION";
-  static final String LOCAL_SUB_REGION_NAME = "TEST_LOCAL_SUB_REGION";
-  static final String LOCAL_REGION_PATH = "/TEST_LOCAL_REGION";
-  static final String LOCAL_SUB_REGION_PATH = "/TEST_LOCAL_REGION/TEST_LOCAL_SUB_REGION";
-  
-  private static final int MAX_WAIT = 70 * 1000;
+  @Before
+  public void before() throws Exception {
+    this.managerVM = getHost(0).getVM(0);
 
-  protected static final Region DiskRegion = null;
+    this.memberVMs = new VM[3];
+    this.memberVMs[0] = getHost(0).getVM(1);
+    this.memberVMs[1] = getHost(0).getVM(2);
+    this.memberVMs[2] = getHost(0).getVM(3);
+  }
 
-  static List<FixedPartitionAttributes> fpaList = new ArrayList<FixedPartitionAttributes>();
+  @After
+  public void after() throws Exception {
+    invokeInEveryVM(() -> MEMBER_NOTIFICATIONS_REF.set(null));
+    invokeInEveryVM(() -> SYSTEM_NOTIFICATIONS_REF.set(null));
+    disconnectAllFromDS_tmp();
+  }
 
+  private void createMembersAndThenManagers_tmp() throws Exception {
+    initManagement(false);
+  }
 
-  private static Region fixedPrRegion;
+  private void createManagersAndThenMembers_tmp() throws Exception {
+    initManagement(true);
+  }
 
+  private void disconnectAllFromDS_tmp() {
+    disconnectAllFromDS();
+  }
 
-  public RegionManagementDUnitTest() {
-    super();
+  private ManagementService getManagementService_tmp() {
+    return getManagementService();
+  }
 
+  private Cache getCache_tmp() {
+    return getCache();
   }
 
   /**
    * Tests all Region MBean related Management APIs
-   * 
+   * <p>
    * a) Notification propagated to member MBean while a region is created
-   * 
+   * <p>
    * b) Creates and check a Distributed Region
-   * 
-   * 
-   * @throws Exception
    */
-
-  @Category(FlakyTest.class) // GEODE-1538
   @Test
   public void testDistributedRegion() throws Exception {
+    createMembersAndThenManagers_tmp();
 
-    initManagement(false);
-
-
-    VM[] managedNodes = new VM[getManagedNodeList()
-        .size()];
-
-    getManagedNodeList().toArray(managedNodes);
-    // Adding notif listener for remote cache members
-    addMemberListener(managingNode);
+    // Adding notification listener for remote cache memberVMs
+    addMemberNotificationListener(this.managerVM, 3); // TODO: why?
 
-    for (int j = 0; j < managedNodes.length; j++) {
-
-      VM vm = managedNodes[j];
+    for (VM memberVM : this.memberVMs) {
+      createDistributedRegion_tmp(memberVM, REGION_NAME);
+      verifyReplicateRegionAfterCreate(memberVM);
+    }
 
-      createDistributedRegion(vm, REGION_NAME);
-      validateReplicateRegionAfterCreate(vm);
+    verifyRemoteDistributedRegion(this.managerVM, 3);
 
+    for (VM memberVM : this.memberVMs) {
+      closeRegion(memberVM, REGION_PATH);
+      verifyReplicatedRegionAfterClose(memberVM);
     }
 
-    verifyRemoteDistributedRegion(managingNode, 3);
+    verifyProxyCleanup(this.managerVM);
 
-
-    for (VM vm : getManagedNodeList()) {
-      closeRegion(vm, REGION_PATH);
-      validateReplicatedRegionAfterClose(vm);
-    }
-    
-    ensureProxyCleanup(managingNode);
+    verifyMemberNotifications(this.managerVM, REGION_NAME, 3);
   }
-  
+
   /**
    * Tests all Region MBean related Management APIs
-   * 
+   * <p>
    * a) Notification propagated to member MBean while a region is created
-   * 
+   * <p>
    * b) Created and check a Partitioned Region
-   * 
-   * @throws Exception
    */
   @Test
   public void testPartitionedRegion() throws Exception {
-    initManagement(false);
-
-    VM managingNode = getManagingNode();
-
-    VM[] managedNodes = new VM[getManagedNodeList()
-        .size()];
-
-    getManagedNodeList().toArray(managedNodes);
-    // Adding notif listener for remote cache members
+    createMembersAndThenManagers_tmp();
 
-    addMemberListener(managingNode);
+    // Adding notification listener for remote cache memberVMs
+    addMemberNotificationListener(this.managerVM, 3); // TODO: why?
 
-    for (int j = 0; j < managedNodes.length; j++) {
-
-      VM vm = managedNodes[j];
-      createPartitionRegion(vm, PARTITIONED_REGION_NAME);
-      validatePartitionRegionAfterCreate(vm);
+    for (VM memberVM : this.memberVMs) {
+      createPartitionRegion_tmp(memberVM, PARTITIONED_REGION_NAME);
+      verifyPartitionRegionAfterCreate(memberVM);
     }
-    
-
-    validateRemotePartitionRegion(managingNode);
 
-    for (VM vm : getManagedNodeList()) {
+    verifyRemotePartitionRegion(this.managerVM);
 
-      closeRegion(vm, PARTITIONED_REGION_PATH);
-      validatePartitionRegionAfterClose(vm);
+    for (VM memberVM : this.memberVMs) {
+      closeRegion(memberVM, PARTITIONED_REGION_PATH);
+      verifyPartitionRegionAfterClose(memberVM);
     }
+
+    verifyMemberNotifications(this.managerVM, PARTITIONED_REGION_NAME, 3);
   }
-  
+
   /**
    * Tests all Region MBean related Management APIs
-   * 
+   * <p>
    * a) Notification propagated to member MBean while a region is created
-   * 
+   * <p>
    * b) Creates and check a Fixed Partitioned Region
-   * 
-   * @throws Exception
    */
   @Test
   public void testFixedPRRegionMBean() throws Exception {
+    createMembersAndThenManagers_tmp();
 
-    initManagement(false);
-
-    VM managingNode = getManagingNode();
-
-    VM[] managedNodes = new VM[getManagedNodeList()
-        .size()];
-
-    getManagedNodeList().toArray(managedNodes);
-    // Adding notif listener for remote cache members
-    addMemberListener(managingNode);
+    // Adding notification listener for remote cache memberVMs
+    addMemberNotificationListener(this.managerVM, 3); // TODO: why?
 
-    for (int j = 0; j < managedNodes.length; j++) {
-
-      VM vm = managedNodes[j];
+    int primaryIndex = 0;
+    for (VM memberVM : this.memberVMs) {
+      List<FixedPartitionAttributes> fixedPartitionAttributesList = createFixedPartitionList(primaryIndex + 1);
+      memberVM.invoke(() -> createFixedPartitionRegion(fixedPartitionAttributesList));
+      primaryIndex++;
+    }
 
-      createFixedPartitionList(j + 1);
-      Object[] args = new Object[1];
-      args[0] = fpaList;
-      vm.invoke(RegionManagementDUnitTest.class, "createFixedPartitionRegion",
-          args);
+//    // TODO: Workaround for bug 46683. Reenable validation when bug is fixed.
+    verifyRemoteFixedPartitionRegion(this.managerVM);
 
+    for (VM memberVM : this.memberVMs) {
+      closeRegion(memberVM, FIXED_PR_PATH);
     }
-    // Workaround for bug 46683. Renable validation when bug is fixed.
-    validateRemoteFixedPartitionRegion(managingNode);
 
-    for (VM vm : getManagedNodeList()) {
-      closeFixedPartitionRegion(vm);
-    }
+    verifyMemberNotifications(this.managerVM, FIXED_PR_PATH, 3);
   }
 
   /**
    * Tests a Distributed Region at Managing Node side
    * while region is created in a member node asynchronously.
-   * @throws Exception
    */
   @Test
-  public void testRegionAggregate() throws Exception{
-    initManagement(true);
+  public void testRegionAggregate() throws Exception {
+    createManagersAndThenMembers_tmp();
 
-    VM managingNode = getManagingNode();
-
-    VM[] managedNodes = new VM[getManagedNodeList()
-        .size()];
-
-    getManagedNodeList().toArray(managedNodes);
-    // Adding notif listener for remote cache members
-    addDistrListener(managingNode);
-
-
-    for (int j = 0; j < managedNodes.length; j++) {
-
-      VM vm = managedNodes[j];
-
-      createDistributedRegion(vm, REGION_NAME);
+    // Adding notification listener for remote cache memberVMs
+    addSystemNotificationListener(this.managerVM); // TODO: why?
 
+    for (VM memberVM : this.memberVMs) {
+      createDistributedRegion_tmp(memberVM, REGION_NAME);
     }
 
-    
-    validateDistributedMBean(managingNode, 3);
-    
-    createDistributedRegion(managingNode, REGION_NAME);
-    validateDistributedMBean(managingNode, 4);
-    
-
-
-    for (int j = 0; j < managedNodes.length; j++) {
+    verifyDistributedMBean(this.managerVM, 3);
+    createDistributedRegion_tmp(this.managerVM, REGION_NAME);
+    verifyDistributedMBean(this.managerVM, 4);
 
-      VM vm = managedNodes[j];
+    for (VM memberVM : this.memberVMs) {
+      closeRegion(memberVM, REGION_PATH);
+    }
 
-      closeRegion(vm, REGION_PATH);
+    verifyProxyCleanup(this.managerVM);
 
-    }
-    ensureProxyCleanup(managingNode);
-    
-    validateDistributedMBean(managingNode, 1);
-    
-    closeRegion(managingNode, REGION_PATH);
-    validateDistributedMBean(managingNode, 0);
-    
+    verifyDistributedMBean(this.managerVM, 1);
+    closeRegion(this.managerVM, REGION_PATH);
+    verifyDistributedMBean(this.managerVM, 0);
 
+    verifySystemNotifications(this.managerVM, REGION_NAME, 3);
   }
-  
+
   @Test
   public void testNavigationAPIS() throws Exception {
-    initManagement(true);
-    for(VM vm : managedNodeList){
-      createDistributedRegion(vm, REGION_NAME);
-      createPartitionRegion(vm, PARTITIONED_REGION_NAME);
-    }
-    createDistributedRegion(managingNode, REGION_NAME);
-    createPartitionRegion(managingNode, PARTITIONED_REGION_NAME);
-    List<String> memberIds = new ArrayList<String>();
-    
-    for(VM vm : managedNodeList){
-      memberIds.add(getMemberId(vm));
+    createManagersAndThenMembers_tmp();
+
+    for (VM memberVM : this.memberVMs) {
+      createDistributedRegion_tmp(memberVM, REGION_NAME);
+      createPartitionRegion_tmp(memberVM, PARTITIONED_REGION_NAME);
     }
-    checkNavigationAPIS(managingNode, memberIds);
-    
 
+    createDistributedRegion_tmp(this.managerVM, REGION_NAME);
+    createPartitionRegion_tmp(this.managerVM, PARTITIONED_REGION_NAME);
+    List<String> memberIds = new ArrayList<>();
 
-    for(VM vm : managedNodeList){
-      closeRegion(vm, REGION_PATH);
+    for (VM memberVM : this.memberVMs) {
+      memberIds.add(getDistributedMemberId_tmp(memberVM));
     }
- 
-    closeRegion(managingNode, REGION_PATH);
 
-  }
+    verifyNavigationApis(this.managerVM, memberIds);
 
- 
-  
-  @Test
-  public void testSubRegions() throws Exception{
-    initManagement(false);
-    for (VM vm : managedNodeList) {
-      createLocalRegion(vm, LOCAL_REGION_NAME);
-      createSubRegion(vm, LOCAL_REGION_NAME, LOCAL_SUB_REGION_NAME);
-    }
-    
-    for (VM vm : managedNodeList) {
-      checkSubRegions(vm, LOCAL_SUB_REGION_PATH);
-    }
-    
-    for (VM vm : managedNodeList) {
-      closeRegion(vm, LOCAL_REGION_NAME);
-      checkNullRegions(vm, LOCAL_SUB_REGION_NAME);
+    for (VM memberVM : this.memberVMs) {
+      closeRegion(memberVM, REGION_PATH);
     }
-    
+    closeRegion(this.managerVM, REGION_PATH);
   }
-  
-  
-  
-  
+
   @Test
-  public void testSpecialRegions() throws Exception{
-    initManagement(false);
-    createSpecialRegion(managedNodeList.get(0));
-    DistributedMember member = getMember(managedNodeList.get(0));
-    checkSpecialRegion(managingNode,member);
-  }
-  
-  
-  public void createSpecialRegion(VM vm1) throws Exception{
-    {
-      vm1.invoke(new SerializableRunnable("Check Sub Regions") {
-
-        public void run() {
-          Cache cache = getCache();
-          AttributesFactory attributesFactory = new AttributesFactory();
-          attributesFactory.setValueConstraint(Portfolio.class);
-          RegionAttributes regionAttributes = attributesFactory.create();
-          
-          cache.createRegion("p-os",regionAttributes);
-          cache.createRegion("p_os",regionAttributes);
-        }
-      });
+  public void testSubRegions() throws Exception {
+    createMembersAndThenManagers_tmp();
 
+    for (VM memberVM : this.memberVMs) {
+      createLocalRegion_tmp(memberVM, LOCAL_REGION_NAME);
+      createSubRegion_tmp(memberVM, LOCAL_REGION_NAME, LOCAL_SUB_REGION_NAME);
     }
-  }
-  
-  public void checkSpecialRegion(VM vm1, final DistributedMember member)
-      throws Exception {
-    {
-      vm1.invoke(new SerializableRunnable("Check Sub Regions") {
-
-        public void run() {
-
-          ManagementService service = getManagementService();
-          
-          try {
-            MBeanUtil.getDistributedRegionMbean("/p-os", 1);
-            MBeanUtil.getDistributedRegionMbean("/p_os", 1);
-
-          } catch (Exception e) {
-            InternalDistributedSystem.getLoggerI18n().fine(
-                "Undesired Result , DistributedRegionMXBean Should not be null"
-                    + e);
-          }
-
-        }
-      });
 
+    for (VM memberVM : this.memberVMs) {
+      verifySubRegions(memberVM, LOCAL_SUB_REGION_PATH);
     }
 
+    for (VM memberVM : this.memberVMs) {
+      closeRegion(memberVM, LOCAL_REGION_NAME);
+      verifyNullRegions(memberVM, LOCAL_SUB_REGION_NAME);
+    }
   }
-  
+
   @Test
-  public void testLruStats() throws Exception{
-    initManagement(false);
-    for (VM vm : managedNodeList) {
-      createDiskRegion(vm);
+  public void testSpecialRegions() throws Exception {
+    createMembersAndThenManagers_tmp();
+    createSpecialRegion(this.memberVMs[0]);
+    verifySpecialRegion(this.managerVM);
+  }
 
+  @Test
+  public void testLruStats() throws Exception {
+    createMembersAndThenManagers_tmp();
+    for (VM memberVM : this.memberVMs) {
+      createDiskRegion(memberVM);
     }
-    checkEntrySize(managingNode,3);
+    verifyEntrySize(this.managerVM, 3);
   }
-  
-  public void createDiskRegion(VM vm1) throws Exception{
-    {
-      vm1.invoke(new SerializableRunnable("Check Sub Regions") {
-
-        public void run() {
-          AttributesFactory factory = new AttributesFactory();
-          factory.setScope(Scope.LOCAL);
-          factory.setEvictionAttributes(EvictionAttributes
-                .createLRUMemoryAttributes(20, new TestObjectSizerImpl(),
-                    EvictionAction.LOCAL_DESTROY));
-          /*File d = new File("DiskRegions" + OSProcess.getId());
-          d.mkdirs();
-
-          DiskStoreFactory dsf = getCache().createDiskStoreFactory();
-          dsf.setDiskDirs(new File[]{d});
-          factory.setDiskSynchronous(true);
-          DiskStore ds = dsf.create(REGION_NAME);
-          factory.setDiskStoreName(ds.getName());
-*/
-          Region region = getCache().createRegion(REGION_NAME, factory.create());
-
-          LRUStatistics lruStats = getLRUStats(region);
-
-          assertNotNull(lruStats);
-          
-          RegionMXBean bean = managementService.getLocalRegionMBean(REGION_PATH);
-          
-          assertNotNull(bean);
-          
-          int total;
-          for (total = 0; total < 10000; total++) {
-            int[] array = new int[250];
-            array[0] = total;
-            region.put(new Integer(total), array);
-          }
-          assertTrue(bean.getEntrySize() > 0);
-          LogWriterUtils.getLogWriter().info("DEBUG: EntrySize =" + bean.getEntrySize());
-          
 
+  private void closeRegion(final VM anyVM, final String regionPath) {
+    anyVM.invoke("closeRegion", () -> getCache_tmp().getRegion(regionPath).close());
+  }
 
-        }
-      });
+  private void createSpecialRegion(final VM memberVM) throws Exception {
+    memberVM.invoke("createSpecialRegion", () -> {
+      AttributesFactory attributesFactory = new AttributesFactory();
+      attributesFactory.setValueConstraint(Portfolio.class);
+      RegionAttributes regionAttributes = attributesFactory.create();
 
-    }
-    
+      Cache cache = getCache_tmp();
+      cache.createRegion("p-os", regionAttributes);
+      cache.createRegion("p_os", regionAttributes);
+    });
   }
 
-  public void checkEntrySize(VM vm1, final int expectedMembers)
-      throws Exception {
-    {
-      vm1.invoke(new SerializableRunnable("Check Sub Regions") {
+  private void verifySpecialRegion(final VM managerVM) throws Exception {
+    managerVM.invoke("verifySpecialRegion", () -> {
+      awaitDistributedRegionMXBean("/p-os", 1); // TODO: why?
+      awaitDistributedRegionMXBean("/p_os", 1);
+    });
+  }
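
awaitDistributedRegionMXBean and the other await* helpers called below are likewise defined outside this hunk. A rough sketch of the kind of polling such a helper might perform, built only on MBean accessors that already appear in this diff (the two-minute timeout and 500 ms poll interval are illustrative assumptions, and java.util.concurrent.TimeUnit is assumed to be imported):

    private DistributedRegionMXBean awaitDistributedRegionMXBean(final String path, final int memberCount)
        throws InterruptedException {
      long deadline = System.currentTimeMillis() + TimeUnit.MINUTES.toMillis(2);
      while (System.currentTimeMillis() < deadline) {
        DistributedRegionMXBean bean = getManagementService_tmp().getDistributedRegionMXBean(path);
        if (bean != null && bean.getMemberCount() == memberCount) {
          return bean; // aggregate MBean exists and reflects the expected number of members
        }
        Thread.sleep(500); // poll until the MBean federation catches up
      }
      throw new AssertionError("DistributedRegionMXBean for " + path + " did not reach "
          + memberCount + " member(s) in time");
    }
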
 
-        public void run() {
+  private void createDiskRegion(final VM memberVM) throws Exception {
+    memberVM.invoke("createDiskRegion", () -> {
+      AttributesFactory factory = new AttributesFactory();
+      factory.setScope(Scope.LOCAL);
+      factory.setEvictionAttributes(EvictionAttributes.createLRUMemoryAttributes(20, new TestObjectSizerImpl(), EvictionAction.LOCAL_DESTROY));
 
-          DistributedRegionMXBean bean = null;
-          try {
-            bean = MBeanUtil.getDistributedRegionMbean(REGION_PATH,
-                expectedMembers);
-          } catch (Exception e) {
-            InternalDistributedSystem.getLoggerI18n().fine(
-                "Undesired Result , DistributedRegionMXBean Should not be null"
-                    + e);
-          }
+      Region region = getCache_tmp().createRegion(REGION_NAME, factory.create());
 
-          assertNotNull(bean);
+      LRUStatistics lruStats = ((AbstractRegion) region).getEvictionController().getLRUHelper().getStats();
+      assertThat(lruStats).isNotNull();
 
-          assertTrue(bean.getEntrySize() > 0);
-          LogWriterUtils.getLogWriter().info("DEBUG: EntrySize =" + bean.getEntrySize());
-        }
-      });
+      RegionMXBean regionMXBean = getManagementService_tmp().getLocalRegionMBean(REGION_PATH);
+      assertThat(regionMXBean).isNotNull();
 
-    }
+      int total;
+      for (total = 0; total < 10000; total++) { // TODO: why so many?
+        int[] array = new int[250];
+        array[0] = total;
+        region.put(new Integer(total), array);
+      }
+      assertThat(regionMXBean.getEntrySize()).isGreaterThan(0);
+    });
+  }
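
Rough sizing for the put loop above, for context: 10,000 entries of int[250] is roughly 10,000 x 250 x 4 bytes, about 10 MB of raw array payload before per-entry overhead, which is enough for getEntrySize() to report a positive value while staying under the 20 MB LRU memory limit configured for the region.
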
 
+  private void verifyEntrySize(final VM managerVM, final int expectedMembers) throws Exception {
+    managerVM.invoke("verifyEntrySize", () -> {
+      DistributedRegionMXBean distributedRegionMXBean = awaitDistributedRegionMXBean(REGION_PATH, expectedMembers);
+      assertThat(distributedRegionMXBean).isNotNull();
+      assertThat(distributedRegionMXBean.getEntrySize()).isGreaterThan(0);
+    });
   }
-  
-  protected LRUStatistics getLRUStats(Region region) {
-    final LocalRegion l = (LocalRegion) region;
-    return l.getEvictionController().getLRUHelper().getStats();
+
+  private void verifySubRegions(final VM memberVM, final String subRegionPath) throws Exception {
+    memberVM.invoke("verifySubRegions", () -> {
+      RegionMXBean regionMXBean = getManagementService_tmp().getLocalRegionMBean(subRegionPath);
+      assertThat(regionMXBean).isNotNull();
+    });
   }
-  
-  @SuppressWarnings("serial")
-  public void checkSubRegions(VM vm1, final String subRegionPath) throws Exception {
-    {
-      vm1.invoke(new SerializableRunnable("Check Sub Regions") {
 
-        public void run() {
+  private void verifyNullRegions(final VM memberVM, final String subRegionPath) throws Exception {
+    memberVM.invoke("verifyNullRegions", () -> {
+      RegionMXBean regionMXBean = getManagementService_tmp().getLocalRegionMBean(subRegionPath);
+      assertThat(regionMXBean).isNull();
+    });
+  }
 
-          RegionMXBean bean = managementService
-              .getLocalRegionMBean(subRegionPath);
-          assertNotNull(bean);
+  private void verifyNavigationApis(final VM managerVM, final List<String> memberIds) {
+    managerVM.invoke("verifyNavigationApis", () -> {
+      ManagementService service = getManagementService_tmp();
+      assertThat(service.getDistributedSystemMXBean()).isNotNull();
 
-        }
-      });
+      awaitMemberCount(4);
 
-    }
-  }
-  
-  @SuppressWarnings("serial")
-  public void checkNullRegions(VM vm1, final String subRegionPath) throws Exception {
-    {
-      vm1.invoke(new SerializableRunnable("Check Sub Regions") {
+      DistributedSystemMXBean distributedSystemMXBean = service.getDistributedSystemMXBean();
+      assertThat(distributedSystemMXBean.listDistributedRegionObjectNames()).hasSize(2);
 
-        public void run() {
+      assertThat(distributedSystemMXBean.fetchDistributedRegionObjectName(PARTITIONED_REGION_PATH)).isNotNull();
+      assertThat(distributedSystemMXBean.fetchDistributedRegionObjectName(REGION_PATH)).isNotNull();
 
-          RegionMXBean bean = managementService
-              .getLocalRegionMBean(subRegionPath);
-          assertNull(bean);
+      ObjectName actualName = distributedSystemMXBean.fetchDistributedRegionObjectName(PARTITIONED_REGION_PATH);
+      ObjectName expectedName = MBeanJMXAdapter.getDistributedRegionMbeanName(PARTITIONED_REGION_PATH);
+      assertThat(actualName).isEqualTo(expectedName);
 
-        }
-      });
+      actualName = distributedSystemMXBean.fetchDistributedRegionObjectName(REGION_PATH);
+      expectedName = MBeanJMXAdapter.getDistributedRegionMbeanName(REGION_PATH);
+      assertThat(actualName).isEqualTo(expectedName);
 
-    }
-  }
+      for (String memberId : memberIds) {
+        ObjectName objectName = MBeanJMXAdapter.getMemberMBeanName(memberId);
+        awaitMemberMXBeanProxy(objectName);
 
-  
-  
-  
-  protected void checkNavigationAPIS(final VM vm,
-      final List<String> managedNodeMemberIds) {
-    SerializableRunnable checkNavigationAPIS = new SerializableRunnable(
-        "checkNavigationAPIS") {
-      public void run() {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        ManagementService service = getManagementService();
-        final DistributedSystemMXBean bean = service
-            .getDistributedSystemMXBean();
-
-        assertNotNull(service.getDistributedSystemMXBean());
-
-        waitForAllMembers(4);
-        assertTrue(bean.listDistributedRegionObjectNames().length == 2);
-        try {
-          assertNotNull(bean
-              .fetchDistributedRegionObjectName(PARTITIONED_REGION_PATH));
-          assertNotNull(bean.fetchDistributedRegionObjectName(REGION_PATH));
-          ObjectName actualName = bean
-              .fetchDistributedRegionObjectName(PARTITIONED_REGION_PATH);
-          ObjectName expectedName = MBeanJMXAdapter
-              .getDistributedRegionMbeanName(PARTITIONED_REGION_PATH);
-          assertEquals(expectedName, actualName);
-
-          actualName = bean.fetchDistributedRegionObjectName(REGION_PATH);
-          expectedName = MBeanJMXAdapter
-              .getDistributedRegionMbeanName(REGION_PATH);
-          assertEquals(expectedName, actualName);
-
-        } catch (Exception e) {
-          fail("fetchDistributedRegionObjectName () Unsuccessful " + e);
-        }
+        ObjectName[] objectNames = distributedSystemMXBean.fetchRegionObjectNames(objectName);
+        assertThat(objectNames).isNotNull();
+        assertThat(objectNames).hasSize(2);
 
-        for (String memberId : managedNodeMemberIds) {
-          ObjectName memberMBeanName = MBeanJMXAdapter
-              .getMemberMBeanName(memberId);
-          ObjectName expectedName;
-          try {
-            waitForProxy(memberMBeanName, MemberMXBean.class);
-            
-            ObjectName[] regionMBeanNames = bean
-                .fetchRegionObjectNames(memberMBeanName);
-            assertNotNull(regionMBeanNames);
-            assertTrue(regionMBeanNames.length == 2);
-            List<ObjectName> listOfNames = Arrays.asList(regionMBeanNames);
-
-            expectedName = MBeanJMXAdapter.getRegionMBeanName(memberId,
-                PARTITIONED_REGION_PATH);
-            listOfNames.contains(expectedName);
-            expectedName = MBeanJMXAdapter.getRegionMBeanName(memberId,
-                REGION_PATH);
-            listOfNames.contains(expectedName);
-          } catch (Exception e) {
-            fail("fetchRegionObjectNames () Unsuccessful " + e);
-          }
-        }
+        List<ObjectName> listOfNames = Arrays.asList(objectNames);
 
-        for (String memberId : managedNodeMemberIds) {
-          ObjectName expectedName;
-          ObjectName actualName;
-          ObjectName memberMBeanName = MBeanJMXAdapter
-          .getMemberMBeanName(memberId);
-          try {
-            waitForProxy(memberMBeanName, MemberMXBean.class);
-            expectedName = MBeanJMXAdapter.getRegionMBeanName(memberId,
-                PARTITIONED_REGION_PATH);
-            waitForProxy(expectedName, RegionMXBean.class);
-            actualName = bean.fetchRegionObjectName(memberId,
-                PARTITIONED_REGION_PATH);
-
-            assertEquals(expectedName, actualName);
-            expectedName = MBeanJMXAdapter.getRegionMBeanName(memberId,
-                REGION_PATH);
-            waitForProxy(expectedName, RegionMXBean.class);
-            actualName = bean.fetchRegionObjectName(memberId, REGION_PATH);
-
-            assertEquals(expectedName, actualName);
-          } catch (Exception e) {
-            fail("fetchRegionObjectName () Unsuccessful ");
-          }
-        }
+        expectedName = MBeanJMXAdapter.getRegionMBeanName(memberId, PARTITIONED_REGION_PATH);
+        assertThat(listOfNames).contains(expectedName);
 
+        expectedName = MBeanJMXAdapter.getRegionMBeanName(memberId, REGION_PATH);
+        assertThat(listOfNames).contains(expectedName);
       }
-    };
-    vm.invoke(checkNavigationAPIS);
-  }
-  
-  
-  protected void putBulkData(final VM vm, final int numKeys) {
-    SerializableRunnable putBulkData = new SerializableRunnable("putBulkData") {
-      public void run() {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        Region region = cache.getRegion(REGION_PATH);
-        for (int i = 0; i < numKeys; i++) {
-          region.put(i, i * i);
-        }
 
+      for (String memberId : memberIds) {
+        ObjectName objectName = MBeanJMXAdapter.getMemberMBeanName(memberId);
+        awaitMemberMXBeanProxy(objectName);
+
+        expectedName = MBeanJMXAdapter.getRegionMBeanName(memberId, PARTITIONED_REGION_PATH);
+        awaitRegionMXBeanProxy(expectedName);
+
+        actualName = distributedSystemMXBean.fetchRegionObjectName(memberId, PARTITIONED_REGION_PATH);
+        assertThat(actualName).isEqualTo(expectedName);
+
+        expectedName = MBeanJMXAdapter.getRegionMBeanName(memberId, REGION_PATH);
+        awaitRegionMXBeanProxy(expectedName);
+
+        actualName = distributedSystemMXBean.fetchRegionObjectName(memberId, REGION_PATH);
+        assertThat(actualName).isEqualTo(expectedName);
       }
-    };
-    vm.invoke(putBulkData);
+    });
   }
- 
-  
 
   /**
-   * creates a Fixed Partition List to be used for Fixed Partition Region
-   * 
-   * @param primaryIndex
-   *          index for each fixed partition
+   * Invoked in controller VM
    */
-  private static void createFixedPartitionList(int primaryIndex) {
-    fpaList.clear();
+  private List<FixedPartitionAttributes> createFixedPartitionList(final int primaryIndex) {
+    List<FixedPartitionAttributes> fixedPartitionAttributesList = new ArrayList<>();
     if (primaryIndex == 1) {
-      fpaList.add(FixedPartitionAttributes.createFixedPartition("Q1", true, 3));
-      fpaList.add(FixedPartitionAttributes.createFixedPartition("Q2", 3));
-      fpaList.add(FixedPartitionAttributes.createFixedPartition("Q3", 3));
+      fixedPartitionAttributesList.add(FixedPartitionAttributes.createFixedPartition("Q1", true, 3));
+      fixedPartitionAttributesList.add(FixedPartitionAttributes.createFixedPartition("Q2", 3));
+      fixedPartitionAttributesList.add(FixedPartitionAttributes.createFixedPartition("Q3", 3));
     }
     if (primaryIndex == 2) {
-      fpaList.add(FixedPartitionAttributes.createFixedPartition("Q1", 3));
-      fpaList.add(FixedPartitionAttributes.createFixedPartition("Q2", true, 3));
-      fpaList.add(FixedPartitionAttributes.createFixedPartition("Q3", 3));
+      fixedPartitionAttributesList.add(FixedPartitionAttributes.createFixedPartition("Q1", 3));
+      fixedPartitionAttributesList.add(FixedPartitionAttributes.createFixedPartition("Q2", true, 3));
+      fixedPartitionAttributesList.add(FixedPartitionAttributes.createFixedPartition("Q3", 3));
     }
     if (primaryIndex == 3) {
-      fpaList.add(FixedPartitionAttributes.createFixedPartition("Q1", 3));
-      fpaList.add(FixedPartitionAttributes.createFixedPartition("Q2", 3));
-      fpaList.add(FixedPartitionAttributes.createFixedPartition("Q3", true, 3));
+      fixedPartitionAttributesList.add(FixedPartitionAttributes.createFixedPartition("Q1", 3));
+      fixedPartitionAttributesList.add(FixedPartitionAttributes.createFixedPartition("Q2", 3));
+      fixedPartitionAttributesList.add(FixedPartitionAttributes.createFixedPartition("Q3", true, 3));
     }
-
+    return fixedPartitionAttributesList;
   }
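
For illustration, the list returned for primaryIndex == 1 (the two-argument createFixedPartition overload creates a non-primary partition):

    List<FixedPartitionAttributes> fpaList = createFixedPartitionList(1);
    // fpaList now holds:
    //   Q1  primary    3 buckets
    //   Q2  secondary  3 buckets
    //   Q3  secondary  3 buckets

Presumably each of the three members passes a different primaryIndex (1, 2 or 3), so with setRedundantCopies(2) below every quarter ends up with one primary and two secondary copies spread across the members.
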
-  
-
 
   /**
-   * Creates a Fixed Partitioned Region
-   * @param fpaList partition list
+   * Invoked in member VMs
    */
-  protected static void createFixedPartitionRegion(
-      List<FixedPartitionAttributes> fpaList) {
-    GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-    SystemManagementService service = (SystemManagementService)getManagementService();
+  private void createFixedPartitionRegion(final List<FixedPartitionAttributes> fixedPartitionAttributesList) {
+    SystemManagementService service = getSystemManagementService_tmp();
 
-    PartitionAttributesFactory paf = new PartitionAttributesFactory();
+    PartitionAttributesFactory partitionAttributesFactory = new PartitionAttributesFactory();
 
-    paf.setRedundantCopies(2).setTotalNumBuckets(12);
-    for (FixedPartitionAttributes fpa : fpaList) {
-      paf.addFixedPartitionAttributes(fpa);
+    partitionAttributesFactory.setRedundantCopies(2).setTotalNumBuckets(12);
+    for (FixedPartitionAttributes fixedPartitionAttributes : fixedPartitionAttributesList) {
+      partitionAttributesFactory.addFixedPartitionAttributes(fixedPartitionAttributes);
     }
-    paf.setPartitionResolver(new SingleHopQuarterPartitionResolver());
-
-    AttributesFactory attr = new AttributesFactory();
-    attr.setPartitionAttributes(paf.create());
-    fixedPrRegion = cache.createRegion(FIXED_PR_NAME, attr.create());
-    assertNotNull(fixedPrRegion);
-    LogWriterUtils.getLogWriter().info(
-        "Partitioned Region " + FIXED_PR_NAME + " created Successfully :"
-            + fixedPrRegion.toString());
+    partitionAttributesFactory.setPartitionResolver(new SingleHopQuarterPartitionResolver());
 
-    RegionMXBean bean = service.getLocalRegionMBean(FIXED_PR_PATH);
-    RegionAttributes regAttrs = fixedPrRegion.getAttributes();
+    AttributesFactory attributesFactory = new AttributesFactory();
+    attributesFactory.setPartitionAttributes(partitionAttributesFactory.create());
 
-    LogWriterUtils.getLogWriter().info(
-        "FixedPartitionAttribute From GemFire :"
-            + regAttrs.getPartitionAttributes().getFixedPartitionAttributes());
+    fixedPartitionedRegion = getCache_tmp().createRegion(FIXED_PR_NAME, attributesFactory.create());
+    assertThat(fixedPartitionedRegion).isNotNull();
 
-    RegionAttributesData data = bean.listRegionAttributes();
+    RegionMXBean regionMXBean = service.getLocalRegionMBean(FIXED_PR_PATH);
+    RegionAttributes regionAttributes = fixedPartitionedRegion.getAttributes();
 
-    PartitionAttributesData parData = bean.listPartitionAttributes();
+    PartitionAttributesData partitionAttributesData = regionMXBean.listPartitionAttributes();
+    verifyPartitionData(regionAttributes, partitionAttributesData);
 
-    assertPartitionData(regAttrs, parData);
+    FixedPartitionAttributesData[] fixedPartitionAttributesData = regionMXBean.listFixedPartitionAttributes();
+    assertThat(fixedPartitionAttributesData).isNotNull();
+    assertThat(fixedPartitionAttributesData).hasSize(3);
 
-    FixedPartitionAttributesData[] fixedPrData = bean
-        .listFixedPartitionAttributes();
-
-    assertNotNull(fixedPrData);
-
-    assertEquals(3, fixedPrData.length);
-    for (int i = 0; i < fixedPrData.length; i++) {
-      LogWriterUtils.getLogWriter().info(
-          "<ExpectedString> Fixed PR Data is " + fixedPrData[i]
-              + "</ExpectedString> ");
+    for (int i = 0; i < fixedPartitionAttributesData.length; i++) {
+      //LogWriterUtils.getLogWriter().info("<ExpectedString> Fixed PR Data is " + fixedPartitionAttributesData[i] + "</ExpectedString> ");
     }
   }
 
-  /**
-   * Verifies the Fixed Partition Region for partition related attributes
-   * 
-   * @param vm
-   */
-  protected void validateRemoteFixedPartitionRegion(final VM vm) throws Exception {
-    SerializableRunnable verifyFixedRegion = new SerializableRunnable(
-        "Verify Partition region") {
-      public void run() {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        Set<DistributedMember> otherMemberSet = cache.getDistributionManager()
-            .getOtherNormalDistributionManagerIds();
-
-        for (DistributedMember member : otherMemberSet) {
-          RegionMXBean bean = null;
-          try {
-            bean = MBeanUtil.getRegionMbeanProxy(member, FIXED_PR_PATH);
-          } catch (Exception e) {
-            InternalDistributedSystem.getLoggerI18n().fine(
-                "Undesired Result , RegionMBean Should not be null");
-          }
-          PartitionAttributesData data = bean.listPartitionAttributes();
-          assertNotNull(data);
-          FixedPartitionAttributesData[] fixedPrData = bean
-              .listFixedPartitionAttributes();
-          assertNotNull(fixedPrData);
-          assertEquals(3, fixedPrData.length);
-          for (int i = 0; i < fixedPrData.length; i++) {
-            LogWriterUtils.getLogWriter().info(
-                "<ExpectedString> Remote PR Data is " + fixedPrData[i]
-                    + "</ExpectedString> ");
-          }
-        }
-
+//  /**
+//   * Invoked in manager VM
+//   */
+//  private void verifyRemoteFixedPartitionRegion(final VM vm) throws Exception {
+//    vm.invoke("Verify Partition region", () -> {
+//      Set<DistributedMember> otherMemberSet = getDistributionManager_tmp().getOtherNormalDistributionManagerIds();
+//
+//      for (DistributedMember member : otherMemberSet) {
+//        RegionMXBean regionMXBean = awaitRegionMXBeanProxy(member, FIXED_PR_PATH);
+//
+//        PartitionAttributesData partitionAttributesData = regionMXBean.listPartitionAttributes();
+//        assertNotNull(partitionAttributesData);
+//
+//        FixedPartitionAttributesData[] fixedPartitionAttributesData = regionMXBean.listFixedPartitionAttributes();
+//        assertNotNull(fixedPartitionAttributesData);
+//        assertEquals(3, fixedPartitionAttributesData.length);
+//
+//        for (int i = 0; i < fixedPartitionAttributesData.length; i++) {
+//          //LogWriterUtils.getLogWriter().info("<ExpectedString> Remote PR Data is " + fixedPartitionAttributesData[i] + "</ExpectedString> ");
+//        }
+//      }
+//    });
+//  }
+
+  private void addMemberNotificationListener(final VM managerVM, final int expectedMembers) {
+    managerVM.invoke("addMemberNotificationListener", () -> {
+      Set<DistributedMember> otherMemberSet = getOtherNormalMembers_tmp();
+      assertThat(otherMemberSet).hasSize(expectedMembers);
+
+      SystemManagementService service = getSystemManagementService_tmp();
+
+      List<Notification> notifications = new ArrayList<>();
+      MEMBER_NOTIFICATIONS_REF.set(notifications);
+
+      for (DistributedMember member : otherMemberSet) {
+        MemberNotificationListener listener = new MemberNotificationListener(notifications);
+        ObjectName objectName = service.getMemberMBeanName(member);
+        awaitMemberMXBeanProxy(objectName);
+
+        ManagementFactory.getPlatformMBeanServer().addNotificationListener(objectName, listener, null, null);
       }
-
-    };
-    vm.invoke(verifyFixedRegion);
-  }
-
-  /**
-   * Add a Notification listener to MemberMBean 
-   * @param vm
-   */
-  protected void addMemberListener(final VM vm) {
-    SerializableRunnable addMemberListener = new SerializableRunnable(
-        "addMemberListener") {
-      public void run() {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        
-        SystemManagementService service = (SystemManagementService) getManagementService();
-
-        Set<DistributedMember> otherMemberSet = cache.getDistributionManager()
-            .getOtherNormalDistributionManagerIds();
-         
-        for (DistributedMember member : otherMemberSet) {
-          
-          MBeanServer mbeanServer = MBeanJMXAdapter.mbeanServer;
-
-          RegionNotif regionCreate = new RegionNotif();
-
-          ObjectName memberMBeanName;
-          try {
-            memberMBeanName = service.getMemberMBeanName(member);
-            Set<ObjectName> names = service.queryMBeanNames(member);
-            if(names != null){
-              for(ObjectName name : names){
-                LogWriterUtils.getLogWriter().info(
-                    "<ExpectedString> ObjectNames arr" + name
-                        + "</ExpectedString> ");
-              }
-            }
-            waitForProxy(memberMBeanName, MemberMXBean.class);
-            mbeanServer.addNotificationListener(memberMBeanName, regionCreate,
-                null, null);
-          } catch (NullPointerException e) {
-            Assert.fail("FAILED WITH EXCEPION", e);
-          } catch (InstanceNotFoundException e) {
-            Assert.fail("FAILED WITH EXCEPION", e);
-          } catch (Exception e) {
-            Assert.fail("FAILED WITH EXCEPION", e);
-          }
-
-        }
-
-      }
-    };
-    vm.invoke(addMemberListener);
-
+    });
   }
 
   /**
@@ -801,651 +573,557 @@ public class RegionManagementDUnitTest extends ManagementTestBase {
    * all the notifications which are propagated through all individual
    * MemberMBeans. Hence Region created/destroyed should be visible to this
    * listener
-   * 
-   * @param vm
    */
-  protected void addDistrListener(final VM vm) {
-    SerializableRunnable addDistrListener = new SerializableRunnable(
-        "addDistrListener") {
-      public void run() {
-        MBeanServer mbeanServer = MBeanJMXAdapter.mbeanServer;
+  private void addSystemNotificationListener(final VM managerVM) {
+    managerVM.invoke("addSystemNotificationListener", () -> {
+      awaitDistributedSystemMXBean();
 
-        DistrNotif regionCreate = new DistrNotif();
+      List<Notification> notifications = new ArrayList<>();
+      SYSTEM_NOTIFICATIONS_REF.set(notifications);
 
-        ObjectName systemMBeanName;
-        try {
-          systemMBeanName = MBeanJMXAdapter.getDistributedSystemName();
-          mbeanServer.addNotificationListener(systemMBeanName, regionCreate,
-              null, null);
-
-        } catch (NullPointerException e) {
-          Assert.fail("FAILED WITH EXCEPION", e);
-        } catch (InstanceNotFoundException e) {
-          Assert.fail("FAILED WITH EXCEPION", e);
+      DistributedSystemNotificationListener listener = new DistributedSystemNotificationListener(notifications);
+      ObjectName objectName = MBeanJMXAdapter.getDistributedSystemName();
+      ManagementFactory.getPlatformMBeanServer().addNotificationListener(objectName, listener, null, null);
+    });
+  }
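
MemberNotificationListener and DistributedSystemNotificationListener are referenced here but defined elsewhere in the file, outside this hunk. A plausible minimal shape, assuming they do nothing more than collect the notifications they receive into the list handed to them (a sketch, not the committed code; a thread-safe list may be needed in practice):

    private static class MemberNotificationListener implements javax.management.NotificationListener {
      private final List<Notification> notifications;

      MemberNotificationListener(final List<Notification> notifications) {
        this.notifications = notifications;
      }

      @Override
      public void handleNotification(final Notification notification, final Object handback) {
        notifications.add(notification); // recorded so verifyMemberNotifications can inspect them later
      }
    }

DistributedSystemNotificationListener would have the same shape, feeding the list checked by verifySystemNotifications.
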
 
+  private void verifyMemberNotifications(final VM managerVM, final String regionName, final int expectedMembers) {
+    managerVM.invoke("verifyMemberNotifications", () -> {
+      assertThat(MEMBER_NOTIFICATIONS_REF.get()).isNotNull();
+      assertThat(MEMBER_NOTIFICATIONS_REF.get()).hasSize(expectedMembers * 2);
+
+      int regionCreatedCount = 0;
+      int regionDestroyedCount = 0;
+      for (Notification notification : MEMBER_NOTIFICATIONS_REF.get()) {
+        if (JMXNotificationType.REGION_CREATED.equals(notification.getType())) {
+          regionCreatedCount++;
+          assertThat(notification.getMessage()).contains(regionName);
+        } else if (JMXNotificationType.REGION_CLOSED.equals(notification.getType())) {
+          regionDestroyedCount++;
+          assertThat(notification.getMessage()).contains(regionName);
+        } else {
+          fail("Unexpected notification type: " + notification.getType());
         }
-
       }
-    };
-    vm.invoke(addDistrListener);
 
+      assertThat(regionCreatedCount).isEqualTo(expectedMembers);
+      assertThat(regionDestroyedCount).isEqualTo(expectedMembers);
+    });
   }
-  
-  public void ensureProxyCleanup(final VM vm) {
-
-    SerializableRunnable ensureProxyCleanup = new SerializableRunnable(
-        "Ensure Proxy cleanup") {
-      public void run() {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        Set<DistributedMember> otherMemberSet = cache.getDistributionManager()
-            .getOtherNormalDistributionManagerIds();
-
-        final SystemManagementService service = (SystemManagementService) getManagementService();
-
-        for (final DistributedMember member : otherMemberSet) {
-          RegionMXBean bean = null;
-          try {
-
-            Wait.waitForCriterion(new WaitCriterion() {
-
-              RegionMXBean bean = null;
-
-              public String description() {
-                return "Waiting for the proxy to get deleted at managing node";
-              }
-
-              public boolean done() {
-                ObjectName objectName = service.getRegionMBeanName(member, REGION_PATH);
-                bean = service.getMBeanProxy(objectName, RegionMXBean.class);
-                boolean done = (bean == null);
-                return done;
-              }
-
-            }, MAX_WAIT, 500, true);
-
-          } catch (Exception e) {
-            fail("could not remove proxies in required time");
-
-          }
-          assertNull(bean);
 
+  //  <[javax.management.Notification[source=10.118.33.232(17632)<v1>-32770][type=gemfire.distributedsystem.cache.region.created][message=Region Created With Name /MANAGEMENT_TEST_REGION],
+  //  javax.management.Notification[source=10.118.33.232(17633)<v2>-32771][type=gemfire.distributedsystem.cache.region.created][message=Region Created With Name /MANAGEMENT_TEST_REGION],
+  //  javax.management.Notification[source=10.118.33.232(17634)<v3>-32772][type=gemfire.distributedsystem.cache.region.created][message=Region Created With Name /MANAGEMENT_TEST_REGION],
+  //  javax.management.Notification[source=10.118.33.232(17632)<v1>-32770][type=gemfire.distributedsystem.cache.region.closed][message=Region Destroyed/Closed With Name /MANAGEMENT_TEST_REGION],
+  //  javax.management.Notification[source=10.118.33.232(17633)<v2>-32771][type=gemfire.distributedsystem.cache.region.closed][message=Region Destroyed/Closed With Name /MANAGEMENT_TEST_REGION],
+  //  javax.management.Notification[source=10.118.33.232(17634)<v3>-32772][type=gemfire.distributedsystem.cache.region.closed][message=Region Destroyed/Closed With Name /MANAGEMENT_TEST_REGION]]>
+
+  private void verifySystemNotifications(final VM managerVM, final String regionName, final int expectedMembers) {
+    managerVM.invoke("verifySystemNotifications", () -> {
+      assertThat(SYSTEM_NOTIFICATIONS_REF.get()).isNotNull();
+      assertThat(SYSTEM_NOTIFICATIONS_REF.get()).hasSize(expectedMembers + 2); // 2 for the manager
+
+      int regionCreatedCount = 0;
+      int regionDestroyedCount = 0;
+      for (Notification notification : SYSTEM_NOTIFICATIONS_REF.get()) {
+        if (JMXNotificationType.REGION_CREATED.equals(notification.getType())) {
+          regionCreatedCount++;
+          assertThat(notification.getMessage()).contains(regionName);
+        } else if (JMXNotificationType.REGION_CLOSED.equals(notification.getType())) {
+          regionDestroyedCount++;
+          assertThat(notification.getMessage()).contains(regionName);
+        } else {
+          fail("Unexpected notification type: " + notification.getType());
         }
-
       }
-    };
-    vm.invoke(ensureProxyCleanup);
+
+      assertThat(regionCreatedCount).isEqualTo(1); // just the manager
+      assertThat(regionDestroyedCount).isEqualTo(expectedMembers + 1); // all 3 members + manager
+    });
   }
 
-  /**
-   * Verifies a Remote Distributed Region
-   * 
-   * @param vm
-   */
-  protected void verifyRemoteDistributedRegion(final VM vm, final int expectedMembers) throws Exception {
-    SerializableRunnable verifyRegion = new SerializableRunnable(
-        "Verify Distributed region") {
-      public void run() {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        Set<DistributedMember> otherMemberSet = cache.getDistributionManager()
-            .getOtherNormalDistributionManagerIds();
-
-        for (DistributedMember member : otherMemberSet) {
-          RegionMXBean bean = null;
-          try {
-            bean = MBeanUtil.getRegionMbeanProxy(member, REGION_PATH);
-          } catch (Exception e) {
-            InternalDistributedSystem.getLoggerI18n().fine(
-                "Undesired Result , RegionMBean Should not be null" + e);
-
-          }
-          assertNotNull(bean);
-
-          RegionAttributesData data = bean.listRegionAttributes();
-          assertNotNull(data);
-          MembershipAttributesData membershipData = bean
-              .listMembershipAttributes();
-          EvictionAttributesData evictionData = bean.listEvictionAttributes();
-          assertNotNull(membershipData);
-          assertNotNull(evictionData);
-          LogWriterUtils.getLogWriter().info(
-              "<ExpectedString> Membership Data is "
-                  + membershipData.toString() + "</ExpectedString> ");
-          LogWriterUtils.getLogWriter().info(
-              "<ExpectedString> Eviction Data is " + membershipData.toString()
-                  + "</ExpectedString> ");
- 
-        }
-        DistributedRegionMXBean bean = null;
-        try {
-          bean = MBeanUtil.getDistributedRegionMbean(REGION_PATH, expectedMembers);
-        } catch (Exception e) {
-          InternalDistributedSystem.getLoggerI18n().fine(
-              "Undesired Result , DistributedRegionMXBean Should not be null"
-                  + e);
-        }
+  //  <[javax.management.Notification[source=192.168.1.72(18496)<v27>-32770][type=gemfire.distributedsystem.cache.region.created][message=Region Created With Name /MANAGEMENT_TEST_REGION],
+  //  javax.management.Notification[source=192.168.1.72(18497)<v28>-32771][type=gemfire.distributedsystem.cache.region.closed][message=Region Destroyed/Closed With Name /MANAGEMENT_TEST_REGION],
+  //  javax.management.Notification[source=192.168.1.72(18498)<v29>-32772][type=gemfire.distributedsystem.cache.region.closed][message=Region Destroyed/Closed With Name /MANAGEMENT_TEST_REGION],
+  //  javax.management.Notification[source=192.168.1.72(18499)<v30>-32773][type=gemfire.distributedsystem.cache.region.closed][message=Region Destroyed/Closed With Name /MANAGEMENT_TEST_REGION],
+  //  javax.management.Notification[source=192.168.1.72(18496)<v27>-32770][type=gemfire.distributedsystem.cache.region.closed][message=Region Destroyed/Closed With Name /MANAGEMENT_TEST_REGION]]>
 
-        assertNotNull(bean);
-        assertEquals(REGION_PATH, bean.getFullPath());
-        
+  private void verifyProxyCleanup(final VM managerVM) {
+    managerVM.invoke("verifyProxyCleanup", () -> {
+      SystemManagementService service = getSystemManagementService_tmp();
 
+      Set<DistributedMember> otherMemberSet = getOtherNormalMembers_tmp();
+      for (final DistributedMember member : otherMemberSet) {
+        String alias = "Waiting for the proxy to get deleted at managing node";
+        await(alias).until(() -> assertThat(service.getMBeanProxy(service.getRegionMBeanName(member, REGION_PATH), RegionMXBean.class)).isNull());
       }
-    };
-    vm.invoke(verifyRegion);
+    });
   }
 
-  
-  protected void validateDistributedMBean(final VM vm, final int expectedMembers) {
-    SerializableRunnable verifyRegion = new SerializableRunnable(
-        "Verify Distributed region") {
-      public void run() {
-        DistributedRegionMXBean bean = null;
-        DistributedSystemMXBean sysMBean = null;
-        final ManagementService service = getManagementService();
-
-        if (expectedMembers == 0) {
-          try {
-            Wait.waitForCriterion(new WaitCriterion() {
-
-              RegionMXBean bean = null;
+  private void verifyRemoteDistributedRegion(final VM managerVM, final int expectedMembers) throws Exception {
+    managerVM.invoke("verifyRemoteDistributedRegion", () -> {
+      Set<DistributedMember> otherMemberSet = getOtherNormalMembers_tmp();
+      assertThat(otherMemberSet).hasSize(expectedMembers);
 
-              public String description() {
-                return "Waiting for the proxy to get deleted at managing node";
-              }
+      for (DistributedMember member : otherMemberSet) {
+        RegionMXBean regionMXBean = awaitRegionMXBeanProxy(member, REGION_PATH);
 
-              public boolean done() {
-                DistributedRegionMXBean bean = service
-                    .getDistributedRegionMXBean(REGION_PATH);
-                boolean done = (bean == null);
-                return done;
-              }
+        RegionAttributesData regionAttributesData = regionMXBean.listRegionAttributes();
+        assertThat(regionAttributesData).isNotNull();
 
-            }, MAX_WAIT, 500, true);
+        MembershipAttributesData membershipAttributesData = regionMXBean.listMembershipAttributes();
+        assertThat(membershipAttributesData).isNotNull();
 
-          } catch (Exception e) {
-            fail("could not remove Aggregate Bean in required time");
-
-          }
-          return;
-        }
+        EvictionAttributesData evictionAttributesData = regionMXBean.listEvictionAttributes();
+        assertThat(evictionAttributesData).isNotNull();
+      }
 
-        try {
-          bean = MBeanUtil.getDistributedRegionMbean(REGION_PATH,
-              expectedMembers);
-          sysMBean = service.getDistributedSystemMXBean();
-        } catch (Exception e) {
-          InternalDistributedSystem.getLoggerI18n().fine(
-              "Undesired Result , DistributedRegionMXBean Should not be null"
-                  + e);
-        }
+      DistributedRegionMXBean distributedRegionMXBean = awaitDistributedRegionMXBean(REGION_PATH, expectedMembers);
 
-        assertNotNull(bean);
-        assertEquals(REGION_PATH, bean.getFullPath());
-        assertEquals(expectedMembers, bean.getMemberCount());
-        assertEquals(expectedMembers, bean.getMembers().length);
-
-        // Check Stats related Data
-        // Add Mock testing
-        LogWriterUtils.getLogWriter()
-            .info(
-                "<ExpectedString> CacheListenerCallsAvgLatency is "
-                    + bean.getCacheListenerCallsAvgLatency()
-                    + "</ExpectedString> ");
-        LogWriterUtils.getLogWriter().info(
-            "<ExpectedString> CacheWriterCallsAvgLatency is "
-                + bean.getCacheWriterCallsAvgLatency() + "</ExpectedString> ");
-        LogWriterUtils.getLogWriter().info(
-            "<ExpectedString> CreatesRate is " + bean.getCreatesRate()
-                + "</ExpectedString> ");
+      assertThat(distributedRegionMXBean).isNotNull();
+      assertThat(distributedRegionMXBean.getFullPath()).isEqualTo(REGION_PATH);
+    });
+  }
 
+  private void verifyDistributedMBean(final VM managerVM, final int expectedMembers) {
+    managerVM.invoke("verifyDistributedMBean", () -> {
+      if (expectedMembers == 0) {
+        ManagementService service = getManagementService_tmp();
+        String alias = "Waiting for the proxy to get deleted at managing node";
+        await(alias).until(() -> assertThat(service.getDistributedRegionMXBean(REGION_PATH)).isNull());
+        return;
       }
-    };
-    // Test DistributedRegionMXBean
 
-    vm.invoke(verifyRegion);
-  }
-  /**
-   * Verifies a Remote Partition Region
-   * 
-   * @param vm
-   */
-  protected void validateRemotePartitionRegion(final VM vm) throws Exception {
-    SerializableRunnable verifyRegion = new SerializableRunnable(
-        "Verify Partition region") {
-      public void run() {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        Set<DistributedMember> otherMemberSet = cache.getDistributionManager()
-            .getOtherNormalDistributionManagerIds();
-
-        for (DistributedMember member : otherMemberSet) {
-          RegionMXBean bean = null;
-          try {
-            bean = MBeanUtil.getRegionMbeanProxy(member,
-                PARTITIONED_REGION_PATH);
-          } catch (Exception e) {
-            InternalDistributedSystem.getLoggerI18n().fine(
-                "Undesired Result , RegionMBean Should not be null");
-          }
-          PartitionAttributesData data = bean.listPartitionAttributes();
-          assertNotNull(data);
-        }
-        
-        ManagementService service = getManagementService();
-        DistributedRegionMXBean bean = service.getDistributedRegionMXBean(PARTITIONED_REGION_PATH);
-        assertEquals(3,bean.getMembers().length);
+      DistributedRegionMXBean distributedRegionMXBean = awaitDistributedRegionMXBean(REGION_PATH, expectedMembers);
 
-      }
+      assertThat(distributedRegionMXBean.getFullPath()).isEqualTo(REGION_PATH);
+      assertThat(distributedRegionMXBean.getMemberCount()).isEqualTo(expectedMembers);
+      assertThat(distributedRegionMXBean.getMembers()).hasSize(expectedMembers);
 
-    };
-    vm.invoke(verifyRegion);
+      // Check Stats related Data
+      //LogWriterUtils.getLogWriter().info("<ExpectedString> CacheListenerCallsAvgLatency is " + distributedRegionMXBean.getCacheListenerCallsAvgLatency() + "</ExpectedString> ");
+      //LogWriterUtils.getLogWriter().info("<ExpectedString> CacheWriterCallsAvgLatency is " + distributedRegionMXBean.getCacheWriterCallsAvgLatency() + "</ExpectedString> ");
+      //LogWriterUtils.getLogWriter().info("<ExpectedString> CreatesRate is " + distributedRegionMXBean.getCreatesRate() + "</ExpectedString> ");
+    });
   }
 
+  private void verifyRemotePartitionRegion(final VM managerVM) throws Exception {
+    managerVM.invoke("verifyRemotePartitionRegion", () -> {
+      Set<DistributedMember> otherMemberSet = getOtherNormalMembers_tmp();
 
-  
-  
-
-  /**
-   * Creates a Distributed Region
-   * 
-   * @param vm
-   */
-  protected void validateReplicateRegionAfterCreate(final VM vm) {
-    SerializableRunnable checkDistributedRegion = new SerializableRunnable(
-        "Check Distributed region") {
-      public void run() {
+      for (DistributedMember member : otherMemberSet) {
+        RegionMXBean regionMXBean = awaitRegionMXBeanProxy(member, PARTITIONED_REGION_PATH);
+        PartitionAttributesData partitionAttributesData = regionMXBean.listPartitionAttributes();
+        assertThat(partitionAttributesData).isNotNull();
+      }
 
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        SystemManagementService service = (SystemManagementService)getManagementService();
+      ManagementService service = getManagementService_tmp();
+      DistributedRegionMXBean distributedRegionMXBean = service.getDistributedRegionMXBean(PARTITIONED_REGION_PATH);
+      assertThat(distributedRegionMXBean.getMembers()).hasSize(3);
+    });
+  }
 
-        MBeanServer mbeanServer = MBeanJMXAdapter.mbeanServer;
-        RegionNotif test = new RegionNotif();
+  private void verifyReplicateRegionAfterCreate(final VM memberVM) {
+    memberVM.invoke("verifyReplicateRegionAfterCreate", () -> {
+      Cache cache = getCache_tmp();
 
-        String memberId = MBeanJMXAdapter.getMemberNameOrId(cache
-            .getDistributedSystem().getDistributedMember());
+      String memberId = MBeanJMXAdapter.getMemberNameOrId(cache.getDistributedSystem().getDistributedMember());
+      ObjectName objectName = ObjectName.getInstance("GemFire:type=Member,member=" + memberId);
 
-        ObjectName memberMBeanName;
-        try {
-          memberMBeanName = ObjectName
-              .getInstance("GemFire:type=Member,member=" + memberId);
-          mbeanServer
-              .addNotificationListener(memberMBeanName, test, null, null);
-        } catch (MalformedObjectNameException e) {
+//      List<Notification> notifications = new ArrayList<>();
+//      MEMBER_NOTIFICATIONS_REF.set(notifications);
+//
+//      MemberNotificationListener listener = new MemberNotificationListener(notifications);
+//      ManagementFactory.getPlatformMBeanServer().addNotificationListener(objectName, listener, null, null);
 
-          Assert.fail("FAILED WITH EXCEPION", e);
-        } catch (NullPointerException e) {
-          Assert.fail("FAILED WITH EXCEPION", e);
+      SystemManagementService service = getSystemManagementService_tmp();
+      RegionMXBean regionMXBean = service.getLocalRegionMBean(REGION_PATH);
+      assertThat(regionMXBean).isNotNull();
 
-        } catch (InstanceNotFoundException e) {
-          Assert.fail("FAILED WITH EXCEPION", e);
+      Region region = cache.getRegion(REGION_PATH);
+      RegionAttributes regionAttributes = region.getAttributes();
 
-        }
+      RegionAttributesData regionAttributesData = regionMXBean.listRegionAttributes();
+      verifyRegionAttributes(regionAttributes, regionAttributesData);
 
-        assertNotNull(service.getLocalRegionMBean(REGION_PATH));
+      MembershipAttributesData membershipData = regionMXBean.listMembershipAttributes();
+      assertThat(membershipData).isNotNull();
 
-        RegionMXBean bean = service.getLocalRegionMBean(REGION_PATH);
-        Region region = cache.getRegion(REGION_PATH);
+      EvictionAttributesData evictionData = regionMXBean.listEvictionAttributes();
+      assertThat(evictionData).isNotNull();
+    });
+  }
 
-        RegionAttributes regAttrs = region.getAttributes();
+  private void verifyPartitionRegionAfterCreate(final VM memberVM) {
+    memberVM.invoke("verifyPartitionRegionAfterCreate", () -> {
+      Region region = getCache_tmp().getRegion(PARTITIONED_REGION_PATH);
 
-        RegionAttributesData data = bean.listRegionAttributes();
+      SystemManagementService service = getSystemManagementService_tmp();
+      RegionMXBean regionMXBean = service.getLocalRegionMBean(PARTITIONED_REGION_PATH);
 
-        assertRegionAttributes(regAttrs, data);
-        MembershipAttributesData membershipData = bean
-            .listMembershipAttributes();
-        EvictionAttributesData evictionData = bean.listEvictionAttributes();
-        assertNotNull(membershipData);
-        assertNotNull(evictionData);
-        LogWriterUtils.getLogWriter().info(
-            "<ExpectedString> Membership Data is " + membershipData.toString()
-                + "</ExpectedString> ");
-        LogWriterUtils.getLogWriter().info(
-            "<ExpectedString> Eviction Data is " + membershipData.toString()
-                + "</ExpectedString> ");
-      }
-    };
-    vm.invoke(checkDistributedRegion);
+      verifyPartitionData(region.getAttributes(), regionMXBean.listPartitionAttributes());
+    });
   }
 
-  /**
-   * Creates a partition Region
-   * 
-   * @param vm
-   */
-  protected void validatePartitionRegionAfterCreate(final VM vm) {
-    SerializableRunnable createParRegion = new SerializableRunnable(
-        "Create Partitioned region") {
-      public void run() {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        SystemManagementService service = (SystemManagementService)getManagementService();
-        assertNotNull(service.getLocalRegionMBean(PARTITIONED_REGION_PATH));
-        RegionMXBean bean = service
-            .getLocalRegionMBean(PARTITIONED_REGION_PATH);
-        Region partitionedRegion = cache.getRegion(PARTITIONED_REGION_PATH);
-        RegionAttributes regAttrs = partitionedRegion.getAttributes();
-        RegionAttributesData data = bean.listRegionAttributes();
-        PartitionAttributesData parData = bean.listPartitionAttributes();
-        assertPartitionData(regAttrs, parData);
-        
-      }
-    };
-    vm.invoke(createParRegion);
-  }
+  private void verifyReplicatedRegionAfterClose(final VM memberVM) {
+    memberVM.invoke("verifyReplicatedRegionAfterClose", () -> {
+      SystemManagementService service = getSystemManagementService_tmp();
+      RegionMXBean regionMXBean = service.getLocalRegionMBean(REGION_PATH);
+      assertThat(regionMXBean).isNull();
 
-  /**
-   * closes a Distributed Region
-   * 
-   * @param vm
-   */
-  protected void validateReplicatedRegionAfterClose(final VM vm) {
-    SerializableRunnable closeRegion = new SerializableRunnable(
-        "Close Distributed region") {
-      public void run() {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        SystemManagementService service = (SystemManagementService)getManagementService();
-        RegionMXBean bean = null;
-        try {
-          bean = service.getLocalRegionMBean(REGION_PATH);
-        } catch (ManagementException mgtEx) {
-          LogWriterUtils.getLogWriter().info(
-              "<ExpectedString> Expected Exception  "
-                  + mgtEx.getLocalizedMessage() + "</ExpectedString> ");
-        }
-        assertNull(bean);
-        ObjectName regionObjectName = service.getRegionMBeanName(cache
-            .getDistributedSystem().getDistributedMember(), REGION_PATH);
-        assertNull(service.getLocalManager().getManagementResourceRepo()
-            .getEntryFromLocalMonitoringRegion(regionObjectName));
-      }
-    };
-    vm.invoke(closeRegion);
+      ObjectName objectName = service.getRegionMBeanName(getCache_tmp().getDistributedSystem().getDistributedMember(), REGION_PATH);
+      assertThat(service.getLocalManager().getManagementResourceRepo().getEntryFromLocalMonitoringRegion(objectName)).isNull();
+    });
   }
 
-  /**
-   * close a partition Region
-   * 
-   * @param vm
-   */
-  protected void validatePartitionRegionAfterClose(final VM vm) {
-    SerializableRunnable closeParRegion = new SerializableRunnable(
-        "Close Partition region") {
-      public void run() {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        ManagementService service = getManagementService();
-        LogWriterUtils.getLogWriter().info("Closing Par Region");
-        RegionMXBean bean = null;
-        try {
-          bean = service.getLocalRegionMBean(PARTITIONED_REGION_PATH);
-        } catch (ManagementException mgtEx) {
-          LogWriterUtils.getLogWriter().info(
-              "<ExpectedString> Expected Exception  "
-                  + mgtEx.getLocalizedMessage() + "</ExpectedString> ");
-        }
-        assertNull(bean);
-      }
-    };
-    vm.invoke(closeParRegion);
+  private void verifyPartitionRegionAfterClose(final VM memberVM) {
+    memberVM.invoke("verifyPartitionRegionAfterClose", () -> {
+      ManagementService service = getManagementService_tmp();
+      RegionMXBean regionMXBean = service.getLocalRegionMBean(PARTITIONED_REGION_PATH);
+      assertThat(regionMXBean).isNull();
+    });
   }
 
   /**
-   * Closes Fixed Partition region
-   * 
-   * @param vm
-   */
-  protected void closeFixedPartitionRegion(final VM vm) {
-    SerializableRunnable closeParRegion = new SerializableRunnable(
-        "Close Fixed Partition region") {
-      public void run() {
-        GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-        ManagementService service = getManagementService();
-        LogWriterUtils.getLogWriter().info("Closing Fixed Par Region");
-        Region region = cache.getRegion(FIXED_PR_PATH);
-        region.close();
-        RegionMXBean bean = null;
-        try {
-          bean = service.getLocalRegionMBean(FIXED_PR_PATH);
-        } catch (ManagementException mgtEx) {
-          LogWriterUtils.getLogWriter().info(
-              "<ExpectedString> Expected Exception  "
-                  + mgtEx.getLocalizedMessage() + "</ExpectedString> ");
-        }
-        assertNull(bean);
-      }
-    };
-    vm.invoke(closeParRegion);
-  }
-
-  /**
-   * Asserts and verifies all the partition related data
-   * 
-   * @param regAttrs
-   * @param partitionAttributesData
+   * Invoked in member VMs
    */
+  private void verifyPartitionData(final RegionAttributes expectedRegionAttributes, final PartitionAttributesData partitionAttributesData) {
+    PartitionAttributes expectedPartitionAttributes = expectedRegionAttributes.getPartitionAttributes();
 
-  protected static void assertPartitionData(RegionAttributes regAttrs,
-      PartitionAttributesData partitionAttributesData) {
-    PartitionAttributesData data = partitionAttributesData;
+    assertThat(partitionAttributesData.getRedundantCopies()).isEqualTo(expectedPartitionAttributes.getRedundantCopies());
 
-    PartitionAttributes partAttrs = regAttrs.getPartitionAttributes();
+    assertThat(partitionAttributesData.getTotalMaxMemory()).isEqualTo(expectedPartitionAttributes.getTotalMaxMemory());
 
-    int redundantCopies = partAttrs.getRedundantCopies();
-    assertEquals(redundantCopies, data.getRedundantCopies());
-    long totalMaxMemory = partAttrs.getTotalMaxMemory();
-    assertEquals(totalMaxMemory, data.getTotalMaxMemory());
     // Total number of buckets for whole region
-    int totalNumBuckets = partAttrs.getTotalNumBuckets();
-    assertEquals(totalNumBuckets, data.getTotalNumBuckets());
+    assertThat(partitionAttributesData.getTotalNumBuckets()).isEqualTo(expectedPartitionAttributes.getTotalNumBuckets());
 
-    int localMaxMemory = partAttrs.getLocalMaxMemory();
-    assertEquals(localMaxMemory, data.getLocalMaxMemory());
+    assertThat(partitionAttributesData.getLocalMaxMemory()).isEqualTo(expectedPartitionAttributes.getLocalMaxMemory());
 
-    String colocatedWith = partAttrs.getColocatedWith();
-    assertEquals(colocatedWith, data.getColocatedWith());
+    assertThat(partitionAttributesData.getColocatedWith()).isEqualTo(expectedPartitionAttributes.getColocatedWith());
 
     String partitionResolver = null;
-    if (partAttrs.getPartitionResolver() != null) {
-      partitionResolver = partAttrs.getPartitionResolver().getName();
+    if (expectedPartitionAttributes.getPartitionResolver() != null) { // TODO: these conditionals should be deterministic
+      partitionResolver = expectedPartitionAttributes.getPartitionResolver().getName();
     }
+    assertThat(partitionAttributesData.getPartitionResolver()).isEqualTo(partitionResolver);
 
-    assertEquals(partitionResolver, data.getPartitionResolver());
+    assertThat(partitionAttributesData.getRecoveryDelay()).isEqualTo(expectedPartitionAttributes.getRecoveryDelay());
 
-    long recoveryDelay = partAttrs.getRecoveryDelay();
-    assertEquals(recoveryDelay, data.getRecoveryDelay());
+    assertThat(partitionAttributesData.getStartupRecoveryDelay()).isEqualTo(expectedPartitionAttributes.getStartupRecoveryDelay());
 
-    long startupRecoveryDelay = partAttrs.getStartupRecoveryDelay();
-    assertEquals(startupRecoveryDelay, data.getStartupRecoveryDelay());
+    if (expectedPartitionAttributes.getPartitionListeners() != null) {
+      for (int i = 0; i < expectedPartitionAttributes.getPartitionListeners().length; i++) {
+        //assertEquals((expectedPartitionAttributes.getPartitionListeners())[i].getClass().getCanonicalName(), partitionAttributesData.getPartitionListeners()[i]);
+        assertThat(partitionAttributesData.getPartitionListeners()[i])
+          .isEqualTo(expectedPartitionAttributes.getPartitionListeners()[i].getClass().getCanonicalName());
 
-    if (partAttrs.getPartitionListeners() != null) {
-      for (int i = 0; i < partAttrs.getPartitionListeners().length; i++) {
-        assertEquals((partAttrs.getPartitionListeners())[i].getClass()
-            .getCanonicalName(), data.getPartitionListeners()[i]);
       }
-
     }
-
   }
 
   /**
-   * Checks all Region Attributes
-   * 
-   * @param regAttrs
-   * @param data
+   * Invoked in member VMs
    */
-  protected static void assertRegionAttributes(RegionAttributes regAttrs,
-      RegionAttributesData data) {
-
+  private void verifyRegionAttributes(final RegionAttributes regionAttributes, final RegionAttributesData regionAttributesData) {
     String compressorClassName = null;
-    if (regAttrs.getCompressor() != null) {
-      compressorClassName = regAttrs.getCompressor().getClass()
-          .getCanonicalName();
+    if (regionAttributes.getCompressor() != null) { // TODO: these conditionals should be deterministic
+      compressorClassName = regionAttributes.getCompressor().getClass().getCanonicalName();
     }
-    assertEquals(compressorClassName, data.getCompressorClassName());
+    assertThat(regionAttributesData.getCompressorClassName()).isEqualTo(compressorClassName);
+
     String cacheLoaderClassName = null;
-    if (regAttrs.getCacheLoader() != null) {
-      cacheLoaderClassName = regAttrs.getCacheLoader().getClass()
-          .getCanonicalName();
+    if (regionAttributes.getCacheLoader() != null) {
+      cacheLoaderClassName = regionAttributes.getCacheLoader().getClass().getCanonicalName();
     }
-    assertEquals(cacheLoaderClassName, data.getCacheLoaderClassName());
+    assertThat(regionAttributesData.getCacheLoaderClassName()).isEqualTo(cacheLoaderClassName);
+
     String cacheWriteClassName = null;
-    if (regAttrs.getCacheWriter() != null) {
-      cacheWriteClassName = regAttrs.getCacheWriter().getClass()
-          .getCanonicalName();
+    if (regionAttributes.getCacheWriter() != null) {
+      cacheWriteClassName = regionAttributes.getCacheWriter().getClass().getCanonicalName();
     }
-    assertEquals(cacheWriteClassName, data.getCacheWriterClassName());
+    assertThat(regionAttributesData.getCacheWriterClassName()).isEqualTo(cacheWriteClassName);
+
     String keyConstraintClassName = null;
-    if (regAttrs.getKeyConstraint() != null) {
-      keyConstraintClassName = regAttrs.getKeyConstraint().getName();
+    if (regionAttributes.getKeyConstraint() != null) {
+      keyConstraintClassName = regionAttributes.getKeyConstraint().getName();
     }
-    assertEquals(keyConstraintClassName, data.getKeyConstraintClassName());
+    assertThat(regionAttributesData.getKeyConstraintClassName()).isEqualTo(keyConstraintClassName);
+
     String valueContstaintClassName = null;
-    if (regAttrs.getValueConstraint() != null) {
-      valueContstaintClassName = regAttrs.getValueConstraint().getName();
+    if (regionAttributes.getValueConstraint() != null) {
+      valueContstaintClassName = regionAttributes.getValueConstraint().getName();
     }
-    assertEquals(valueContstaintClassName, data.getValueConstraintClassName());
-    CacheListener[] listeners = regAttrs.getCacheListeners();
-    
+    assertThat(regionAttributesData.getValueConstraintClassName()).isEqualTo(valueContstaintClassName);
 
+    CacheListener[] listeners = regionAttributes.getCacheListeners();
     if (listeners != null) {
-      String[] value = data.getCacheListeners();
+      String[] value = regionAttributesData.getCacheListeners();
       for (int i = 0; i < listeners.length; i++) {
-        assertEquals(value[i], listeners[i].getClass().getName());
+        assertThat(listeners[i].getClass().getName()).isEqualTo(value[i]);
       }
-      
     }
-    
- 
-    
-
-    int regionTimeToLive = regAttrs.getRegionTimeToLive().getTimeout();
-
-    assertEquals(regionTimeToLive, data.getRegionTimeToLive());
-
-    int regionIdleTimeout = regAttrs.getRegionIdleTimeout().getTimeout();
 
-    assertEquals(regionIdleTimeout, data.getRegionIdleTimeout());
+    assertThat(regionAttributesData.getRegionTimeToLive()).isEqualTo(regionAttributes.getRegionTimeToLive().getTimeout());
 
-    int entryTimeToLive = regAttrs.getEntryTimeToLive().getTimeout();
+    assertThat(regionAttributesData.getRegionIdleTimeout()).isEqualTo(regionAttributes.getRegionIdleTimeout().getTimeout());
 
-    assertEquals(entryTimeToLive, data.getEntryTimeToLive());
+    assertThat(regionAttributesData.getEntryTimeToLive()).isEqualTo(regionAttributes.getEntryTimeToLive().getTimeout());
 
-    int entryIdleTimeout = regAttrs.getEntryIdleTimeout().getTimeout();
+    assertThat(regionAttributesData.getEntryIdleTimeout()).isEqualTo(regionAttributes.getEntryIdleTimeout().getTimeout());
 
-    assertEquals(entryIdleTimeout, data.getEntryIdleTimeout());
     String customEntryTimeToLive = null;
-    Object o1 = regAttrs.getCustomEntryTimeToLive();
+    Object o1 = regionAttributes.getCustomEntryTimeToLive();
     if (o1 != null) {
       customEntryTimeToLive = o1.toString();
     }
-    assertEquals(customEntryTimeToLive, data.getCustomEntryTimeToLive());
+    assertThat(regionAttributesData.getCustomEntryTimeToLive()).isEqualTo(customEntryTimeToLive);
 
     String customEntryIdleTimeout = null;
-    Object o2 = regAttrs.getCustomEntryIdleTimeout();
+    Object o2 = regionAttributes.getCustomEntryIdleTimeout();
     if (o2 != null) {
       customEntryIdleTimeout = o2.toString();
     }
-    assertEquals(customEntryIdleTimeout, data.getCustomEntryIdleTimeout());
+    assertThat(regionAttributesData.getCustomEntryIdleTimeout()).isEqualTo(customEntryIdleTimeout);
 
-    boolean ignoreJTA = regAttrs.getIgnoreJTA();
-    assertEquals(ignoreJTA, data.isIgnoreJTA());
+    assertThat(regionAttributesData.isIgnoreJTA()).isEqualTo(regionAttributes.getIgnoreJTA());
 
-    String dataPolicy = regAttrs.getDataPolicy().toString();
-    assertEquals(dataPolicy, data.getDataPolicy());
+    assertThat(regionAttributesData.getDataPolicy()).isEqualTo(regionAttributes.getDataPolicy().toString());
 
-    String scope = regAttrs.getScope().toString();
-    assertEquals(scope, data.getScope());
+    assertThat(regionAttributesData.getScope()).isEqualTo(regionAttributes.getScope().toString());
 
-    int initialCapacity = regAttrs.getInitialCapacity();
-    assertEquals(initialCapacity, data.getInitialCapacity());
-    float loadFactor = regAttrs.getLoadFactor();
-    assertEquals(loadFactor, data.getLoadFactor(),0);
+    assertThat(regionAttributesData.getInitialCapacity()).isEqualTo(regionAttributes.getInitialCapacity());
 
-    boolean lockGrantor = regAttrs.isLockGrantor();
-    assertEquals(lockGrantor, data.isLockGrantor());
+    assertThat(regionAttributesData.getLoadFactor()).isEqualTo(regionAttributes.getLoadFactor());
 
-    boolean multicastEnabled = regAttrs.getMulticastEnabled();
-    assertEquals(multicastEnabled, data.isMulticastEnabled());
+    assertThat(regionAttributesData.isLockGrantor()).isEqualTo(regionAttributes.isLockGrantor());
 
-    int concurrencyLevel = regAttrs.getConcurrencyLevel();
-    assertEquals(concurrencyLevel, data.getConcurrencyLevel());
+    assertThat(regionAttributesData.isMulticastEnabled()).isEqualTo(regionAttributes.getMulticastEnabled());
 
-    boolean indexMaintenanceSynchronous = regAttrs
-        .getIndexMaintenanceSynchronous();
-    assertEquals(indexMaintenanceSynchronous, data
-        .isIndexMaintenanceSynchronous());
+    assertThat(regionAttributesData.getConcurrencyLevel()).isEqualTo(regionAttributes.getConcurrencyLevel());
 
-    boolean statisticsEnabled = regAttrs.getStatisticsEnabled();
+    assertThat(regionAttributesData.isIndexMaintenanceSynchronous()).isEqualTo(regionAttributes.getIndexMaintenanceSynchronous());
 
-    assertEquals(statisticsEnabled, data.isStatisticsEnabled());
+    assertThat(regionAttributesData.isStatisticsEnabled()).isEqualTo(regionAttributes.getStatisticsEnabled());
 
-    boolean subsciptionConflationEnabled = regAttrs
-        .getEnableSubscriptionConflation();
-    assertEquals(subsciptionConflationEnabled, data
-        .isSubscriptionConflationEnabled());
+    assertThat(regionAttributesData.isSubscriptionConflationEnabled()).isEqualTo(regionAttributes.getEnableSubscriptionConflation());
 
-    boolean asyncConflationEnabled = regAttrs.getEnableAsyncConflation();
-    assertEquals(asyncConflationEnabled, data.isAsyncConflationEnabled());
+    assertThat(regionAttributesData.isAsyncConflationEnabled()).isEqualTo(regionAttributes.getEnableAsyncConflation());
 
-    String poolName = regAttrs.getPoolName();
-    assertEquals(poolName, data.getPoolName());
+    assertThat(regionAttributesData.getPoolName()).isEqualTo(regionAttributes.getPoolName());
 
-    boolean isCloningEnabled = regAttrs.getCloningEnabled();
-    assertEquals(isCloningEnabled, data.isCloningEnabled());
+    assertThat(regionAttributesData.isCloningEnabled()).isEqualTo(regionAttributes.getCloningEnabled());
 
-    String diskStoreName = regAttrs.getDiskStoreName();
-    assertEquals(diskStoreName, data.getDiskStoreName());
+    assertThat(regionAttributesData.getDiskStoreName()).isEqualTo(regionAttributes.getDiskStoreName());
 
     String interestPolicy = null;
-    if (regAttrs.getSubscriptionAttributes() != null) {
-      interestPolicy = regAttrs.getSubscriptionAttributes().getInterestPolicy()
-          .toString();
+    if (regionAttributes.getSubscriptionAttributes() != null) {
+      interestPolicy = regionAttributes.getSubscriptionAttributes().getInterestPolicy().toString();
     }
-    assertEquals(interestPolicy, data.getInterestPolicy());
-    boolean diskSynchronus = regAttrs.isDiskSynchronous();
-    assertEquals(diskSynchronus, data.isDiskSynchronous());
+    assertThat(regionAttributesData.getInterestPolicy()).isEqualTo(interestPolicy);
+
+    assertThat(regionAttributesData.isDiskSynchronous()).isEqualTo(regionAttributes.isDiskSynchronous());
   }
 
-  /**
-   * Verifies Region related Statistics
-   */
-  public void verifyStatistics() {
+  private void verifyRemoteFixedPartitionRegion(final VM managerVM) throws Exception {
+    managerVM.invoke("Verify Partition region", () -> {
+      Set<DistributedMember> otherMemberSet = getOtherNormalMembers_tmp();
+
+      for (DistributedMember member : otherMemberSet) {
+        RegionMXBean bean = awaitRegionMXBeanProxy(member, FIXED_PR_PATH);
+
+        PartitionAttributesData data = bean.listPartitionAttributes();
+        assertThat(data).isNotNull();
+
+        FixedPartitionAttributesData[] fixedPrData = bean.listFixedPartitionAttributes();
+        assertThat(fixedPrData).isNotNull();
+        assertThat(fixedPrData).hasSize(3);
+
+        for (int i = 0; i < fixedPrData.length; i++) {
+          //LogWriterUtils.getLogWriter().info("<ExpectedString> Remote PR Data is " + fixedPrData[i] + "</ExpectedString> ");
+        }
+      }
+    });
+  }
+
+  private void createDistributedRegion_tmp(final VM vm, final String regionName) {
+    vm.invoke(() -> createDistributedRegion_tmp(regionName));
+  }
+
+  private void createDistributedRegion_tmp(final String regionName) {
+    getCache_tmp().createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
+  }
+
+  private void createPartitionRegion_tmp(final VM vm, final String partitionRegionName) {
+    vm.invoke("Create Partitioned region", () -> {
+      SystemManagementService service = getSystemManagementService_tmp();
+      RegionFactory regionFactory = getCache_tmp().createRegionFactory(RegionShortcut.PARTITION_REDUNDANT);
+      regionFactory.create(partitionRegionName);
+    });
+  }
+
+  private void createLocalRegion_tmp(final VM vm, final String localRegionName) {
+    vm.invoke("Create Local region", () -> {
+      SystemManagementService service = getSystemManagementService_tmp();
+      RegionFactory regionFactory = getCache_tmp().createRegionFactory(RegionShortcut.LOCAL);
+      regionFactory.create(localRegionName);
+    });
+  }
+
+  private void createSubRegion_tmp(final VM vm, final String parentRegionPath, final String subregionName) {
+    vm.invoke("Create Sub region", () -> {
+      SystemManagementService service = getSystemManagementService_tmp();
+      Region region = getCache_tmp().getRegion(parentRegionPath);
+      region.createSubregion(subregionName, region.getAttributes());
+    });
+  }
+
+  private String getDistributedMemberId_tmp(final VM vm) {
+    return vm.invoke("getMemberId", () -> getCache_tmp().getDistributedSystem().getDistributedMember().getId());
+  }
+
+  private DistributedMember getDistributedMember_tmp(final VM anyVM) {
+    return anyVM.invoke("getDistributedMember_tmp", () -> getCache_tmp().getDistributedSystem().getDistributedMember());
+  }
+
+  private SystemManagementService getSystemManagementService_tmp() {
+    return (SystemManagementService) getManagementService_tmp();
+  }
 
+  private DM getDistributionManager_tmp() {
+    return ((GemFireCacheImpl)getCache_tmp()).getDistributionManager();
   }
 
+  private DistributedMember getDistributedMember_tmp() {
+    return getCache_tmp().getDistributedSystem().getDistributedMember();
+  }
+
+  private Set<DistributedMember> getOtherNormalMembers_tmp() {
+    Set<DistributedMember> allMembers = new HashSet<>(getDistributionManager_tmp().getNormalDistributionManagerIds());
+    allMembers.remove(getDistributedMember_tmp());
+    return allMembers;
+  }
+
+  private void awaitMemberCount(final int expectedCount) {
+    DistributedSystemMXBean distributedSystemMXBean = awaitDistributedSystemMXBean();
+    await().until(() -> assertThat(distributedSystemMXBean.getMemberCount()).isEqualTo(expectedCount));
+  }
+
+  private DistributedRegionMXBean awaitDistributedRegionMXBean(final String name) {
+    SystemManagementService service = getSystemManagementService_tmp();
+
+    await().until(() -> assertThat(service.getDistributedRegionMXBean(name)).isNotNull());
+
+    return service.getDistributedRegionMXBean(name);
+  }
+
+  private DistributedRegionMXBean awaitDistributedRegionMXBean(final String name, final int memberCount) {
+    SystemManagementService service = getSystemManagementService_tmp();
+
+    await().until(() -> assertThat(service.getDistributedRegionMXBean(name)).isNotNull());
+    await().until(() -> assertThat(service.getDistributedRegionMXBean(name).getMemberCount()).isEqualTo(memberCount));
+
+    return service.getDistributedRegionMXBean(name);
+  }
+
+  private RegionMXBean awaitRegionMXBeanProxy(final DistributedMember member, final String name) {
+    SystemManagementService service = getSystemManagementService_tmp();
+    ObjectName objectName = service.getRegionMBeanName(member, name);
+    String alias = "awaiting RegionMXBean proxy for " + member;
+
+    await(alias).until(() -> assertThat(service.getMBeanProxy(objectName, RegionMXBean.class)).isNotNull());
+
+    return service.getMBeanProxy(objectName, RegionMXBean.class);
+  }
+
+  private RegionMXBean awaitRegionMXBeanProxy(final ObjectName objectName) {
+    SystemManagementService service = getSystemManagementService_tmp();
+
+    await().until(() -> assertThat(service.getMBeanProxy(objectName, RegionMXBean.class)).isNotNull());
+
+    return service.getMBeanProxy(objectName, RegionMXBean.class);
+  }
+
+  private MemberMXBean awaitMemberMXBeanProxy(final DistributedMember member) {
+    SystemManagementService service = getSystemManagementService_tmp();
+    ObjectName objectName = service.getMemberMBeanName(member);
+    Stri

<TRUNCATED>
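
The refactoring above replaces JUnit's assertEquals(expected, actual) calls with AssertJ's assertThat(actual).isEqualTo(expected), which reads left to right and keeps the actual and expected values in unambiguous positions. A minimal, self-contained sketch of the two styles side by side (the test class and values below are illustrative, not taken from the Geode test):

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class AssertStyleExample {

  @Test
  public void comparesJUnitAndAssertJStyles() {
    String expected = "REPLICATE";
    String actual = "REPLICATE";

    // JUnit style: expected value first, then the actual value.
    assertEquals(expected, actual);

    // AssertJ style: start from the actual value, then state the expectation.
    assertThat(actual).isEqualTo(expected);
  }
}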


[20/50] [abbrv] incubator-geode git commit: GEODE-2004: Create/update/delete query through rest api should require DATA:READ instead of DATA:WRITE

Posted by kl...@apache.org.
GEODE-2004: Create/update/delete query through rest api should require DATA:READ instead of DATA:WRITE

* This closes #262
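
A minimal sketch of the endpoint pattern this commit moves the query resources to (ExampleQueryController is illustrative, not the real QueryAccessController; it assumes a bean named securityService exposing authorize(resource, operation) is registered, as the Geode REST security integration does):

import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.security.access.prepost.PreAuthorize;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class ExampleQueryController {

  // Preparing or running an OQL query only reads data, so the endpoint is
  // guarded by DATA:READ; writes to region entries still require DATA:WRITE.
  @RequestMapping(method = RequestMethod.POST, value = "/queries")
  @PreAuthorize("@securityService.authorize('DATA', 'READ')")
  public ResponseEntity<?> create(@RequestParam("id") String queryId,
                                  @RequestParam(value = "q", required = false) String oqlInUrl,
                                  @RequestBody(required = false) String oqlInBody) {
    // The real controller compiles and stores the OQL statement; omitted here.
    return new ResponseEntity<>(HttpStatus.CREATED);
  }
}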


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/cf09ac94
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/cf09ac94
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/cf09ac94

Branch: refs/heads/feature/GEODE-1930
Commit: cf09ac94ddbd3c0a8dca9a94eac53d95871f1691
Parents: 5abe957
Author: Kevin Duling <kd...@pivotal.io>
Authored: Mon Oct 17 11:02:54 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Mon Oct 17 11:55:44 2016 -0700

----------------------------------------------------------------------
 .../geode/rest/internal/web/RestSecurityIntegrationTest.java   | 6 +++---
 .../rest/internal/web/controllers/QueryAccessController.java   | 6 +++---
 2 files changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/cf09ac94/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityIntegrationTest.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityIntegrationTest.java b/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityIntegrationTest.java
index ef019a4..6e91894 100644
--- a/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityIntegrationTest.java
+++ b/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityIntegrationTest.java
@@ -138,7 +138,7 @@ public class RestSecurityIntegrationTest {
     assertEquals(401, getCode(response));
     response = doPost("/queries?id=0&q=", "stranger", "1234567", "");
     assertEquals(403, getCode(response));
-    response = doPost("/queries?id=0&q=", "dataWriter", "1234567", "");
+    response = doPost("/queries?id=0&q=", "dataReader", "1234567", "");
     // because we're only testing the security of the endpoint, not the endpoint functionality, a 500 is acceptable
     assertEquals(500, getCode(response));
   }
@@ -149,7 +149,7 @@ public class RestSecurityIntegrationTest {
     assertEquals(401, getCode(response));
     response = doPost("/queries/id", "stranger", "1234567", "{\"id\" : \"foo\"}");
     assertEquals(403, getCode(response));
-    response = doPost("/queries/id", "dataWriter", "1234567", "{\"id\" : \"foo\"}");
+    response = doPost("/queries/id", "dataReader", "1234567", "{\"id\" : \"foo\"}");
     // because we're only testing the security of the endpoint, not the endpoint functionality, a 500 is acceptable
     assertEquals(500, getCode(response));
   }
@@ -160,7 +160,7 @@ public class RestSecurityIntegrationTest {
     assertEquals(401, getCode(response));
     response = doPut("/queries/id", "stranger", "1234567", "{\"id\" : \"foo\"}");
     assertEquals(403, getCode(response));
-    response = doPut("/queries/id", "dataWriter", "1234567", "{\"id\" : \"foo\"}");
+    response = doPut("/queries/id", "dataReader", "1234567", "{\"id\" : \"foo\"}");
     // We should get a 404 because we're trying to update a query that doesn't exist
     assertEquals(404, getCode(response));
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/cf09ac94/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/QueryAccessController.java
----------------------------------------------------------------------
diff --git a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/QueryAccessController.java b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/QueryAccessController.java
index e43e5e6..d13c99c 100644
--- a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/QueryAccessController.java
+++ b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/QueryAccessController.java
@@ -137,7 +137,7 @@ public class QueryAccessController extends AbstractBaseController {
     @ApiResponse( code = 409, message = "QueryId already assigned to other query." ),
     @ApiResponse( code = 500, message = "GemFire throws an error or exception." )
   } )
-  @PreAuthorize("@securityService.authorize('DATA', 'WRITE')")
+  @PreAuthorize("@securityService.authorize('DATA', 'READ')")
   public ResponseEntity<?> create(@RequestParam("id") final String queryId,
                                   @RequestParam(value = "q", required = false) String oqlInUrl,
                                   @RequestBody(required = false) final String oqlInBody)
@@ -234,7 +234,7 @@ public class QueryAccessController extends AbstractBaseController {
   } )
   @ResponseBody
   @ResponseStatus(HttpStatus.OK)
-  @PreAuthorize("@securityService.authorize('DATA', 'WRITE')")
+  @PreAuthorize("@securityService.authorize('DATA', 'READ')")
   public ResponseEntity<String> runNamedQuery(@PathVariable("query") String queryId,
                                               @RequestBody String arguments)
   {
@@ -310,7 +310,7 @@ public class QueryAccessController extends AbstractBaseController {
     @ApiResponse( code = 404, message = "queryId does not exist." ),
     @ApiResponse( code = 500, message = "GemFire throws an error or exception." )   
   } )
-  @PreAuthorize("@securityService.authorize('DATA', 'WRITE')")
+  @PreAuthorize("@securityService.authorize('DATA', 'READ')")
   public ResponseEntity<?> update( @PathVariable("query") final String queryId,
                                    @RequestParam(value = "q", required = false) String oqlInUrl,
                                    @RequestBody(required = false) final String oqlInBody) {


[42/50] [abbrv] incubator-geode git commit: GEODE-2021: Non colocated gets in a transaction should get TransactionDataNotColocatedException

Posted by kl...@apache.org.
GEODE-2021: Non colocated gets in a transaction should get TransactionDataNotColocatedException

Throw TransactionDataNotColocatedException when a local get fails with BucketNotFoundException.
Added a dunit test with two transactions whose gets exercise TXStateStub or TXState, depending on where the data is hosted.
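
A minimal sketch of what the change means for application code (the region name and keys are illustrative; it assumes a partitioned region that is already defined and whose buckets are spread across more than one member):

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.CacheTransactionManager;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.TransactionDataNotColocatedException;

public class NonColocatedGetTxExample {

  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();
    Region<Integer, String> customers = cache.getRegion("CustomerPartitionedRegion");
    CacheTransactionManager txManager = cache.getCacheTransactionManager();

    txManager.begin();
    try {
      customers.get(1);
      // If key 2 lives in a bucket hosted on a different member than key 1,
      // this get now fails with TransactionDataNotColocatedException instead
      // of surfacing the internal BucketNotFoundException.
      customers.get(2);
      txManager.commit();
    } catch (TransactionDataNotColocatedException e) {
      // The transaction is still in progress here, so roll it back explicitly.
      txManager.rollback();
    }
  }
}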


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/56917a26
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/56917a26
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/56917a26

Branch: refs/heads/feature/GEODE-1930
Commit: 56917a26a8916b83f0cec6e85285b5040ff66ee6
Parents: fadd92b
Author: eshu <es...@pivotal.io>
Authored: Fri Oct 21 11:43:36 2016 -0700
Committer: eshu <es...@pivotal.io>
Committed: Fri Oct 21 11:43:36 2016 -0700

----------------------------------------------------------------------
 .../geode/internal/cache/PartitionedRegion.java |   6 +
 .../apache/geode/disttx/PRDistTXDUnitTest.java  |   5 +
 .../disttx/PRDistTXWithVersionsDUnitTest.java   |   5 +
 .../cache/execute/PRColocationDUnitTest.java    |   6 +-
 .../cache/execute/PRTransactionDUnitTest.java   | 131 ++++++++++++++++++-
 5 files changed, 151 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/56917a26/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
index f7ecdaf..df52764 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
@@ -4105,6 +4105,12 @@ public class PartitionedRegion extends LocalRegion implements
             retryTime.waitToRetryNode();
           }
         } else {
+          if (prce instanceof BucketNotFoundException) {
+            TransactionException ex = new TransactionDataNotColocatedException(LocalizedStrings.
+                PartitionedRegion_KEY_0_NOT_COLOCATED_WITH_TRANSACTION.toLocalizedString(key));
+            ex.initCause(prce);
+            throw ex;
+          }
           Throwable cause = prce.getCause();
           if (cause instanceof PrimaryBucketException) {
             throw (PrimaryBucketException)cause;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/56917a26/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXDUnitTest.java b/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXDUnitTest.java
index f36085b..68a83f1 100644
--- a/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXDUnitTest.java
@@ -37,6 +37,11 @@ public class PRDistTXDUnitTest extends PRTransactionDUnitTest {
     return props;
   }
   
+  @Ignore("[DISTTX] TODO test overridden and intentionally left blank as it does not apply to disttx.")
+  @Test
+  public void testTxWithNonColocatedGet() {
+  }
+  
   @Ignore("[DISTTX] TODO test overridden and intentionally left blank as they fail.")
   @Test
   public void testBasicPRTransactionRedundancy0() {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/56917a26/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXWithVersionsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXWithVersionsDUnitTest.java b/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXWithVersionsDUnitTest.java
index 268c2ed..d692468 100644
--- a/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXWithVersionsDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXWithVersionsDUnitTest.java
@@ -37,6 +37,11 @@ public class PRDistTXWithVersionsDUnitTest extends PRTransactionWithVersionsDUni
     return props;
   }
   
+  @Ignore("[DISTTX] TODO test overridden and intentionally left blank as it does not apply to disttx.")
+  @Test
+  public void testTxWithNonColocatedGet() {
+  }
+  
   @Ignore("[DISTTX] TODO test overridden and intentionally left blank as they fail.")
   @Override
   @Test

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/56917a26/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PRColocationDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PRColocationDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PRColocationDUnitTest.java
index 1b8d2d1..f6ee565 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PRColocationDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PRColocationDUnitTest.java
@@ -2388,11 +2388,15 @@ public class PRColocationDUnitTest extends JUnit4CacheTestCase {
     assertTrue("Region should have failed to close. regionName = " + partitionedRegionName , exceptionThrown);    
   }
   public static void putCustomerPartitionedRegion(String partitionedRegionName) {
+    putCustomerPartitionedRegion(partitionedRegionName, 10);
+  }
+  
+  public static void putCustomerPartitionedRegion(String partitionedRegionName, int numOfRecord) {
     assertNotNull(basicGetCache());
     Region partitionedregion = basicGetCache().getRegion(Region.SEPARATOR
         + partitionedRegionName);
     assertNotNull(partitionedregion);
-    for (int i = 1; i <= 10; i++) {
+    for (int i = 1; i <= numOfRecord; i++) {
       CustId custid = new CustId(i);
       Customer customer = new Customer("name" + i, "Address" + i);
       try {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/56917a26/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PRTransactionDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PRTransactionDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PRTransactionDUnitTest.java
index 516c240..332ec01 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PRTransactionDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PRTransactionDUnitTest.java
@@ -25,10 +25,12 @@ import static org.junit.Assert.*;
 import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
 import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
 import org.apache.geode.test.junit.categories.DistributedTest;
+import org.assertj.core.api.Assertions;
 
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Set;
 
 import util.TestException;
@@ -45,6 +47,7 @@ import org.apache.geode.cache.execute.FunctionException;
 import org.apache.geode.cache.execute.FunctionService;
 import org.apache.geode.cache.util.CacheListenerAdapter;
 import org.apache.geode.internal.NanoTimer;
+import org.apache.geode.internal.cache.ForceReattemptException;
 import org.apache.geode.internal.cache.PartitionedRegion;
 import org.apache.geode.internal.cache.TXManagerImpl;
 import org.apache.geode.internal.cache.execute.data.CustId;
@@ -53,10 +56,13 @@ import org.apache.geode.internal.cache.execute.data.Order;
 import org.apache.geode.internal.cache.execute.data.OrderId;
 import org.apache.geode.internal.cache.execute.data.Shipment;
 import org.apache.geode.internal.cache.execute.data.ShipmentId;
+import org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException;
+import org.apache.geode.internal.logging.LogService;
 import org.apache.geode.test.dunit.Assert;
 import org.apache.geode.test.dunit.Invoke;
 import org.apache.geode.test.dunit.LogWriterUtils;
 import org.apache.geode.test.dunit.SerializableCallable;
+import org.apache.geode.test.dunit.SerializableRunnable;
 
 /**
  * Test for co-located PR transactions.
@@ -316,18 +322,141 @@ public class PRTransactionDUnitTest extends PRColocationDUnitTest {
   }
 
   protected void createPRWithCoLocation(String prName, String coLocatedWith) {
+    setAttributes(prName, coLocatedWith);
+    createPartitionedRegion(attributeObjects);
+  }
+  
+  protected void setAttributes(String prName, String coLocatedWith) {
     this.regionName = prName;
     this.colocatedWith = coLocatedWith;
     this.isPartitionResolver = new Boolean(true);
     this.attributeObjects = new Object[] { regionName, redundancy, localMaxmemory,
         totalNumBuckets, colocatedWith, isPartitionResolver, getEnableConcurrency() };
-    createPartitionedRegion(attributeObjects);
   }
 
   protected boolean getEnableConcurrency() {
     return false;
   }
   
+  /**
+   * This method executes a transaction with gets on non-colocated entries and 
+   * expects the transaction to fail with TransactionDataNotColocatedException.
+   * @param bucketRedundancy redundancy for the colocated PRs
+   */
+  protected void basicPRTXWithNonColocatedGet(int bucketRedundancy) {
+    dataStore1.invoke(runGetCache);
+    dataStore2.invoke(runGetCache);
+    redundancy = new Integer(bucketRedundancy);
+    localMaxmemory = new Integer(50);
+    totalNumBuckets = new Integer(2);
+    
+    setAttributes(CustomerPartitionedRegionName, null);
+
+    dataStore1.invoke(PRColocationDUnitTest.class, "createPR", this.attributeObjects);
+    dataStore2.invoke(PRColocationDUnitTest.class, "createPR", this.attributeObjects);
+
+    // Put the customer 1-2 in CustomerPartitionedRegion
+    dataStore1.invoke(() -> PRColocationDUnitTest.putCustomerPartitionedRegion(CustomerPartitionedRegionName, 2));
+
+    dataStore1.invoke(verifyNonColocated);
+    dataStore2.invoke(verifyNonColocated);
+    
+    dataStore1.invoke(getTx);
+  }
+  
+
+  @SuppressWarnings("serial")
+  private SerializableRunnable verifyNonColocated = new SerializableRunnable("verifyNonColocated") {
+    @Override
+    public void run() throws PRLocallyDestroyedException, ForceReattemptException {
+      containsKeyLocally();
+    }
+  };
+  
+  @SuppressWarnings("serial")
+  private SerializableRunnable getTx = new SerializableRunnable("getTx") {
+    @Override
+    public void run() {
+      performGetTx();
+    }
+  };
+
+  
+  @SuppressWarnings({ "unchecked", "rawtypes" })
+  private void containsKeyLocally() throws PRLocallyDestroyedException, ForceReattemptException {
+    PartitionedRegion pr = (PartitionedRegion) basicGetCache().getRegion(Region.SEPARATOR + CustomerPartitionedRegionName);
+    
+    CustId cust1 = new CustId(1);
+    CustId cust2 = new CustId(2);
+    int bucketId1 = pr.getKeyInfo(cust1).getBucketId();
+    int bucketId2 = pr.getKeyInfo(cust2).getBucketId();
+    
+    List<Integer> localPrimaryBucketList = pr.getLocalPrimaryBucketsListTestOnly();
+    Set localBucket1Keys;
+    Set localBucket2Keys;
+    assertTrue(localPrimaryBucketList.size() == 1);
+    for (int bucketId: localPrimaryBucketList) {
+      if (bucketId == bucketId1) {
+        //primary bucket has cust1
+        localBucket1Keys = pr.getDataStore().getKeysLocally(bucketId1, false);
+        for (Object key: localBucket1Keys) {
+          LogService.getLogger().info("local key set contains " + key);
+        }
+        assertTrue(localBucket1Keys.size() == 1);
+      } else {
+        localBucket2Keys = pr.getDataStore().getKeysLocally(bucketId2, false);
+        for (Object key: localBucket2Keys) {
+          LogService.getLogger().info("local key set contains " + key);
+        }
+        assertTrue(localBucket2Keys.size() == 1);
+      }
+    }
+  }
+  
+  @SuppressWarnings("unchecked")
+  private void performGetTx() {
+    PartitionedRegion pr = (PartitionedRegion) basicGetCache().getRegion(Region.SEPARATOR + CustomerPartitionedRegionName);
+    CacheTransactionManager mgr = pr.getCache().getCacheTransactionManager();
+    CustId cust1 = new CustId(1);
+    CustId cust2 = new CustId(2);
+    int bucketId1 = pr.getKeyInfo(cust1).getBucketId();
+    List<Integer> localPrimaryBucketList = pr.getLocalPrimaryBucketsListTestOnly();
+    assertTrue(localPrimaryBucketList.size() == 1);
+    boolean isCust1Local = (Integer)localPrimaryBucketList.get(0) == bucketId1;
+
+    //touch first get on remote node -- using TXStateStub
+    Assertions.assertThatThrownBy(()-> getTx(!isCust1Local, mgr, pr, cust1, cust2))
+    .isInstanceOf(TransactionDataNotColocatedException.class);
+
+   //touch first get on local node-- using TXState
+    Assertions.assertThatThrownBy(()-> getTx(isCust1Local, mgr, pr, cust1, cust2))
+    .isInstanceOf(TransactionDataNotColocatedException.class);
+  }
+  
+  private void getTx(boolean doCust1First, CacheTransactionManager mgr, PartitionedRegion pr, CustId cust1, CustId cust2) {
+    CustId first = doCust1First ? cust1 : cust2;
+    CustId second = !doCust1First ? cust1 : cust2;
+    
+    mgr.begin();
+    boolean doRollback = true;
+    try {
+      pr.get(first);
+      pr.get(second);
+      doRollback = false;
+    } finally {
+      if (doRollback) {
+        mgr.rollback();
+      } else {
+        mgr.commit();
+      }
+    }
+  }
+  
+  @Test
+  public void testTxWithNonColocatedGet() {
+    basicPRTXWithNonColocatedGet(0);
+  }
+  
   @Test
   public void testPRTXInCacheListenerRedundancy0() {
     basicPRTXInCacheListener(0);