Posted to commits@geode.apache.org by kl...@apache.org on 2016/05/04 22:57:01 UTC

[01/63] [abbrv] incubator-geode git commit: GEODE-1072: Removing HDFS related code

Repository: incubator-geode
Updated Branches:
  refs/heads/feature/GEODE-1276 d754e70d5 -> 9bdd0d595


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/test/java/com/gemstone/gemfire/test/dunit/internal/JUnit4DistributedTestCase.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/test/dunit/internal/JUnit4DistributedTestCase.java b/geode-core/src/test/java/com/gemstone/gemfire/test/dunit/internal/JUnit4DistributedTestCase.java
index c06f4db..8678095 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/test/dunit/internal/JUnit4DistributedTestCase.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/test/dunit/internal/JUnit4DistributedTestCase.java
@@ -35,7 +35,6 @@ import org.junit.Rule;
 import com.gemstone.gemfire.admin.internal.AdminDistributedSystemImpl;
 import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
 import com.gemstone.gemfire.cache.query.QueryTestUtils;
 import com.gemstone.gemfire.cache.query.internal.QueryObserverHolder;
 import com.gemstone.gemfire.cache30.ClientServerTestCase;
@@ -415,7 +414,6 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
     assertNotNull("defaultDiskStoreName must not be null", defaultDiskStoreName);
     setTestMethodName(methodName);
     GemFireCacheImpl.setDefaultDiskStoreName(defaultDiskStoreName);
-    System.setProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP, "true");
     setUpCreationStackGenerator();
   }
 
@@ -568,7 +566,6 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
 
     // clear system properties -- keep alphabetized
     System.clearProperty("gemfire.log-level");
-    System.clearProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP);
     System.clearProperty("jgroups.resolve_dns");
 
     if (InternalDistributedSystem.systemAttemptingReconnect != null) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedDataSerializables.txt
----------------------------------------------------------------------
diff --git a/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedDataSerializables.txt b/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedDataSerializables.txt
index 0dda2e6..fe21fbf 100644
--- a/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedDataSerializables.txt
+++ b/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedDataSerializables.txt
@@ -78,46 +78,6 @@ com/gemstone/gemfire/cache/client/internal/locator/ServerLocationRequest,2
 fromData,9,2a2bb80003b50002b1
 toData,9,2ab400022bb80004b1
 
-com/gemstone/gemfire/cache/hdfs/internal/HDFSGatewayEventImpl,2
-fromData,17,2a2bb7001b2a2bb8001cc0001db50009b1
-toData,14,2a2bb700172ab400092bb80018b1
-
-com/gemstone/gemfire/cache/hdfs/internal/PersistedEventImpl,2
-fromData,104,2a2bb900110100b80012b500042a2bb900110100b500072a04b7000a99000e2a2bb80013b50005a700402a05b7000a9900302bb800134d2cc7000b2a01b50005a7001cb8001499000e2a2cb80015b50005a7000b2a2cb80016b50005a7000b2a2bb80017b50005b1
-toData,107,2b2ab40004b40008b9000902002b2ab40007b9000902002a04b7000a9900142ab40005c0000bc0000b2bb8000ca7003d2a05b7000a99002d2ab40005c1000d9900182ab40005c0000d4d2cb9000e01002bb8000fa700162ab400052bb8000fa7000b2ab400052bb80010b1
-
-com/gemstone/gemfire/cache/hdfs/internal/SortedHDFSQueuePersistedEvent,2
-fromData,14,2a2bb7000d2a2bb8000eb5000ab1
-toData,14,2a2bb7000b2ab4000a2bb8000cb1
-
-com/gemstone/gemfire/cache/hdfs/internal/SortedHoplogPersistedEvent,2
-fromData,37,2a2bb700092ab6000a9900112a2bb8000bc0000cb50002a7000d2a2bb9000d0100b50003b1
-toData,34,2a2bb700062ab40002c700102b2ab40003b900070300a7000b2ab400022bb80008b1
-
-com/gemstone/gemfire/cache/hdfs/internal/UnsortedHDFSQueuePersistedEvent,2
-fromData,14,2a2bb7000c2a2bb8000db50008b1
-toData,14,2a2bb7000a2ab400082bb8000bb1
-
-com/gemstone/gemfire/cache/hdfs/internal/UnsortedHoplogPersistedEvent,2
-fromData,17,2a2bb700072a2bb80008b60009b50003b1
-toData,17,2a2bb700042ab40003b800052bb80006b1
-
-com/gemstone/gemfire/cache/hdfs/internal/hoplog/CompactionStatus,2
-fromData,21,2a2bb900060100b500022a2bb900070100b50003b1
-toData,21,2b2ab40002b9000402002b2ab40003b900050200b1
-
-com/gemstone/gemfire/cache/hdfs/internal/hoplog/FlushStatus,2
-fromData,11,2a2bb900060100b50004b1
-toData,11,2b2ab40004b900050200b1
-
-com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSFlushQueueArgs,2
-fromData,19,2a2bb80008b500042a2bb900090100b50005b1
-toData,19,2ab400042bb800062b2ab40005b900070300b1
-
-com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionArgs,2
-fromData,29,2a2bb8000bb500042a2bb9000c0100b500052a2bb9000d0100b50007b1
-toData,29,2ab400042bb800082b2ab40005b9000902002b2ab40007b9000a0200b1
-
 com/gemstone/gemfire/cache/query/internal/CqEntry,2
 fromData,17,2a2bb80009b500022a2bb80009b50003b1
 toData,17,2ab400022bb8000b2ab400032bb8000bb1
@@ -365,8 +325,8 @@ fromData,22,2a2bb900130100b500022a2bb80014c00015b50003b1
 toData,19,2b2ab40002b9001102002ab400032bb80012b1
 
 com/gemstone/gemfire/distributed/internal/membership/gms/messages/InstallViewMessage,2
-fromData,49,2a2bb700122a2bb900130100b500072ab800142bb90013010032b500042a2bb80015c00016b500022a2bb80015b50005b1
-toData,45,2a2bb7000e2b2ab40007b9000f02002b2ab40004b60010b9000f02002ab400022bb800112ab400052bb80011b1
+fromData,49,2a2bb700112a2bb900120100b500072ab800132bb90012010032b500052a2bb80014c00015b500022a2bb80014b50006b1
+toData,45,2a2bb7000d2b2ab40007b9000e02002b2ab40005b6000fb9000e02002ab400022bb800102ab400062bb80010b1
 
 com/gemstone/gemfire/distributed/internal/membership/gms/messages/JoinRequestMessage,2
 fromData,38,2a2bb80019c0001ab500042a2bb80019b500052a2bb8001bb500022a2bb9001c0100b6001db1
@@ -747,8 +707,8 @@ fromData,27,2a2bb80013b500042a2bb80013b500062a2bb900140100b50008b1
 toData,27,2ab400042bb800112ab400062bb800112b2ab40008b900120200b1
 
 com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes,2
-fromData,412,2a2bb80084b500082a2bb80084b5000a2a2bb80085b5000d2a2bb80084b5007e2a2bb80086c00087b5000f2a2bb80086c00087b500112a2bb80086c00088b500132a2bb80086c00088b500152a2bb80086c00088b500172a2bb80084b500192a2bb80086c00088b5001b2a2bb80084b5001d2a2bb80086c00089b5001f2a2bb80086c0008ab500212a2bb9008b0100b500232a2bb9008b0100b500252a2bb9008c0100b500272a2bb9008d0100b5002b2a2bb9008c0100b5002d2a2bb9008b0100b5002f2a2bb9008b0100b500312a2bb9008b0100b500332a2bb9008b0100b500352a2bb9008b0100b500372a2bb80086c0008eb5003b2a2bb80086c0008fc0008fb5003d2a2bb80086c00090c00090b5003f2a2bb9008b0100b500832a2bb80086c00091b500412a2bb80086c00092b500432a2bb80086c00093b500452a2bb80086c00002b500042a2bb9008b0100b500472a2bb80084b500392a2bb9008b0100b5004b2a2bb80085b5004e2a2bb9008b0100b500052a2bb9008b0100b500292a2bb80084b500562a2bb9008b0100b500582a2bb80084b50052b1
-toData,361,2ab400082bb8007c2ab4000a2bb8007c2ab4000d2bb8007d2ab4007e2bb8007c2ab4000f2bb8007f2ab400112bb8007f2ab400132bb8007f2ab400152bb8007f2ab400172bb8007f2ab400192bb8007c2ab4001b2bb8007f2ab4001d2bb8007c2ab4001f2bb8007f2ab400212bb8007f2b2ab40023b9008002002b2ab40025b9008002002b2ab40027b9008102002b2ab4002bb9008202002b2ab4002db9008102002b2ab4002fb9008002002b2ab40031b9008002002b2ab40033b9008002002b2ab40035b9008002002b2ab40037b9008002002ab4003b2bb8007f2ab4003d2bb8007f2ab4003f2bb8007f2b2ab40083b9008002002ab400412bb8007f2ab400432bb8007f2ab400452bb8007f2ab400042bb8007f2b2ab40047b9008002002ab400392bb8007c2b2ab4004bb9008002002ab4004e2bb8007d2b2ab40005b9008002002b2ab40029b9008002002ab400562bb8007c2b2ab40058b9008002002ab400522bb8007cb1
+fromData,404,2a2bb80080b500082a2bb80080b5000a2a2bb80081b5000d2a2bb80080b5007a2a2bb80082c00083b5000f2a2bb80082c00083b500112a2bb80082c00084b500132a2bb80082c00084b500152a2bb80082c00084b500172a2bb80080b500192a2bb80082c00084b5001b2a2bb80080b5001d2a2bb80082c00085b5001f2a2bb80082c00086b500212a2bb900870100b500232a2bb900870100b500252a2bb900880100b500272a2bb900890100b5002b2a2bb900880100b5002d2a2bb900870100b5002f2a2bb900870100b500312a2bb900870100b500332a2bb900870100b500352a2bb900870100b500372a2bb80082c0008ab5003b2a2bb80082c0008bc0008bb5003d2a2bb80082c0008cc0008cb5003f2a2bb900870100b5007f2a2bb80082c0008db500412a2bb80082c0008eb500432a2bb80082c0008fb500452a2bb80082c00002b500042a2bb900870100b500472a2bb80080b500392a2bb900870100b5004b2a2bb80081b5004e2a2bb900870100b500052a2bb900870100b500292a2bb80080b500522a2bb900870100b50054b1
+toData,353,2ab400082bb800782ab4000a2bb800782ab4000d2bb800792ab4007a2bb800782ab4000f2bb8007b2ab400112bb8007b2ab400132bb8007b2ab400152bb8007b2ab400172bb8007b2ab400192bb800782ab4001b2bb8007b2ab4001d2bb800782ab4001f2bb8007b2ab400212bb8007b2b2ab40023b9007c02002b2ab40025b9007c02002b2ab40027b9007d02002b2ab4002bb9007e02002b2ab4002db9007d02002b2ab4002fb9007c02002b2ab40031b9007c02002b2ab40033b9007c02002b2ab40035b9007c02002b2ab40037b9007c02002ab4003b2bb8007b2ab4003d2bb8007b2ab4003f2bb8007b2b2ab4007fb9007c02002ab400412bb8007b2ab400432bb8007b2ab400452bb8007b2ab400042bb8007b2b2ab40047b9007c02002ab400392bb800782b2ab4004bb9007c02002ab4004e2bb800792b2ab40005b9007c02002b2ab40029b9007c02002ab400522bb800782b2ab40054b9007c0200b1
 
 com/gemstone/gemfire/internal/admin/remote/RemoteRegionSnapshot,2
 fromData,59,2a2bb80029b500032a2bb8002ac00009b5000c2a2bb8002ac00005b500072a2bb9002b0100b500102a2bb9002b0100b500122a2bb8002ab5001cb1
@@ -883,8 +843,8 @@ fromData,1,b1
 toData,1,b1
 
 com/gemstone/gemfire/internal/cache/AbstractRegion,2
-toData,6,2a2bb80184b1
-fromData,8,bb018559b70186bf
+toData,6,2a2bb80176b1
+fromData,8,bb017759b70178bf
 
 com/gemstone/gemfire/internal/cache/AbstractUpdateOperation$AbstractUpdateMessage,2
 fromData,16,2a2bb700192a2bb9001a0100b5000db1
@@ -987,8 +947,8 @@ fromData,14,2a2bb7001a2a2bb8001bb50004b1
 toData,14,2a2bb700182ab400042bb80019b1
 
 com/gemstone/gemfire/internal/cache/DistributedCacheOperation$CacheOperationMessage,2
-fromData,338,2bb9009501003d2bb9009501003e2a1cb500962a1c2bb600972a2bb80098b500232a2bb900990100b8009ab500092a1c1100807e99000704a7000403b500042a1c10087e99000704a7000403b500581c1102007e99000b2a2bb8009bb500882a1c1104007e99000704a7000403b500072a1c10407e99000704a7000403b5001d2ab4001d9900382bb900990100360415049a000b2a03b5001ea7001b150404a0000b2a04b5001ea7000dbb009c59129db7009ebf2a2bb8009fb5001f1c1101007e99000704a700040336042a1c1108007e99000704a7000403b500a015049900162abb00a159b700a2b5000e2ab4000e2bb800a31c1110007e99001c1c1120007e99000704a700040336052a15052bb800a4b5000a1d1104007e9900372a04b5000f2ac100a599002b2ac000a51d1102007e99000704a7000403b600a62ac000a51d1101007e99000704a7000403b600a7b1
-toData,202,033d033e2a1cb600a83d2a1db600a93e2b1cb900aa02002b1db900aa02002ab4000d9e000d2b2ab4000db900ab02002ab400232bb800ac2b2ab40009b400adb900ae02002ab40088c6000b2ab400882bb800af2ab4001d9900542b2ab4001e99000704a7000403b900ae02002ab4001eb800b036042ab4001e9a001f2ab4001fc10020990015013a052ab4001fc00020c000203a06a7000c2ab4001f3a05013a061504190519062bb800b12ab4000ec6000b2ab4000e2bb800b22ab4000ac6000b2ab4000a2bb800b2b1
+fromData,318,2bb9009501003d2bb9009501003e2a1cb500962a1c2bb600972a2bb80098b500232a2bb900990100b8009ab500092a1c1100807e99000704a7000403b500042a1c10087e99000704a7000403b500581c1102007e99000b2a2bb8009bb500882a1c1104007e99000704a7000403b500072a1c10407e99000704a7000403b5001d2ab4001d9900382bb900990100360415049a000b2a03b5001ea7001b150404a0000b2a04b5001ea7000dbb009c59129db7009ebf2a2bb8009fb5001f1c1101007e99000704a700040336042a1c1108007e99000704a7000403b500a015049900162abb00a159b700a2b5000e2ab4000e2bb800a31c1110007e99001c1c1120007e99000704a700040336052a15052bb800a4b5000a1d1104007e9900232a04b5000f2ac100a59900172ac000a51d1101007e99000704a7000403b600a6b1
+toData,202,033d033e2a1cb600a73d2a1db600a83e2b1cb900a902002b1db900a902002ab4000d9e000d2b2ab4000db900aa02002ab400232bb800ab2b2ab40009b400acb900ad02002ab40088c6000b2ab400882bb800ae2ab4001d9900542b2ab4001e99000704a7000403b900ad02002ab4001eb800af36042ab4001e9a001f2ab4001fc10020990015013a052ab4001fc00020c000203a06a7000c2ab4001f3a05013a061504190519062bb800b02ab4000ec6000b2ab4000e2bb800b12ab4000ac6000b2ab4000a2bb800b1b1
 
 com/gemstone/gemfire/internal/cache/DistributedClearOperation$ClearRegionMessage,2
 fromData,53,2a2bb700212ab800222bb90023010032b500022a2bb80024c00025b500062a2bb80024c00026b500172a2bb80024c00027b50011b1
@@ -1006,8 +966,8 @@ com/gemstone/gemfire/internal/cache/DistributedPutAllOperation$PutAllEntryData,1
 toData,236,2ab4000a4e2ab4000c3a042d2bb8003d1904c1003e9a00081904c700192b03b9003f02001904c0003ec0003e2bb80040a700341904c1004199001f1904c000413a052b04b9003f02001905b9004201002bb80040a700102b04b9003f020019042bb800432b2ab40012b40044b9003f02002ab4000636052ab40026c6000a150507809136052ab40017c6001d15051008809136052ab40017c1004599000b150510208091360515051080809136052b1505b9003f02002ab40026c6000b2ab400262bb8003d2ab40017c6000b2ab400172bb800462ab6002899000b2ab400142bb800462ab400082bb80047b1
 
 com/gemstone/gemfire/internal/cache/DistributedPutAllOperation$PutAllMessage,2
-fromData,197,2a2bb700402a2bb80041c00042b500082a2bb8004388b500182a2ab40018bd0044b500092ab400189e00722bb800454dbb004659b700474e03360415042ab40018a200202ab400091504bb0044592b2ab4000815042c2db7004853840401a7ffdd2bb9004901003604150499002f2bb8004a3a0503360615062ab40018a2001d2ab4000915063219051506b6004bc0004cb50032840601a7ffe02ab4004d1140007e99000e2a2bb80041c0004eb5000e2a2ab4004d1180007e99000704a7000403b5001db1
-toData,181,2a2bb7004f2ab400082bb800502ab40018852bb800512ab400189e008bbb0052592ab40018b700534d033e2ab400090332b40054c10028360403360515052ab40018a200531d9a00122ab40009150532b40032c60005043e2ab40009150532b400323a062c1906b60055572ab4000915053201b500322ab400091505322b1504b600562ab400091505321906b50032840501a7ffaa2b1db9005702001d9900082c2bb800582ab4000ec6000b2ab4000e2bb80050b1
+fromData,197,2a2bb7003e2a2bb8003fc00040b500072a2bb8004188b500172a2ab40017bd0042b500082ab400179e00722bb800434dbb004459b700454e03360415042ab40017a200202ab400081504bb0042592b2ab4000715042c2db7004653840401a7ffdd2bb9004701003604150499002f2bb800483a0503360615062ab40017a2001d2ab4000815063219051506b60049c0004ab50030840601a7ffe02ab4004b1140007e99000e2a2bb8003fc0004cb5000d2a2ab4004b1180007e99000704a7000403b5001cb1
+toData,181,2a2bb7004d2ab400072bb8004e2ab40017852bb8004f2ab400179e008bbb0050592ab40017b700514d033e2ab400080332b40052c10026360403360515052ab40017a200531d9a00122ab40008150532b40030c60005043e2ab40008150532b400303a062c1906b60053572ab4000815053201b500302ab400081505322b1504b600542ab400081505321906b50030840501a7ffaa2b1db9005502001d9900082c2bb800562ab4000dc6000b2ab4000d2bb8004eb1
 
 com/gemstone/gemfire/internal/cache/DistributedRegionFunctionStreamingMessage,2
 fromData,171,2a2bb700622bb9006301003d1c047e9900142a2bb900640100b500092ab40009b800651c077e99000d2a2bb900640100b500061c057e99000e2a2bb80066c00067b500072bb800664e2dc100689900252a03b5000e2a2dc00068b80069b500082ab40008c7001b2a2dc00068b5004da700102a2dc0006ab500082a04b5000e2a2bb80066c0006bb5000a2a2bb8006cb5000c2a2bb8006db5000b2a1c10407e99000704a7000403b5000db1
@@ -1029,8 +989,8 @@ fromData,17,2a2bb80005b500022a2bb80005b50003b1
 toData,17,2ab400022bb800042ab400032bb80004b1
 
 com/gemstone/gemfire/internal/cache/EntryEventImpl,2
-fromData,214,2a2bb8001dc0001eb5001f2bb8001d4d2bb8001d4e2abb0020592c2d01b70021b500222a2bb900230100b80024b500252a2bb900260100b500082ab400222bb8001db600272a2bb8001dc00028b500092bb9002901009900112a2bb8001dc0002ab50007a700322bb9002901009900212a2bb8002bb5002c2a2ab4002cb500052a2ab4002cb8002db50004a7000b2a2bb8001db500042bb9002901009900192a2bb8002bb5002e2a2ab4002eb8002db50006a7000b2a2bb8001db500062a2bb8002fb500302a2bb80031b5000a2a2bb80032b50016b1
-toData,312,2ab400202bb801632ab6008f2bb801632ab40023b6018c2bb801632b2ab40026b4018db9018e02002b2ab4000911c03f7eb9018f02002ab6004e2bb801632ab4000a2bb801632ab40008c6000704a70004033d2b1cb9019002001c99000e2ab400082bb80163a700682ab600444e2dc100853604150499000e2dc00085b900bb010036042b1504b901900200150499003b2ab4002dc6000e2ab4002d2bb80191a7002e2ab40006c6000e2ab400062bb80191a7001c2dc000853a051905b900c901002bb80192a700082d2bb801632ab700464d2cc100853e1d99000d2cc00085b900bb01003e2b1db9019002001d9900292ab4002fc6000e2ab4002f2bb80191a7001c2cc000853a041904b900c901002bb80192a700082c2bb801632ab40031c001932bb801942ab6005b2bb801632ab400172bb80195b1
+fromData,214,2a2bb8001bc0001cb5001d2bb8001b4d2bb8001b4e2abb001e592c2d01b7001fb500202a2bb900210100b80022b500232a2bb900240100b500092ab400202bb8001bb600252a2bb8001bc00026b5000a2bb9002701009900112a2bb8001bc00028b50008a700322bb9002701009900212a2bb80029b5002a2a2ab4002ab500062a2ab4002ab8002bb50005a7000b2a2bb8001bb500052bb9002701009900192a2bb80029b5002c2a2ab4002cb8002bb50007a7000b2a2bb8001bb500072a2bb8002db5002e2a2bb8002fb5000b2a2bb80030b50014b1
+toData,312,2ab4001d2bb801602ab6008c2bb801602ab40020b6018d2bb801602b2ab40023b4018eb9018f02002b2ab4000911c03f7eb9019002002ab6004b2bb801602ab4000a2bb801602ab40008c6000704a70004033d2b1cb9019102001c99000e2ab400082bb80160a700682ab600414e2dc100823604150499000e2dc00082b900b8010036042b1504b901910200150499003b2ab4002ac6000e2ab4002a2bb80192a7002e2ab40006c6000e2ab400062bb80192a7001c2dc000823a051905b900c601002bb80193a700082d2bb801602ab700434d2cc100823e1d99000d2cc00082b900b801003e2b1db9019102001d9900292ab4002cc6000e2ab4002c2bb80192a7001c2cc000823a041904b900c601002bb80193a700082c2bb801602ab4002ec001942bb801952ab600582bb801602ab400142bb80196b1
 
 com/gemstone/gemfire/internal/cache/EntrySnapshot,2
 fromData,50,2a03b500052bb9004101003d1c9900112abb000759b70042b50004a7000e2abb000359b70043b500042ab400042bb60044b1
@@ -1119,8 +1079,8 @@ fromData,107,2a2bb9001b0100b500032bb9001b01003d2a2bb8001cb500122ab40003b80014990
 toData,125,2b2ab40003b9000f02002ab4000dc6000704a70004033d1c2ab4000dc1001199000705a700040380913d2b1cb9000f02002ab400122bb800132ab40003b800149a00232ab600159a000e2ab400022bb80016a700112ab40002c00017c000172bb800182b2ab40004b9001903002ab4000dc6000b2ab4000d2bb8001ab1
 
 com/gemstone/gemfire/internal/cache/InitialImageOperation$FilterInfoMessage,2
-fromData,230,2a2bb7008b2a2bb8008cb500202ab4000403322bb8008cb5003d2ab4000403322bb8008cb500402ab4000403322bb8008cb500422ab4000403322bb8008cb500442ab4000403322bb8008cb500462ab4000403322bb8008cb500482ab4000403322bb8008cb5004a2ab4000403322bb8008cb5004c2ab4000404322bb8008cb5003d2ab4000404322bb8008cb500402ab4000404322bb8008cb500422ab4000404322bb8008cb500442ab4000404322bb8008cb500462ab4000404322bb8008cb500482ab4000404322bb8008cb5004a2ab4000404322bb8008cb5004c2a2bb8008cb50033b1
-toData,284,2a2bb700892ab40020c000312bb8008a2ab400040332b4003dc000312bb8008a2ab400040332b40040c000312bb8008a2ab400040332b40042c000312bb8008a2ab400040332b40044c000312bb8008a2ab400040332b40046c000312bb8008a2ab400040332b40048c000312bb8008a2ab400040332b4004ac000312bb8008a2ab400040332b4004cc000312bb8008a2ab400040432b4003dc000312bb8008a2ab400040432b40040c000312bb8008a2ab400040432b40042c000312bb8008a2ab400040432b40044c000312bb8008a2ab400040432b40046c000312bb8008a2ab400040432b40048c000312bb8008a2ab400040432b4004ac000312bb8008a2ab400040432b4004cc000312bb8008a2ab40033c000312bb8008ab1
+fromData,230,2a2bb7008c2a2bb8008db500202ab4000403322bb8008db5003d2ab4000403322bb8008db500402ab4000403322bb8008db500422ab4000403322bb8008db500442ab4000403322bb8008db500462ab4000403322bb8008db500482ab4000403322bb8008db5004a2ab4000403322bb8008db5004c2ab4000404322bb8008db5003d2ab4000404322bb8008db500402ab4000404322bb8008db500422ab4000404322bb8008db500442ab4000404322bb8008db500462ab4000404322bb8008db500482ab4000404322bb8008db5004a2ab4000404322bb8008db5004c2a2bb8008db50033b1
+toData,284,2a2bb7008a2ab40020c000312bb8008b2ab400040332b4003dc000312bb8008b2ab400040332b40040c000312bb8008b2ab400040332b40042c000312bb8008b2ab400040332b40044c000312bb8008b2ab400040332b40046c000312bb8008b2ab400040332b40048c000312bb8008b2ab400040332b4004ac000312bb8008b2ab400040332b4004cc000312bb8008b2ab400040432b4003dc000312bb8008b2ab400040432b40040c000312bb8008b2ab400040432b40042c000312bb8008b2ab400040432b40044c000312bb8008b2ab400040432b40046c000312bb8008b2ab400040432b40048c000312bb8008b2ab400040432b4004ac000312bb8008b2ab400040432b4004cc000312bb8008b2ab40033c000312bb8008bb1
 
 com/gemstone/gemfire/internal/cache/InitialImageOperation$ImageReplyMessage,2
 fromData,224,2a2bb7001d2bb8001e4d014e2cc600102cb6001f9e00092c03b600204e2dc1002199000e2a2dc00022b50010a700082a2cb500102a2bb900230100b500112a2bb900230100b500122a2bb900230100b500132a2bb900240100b500142a2bb900230100b500152a2bb80025b500042a2bb900240100b500032a2bb900240100b500162ab4001699000f2abb0026592bb70027b500022bb900280100360415049b00102abb0029591504b7002ab5000103360515051504a200292bb8002bc0002c3a062bb8002d37072ab4000119061607b8002eb9002f030057840501a7ffd6b1
@@ -1234,8 +1194,8 @@ fromData,16,2a2bb700152a2bb900160100b50003b1
 toData,16,2a2bb700172b2ab40003b900180200b1
 
 com/gemstone/gemfire/internal/cache/RemoteDestroyMessage,2
-fromData,131,2a2bb7008c2a2bb8008db7008e2a2bb8008db5000c2a2bb9008f0100b80090b5000e2ab400911102007e99000e2a2bb8008dc00092b500102ab400911104007e99000e2a2bb8008dc00035b500672a2bb8008dc00093b500122ab400059900122bb9008f0100572a2bb80094b700232a2bb8008db500082a2bb8008dc00095b50017b1
-toData,135,2a2bb700962ab6006d2bb800972ab4000c2bb800972b2ab4000eb40098b9009902002ab40010c6000b2ab400102bb800972ab40067c6000b2ab400672bb800972ab400122bb800972ab4000599002a2b2ab4000699000704a7000403b9009902002ab40006b8009a3d1c2ab7009b2ab600712bb8009c2ab400082bb800972ab400172bb80097b1
+fromData,131,2a2bb7008b2a2bb8008cb7008d2a2bb8008cb5000c2a2bb9008e0100b8008fb5000e2ab400901102007e99000e2a2bb8008cc00091b500102ab400901104007e99000e2a2bb8008cc00034b500662a2bb8008cc00092b500122ab400059900122bb9008e0100572a2bb80093b700222a2bb8008cb500082a2bb8008cc00094b50017b1
+toData,135,2a2bb700952ab6006c2bb800962ab4000c2bb800962b2ab4000eb40097b9009802002ab40010c6000b2ab400102bb800962ab40066c6000b2ab400662bb800962ab400122bb800962ab4000599002a2b2ab4000699000704a7000403b9009802002ab40006b800993d1c2ab7009a2ab600702bb8009b2ab400082bb800962ab400172bb80096b1
 
 com/gemstone/gemfire/internal/cache/RemoteDestroyMessage$DestroyReplyMessage,2
 fromData,52,2a2bb700232bb9002401003d1c047e99000704a70004033e1c057e99000704a700040336041d99000d2a15042bb80025b50009b1
@@ -1664,8 +1624,8 @@ fromData,36,2a2bb700252a2bb900260100b500022a2bb900260100b500032a2bb900260100b500
 toData,36,2a2bb700272b2ab40002b9002802002b2ab40003b9002802002b2ab40004b900280200b1
 
 com/gemstone/gemfire/internal/cache/partitioned/GetMessage,2
-fromData,43,2a2bb700552a2bb80056b500052a2bb80056b500062a2bb80056c00057b500072a2bb900580100b50008b1
-toData,40,2a2bb700592ab400052bb8005a2ab400062bb8005a2ab400072bb8005a2b2ab40008b9005b0200b1
+fromData,43,2a2bb700542a2bb80055b500052a2bb80055b500062a2bb80055c00056b500072a2bb900570100b50008b1
+toData,40,2a2bb700582ab400052bb800592ab400062bb800592ab400072bb800592b2ab40008b9005a0200b1
 
 com/gemstone/gemfire/internal/cache/partitioned/GetMessage$GetReplyMessage,2
 fromData,77,2a2bb7002a2bb9002b01003d1c10087e99000704a7000403593e9900091c10f77e913d2a1cb500072a2bb8002cb5002d1c049f000b2a2bb8002eb5002f1d99000e2a2bb80030c00031b5000ab1
@@ -1752,8 +1712,8 @@ fromData,49,2a2bb700392a2bb8003ab500062a2bb9003b0100b8003cb500082a2bb8003ac0003d
 toData,43,2a2bb7003f2ab700122bb800402b2ab40008b40041b9004202002ab4000a2bb800402ab4000c2bb80040b1
 
 com/gemstone/gemfire/internal/cache/partitioned/PartitionMessage,2
-fromData,58,2a2bb700732a2bb900740100b500052a2ab400052bb600752a2bb900760100b5000e2bb80077b20078b600799b000d2a2bb9007a0100b5000ab1
-toData,104,2a2bb7007e033d2a1cb6007f3d2b1cb9008002002ab4001099000d2b2ab40010b9008102002ab40008029f000d2b2ab40008b9008102002ab40006c6000b2ab400062bb800822b2ab4000eb9008102002bb80083b20078b600799b000d2b2ab4000ab900840200b1
+fromData,58,2a2bb700742a2bb900750100b500052a2ab400052bb600762a2bb900770100b5000e2bb80078b20079b6007a9b000d2a2bb9007b0100b5000ab1
+toData,104,2a2bb7007f033d2a1cb600803d2b1cb9008102002ab4001099000d2b2ab40010b9008202002ab40008029f000d2b2ab40008b9008202002ab40006c6000b2ab400062bb800832b2ab4000eb9008202002bb80084b20079b6007a9b000d2b2ab4000ab900850200b1
 
 com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionFunctionStreamingMessage,2
 fromData,17,2a2bb7003c2a2bb8003dc0003eb50003b1
@@ -1768,16 +1728,16 @@ fromData,16,2a2bb700092a2bb9000a0100b50007b1
 toData,16,2a2bb7000b2b2ab40007b9000c0200b1
 
 com/gemstone/gemfire/internal/cache/partitioned/PutAllPRMessage,2
-fromData,183,2a2bb700402a2bb8004188b80009b5000a2ab400421110007e99000e2a2bb80043c00044b5003e2a2bb80043b500102a2bb8004588b500052a2ab40005bd000bb5000c2ab400059e006f2bb800464dbb004759b700484e03360415042ab40005a2001d2ab4000c1504bb000b592b0115042c2db7004953840401a7ffe02bb9004a01003604150499002f2bb8004b3a0503360615062ab40005a2001d2ab4000c15063219051506b6004cc0004db5004e840601a7ffe0b1
-toData,210,2a2bb7004f2ab4000ac7000d1400502bb80052a7000f2ab4000ab60053852bb800522ab4003ec6000b2ab4003e2bb800542ab400102bb800542ab40005852bb800552ab400059e008bbb0056592ab40005b700574d033e2ab4000c0332b60023c10058360403360515052ab40005a200531d9a00122ab4000c150532b4004ec60005043e2ab4000c150532b4004e3a062c1906b60059572ab4000c15053201b5004e2ab4000c1505322b1504b6005a2ab4000c1505321906b5004e840501a7ffaa2b1db9005b02001d9900082c2bb8005cb1
+fromData,183,2a2bb7003f2a2bb8004088b80009b5000a2ab400411110007e99000e2a2bb80042c00043b5003d2a2bb80042b500102a2bb8004488b500052a2ab40005bd000bb5000c2ab400059e006f2bb800454dbb004659b700474e03360415042ab40005a2001d2ab4000c1504bb000b592b0115042c2db7004853840401a7ffe02bb9004901003604150499002f2bb8004a3a0503360615062ab40005a2001d2ab4000c15063219051506b6004bc0004cb5004d840601a7ffe0b1
+toData,210,2a2bb7004e2ab4000ac7000d14004f2bb80051a7000f2ab4000ab60052852bb800512ab4003dc6000b2ab4003d2bb800532ab400102bb800532ab40005852bb800542ab400059e008bbb0055592ab40005b700564d033e2ab4000c0332b60022c10057360403360515052ab40005a200531d9a00122ab4000c150532b4004dc60005043e2ab4000c150532b4004d3a062c1906b60058572ab4000c15053201b5004d2ab4000c1505322b1504b600592ab4000c1505321906b5004d840501a7ffaa2b1db9005a02001d9900082c2bb8005bb1
 
 com/gemstone/gemfire/internal/cache/partitioned/PutAllPRMessage$PutAllReplyMessage,2
 fromData,27,2a2bb7001b2a2bb9001c0100b500032a2bb8001dc0001eb50002b1
 toData,24,2a2bb7001f2b2ab40003b9002002002ab400022bb80021b1
 
 com/gemstone/gemfire/internal/cache/partitioned/PutMessage,2
-fromData,273,2a2bb7005e2bb9005f01003d2a2bb80060b600612a2bb80060b500182a2bb900620100b500192a2bb900630100b80064b5001a1cb200657e99000b2a2bb80066b5001b1cb200677e99000e2a2bb80060c00068b5001d1cb200697e9900082a04b5006a2abb006b59b7006cb5001e2ab4001e2bb8006d2ab4006e1120007e99000b2a2bb80060b500232ab4006f9900162abb007059b70071b500292ab400292bb8006d2a1cb200727e91b500072ab4000999000e2a2bb80073b5000da7002e2ab4000704a0000e2a2bb80060b70074a7000b2a2bb80073b700751cb200767e99000b2a2bb80073b5000d2ab4006e1140007e99000e2a2bb80060c00077b500262ab4006e1180007e9900082a04b50078b1
-toData,374,014d2ab40022b60079b9007a01003e2ab4000cb6007bc600161d9900122ab4000b99000b2a04b50009a700082a03b50009a7000d4ebb007d592db7007ebf2a2bb7007f2ab400073e2ab4001bc600091db20065803e2ab400079900282ab40011c7000a2ab60080c6001a2ab400819900132ab4000cb6007bc600091db20076803e2ab4001dc600091db20067803e2ab4000cb600829900091db20069803e2b1db9008302002ab600842bb800852ab600862bb800852b2ab40019b9008703002b2ab4001ab40088b9008302002ab4001bc6000b2ab4001b2bb800852ab4001dc6000b2ab4001d2bb800852ab4001e2bb800892ab40023c6000b2ab400232bb800852ab4006f99000b2ab400292bb800892ab4000999002f2ab4008ab8008b4da7000f3a04bb008d59128eb7008fbf2ab4000cb6007b2bb800902cb60091b60092a700262ab400072ab400112ab600802bb800931db200767e99000e2ab4000cb6007b2bb800902ab40026c6000b2ab400262bb80085b1
+fromData,260,2a2bb7005d2bb9005e01003d2a2bb8005fb600602a2bb8005fb500172a2bb900610100b500182a2bb900620100b80063b500191cb200647e99000b2a2bb80065b5001a1cb200667e99000e2a2bb8005fc00067b5001c2abb006859b70069b5001d2ab4001d2bb8006a2ab4006b1120007e99000b2a2bb8005fb500222ab4006c9900162abb006d59b7006eb500282ab400282bb8006a2a1cb2006f7e91b500072ab4000999000e2a2bb80070b5000da7002e2ab4000704a0000e2a2bb8005fb70071a7000b2a2bb80070b700721cb200737e99000b2a2bb80070b5000d2ab4006b1140007e99000e2a2bb8005fc00074b500252ab4006b1180007e9900082a04b50075b1
+toData,358,014d2ab40021b60076b9007701003e2ab4000cb60078c600161d9900122ab4000b99000b2a04b50009a700082a03b50009a7000d4ebb007a592db7007bbf2a2bb7007c2ab400073e2ab4001ac600091db20064803e2ab400079900282ab40011c7000a2ab6007dc6001a2ab4007e9900132ab4000cb60078c600091db20073803e2ab4001cc600091db20066803e2b1db9007f02002ab600802bb800812ab600822bb800812b2ab40018b9008303002b2ab40019b40084b9007f02002ab4001ac6000b2ab4001a2bb800812ab4001cc6000b2ab4001c2bb800812ab4001d2bb800852ab40022c6000b2ab400222bb800812ab4006c99000b2ab400282bb800852ab4000999002f2ab40086b800874da7000f3a04bb008959128ab7008bbf2ab4000cb600782bb8008c2cb6008db6008ea700262ab400072ab400112ab6007d2bb8008f1db200737e99000e2ab4000cb600782bb8008c2ab40025c6000b2ab400252bb80081b1
 
 com/gemstone/gemfire/internal/cache/partitioned/PutMessage$PutReplyMessage,2
 fromData,48,2a2bb700252a2bb900260100b500032a2bb900270100b80028b500022a2bb80029b500062a2bb80029c0002ab50007b1
@@ -2029,8 +1989,8 @@ fromData,67,2a2bb700192a2bb8001ab60018b500032abb000759b7000bb500062bb9001b01003d
 toData,91,2a2bb700122ab40003b800132bb800142ab40006c6003f2b2ab40006b900150100b9001602002ab40006b9001701004d2cb9000d010099001a2cb9000e0100c0000f4e2b2db60018b900160200a7ffe3a7000a2b03b900160200b1
 
 com/gemstone/gemfire/internal/cache/wan/GatewaySenderEventImpl,2
-fromData,183,2bb9006d01003d1c10119f00032a04b500282a2bb9006e0100b500252a2bb9006e0100b500261c1011a200232bc1006f99001c2bb80070b20071a60012bb0072592bc0006fb20073b700744c2a2bb80075c00076b500272a2bb80077b5000e2a2bb900780100b5002b2a2bb600792a2bb8007ab5002d2a2bb80075c0001db5001e2a2bb9007b0100b500112a2bb9007c0100b500152a2bb9006e0100b500072a2bb9007c0100b80004b500052a2bb9007c0100b50018b1
-toData,133,2ab600242b1011b9006202002b2ab40025b9006302002b2ab40026b9006302002ab400272bb800642ab4000e2bb800652b2ab4002bb9006602002a2bb600672ab6002c2bb800682ab4001e2bb800642b2ab40011b9006902002b2ab40015b9006a03002b2ab40007b9006302002b2ab40005b6006bb9006a03002b2ab6006cb9006a0300b1
+fromData,183,2bb9007301003d1c10119f00032a04b5002a2a2bb900740100b500272a2bb900740100b500281c1011a200232bc1007599001c2bb80076b20077a60012bb0078592bc00075b20079b7007a4c2a2bb8007bc0007cb500292a2bb8007db500102a2bb9007e0100b5002d2a2bb6007f2a2bb80080b5002f2a2bb8007bc0001fb500202a2bb900810100b500132a2bb900820100b500172a2bb900740100b500092a2bb900820100b80004b500052a2bb900820100b5001ab1
+toData,133,2ab600262b1011b9006802002b2ab40027b9006902002b2ab40028b9006902002ab400292bb8006a2ab400102bb8006b2b2ab4002db9006c02002a2bb6006d2ab6002e2bb8006e2ab400202bb8006a2b2ab40013b9006f02002b2ab40017b9007003002b2ab40009b9006902002b2ab40005b60071b9007003002b2ab60072b900700300b1
 
 com/gemstone/gemfire/internal/cache/wan/parallel/ParallelQueueBatchRemovalMessage,2
 fromData,17,2a2bb7003a2a2bb8003bc0003cb50004b1

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt
----------------------------------------------------------------------
diff --git a/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt b/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt
index 222e63d..b427ed3 100644
--- a/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt
+++ b/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt
@@ -51,7 +51,7 @@ com/gemstone/gemfire/admin/jmx/internal/GemFireHealthConfigJmxImpl,true,14827196
 com/gemstone/gemfire/admin/jmx/internal/ManagedResourceType,true,3752874768667480449,ordinal:int
 com/gemstone/gemfire/admin/jmx/internal/RefreshNotificationType,true,4376763592395613794,ordinal:int
 com/gemstone/gemfire/admin/jmx/internal/StatisticAttributeInfo,true,28022387514935560,stat:com/gemstone/gemfire/admin/Statistic
-com/gemstone/gemfire/cache/AttributesFactory$RegionAttributesImpl,true,-3663000883567530374,asyncEventQueueIds:java/util/Set,cacheListeners:java/util/ArrayList,cacheLoader:com/gemstone/gemfire/cache/CacheLoader,cacheWriter:com/gemstone/gemfire/cache/CacheWriter,compressor:com/gemstone/gemfire/compression/Compressor,concurrencyChecksEnabled:boolean,concurrencyLevel:int,customEntryIdleTimeout:com/gemstone/gemfire/cache/CustomExpiry,customEntryTimeToLive:com/gemstone/gemfire/cache/CustomExpiry,dataPolicy:com/gemstone/gemfire/cache/DataPolicy,diskDirs:java/io/File[],diskSizes:int[],diskStoreName:java/lang/String,diskSynchronous:boolean,diskWriteAttributes:com/gemstone/gemfire/cache/DiskWriteAttributes,earlyAck:boolean,enableAsyncConflation:boolean,enableSubscriptionConflation:boolean,entryIdleTimeout:int,entryIdleTimeoutExpirationAction:com/gemstone/gemfire/cache/ExpirationAction,entryTimeToLive:int,entryTimeToLiveExpirationAction:com/gemstone/gemfire/cache/ExpirationAction,evictionAttributes:com/gemstone/gemfire/internal/cache/EvictionAttributesImpl,gatewaySenderIds:java/util/Set,hdfsStoreName:java/lang/String,hdfsWriteOnly:boolean,ignoreJTA:boolean,indexMaintenanceSynchronous:boolean,initialCapacity:int,isBucketRegion:boolean,isCloningEnabled:boolean,isLockGrantor:boolean,keyConstraint:java/lang/Class,loadFactor:float,membershipAttributes:com/gemstone/gemfire/cache/MembershipAttributes,multicastEnabled:boolean,offHeap:boolean,partitionAttributes:com/gemstone/gemfire/cache/PartitionAttributes,poolName:java/lang/String,publisher:boolean,regionIdleTimeout:int,regionIdleTimeoutExpirationAction:com/gemstone/gemfire/cache/ExpirationAction,regionTimeToLive:int,regionTimeToLiveExpirationAction:com/gemstone/gemfire/cache/ExpirationAction,scope:com/gemstone/gemfire/cache/Scope,statisticsEnabled:boolean,subscriptionAttributes:com/gemstone/gemfire/cache/SubscriptionAttributes,valueConstraint:java/lang/Class
+com/gemstone/gemfire/cache/AttributesFactory$RegionAttributesImpl,true,-3663000883567530374,asyncEventQueueIds:java/util/Set,cacheListeners:java/util/ArrayList,cacheLoader:com/gemstone/gemfire/cache/CacheLoader,cacheWriter:com/gemstone/gemfire/cache/CacheWriter,compressor:com/gemstone/gemfire/compression/Compressor,concurrencyChecksEnabled:boolean,concurrencyLevel:int,customEntryIdleTimeout:com/gemstone/gemfire/cache/CustomExpiry,customEntryTimeToLive:com/gemstone/gemfire/cache/CustomExpiry,dataPolicy:com/gemstone/gemfire/cache/DataPolicy,diskDirs:java/io/File[],diskSizes:int[],diskStoreName:java/lang/String,diskSynchronous:boolean,diskWriteAttributes:com/gemstone/gemfire/cache/DiskWriteAttributes,earlyAck:boolean,enableAsyncConflation:boolean,enableSubscriptionConflation:boolean,entryIdleTimeout:int,entryIdleTimeoutExpirationAction:com/gemstone/gemfire/cache/ExpirationAction,entryTimeToLive:int,entryTimeToLiveExpirationAction:com/gemstone/gemfire/cache/ExpirationAction,evictionAttributes:com/gemstone/gemfire/internal/cache/EvictionAttributesImpl,gatewaySenderIds:java/util/Set,ignoreJTA:boolean,indexMaintenanceSynchronous:boolean,initialCapacity:int,isBucketRegion:boolean,isCloningEnabled:boolean,isLockGrantor:boolean,keyConstraint:java/lang/Class,loadFactor:float,membershipAttributes:com/gemstone/gemfire/cache/MembershipAttributes,multicastEnabled:boolean,offHeap:boolean,partitionAttributes:com/gemstone/gemfire/cache/PartitionAttributes,poolName:java/lang/String,publisher:boolean,regionIdleTimeout:int,regionIdleTimeoutExpirationAction:com/gemstone/gemfire/cache/ExpirationAction,regionTimeToLive:int,regionTimeToLiveExpirationAction:com/gemstone/gemfire/cache/ExpirationAction,scope:com/gemstone/gemfire/cache/Scope,statisticsEnabled:boolean,subscriptionAttributes:com/gemstone/gemfire/cache/SubscriptionAttributes,valueConstraint:java/lang/Class
 com/gemstone/gemfire/cache/CacheClosedException,true,-6479561694497811262
 com/gemstone/gemfire/cache/CacheException,false
 com/gemstone/gemfire/cache/CacheExistsException,true,4090002289325418100
@@ -128,18 +128,6 @@ com/gemstone/gemfire/cache/execute/EmtpyRegionFunctionException,true,1
 com/gemstone/gemfire/cache/execute/FunctionAdapter,false
 com/gemstone/gemfire/cache/execute/FunctionException,true,4893171227542647452
 com/gemstone/gemfire/cache/execute/FunctionInvocationTargetException,true,1,id:com/gemstone/gemfire/distributed/DistributedMember
-com/gemstone/gemfire/cache/hdfs/HDFSIOException,false
-com/gemstone/gemfire/cache/hdfs/StoreExistsException,true,1
-com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreConfigHolder,false,autoMajorCompact:boolean,batchIntervalMillis:int,batchSize:int,blockCacheSize:float,clientConfigFile:java/lang/String,diskStoreName:java/lang/String,diskSynchronous:boolean,dispatcherThreads:int,fileRolloverInterval:int,homeDir:java/lang/String,isAutoCompact:boolean,isPersistenceEnabled:boolean,logPrefix:java/lang/String,majorCompactionConcurrency:int,majorCompactionIntervalMins:int,maxConcurrency:int,maxFileSize:int,maxInputFileCount:int,maxInputFileSizeMB:int,maximumQueueMemory:int,minInputFileCount:int,name:java/lang/String,namenodeURL:java/lang/String,oldFileCleanupIntervalMins:int
-com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManager$CompactionIsDisabled,true,1
-com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSFlushQueueFunction,false
-com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionFunction,false
-com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSLastCompactionTimeFunction,false
-com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer$HoplogReadersController$1,true,1,this$1:com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer$HoplogReadersController,val$this$0:com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer
-com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer$HoplogReadersController$2,true,1,this$1:com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer$HoplogReadersController,val$this$0:com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer
-com/gemstone/gemfire/cache/hdfs/internal/hoplog/Hoplog$HoplogVersion,false
-com/gemstone/gemfire/cache/hdfs/internal/hoplog/Hoplog$Meta,false
-com/gemstone/gemfire/cache/hdfs/internal/org/apache/hadoop/io/SequenceFile$CompressionType,false
 com/gemstone/gemfire/cache/partition/PartitionNotAvailableException,true,1
 com/gemstone/gemfire/cache/persistence/ConflictingPersistentDataException,true,-2629287782021455875
 com/gemstone/gemfire/cache/persistence/PartitionOfflineException,true,-6471045959318795870,offlineMembers:java/util/Set
@@ -305,13 +293,12 @@ com/gemstone/gemfire/internal/cache/Oplog$OkToSkipResult,false
 com/gemstone/gemfire/internal/cache/Oplog$OplogFileType,false
 com/gemstone/gemfire/internal/cache/PRContainsValueFunction,false
 com/gemstone/gemfire/internal/cache/PRHARedundancyProvider$ArrayListWithClearState,true,1,wasCleared:boolean
-com/gemstone/gemfire/internal/cache/PartitionedRegion$8,true,0,this$0:com/gemstone/gemfire/internal/cache/PartitionedRegion,val$bucketId:int
+com/gemstone/gemfire/internal/cache/PartitionedRegion$7,true,0,this$0:com/gemstone/gemfire/internal/cache/PartitionedRegion,val$bucketId:int
 com/gemstone/gemfire/internal/cache/PartitionedRegion$PRIdMap,true,3667357372967498179,cleared:boolean
 com/gemstone/gemfire/internal/cache/PartitionedRegion$SizeEntry,false,isPrimary:boolean,size:int
 com/gemstone/gemfire/internal/cache/PartitionedRegionDataStore$CreateBucketResult,false,nowExists:boolean
 com/gemstone/gemfire/internal/cache/PartitionedRegionException,true,5113786059279106007
 com/gemstone/gemfire/internal/cache/PartitionedRegionQueryEvaluator$MemberResultsList,false,isLastChunkReceived:boolean
-com/gemstone/gemfire/internal/cache/PartitionedRegionQueryEvaluator$TaintableArrayList,false,isPoison:boolean
 com/gemstone/gemfire/internal/cache/PartitionedRegionStatus,true,-6755318987122602065,numberOfLocalEntries:int
 com/gemstone/gemfire/internal/cache/PrimaryBucketException,true,1
 com/gemstone/gemfire/internal/cache/PutAllPartialResultException,false,result:com/gemstone/gemfire/internal/cache/PutAllPartialResultException$PutAllPartialResult
@@ -335,10 +322,7 @@ com/gemstone/gemfire/internal/cache/execute/InternalFunctionException,false
 com/gemstone/gemfire/internal/cache/execute/InternalFunctionInvocationTargetException,false,failedIds:java/util/Set
 com/gemstone/gemfire/internal/cache/execute/MemberMappedArgument,true,-6465867775653599576,defaultArgument:java/lang/Object,memberToArgMap:java/util/Map
 com/gemstone/gemfire/internal/cache/execute/NoResult,true,-4901369422864228848
-com/gemstone/gemfire/internal/cache/execute/util/CommitFunction,true,7851518767859544501
 com/gemstone/gemfire/internal/cache/execute/util/FindRestEnabledServersFunction,true,7851518767859544678
-com/gemstone/gemfire/internal/cache/execute/util/NestedTransactionFunction,true,1400965724856341543
-com/gemstone/gemfire/internal/cache/execute/util/RollbackFunction,true,1377183180063184795
 com/gemstone/gemfire/internal/cache/ha/ThreadIdentifier$Bits,false,position:int,width:int
 com/gemstone/gemfire/internal/cache/ha/ThreadIdentifier$WanType,false
 com/gemstone/gemfire/internal/cache/lru/HeapLRUCapacityController,true,4970685814429530675,perEntryOverhead:int,sizer:com/gemstone/gemfire/cache/util/ObjectSizer
@@ -354,7 +338,6 @@ com/gemstone/gemfire/internal/cache/partitioned/RedundancyAlreadyMetException,fa
 com/gemstone/gemfire/internal/cache/partitioned/rebalance/PartitionedRegionLoadModel$RefusalReason,false
 com/gemstone/gemfire/internal/cache/persistence/OplogType,false,prefix:java/lang/String
 com/gemstone/gemfire/internal/cache/persistence/PersistentMemberState,false
-com/gemstone/gemfire/internal/cache/persistence/soplog/SortedReader$Metadata,false
 com/gemstone/gemfire/internal/cache/snapshot/ClientExporter$ClientArgs,true,1,options:com/gemstone/gemfire/cache/snapshot/SnapshotOptions,prSingleHop:boolean,region:java/lang/String
 com/gemstone/gemfire/internal/cache/snapshot/ClientExporter$ProxyExportFunction,true,1
 com/gemstone/gemfire/internal/cache/snapshot/RegionSnapshotServiceImpl$1,true,1
@@ -380,7 +363,7 @@ com/gemstone/gemfire/internal/cache/wan/GatewaySenderException,true,809014315356
 com/gemstone/gemfire/internal/cache/wan/parallel/BucketRegionQueueUnavailableException,false
 com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlVersion,false,namespace:java/lang/String,publicId:java/lang/String,schemaLocation:java/lang/String,systemId:java/lang/String,version:java/lang/String
 com/gemstone/gemfire/internal/cache/xmlcache/DiskStoreAttributesCreation,false
-com/gemstone/gemfire/internal/cache/xmlcache/RegionAttributesCreation,true,2241078661206355376,asyncEventQueueIds:java/util/Set,cacheListeners:java/util/ArrayList,cacheLoader:com/gemstone/gemfire/cache/CacheLoader,cacheWriter:com/gemstone/gemfire/cache/CacheWriter,cloningEnabled:boolean,compressor:com/gemstone/gemfire/compression/Compressor,concurrencyChecksEnabled:boolean,concurrencyLevel:int,customEntryIdleTimeout:com/gemstone/gemfire/cache/CustomExpiry,customEntryTimeToLive:com/gemstone/gemfire/cache/CustomExpiry,dataPolicy:com/gemstone/gemfire/cache/DataPolicy,diskDirs:java/io/File[],diskSizes:int[],diskStoreName:java/lang/String,diskWriteAttributes:com/gemstone/gemfire/cache/DiskWriteAttributes,earlyAck:boolean,enableAsyncConflation:boolean,enableSubscriptionConflation:boolean,entryIdleTimeout:com/gemstone/gemfire/cache/ExpirationAttributes,entryTimeToLive:com/gemstone/gemfire/cache/ExpirationAttributes,evictionAttributes:com/gemstone/gemfire/internal/cache/EvictionAttributesImpl,gatewaySenderIds:java/util/Set,hdfsStoreName:java/lang/String,hdfsWriteOnly:boolean,id:java/lang/String,ignoreJTA:boolean,indexMaintenanceSynchronous:boolean,initialCapacity:int,isDiskSynchronous:boolean,isLockGrantor:boolean,keyConstraint:java/lang/Class,loadFactor:float,membershipAttributes:com/gemstone/gemfire/cache/MembershipAttributes,multicastEnabled:boolean,offHeap:boolean,partitionAttributes:com/gemstone/gemfire/cache/PartitionAttributes,poolName:java/lang/String,publisher:boolean,refid:java/lang/String,regionIdleTimeout:com/gemstone/gemfire/cache/ExpirationAttributes,regionTimeToLive:com/gemstone/gemfire/cache/ExpirationAttributes,scope:com/gemstone/gemfire/cache/Scope,statisticsEnabled:boolean,subscriptionAttributes:com/gemstone/gemfire/cache/SubscriptionAttributes,valueConstraint:java/lang/Class
+com/gemstone/gemfire/internal/cache/xmlcache/RegionAttributesCreation,true,2241078661206355376,asyncEventQueueIds:java/util/Set,cacheListeners:java/util/ArrayList,cacheLoader:com/gemstone/gemfire/cache/CacheLoader,cacheWriter:com/gemstone/gemfire/cache/CacheWriter,cloningEnabled:boolean,compressor:com/gemstone/gemfire/compression/Compressor,concurrencyChecksEnabled:boolean,concurrencyLevel:int,customEntryIdleTimeout:com/gemstone/gemfire/cache/CustomExpiry,customEntryTimeToLive:com/gemstone/gemfire/cache/CustomExpiry,dataPolicy:com/gemstone/gemfire/cache/DataPolicy,diskDirs:java/io/File[],diskSizes:int[],diskStoreName:java/lang/String,diskWriteAttributes:com/gemstone/gemfire/cache/DiskWriteAttributes,earlyAck:boolean,enableAsyncConflation:boolean,enableSubscriptionConflation:boolean,entryIdleTimeout:com/gemstone/gemfire/cache/ExpirationAttributes,entryTimeToLive:com/gemstone/gemfire/cache/ExpirationAttributes,evictionAttributes:com/gemstone/gemfire/internal/cache/EvictionAttributesImpl,gatewaySenderIds:java/util/Set,id:java/lang/String,ignoreJTA:boolean,indexMaintenanceSynchronous:boolean,initialCapacity:int,isDiskSynchronous:boolean,isLockGrantor:boolean,keyConstraint:java/lang/Class,loadFactor:float,membershipAttributes:com/gemstone/gemfire/cache/MembershipAttributes,multicastEnabled:boolean,offHeap:boolean,partitionAttributes:com/gemstone/gemfire/cache/PartitionAttributes,poolName:java/lang/String,publisher:boolean,refid:java/lang/String,regionIdleTimeout:com/gemstone/gemfire/cache/ExpirationAttributes,regionTimeToLive:com/gemstone/gemfire/cache/ExpirationAttributes,scope:com/gemstone/gemfire/cache/Scope,statisticsEnabled:boolean,subscriptionAttributes:com/gemstone/gemfire/cache/SubscriptionAttributes,valueConstraint:java/lang/Class
 com/gemstone/gemfire/internal/concurrent/AtomicLong5,true,-1915700199064062938
 com/gemstone/gemfire/internal/concurrent/CompactConcurrentHashSet2,true,7249069246763182397
 com/gemstone/gemfire/internal/concurrent/CompactConcurrentHashSet2$Segment,true,2249069246763182397,loadFactor:float
@@ -673,7 +656,7 @@ com/gemstone/gemfire/management/internal/cli/domain/MemberConfigurationInfo,fals
 com/gemstone/gemfire/management/internal/cli/domain/MemberInformation,true,1,cacheServerList:java/util/List,cacheXmlFilePath:java/lang/String,clientCount:int,cpuUsage:double,groups:java/lang/String,heapUsage:java/lang/String,host:java/lang/String,hostedRegions:java/util/Set,id:java/lang/String,initHeapSize:java/lang/String,isServer:boolean,locatorBindAddress:java/lang/String,locatorPort:int,locators:java/lang/String,logFilePath:java/lang/String,maxHeapSize:java/lang/String,name:java/lang/String,offHeapMemorySize:java/lang/String,processId:java/lang/String,serverBindAddress:java/lang/String,statArchiveFilePath:java/lang/String,workingDirPath:java/lang/String
 com/gemstone/gemfire/management/internal/cli/domain/MemberResult,true,1,errorMessage:java/lang/String,exceptionMessage:java/lang/String,isSuccessful:boolean,memberNameOrId:java/lang/String,opPossible:boolean,successMessage:java/lang/String
 com/gemstone/gemfire/management/internal/cli/domain/PartitionAttributesInfo,true,1,colocatedWith:java/lang/String,fpaInfoList:java/util/List,localMaxMemory:int,nonDefaultAttributes:java/util/Map,partitionResolverName:java/lang/String,recoveryDelay:long,redundantCopies:int,startupRecoveryDelay:long,totalNumBuckets:int
-com/gemstone/gemfire/management/internal/cli/domain/RegionAttributesInfo,true,1,cacheListenerClassNames:java/util/List,cacheLoaderClassName:java/lang/String,cacheWriterClassName:java/lang/String,cloningEnabled:boolean,compressorClassName:java/lang/String,concurrencyChecksEnabled:boolean,concurrencyLevel:int,dataPolicy:com/gemstone/gemfire/cache/DataPolicy,diskStoreName:java/lang/String,enableAsyncConflation:boolean,enableSubscriptionConflation:boolean,entryIdleTimeout:int,entryIdleTimeoutAction:java/lang/String,entryTimeToLive:int,entryTimeToLiveAction:java/lang/String,evictionAttributesInfo:com/gemstone/gemfire/management/internal/cli/domain/EvictionAttributesInfo,hdfsStoreName:java/lang/String,hdfsWriteOnly:java/lang/Boolean,ignoreJTA:boolean,indexMaintenanceSynchronous:boolean,initialCapacity:int,isLockGrantor:boolean,loadFactor:float,multicastEnabled:boolean,nonDefaultAttributes:java/util/Map,offHeap:boolean,partitionAttributesInfo:com/gemstone/gemfire/management/internal/cli/domain/PartitionAttributesInfo,poolName:java/lang/String,regionIdleTimeout:int,regionIdleTimeoutAction:java/lang/String,regionTimeToLive:int,regionTimeToLiveAction:java/lang/String,scope:com/gemstone/gemfire/cache/Scope,statisticsEnabled:boolean
+com/gemstone/gemfire/management/internal/cli/domain/RegionAttributesInfo,true,1,cacheListenerClassNames:java/util/List,cacheLoaderClassName:java/lang/String,cacheWriterClassName:java/lang/String,cloningEnabled:boolean,compressorClassName:java/lang/String,concurrencyChecksEnabled:boolean,concurrencyLevel:int,dataPolicy:com/gemstone/gemfire/cache/DataPolicy,diskStoreName:java/lang/String,enableAsyncConflation:boolean,enableSubscriptionConflation:boolean,entryIdleTimeout:int,entryIdleTimeoutAction:java/lang/String,entryTimeToLive:int,entryTimeToLiveAction:java/lang/String,evictionAttributesInfo:com/gemstone/gemfire/management/internal/cli/domain/EvictionAttributesInfo,ignoreJTA:boolean,indexMaintenanceSynchronous:boolean,initialCapacity:int,isLockGrantor:boolean,loadFactor:float,multicastEnabled:boolean,nonDefaultAttributes:java/util/Map,offHeap:boolean,partitionAttributesInfo:com/gemstone/gemfire/management/internal/cli/domain/PartitionAttributesInfo,poolName:java/lang/String,regionIdleTimeout:int,regionIdleTimeoutAction:java/lang/String,regionTimeToLive:int,regionTimeToLiveAction:java/lang/String,scope:com/gemstone/gemfire/cache/Scope,statisticsEnabled:boolean
 com/gemstone/gemfire/management/internal/cli/domain/RegionDescription,true,1,cndEvictionAttributes:java/util/Map,cndPartitionAttributes:java/util/Map,cndRegionAttributes:java/util/Map,dataPolicy:com/gemstone/gemfire/cache/DataPolicy,haslocalDataStorage:boolean,isAccessor:boolean,isLocal:boolean,isPartition:boolean,isPersistent:boolean,isReplicate:boolean,isReplicatedProxy:boolean,name:java/lang/String,regionDescPerMemberMap:java/util/Map,scope:com/gemstone/gemfire/cache/Scope
 com/gemstone/gemfire/management/internal/cli/domain/RegionDescriptionPerMember,true,1,hostingMember:java/lang/String,isAccessor:boolean,name:java/lang/String,regionAttributesInfo:com/gemstone/gemfire/management/internal/cli/domain/RegionAttributesInfo,size:int
 com/gemstone/gemfire/management/internal/cli/domain/RegionInformation,true,1,dataPolicy:com/gemstone/gemfire/cache/DataPolicy,isRoot:boolean,name:java/lang/String,parentRegion:java/lang/String,path:java/lang/String,scope:com/gemstone/gemfire/cache/Scope,subRegionInformationSet:java/util/Set
@@ -710,7 +693,6 @@ com/gemstone/gemfire/management/internal/cli/functions/DataCommandFunction$Selec
 com/gemstone/gemfire/management/internal/cli/functions/DataCommandFunction$SelectQuitStep,true,1
 com/gemstone/gemfire/management/internal/cli/functions/DeployFunction,true,1
 com/gemstone/gemfire/management/internal/cli/functions/DescribeDiskStoreFunction,false
-com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunction,true,1
 com/gemstone/gemfire/management/internal/cli/functions/DestroyDiskStoreFunction,true,1
 com/gemstone/gemfire/management/internal/cli/functions/DestroyIndexFunction,true,1
 com/gemstone/gemfire/management/internal/cli/functions/ExportConfigFunction,true,1
@@ -771,7 +753,6 @@ com/gemstone/gemfire/management/internal/cli/shell/jline/ANSIHandler$ANSIStyle,f
 com/gemstone/gemfire/management/internal/cli/util/DiskStoreNotFoundException,false
 com/gemstone/gemfire/management/internal/cli/util/EvictionAttributesInfo,true,1,evictionAction:java/lang/String,evictionAlgorithm:java/lang/String,evictionMaxValue:int
 com/gemstone/gemfire/management/internal/cli/util/FixedPartitionAttributesInfo,false,isPrimary:boolean,numBuckets:int,partitionName:java/lang/String
-com/gemstone/gemfire/management/internal/cli/util/HDFSStoreNotFoundException,false
 com/gemstone/gemfire/management/internal/cli/util/JConsoleNotFoundException,false
 com/gemstone/gemfire/management/internal/cli/util/MemberInformation,true,1,cacheXmlFilePath:java/lang/String,cpuUsage:java/lang/String,groups:java/lang/String,heapUsage:java/lang/String,host:java/lang/String,id:java/lang/String,initHeapSize:java/lang/String,locatorBindAddress:java/lang/String,locatorPort:int,locators:java/lang/String,logFilePath:java/lang/String,maxHeapSize:java/lang/String,name:java/lang/String,processId:java/lang/String,serverBindAddress:java/lang/String,statArchiveFilePath:java/lang/String,workingDirPath:java/lang/String
 com/gemstone/gemfire/management/internal/cli/util/MemberNotFoundException,false

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-lucene/build.gradle
----------------------------------------------------------------------
diff --git a/geode-lucene/build.gradle b/geode-lucene/build.gradle
index 699c0b5..b7c449b 100644
--- a/geode-lucene/build.gradle
+++ b/geode-lucene/build.gradle
@@ -32,10 +32,6 @@ dependencies {
     testCompile 'org.apache.lucene:lucene-test-framework:' + project.'lucene.version'
     testCompile 'org.apache.lucene:lucene-codecs:' + project.'lucene.version'
     testCompile files(project(':geode-core').sourceSets.test.output)
-
-    // the following test dependencies are needed for mocking cache instance
-    testRuntime 'org.apache.hadoop:hadoop-common:' + project.'hadoop.version'
-    testRuntime 'org.apache.hadoop:hadoop-hdfs:' + project.'hadoop.version'
 }
 
 //The lucene integration tests don't have any issues that requiring forking

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/data/Cluster.java
----------------------------------------------------------------------
diff --git a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/data/Cluster.java b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/data/Cluster.java
index dd89cdb..215b063 100644
--- a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/data/Cluster.java
+++ b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/data/Cluster.java
@@ -1516,7 +1516,6 @@ public class Cluster extends Thread {
     private boolean diskSynchronous;
     private boolean enableOffHeapMemory;
     private String compressionCodec = "";
-    private boolean hdfsWriteOnly;
 
     private List<String> memberName = new ArrayList<String>();
     private List<RegionOnMember> regionOnMembers  = new ArrayList<RegionOnMember>();
@@ -1769,14 +1768,6 @@ public class Cluster extends Thread {
       this.compressionCodec = compressionCodec;
     }
 
-    public boolean isHdfsWriteOnly() {
-      return hdfsWriteOnly;
-    }
-
-    public void setHdfsWriteOnly(boolean hdfsWriteOnly) {
-      this.hdfsWriteOnly = hdfsWriteOnly;
-    }
-
     public Cluster.RegionOnMember[] getRegionOnMembers() {
       Cluster.RegionOnMember[] regionOnMembers = null;
       synchronized (this.regionOnMembers) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/data/PulseConstants.java
----------------------------------------------------------------------
diff --git a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/data/PulseConstants.java b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/data/PulseConstants.java
index c2999f8..e442b8e 100644
--- a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/data/PulseConstants.java
+++ b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/data/PulseConstants.java
@@ -297,7 +297,6 @@ public class PulseConstants {
   public static final String COMPOSITE_DATA_KEY_DISKSTORENAME = "diskStoreName";
   public static final String COMPOSITE_DATA_KEY_DISKSYNCHRONOUS = "diskSynchronous";
   public static final String COMPOSITE_DATA_KEY_COMPRESSIONCODEC = "compressionCodec";
-  public static final String COMPOSITE_DATA_KEY_HDFSWRITEONLY = "hdfsWriteOnly";
   public static final String COMPOSITE_DATA_KEY_ENABLEOFFHEAPMEMORY = "enableOffHeapMemory";
   public static final String COMPOSITE_DATA_KEY_CONNECTIONSACTIVE = "connectionsActive";
   public static final String COMPOSITE_DATA_KEY_CONNECTED = "connected";

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterRegionService.java
----------------------------------------------------------------------
diff --git a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterRegionService.java b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterRegionService.java
index 5537c28..350846c 100644
--- a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterRegionService.java
+++ b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterRegionService.java
@@ -162,17 +162,6 @@ public class ClusterRegionService implements PulseService {
         regionJSON.put("isEnableOffHeapMemory", VALUE_OFF);
       }
 
-      Boolean isHDFSWriteOnly = reg.isHdfsWriteOnly();
-      if (regionType.startsWith("HDFS")) {
-        if (isHDFSWriteOnly) {
-          regionJSON.put("isHDFSWriteOnly", VALUE_ON);
-        } else {
-          regionJSON.put("isHDFSWriteOnly", VALUE_OFF);
-        }
-      } else {
-        regionJSON.put("isHDFSWriteOnly", VALUE_NA);
-      }
-
       String regCompCodec = reg.getCompressionCodec();
       if (StringUtils.isNotNullNotEmptyNotWhiteSpace(regCompCodec)) {
         regionJSON.put("compressionCodec", reg.getCompressionCodec());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterRegionsService.java
----------------------------------------------------------------------
diff --git a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterRegionsService.java b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterRegionsService.java
index bd38b8d..3da4e59 100644
--- a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterRegionsService.java
+++ b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterRegionsService.java
@@ -156,17 +156,6 @@ public class ClusterRegionsService implements PulseService {
         regionJSON.put("isEnableOffHeapMemory", this.VALUE_OFF);
       }
 
-      Boolean isHDFSWriteOnly = reg.isHdfsWriteOnly();
-      if (regionType.startsWith("HDFS")) {
-        if (isHDFSWriteOnly) {
-          regionJSON.put("isHDFSWriteOnly", this.VALUE_ON);
-        } else {
-          regionJSON.put("isHDFSWriteOnly", this.VALUE_OFF);
-        }
-      } else {
-        regionJSON.put("isHDFSWriteOnly", this.VALUE_NA);
-      }
-
       String regCompCodec = reg.getCompressionCodec();
       if (StringUtils.isNotNullNotEmptyNotWhiteSpace(regCompCodec)) {
         regionJSON.put("compressionCodec", reg.getCompressionCodec());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterSelectedRegionService.java
----------------------------------------------------------------------
diff --git a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterSelectedRegionService.java b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterSelectedRegionService.java
index 35e15c6..39a67cf 100644
--- a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterSelectedRegionService.java
+++ b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterSelectedRegionService.java
@@ -188,12 +188,6 @@ public class ClusterSelectedRegionService implements PulseService {
 
       regionJSON.put("isEnableOffHeapMemory", reg.isEnableOffHeapMemory() ? PulseService.VALUE_ON : PulseService.VALUE_OFF);
 
-      if (regionType.startsWith("HDFS")) {
-        regionJSON.put("isHDFSWriteOnly", reg.isHdfsWriteOnly() ? PulseService.VALUE_ON : PulseService.VALUE_OFF);
-      } else {
-        regionJSON.put("isHDFSWriteOnly", PulseService.VALUE_NA);
-      }
-
       String regCompCodec = reg.getCompressionCodec();
       if (StringUtils.isNotNullNotEmptyNotWhiteSpace(regCompCodec)) {
         regionJSON.put("compressionCodec", reg.getCompressionCodec());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-pulse/src/main/webapp/scripts/pulsescript/PulseCallbacks.js
----------------------------------------------------------------------
diff --git a/geode-pulse/src/main/webapp/scripts/pulsescript/PulseCallbacks.js b/geode-pulse/src/main/webapp/scripts/pulsescript/PulseCallbacks.js
index adfe90c..e19ddf4 100644
--- a/geode-pulse/src/main/webapp/scripts/pulsescript/PulseCallbacks.js
+++ b/geode-pulse/src/main/webapp/scripts/pulsescript/PulseCallbacks.js
@@ -1284,7 +1284,6 @@ function updateDataViewDetails(clusterRegions) {
         "persistence" : clusterRegions[i].persistence,
         "isEnableOffHeapMemory" : clusterRegions[i].isEnableOffHeapMemory,
         "compressionCodec" : clusterRegions[i].compressionCodec,
-        "isHDFSWriteOnly" : clusterRegions[i].isHDFSWriteOnly,
         "memberNames" : clusterRegions[i].memberNames,
         "memoryWritesTrend" : clusterRegions[i].memoryWritesTrend,
         "memoryReadsTrend" : clusterRegions[i].memoryReadsTrend,
@@ -1323,7 +1322,6 @@ function updateDataViewDetails(clusterRegions) {
       "persistence" : "",
       "isEnableOffHeapMemory" : "",
       "compressionCodec" : "",
-      "isHDFSWriteOnly" : "",
       "memberNames" : "",
       "memoryWritesTrend" : "",
       "memoryReadsTrend" : "",

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-pulse/src/main/webapp/scripts/pulsescript/clusterDetail.js
----------------------------------------------------------------------
diff --git a/geode-pulse/src/main/webapp/scripts/pulsescript/clusterDetail.js b/geode-pulse/src/main/webapp/scripts/pulsescript/clusterDetail.js
index 6d14fd2..78c9cda 100644
--- a/geode-pulse/src/main/webapp/scripts/pulsescript/clusterDetail.js
+++ b/geode-pulse/src/main/webapp/scripts/pulsescript/clusterDetail.js
@@ -1321,7 +1321,7 @@ function createRegionsGridDefault() {
                      'Persistence', 'Entry Count', 'Empty Nodes', 'Data Usage',
                      'Total Data Usage', 'Memory Usage', 'Total Memory',
                      'Member Names', 'Writes', 'Reads','Off Heap Enabled',
-                     'Compression Codec','HDFS Write Only' ],
+                     'Compression Codec' ],
         colModel : [ {
           name : 'name',
           index : 'name',
@@ -1421,10 +1421,6 @@ function createRegionsGridDefault() {
           name : 'compressionCodec',
           index : 'compressionCodec',
           hidden : true
-        }, {
-          name : 'isHDFSWriteOnly',
-          index : 'isHDFSWriteOnly',
-          hidden : true
         }],
         userData : {
           "sortOrder" : "asc",
@@ -1854,7 +1850,6 @@ function buildRegionsTreeMapData(clusterRegions) {
       "persistence" : clusterRegions[i].persistence,
       "isEnableOffHeapMemory" : clusterRegions[i].isEnableOffHeapMemory,
       "compressionCodec" : clusterRegions[i].compressionCodec,
-      "isHDFSWriteOnly" : clusterRegions[i].isHDFSWriteOnly,
       "memberNames" : clusterRegions[i].memberNames,
       "memoryWritesTrend" : clusterRegions[i].memoryWritesTrend,
       "memoryReadsTrend" : clusterRegions[i].memoryReadsTrend,

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/controllers/PulseControllerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/controllers/PulseControllerJUnitTest.java b/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/controllers/PulseControllerJUnitTest.java
index 38bf9c4..0dfc2fb 100644
--- a/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/controllers/PulseControllerJUnitTest.java
+++ b/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/controllers/PulseControllerJUnitTest.java
@@ -345,7 +345,6 @@ public class PulseControllerJUnitTest {
         .andExpect(jsonPath("$.ClusterRegion.region[0].getsRate").value(27.99D))
         .andExpect(jsonPath("$.ClusterRegion.region[0].wanEnabled").value(false))
         .andExpect(jsonPath("$.ClusterRegion.region[0].memberCount").value(1))
-        .andExpect(jsonPath("$.ClusterRegion.region[0].isHDFSWriteOnly").value("NA"))
         .andExpect(jsonPath("$.ClusterRegion.region[0].memberNames[0].name").value(MEMBER_NAME))
         .andExpect(jsonPath("$.ClusterRegion.region[0].memberNames[0].id").value(MEMBER_ID))
         .andExpect(jsonPath("$.ClusterRegion.region[0].emptyNodes").value(0))
@@ -379,7 +378,6 @@ public class PulseControllerJUnitTest {
         .andExpect(jsonPath("$.ClusterRegions.regions[0].getsRate").value(27.99D))
         .andExpect(jsonPath("$.ClusterRegions.regions[0].wanEnabled").value(false))
         .andExpect(jsonPath("$.ClusterRegions.regions[0].memberCount").value(1))
-        .andExpect(jsonPath("$.ClusterRegions.regions[0].isHDFSWriteOnly").value("NA"))
         .andExpect(jsonPath("$.ClusterRegions.regions[0].memberNames[0].name").value(MEMBER_NAME))
         .andExpect(jsonPath("$.ClusterRegions.regions[0].memberNames[0].id").value(MEMBER_ID))
         .andExpect(jsonPath("$.ClusterRegions.regions[0].emptyNodes").value(0))
@@ -430,7 +428,6 @@ public class PulseControllerJUnitTest {
         .andExpect(jsonPath("$.ClusterSelectedRegion.selectedRegion.memoryUsage").value("0.0000"))
         .andExpect(jsonPath("$.ClusterSelectedRegion.selectedRegion.wanEnabled").value(false))
         .andExpect(jsonPath("$.ClusterSelectedRegion.selectedRegion.memberCount").value(1))
-        .andExpect(jsonPath("$.ClusterSelectedRegion.selectedRegion.isHDFSWriteOnly").value("NA"))
         .andExpect(jsonPath("$.ClusterSelectedRegion.selectedRegion.putsRate").value(12.31D))
         .andExpect(jsonPath("$.ClusterSelectedRegion.selectedRegion.totalMemory").value(0))
         .andExpect(jsonPath("$.ClusterSelectedRegion.selectedRegion.entryCount").value(0))

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/Region.java
----------------------------------------------------------------------
diff --git a/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/Region.java b/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/Region.java
index 70476f9..1770dd5 100644
--- a/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/Region.java
+++ b/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/Region.java
@@ -32,10 +32,10 @@ public class Region extends JMXBaseBean implements RegionMBean {
   private String name = null;
 
   private static String[] regAttItemNames = { "compressionCodec",
-    "enableOffHeapMemory", "hdfsWriteOnly", "scope", "diskStoreName", 
+    "enableOffHeapMemory", "scope", "diskStoreName",
     "diskSynchronous" };
   private static String[] regAttItemDescriptions = { "compressionCodec",
-    "enableOffHeapMemory", "hdfsWriteOnly", "scope", "diskStoreName", 
+    "enableOffHeapMemory", "scope", "diskStoreName",
     "diskSynchronous" };
   private static OpenType[] regAttItemTypes = { SimpleType.STRING,
     SimpleType.BOOLEAN, SimpleType.BOOLEAN, SimpleType.STRING, 
@@ -158,11 +158,6 @@ public class Region extends JMXBaseBean implements RegionMBean {
       itemValuesHM.put(regAttItemNames[1], Boolean.parseBoolean(itemValues[1]));
     }
 
-    // hdfsWriteOnly
-    if (null != itemValues[2]) {
-      itemValuesHM.put(regAttItemNames[2], Boolean.parseBoolean(itemValues[2]));
-    }
-
     // scope
     if (null != itemValues[3]) {
       itemValuesHM.put(regAttItemNames[3], itemValues[3]);
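
For context, the trimmed item-name and item-type arrays above feed the standard javax.management.openmbean machinery. The sketch below is not part of this commit; it only illustrates, with values borrowed from test.properties, how a five-item name/type set like the trimmed one would typically be assembled into a CompositeData (the types are assumed to line up one-to-one with the names):

  import javax.management.openmbean.*;

  public class RegionAttributesSketch {
    // Illustration only; the item names mirror the trimmed arrays in the
    // diff above (hdfsWriteOnly removed), descriptions reuse the names.
    static CompositeData buildListRegionAttributesRow() throws OpenDataException {
      String[] names = { "compressionCodec", "enableOffHeapMemory", "scope",
          "diskStoreName", "diskSynchronous" };
      OpenType<?>[] types = { SimpleType.STRING, SimpleType.BOOLEAN,
          SimpleType.STRING, SimpleType.STRING, SimpleType.BOOLEAN };
      CompositeType rowType = new CompositeType("RegionAttributes",
          "listRegionAttributes result", names, names, types);
      return new CompositeDataSupport(rowType, names,
          new Object[] { "comp-Codec", true, "local", "TestDiskStoreLcl", false });
    }
  }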

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-pulse/src/test/resources/test.properties
----------------------------------------------------------------------
diff --git a/geode-pulse/src/test/resources/test.properties b/geode-pulse/src/test/resources/test.properties
index b779c16..7952c0e 100644
--- a/geode-pulse/src/test/resources/test.properties
+++ b/geode-pulse/src/test/resources/test.properties
@@ -182,7 +182,7 @@ region.R1.diskUsage=200200
 #region.R1.diskSynchronous=false
 # operations
 # listRegionAttributes operation should values for return 
-# String compressionCodec, boolean enableOffHeapMemory, boolean hdfsWriteOnly,
+# String compressionCodec, boolean enableOffHeapMemory,
 # String scope, String diskStoreName, boolean diskSynchronous
 region.R1.listRegionAttributes=comp-Codec,true,true,local,TestDiskStoreLcl,false
 
@@ -248,7 +248,7 @@ region.R2.diskUsage=200200
 #region.R2.diskSynchronous=true
 # operations
 # listRegionAttributes operation should values for return 
-# string compressionCodec, boolean enableOffHeapMemory, boolean hdfsWriteOnly
+# string compressionCodec, boolean enableOffHeapMemory,
 # String scope, String diskStoreName, boolean diskSynchronous
 region.R2.listRegionAttributes=comp-Codec,true,false,global,TestDiskStoreGbl,true
 
@@ -297,7 +297,7 @@ region.R3.diskUsage=200200
 #region.R3.diskSynchronous=false
 # operations
 # listRegionAttributes operation should values for return 
-# String compressionCodec, boolean enableOffHeapMemory, boolean hdfsWriteOnly,
+# String compressionCodec, boolean enableOffHeapMemory,
 # String scope, String diskStoreName, boolean diskSynchronous
 region.R3.listRegionAttributes=comp-Codec,true,true,local,TestDiskStoreLcl,false
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-rebalancer/build.gradle
----------------------------------------------------------------------
diff --git a/geode-rebalancer/build.gradle b/geode-rebalancer/build.gradle
index b821590..00c43e4 100644
--- a/geode-rebalancer/build.gradle
+++ b/geode-rebalancer/build.gradle
@@ -25,11 +25,4 @@ dependencies {
   }
   compile ('org.springframework:spring-context:' + project.'springframework.version')
   testCompile project(':geode-junit')
-
-  // the following test dependencies are needed for mocking cache instance
-  testRuntime 'org.apache.hadoop:hadoop-common:' + project.'hadoop.version'
-  testRuntime 'org.apache.hadoop:hadoop-hdfs:' + project.'hadoop.version'
-  testRuntime ('org.apache.hbase:hbase:' + project.'hbase.version') {
-    transitive = false
-  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-wan/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderFactoryImpl.java
----------------------------------------------------------------------
diff --git a/geode-wan/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderFactoryImpl.java b/geode-wan/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderFactoryImpl.java
index 0c10246..e7ba187 100644
--- a/geode-wan/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderFactoryImpl.java
+++ b/geode-wan/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderFactoryImpl.java
@@ -181,10 +181,6 @@ public class GatewaySenderFactoryImpl implements
     this.attrs.isBucketSorted = isBucketSorted;
     return this;
   }
-  public GatewaySenderFactory setIsHDFSQueue(boolean isHDFSQueue){
-    this.attrs.isHDFSQueue = isHDFSQueue;
-    return this;
-  }
   public GatewaySender create(String id, int remoteDSId) {
     int myDSId = InternalDistributedSystem.getAnyInstance()
         .getDistributionManager().getDistributedSystemId();
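
With setIsHDFSQueue gone, a sender is configured purely through the public GatewaySenderFactory methods. A minimal sketch, assuming an existing Cache and an illustrative sender id and remote distributed-system id (not taken from this commit):

  import com.gemstone.gemfire.cache.Cache;
  import com.gemstone.gemfire.cache.wan.GatewaySender;
  import com.gemstone.gemfire.cache.wan.GatewaySenderFactory;

  public class GatewaySenderSketch {
    // Hedged illustration; "sender1" and remoteDsId are example values.
    static GatewaySender createSender(Cache cache, int remoteDsId) {
      GatewaySenderFactory factory = cache.createGatewaySenderFactory();
      factory.setParallel(true);   // parallel queue; there is no HDFS variant anymore
      factory.setBatchSize(100);
      return factory.create("sender1", remoteDsId);
    }
  }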

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/UpdateVersionDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/UpdateVersionDUnitTest.java b/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/UpdateVersionDUnitTest.java
index 18a89f8..0e7e8d8 100644
--- a/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/UpdateVersionDUnitTest.java
+++ b/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/UpdateVersionDUnitTest.java
@@ -179,7 +179,7 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
           public boolean done() {
             Entry<?,?> entry = null;
             try {
-              entry = region.getDataStore().getEntryLocally(0, key, false, false, false);
+              entry = region.getDataStore().getEntryLocally(0, key, false, false);
             } catch (EntryNotFoundException e) {
               // expected
             } catch (ForceReattemptException e) {
@@ -443,7 +443,7 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
           public boolean done() {
             Entry<?,?> entry = null;
             try {
-              entry = region.getDataStore().getEntryLocally(0, key, false, false, false);
+              entry = region.getDataStore().getEntryLocally(0, key, false, false);
             } catch (EntryNotFoundException e) {
               // expected
             } catch (ForceReattemptException e) {
@@ -582,7 +582,7 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
           public boolean done() {
             Entry<?,?> entry = null;
             try {
-              entry = region.getDataStore().getEntryLocally(0, key, false, false, false);
+              entry = region.getDataStore().getEntryLocally(0, key, false, false);
             } catch (EntryNotFoundException e) {
               // expected
             } catch (ForceReattemptException e) {


[05/63] [abbrv] incubator-geode git commit: GEODE-1072: Removing HDFS related code

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HARegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HARegion.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HARegion.java
index 3896800..c924be5 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HARegion.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HARegion.java
@@ -373,13 +373,20 @@ public final class HARegion extends DistributedRegion
   
   /**
    * @return the deserialized value
-   * @see DistributedRegion#findObjectInSystem(KeyInfo, boolean, TXStateInterface, boolean, Object, boolean, boolean, ClientProxyMembershipID, EntryEventImpl, boolean, boolean)
+   * @see LocalRegion#findObjectInSystem(KeyInfo, boolean, TXStateInterface, boolean, Object, boolean, boolean, ClientProxyMembershipID, EntryEventImpl, boolean)
    *      
    */
   @Override
-  protected Object findObjectInSystem(KeyInfo keyInfo, boolean isCreate,
-      TXStateInterface txState, boolean generateCallbacks, Object localValue, boolean disableCopyOnRead,
-      boolean preferCD, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS)
+  protected Object findObjectInSystem(KeyInfo keyInfo,
+                                      boolean isCreate,
+                                      TXStateInterface txState,
+                                      boolean generateCallbacks,
+                                      Object localValue,
+                                      boolean disableCopyOnRead,
+                                      boolean preferCD,
+                                      ClientProxyMembershipID requestingClient,
+                                      EntryEventImpl clientEvent,
+                                      boolean returnTombstones)
     throws CacheLoaderException, TimeoutException  {
 
     Object value = null;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSLRURegionMap.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSLRURegionMap.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSLRURegionMap.java
deleted file mode 100644
index f6c6aa7..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSLRURegionMap.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.internal.cache;
-
-import java.util.Collection;
-
-import org.apache.logging.log4j.Logger;
-
-import com.gemstone.gemfire.cache.CacheWriterException;
-import com.gemstone.gemfire.cache.TimeoutException;
-import com.gemstone.gemfire.i18n.LogWriterI18n;
-import com.gemstone.gemfire.internal.cache.lru.EnableLRU;
-import com.gemstone.gemfire.internal.cache.lru.LRUEntry;
-import com.gemstone.gemfire.internal.cache.lru.NewLRUClockHand;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.internal.logging.LogService;
-
-/**
- * Implementation of RegionMap that reads data from HDFS and adds LRU behavior
- * 
- */
-public class HDFSLRURegionMap extends AbstractLRURegionMap implements HDFSRegionMap {
-
-  private static final Logger logger = LogService.getLogger();
-
-  private final HDFSRegionMapDelegate delegate;
-
-  /**
-   *  A tool from the eviction controller for sizing entries and
-   *  expressing limits.
-   */
-  private EnableLRU ccHelper;
-
-  /**  The list of nodes in LRU order */
-  private NewLRUClockHand lruList;
-
-  private static final boolean DEBUG = Boolean.getBoolean("hdfsRegionMap.DEBUG");
-
-  public HDFSLRURegionMap(LocalRegion owner, Attributes attrs,
-      InternalRegionArguments internalRegionArgs) {
-    super(internalRegionArgs);
-    assert owner instanceof BucketRegion;
-    initialize(owner, attrs, internalRegionArgs);
-    this.delegate = new HDFSRegionMapDelegate(owner, attrs, internalRegionArgs, this);
-  }
-
-  @Override
-  public RegionEntry getEntry(Object key) {
-    return delegate.getEntry(key, null);
-  }
-
-  @Override
-  protected RegionEntry getEntry(EntryEventImpl event) {
-    return delegate.getEntry(event);
-  }
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public Collection<RegionEntry> regionEntries() {
-    return delegate.regionEntries();
-  }
-    
-  @Override
-  public int size() {
-    return delegate.size();
-  }
-    
-  @Override
-  public boolean isEmpty() {
-    return delegate.isEmpty();
-  }
-
-  @Override
-  protected void _setCCHelper(EnableLRU ccHelper) {
-    this.ccHelper = ccHelper;
-  }
-
-  @Override
-  protected EnableLRU _getCCHelper() {
-    return this.ccHelper;
-  }
-
-  @Override
-  protected void _setLruList(NewLRUClockHand lruList) {
-    this.lruList = lruList;
-  }
-
-  @Override
-  protected NewLRUClockHand _getLruList() {
-    return this.lruList;
-  }
-
-  @Override
-  public HDFSRegionMapDelegate getDelegate() {
-    return this.delegate;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMap.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMap.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMap.java
deleted file mode 100644
index 2a7baef..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMap.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.internal.cache;
-
-/**
- * Interface implemented by RegionMap implementations that
- * read from HDFS.
- * 
- *
- */
-public interface HDFSRegionMap {
-
-  /**
-   * @return the {@link HDFSRegionMapDelegate} that does
-   * all the work
-   */
-  public HDFSRegionMapDelegate getDelegate();
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMapDelegate.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMapDelegate.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMapDelegate.java
deleted file mode 100644
index a2ef653..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMapDelegate.java
+++ /dev/null
@@ -1,540 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.internal.cache;
-
-import java.io.IOException;
-import java.lang.ref.Reference;
-import java.lang.ref.ReferenceQueue;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-
-import org.apache.logging.log4j.Logger;
-
-import com.gemstone.gemfire.cache.EvictionAction;
-import com.gemstone.gemfire.cache.CustomEvictionAttributes;
-import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueImpl;
-import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSEntriesSet;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSEntriesSet.HDFSIterator;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSGatewayEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSParallelGatewaySenderQueue;
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.SortedHoplogPersistedEvent;
-import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
-import com.gemstone.gemfire.i18n.LogWriterI18n;
-import com.gemstone.gemfire.internal.cache.LocalRegion.IteratorType;
-import com.gemstone.gemfire.internal.cache.RegionMap.Attributes;
-import com.gemstone.gemfire.internal.cache.lru.EnableLRU;
-import com.gemstone.gemfire.internal.cache.lru.LRUEntry;
-import com.gemstone.gemfire.internal.cache.versions.RegionVersionVector;
-import com.gemstone.gemfire.internal.cache.versions.VersionStamp;
-import com.gemstone.gemfire.internal.cache.versions.VersionTag;
-import com.gemstone.gemfire.internal.cache.wan.AbstractGatewaySender;
-import com.gemstone.gemfire.internal.cache.wan.AbstractGatewaySenderEventProcessor;
-import com.gemstone.gemfire.internal.cache.wan.parallel.ConcurrentParallelGatewaySenderQueue;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
-import com.gemstone.gemfire.internal.util.concurrent.FutureResult;
-
-/**
- * This class encapsulates all the functionality of HDFSRegionMap, so
- * that it can be provided to HDFSLRURegionMap. 
- * 
- */
-public class HDFSRegionMapDelegate {
-
-  private static final Logger logger = LogService.getLogger();
-
-  private final BucketRegion owner;
-
-  private ConcurrentParallelGatewaySenderQueue hdfsQueue;
-
-  private final RegionMap backingRM;
-
-  /** queue of dead iterators */
-  private final ReferenceQueue<HDFSIterator> refs;
-  
-  private static final boolean DEBUG = Boolean.getBoolean("hdfsRegionMap.DEBUG");
-  
-  /**
-   * used for serializing fetches from HDFS
-   */
-  private ConcurrentMap<Object, FutureResult> futures = new ConcurrentHashMap<Object, FutureResult>();
-
-  public HDFSRegionMapDelegate(LocalRegion owner, Attributes attrs,
-      InternalRegionArguments internalRegionArgs, RegionMap backingRM) {
-    assert owner instanceof BucketRegion;
-    this.owner = (BucketRegion) owner;
-    this.backingRM = backingRM;
-    refs = new ReferenceQueue<HDFSEntriesSet.HDFSIterator>();
-  }
-
-  public RegionEntry getEntry(Object key, EntryEventImpl event) {
-    
-    RegionEntry re = getEntry(key, event, true);
-    // get from tx should put the entry back in map
-    // it should be evicted once tx completes
-    /**MergeGemXDHDFSToGFE txstate does not apply for this*/
-    /* if (re != null && getTXState(event) != null) {
-    if (re != null) {
-      // put the region entry in backing CHM of AbstractRegionMap so that
-      // it can be locked in basicPut/destroy
-      RegionEntry oldRe = backingRM.putEntryIfAbsent(key, re);
-      if (oldRe != null) {
-        if (re instanceof OffHeapRegionEntry && !oldRe.equals(re)) {
-          ((OffHeapRegionEntry)re).release();
-        }
-        return oldRe;
-      }
-      re.setMarkedForEviction();
-      owner.updateSizeOnCreate(key,
-          owner.calculateRegionEntryValueSize(re));
-      ((AbstractRegionMap)backingRM).incEntryCount(1);
-      ((AbstractRegionMap)backingRM).lruEntryCreate(re);
-    }*/
-    return re;
-  }
-
-  /*
-  private TXStateInterface getTXState(EntryEventImpl event) {
-    return event != null ? event.getTXState(this.owner) : this.owner
-        .getTXState();
-  }*/
-
-  /**
-   * 
-   * @param key
-   * @param event
-   * @param forceOnHeap if true will return heap version of off-heap region entries
-   */
-  private RegionEntry getEntry(Object key, EntryEventImpl event, boolean forceOnHeap) {
-    closeDeadIterators();
-    
-    RegionEntry re = backingRM.getEntryInVM(key);
-    if (logger.isTraceEnabled() || DEBUG) {
-      logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "HDFS: Found the key in CHM: " + key
-          + " ,value=" + (re == null? "null" : "[" + re._getValue() + " or (" + re.getValueAsToken() + ")]")));
-    }
-    if ((re == null || (re.isRemoved() && !re.isTombstone()))
-        && owner.getBucketAdvisor().isPrimary() && allowReadFromHDFS()) {
-      if (logger.isTraceEnabled() || DEBUG) {
-        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "HDFS: fetching from hdfs key:" + key));
-      }
-      try {
-        this.owner.getPartitionedRegion().hdfsCalled(key);
-        re = getEntryFromFuture(key);
-        if (re != null) {
-          return re;
-        }
-
-        assert this.owner.getPartitionedRegion().getDataPolicy()
-            .withHDFS();
-        byte[] k = EntryEventImpl.serialize(key);
-      
-        // for destroy ops we will retain the entry in the region map so
-        // tombstones can be tracked
-        //final boolean forceOnHeap = (event==null || !event.getOperation().isDestroy());
-        
-        // get from queue
-        re = getFromHDFSQueue(key, k, forceOnHeap);
-        if (re == null) {
-          // get from HDFS
-          re = getFromHDFS(key, k, forceOnHeap);
-        }
-        if (re != null && re.isTombstone()) {
-          RegionVersionVector vector = this.owner.getVersionVector();
-//          if (vector == null) {
-//            this.owner.getLogWriterI18n().info(LocalizedStrings.DEBUG,
-//            "found a tombstone in a region w/o a version vector: " + re + "; region: " + this.owner);
-//          }
-          if (vector == null
-              || vector.isTombstoneTooOld(re.getVersionStamp().getMemberID(),
-                                    re.getVersionStamp().getRegionVersion())) {
-            re = null;
-          }
-        }
-        if (logger.isTraceEnabled() || DEBUG) {
-          logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "HDFS: returning from hdfs re:" + re));
-        }
-      } catch (ForceReattemptException e) {
-        throw new PrimaryBucketException(e.getLocalizedMessage(), e);
-      } catch (IOException e) {
-        throw new HDFSIOException("Error reading from HDFS", e);
-      } finally {
-        notifyFuture(key, re);
-        // If we mark it here, the table scan may miss it causing updates/delete using table
-        // scan to fail.
-//        if (re != null) {
-//          re.setMarkedForEviction();
-//        }
-        if(re != null && event != null && !re.isTombstone()) {
-          if (logger.isTraceEnabled() || DEBUG) {
-            logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "HDFS: loaded from hdfs re:" + re));
-          }
-          BucketRegion br = (BucketRegion)owner;
-          //CustomEvictionAttributes csAttr = br.getCustomEvictionAttributes();
-          //if(csAttr!=null)
-          event.setLoadedFromHDFS(true);
-        }
-      }
-    }
-    if(re!=null && re.isMarkedForEviction() && !re.isTombstone()) {
-      if(event!=null) {
-        event.setLoadedFromHDFS(true);
-      }
-    }
-
-    return re;
-  }
-
-  /**
-   * This method returns true if the RegionEntry should be read from HDFS.
-   * fixes #49101 by not allowing reads from HDFS for persistent regions
-   * that do not define an eviction criteria.
-   * 
-   * @return true if RegionEntry should be read from HDFS
-   */
-  private boolean allowReadFromHDFS() {
-    if (!owner.getDataPolicy().withPersistence()
-        || owner.getCustomEvictionAttributes() != null
-        || isEvictionActionLocalDestroy()){
-        /**MergeGemXDHDFSToGFE this is used for global index. Hence not required here*/ 
-        //|| owner.isUsedForIndex()) {
-      // when region does not have persistence, we have to read from HDFS (even
-      // though there is no eviction criteria) for constraint checks
-      return true;
-    }
-    return false;
-  }
-
-  private boolean isEvictionActionLocalDestroy() {
-    PartitionedRegion pr = owner.getPartitionedRegion();
-    if (pr.getEvictionAttributes() != null) {
-      return pr.getEvictionAttributes().getAction() == EvictionAction.LOCAL_DESTROY;
-    }
-    return false;
-  }
-
-  protected RegionEntry getEntry(EntryEventImpl event) {
-    RegionEntry re = getEntry(event.getKey(), event, false);
-    if (re != null && event.isLoadedFromHDFS()) {
-      // put the region entry in backing CHM of AbstractRegionMap so that
-      // it can be locked in basicPut/destroy
-      RegionEntry oldRe = backingRM.putEntryIfAbsent(event.getKey(), re);
-      if (oldRe != null) {
-        if (re instanceof OffHeapRegionEntry && !oldRe.equals(re)) {
-          ((OffHeapRegionEntry) re).release();
-        }
-        return oldRe;
-      }
-      // since the entry is faulted in from HDFS, it must have
-      // satisfied the eviction criteria in the past, so mark it for eviction
-      re.setMarkedForEviction();
-
-      owner.updateSizeOnCreate(event.getKey(), owner.calculateRegionEntryValueSize(re));
-      ((AbstractRegionMap) backingRM).incEntryCount(1);
-      ((AbstractRegionMap) backingRM).lruEntryCreate(re);
-    }
-    return re;
-  }
-
-  @SuppressWarnings("unchecked")
-  public Collection<RegionEntry> regionEntries() {
-    closeDeadIterators();
-    if (!owner.getPartitionedRegion().includeHDFSResults()) {
-      if (logger.isDebugEnabled() || DEBUG) {
-        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Ignoring HDFS results for #regionEntries"));
-      }
-      return backingRM.regionEntriesInVM();
-    }
-
-    try {
-      return createEntriesSet(IteratorType.ENTRIES);
-    } catch (ForceReattemptException e) {
-      throw new PrimaryBucketException(e.getLocalizedMessage(), e);
-    }
-  }
-    
-  public int size() {
-    closeDeadIterators();
-    if (!owner.getPartitionedRegion().includeHDFSResults()) {
-      if (logger.isDebugEnabled() || DEBUG) {
-        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Ignoring HDFS results for #size"));
-      }
-      return backingRM.sizeInVM();
-    }
-
-    try {
-      return createEntriesSet(IteratorType.KEYS).size();
-    } catch (ForceReattemptException e) {
-      throw new PrimaryBucketException(e.getLocalizedMessage(), e);
-    }
-  }
-    
-  public boolean isEmpty() {
-    closeDeadIterators();
-    if (!owner.getPartitionedRegion().includeHDFSResults()) {
-      if (logger.isDebugEnabled() || DEBUG) {
-        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Ignoring HDFS results for #isEmpty"));
-      }
-      return backingRM.sizeInVM() == 0;
-    }
-
-    try {
-      return createEntriesSet(IteratorType.KEYS).isEmpty();
-    } catch (ForceReattemptException e) {
-      throw new PrimaryBucketException(e.getLocalizedMessage(), e);
-    }
-  }
-  
-  private void notifyFuture(Object key, RegionEntry re) {
-    FutureResult future = this.futures.remove(key);
-    if (future != null) {
-      future.set(re);
-    }
-  }
-
-  private RegionEntry getEntryFromFuture(Object key) {
-    FutureResult future = new FutureResult(this.owner.getCancelCriterion());
-    FutureResult old = this.futures.putIfAbsent(key, future);
-    if (old != null) {
-      if (logger.isTraceEnabled() || DEBUG) {
-        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "HDFS: waiting for concurrent fetch to complete for key:" + key));
-      }
-      try {
-        return (RegionEntry) old.get();
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-        this.owner.getCache().getCancelCriterion().checkCancelInProgress(null);
-      }
-    }
-    return null;
-  }
-
-  private RegionEntry getFromHDFS(Object key, byte[] k, boolean forceOnHeap) throws IOException, ForceReattemptException {
-    SortedHoplogPersistedEvent ev;
-    try {
-      ev = (SortedHoplogPersistedEvent) owner.getHoplogOrganizer().read(k);
-    } catch (IOException e) {
-      owner.checkForPrimary();
-      throw e;
-    }
-    if (ev != null) {
-      if (logger.isTraceEnabled() || DEBUG) {
-        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "HDFS: got from hdfs ev:" + ev));
-      }
-      return getEntryFromEvent(key, ev, forceOnHeap, false);
-    }
-    return null;
-  }
-
-  /**
-   * set the versionTag on the newly faulted-in entry
-   */
-  private void setVersionTag(RegionEntry re, VersionTag versionTag) {
-    if (owner.concurrencyChecksEnabled) {
-      versionTag.setMemberID(
-            owner.getVersionVector().getCanonicalId(versionTag.getMemberID()));
-      VersionStamp versionedRe = (VersionStamp) re;
-      versionedRe.setVersions(versionTag);
-    }
-  }
-
-  private RegionEntry getFromHDFSQueue(Object key, byte[] k, boolean forceOnHeap) throws ForceReattemptException {
-    ConcurrentParallelGatewaySenderQueue q = getHDFSQueue();
-    if (q == null) return null;
-    HDFSGatewayEventImpl hdfsGatewayEvent = (HDFSGatewayEventImpl) q.get(owner.getPartitionedRegion(), k, owner.getId());
-    if (hdfsGatewayEvent != null) {
-      if (logger.isTraceEnabled() || DEBUG) {
-        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "HDFS: got from hdfs queue: " + hdfsGatewayEvent));
-      }
-      return getEntryFromEvent(key, hdfsGatewayEvent, forceOnHeap, false);
-    }
-    return null;
-  }
-
-  private ConcurrentParallelGatewaySenderQueue getHDFSQueue()
-      throws ForceReattemptException {
-    if (this.hdfsQueue == null) {
-      String asyncQId = this.owner.getPartitionedRegion().getHDFSEventQueueName();
-      final AsyncEventQueueImpl asyncQ =  (AsyncEventQueueImpl)this.owner.getCache().getAsyncEventQueue(asyncQId);
-      final AbstractGatewaySender gatewaySender = (AbstractGatewaySender)asyncQ.getSender();
-      AbstractGatewaySenderEventProcessor ep = gatewaySender.getEventProcessor();
-      if (ep == null) return null;
-      hdfsQueue = (ConcurrentParallelGatewaySenderQueue)ep.getQueue();
-    }
-    
-    // Check whether the queue has become primary here.
-    // There could be some time between bucket becoming a primary
-    // and underlying queue becoming a primary, so isPrimaryWithWait()
-    // waits for some time for the queue to become a primary on this member
-    final HDFSBucketRegionQueue brq = hdfsQueue.getBucketRegionQueue(
-        this.owner.getPartitionedRegion(), this.owner.getId());
-    if (brq != null) {
-      if (owner.getBucketAdvisor().isPrimary()
-          && !brq.getBucketAdvisor().isPrimaryWithWait()) {
-        InternalDistributedMember primaryHolder = brq.getBucketAdvisor()
-            .basicGetPrimaryMember();
-        throw new PrimaryBucketException("Bucket " + brq.getName()
-            + " is not primary. Current primary holder is " + primaryHolder);
-      }
-    }
-      
-    return hdfsQueue;
-  }
-
-  public RegionEntry getEntryFromEvent(Object key, HDFSGatewayEventImpl event, boolean forceOnHeap, boolean forUpdate) {
-    Object val;
-    if (event.getOperation().isDestroy()) {
-      val = Token.TOMBSTONE;
-    } else if (event.getOperation().isInvalidate()) {
-      val = Token.INVALID;
-    } else {
-      val = event.getValue();
-    }
-    RegionEntry re = null;
-    final TXStateInterface tx = owner.getTXState();
-    if (tx == null) {
-      re = createRegionEntry(key, val, event.getVersionTag(), forceOnHeap);
-      return re;
-    }
-    else
-    if (val != null) {
-      if (((re = this.backingRM.getEntryInVM(key)) == null)
-          || (re.isRemoved() && !re.isTombstone())) {
-        boolean shouldCreateOnHeapEntry = !(owner.getOffHeap() && forUpdate); 
-        re = createRegionEntry(key, val, event.getVersionTag(), shouldCreateOnHeapEntry);
-        if (forUpdate) {
-          if (re != null && tx != null) {
-            // put the region entry in backing CHM of AbstractRegionMap so that
-            // it can be locked in basicPut/destroy
-            RegionEntry oldRe = backingRM.putEntryIfAbsent(key, re);
-            if (oldRe != null) {
-              if (re instanceof OffHeapRegionEntry && !oldRe.equals(re)) {
-                ((OffHeapRegionEntry)re).release();
-              }
-              return oldRe;
-            }
-            re.setMarkedForEviction();
-            owner.updateSizeOnCreate(key,
-                owner.calculateRegionEntryValueSize(re));
-            ((AbstractRegionMap)backingRM).incEntryCount(1);
-            ((AbstractRegionMap)backingRM).lruEntryCreate(re);
-          }
-        }
-      }
-    }
-    return re;
-  }
-
-  public RegionEntry getEntryFromEvent(Object key, SortedHoplogPersistedEvent event, boolean forceOnHeap, boolean forUpdate) {
-    Object val = getValueFromEvent(event);
-    RegionEntry re = null;
-    final TXStateInterface tx = owner.getTXState();
-    if (tx == null) {
-      re = createRegionEntry(key, val, event.getVersionTag(), forceOnHeap);
-      return re;
-    }
-    else // FOR TX case, we need to create off heap entry if required
-    if (val != null) {
-      if (((re = this.backingRM.getEntryInVM(key)) == null)
-          || (re.isRemoved() && !re.isTombstone())) {
-        boolean shouldCreateOnHeapEntry = !(owner.getOffHeap() && forUpdate); 
-        re = createRegionEntry(key, val, event.getVersionTag(), shouldCreateOnHeapEntry);
-        if(forUpdate) {
-          if (re != null && tx != null) {
-            // put the region entry in backing CHM of AbstractRegionMap so that
-            // it can be locked in basicPut/destroy
-            RegionEntry oldRe = backingRM.putEntryIfAbsent(key, re);
-            if (oldRe != null) {
-              if (re instanceof OffHeapRegionEntry && !oldRe.equals(re)) {
-                ((OffHeapRegionEntry)re).release();
-              }
-              return oldRe;
-            }
-            re.setMarkedForEviction();
-            owner.updateSizeOnCreate(key,
-                owner.calculateRegionEntryValueSize(re));
-            ((AbstractRegionMap)backingRM).incEntryCount(1);
-            ((AbstractRegionMap)backingRM).lruEntryCreate(re);
-          }
-        }
-      }
-    }
-    return re;
-  }
-
-  private RegionEntry createRegionEntry(Object key, Object value, VersionTag tag, boolean forceOnHeap) {
-    RegionEntryFactory ref = backingRM.getEntryFactory();
-    if (forceOnHeap) {
-      ref = ref.makeOnHeap();
-    }
-    value = getValueDuringGII(key, value);
-    RegionEntry re = ref.createEntry(this.owner, key, value);
-    setVersionTag(re, tag);
-    if (re instanceof LRUEntry) {
-      assert backingRM instanceof AbstractLRURegionMap;
-      EnableLRU ccHelper = ((AbstractLRURegionMap)backingRM)._getCCHelper();
-      ((LRUEntry)re).updateEntrySize(ccHelper);
-    }
-    return re;
-  }
-
-  private Object getValueDuringGII(Object key, Object value) {
-    if (owner.getIndexUpdater() != null && !owner.isInitialized()) {
-      return AbstractRegionMap.listOfDeltasCreator.newValue(key, owner, value,
-          null);
-    }
-    return value;
-  }
-
-  private Set createEntriesSet(IteratorType type)
-      throws ForceReattemptException {
-    ConcurrentParallelGatewaySenderQueue q = getHDFSQueue();
-    if (q == null) return Collections.emptySet();
-    HDFSBucketRegionQueue brq = q.getBucketRegionQueue(this.owner.getPartitionedRegion(), owner.getId());
-    return new HDFSEntriesSet(owner, brq, owner.getHoplogOrganizer(), type, refs);
-  }
-
-  private void closeDeadIterators() {
-    Reference<? extends HDFSIterator> weak;
-    while ((weak = refs.poll()) != null) {
-      if (logger.isTraceEnabled() || DEBUG) {
-        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Closing weak ref for iterator "
-            + weak.get()));
-      }
-      weak.get().close();
-    }
-  }
-
-  /**
-   * gets the value from event, deserializing if necessary.
-   */
-  private Object getValueFromEvent(PersistedEventImpl ev) {
-    if (ev.getOperation().isDestroy()) {
-      return Token.TOMBSTONE;
-    } else if (ev.getOperation().isInvalidate()) {
-      return Token.INVALID;
-    }
-    return ev.getValue();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMapImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMapImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMapImpl.java
deleted file mode 100644
index 9336ed7..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMapImpl.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.internal.cache;
-
-import java.util.Collection;
-
-import com.gemstone.gemfire.cache.CacheWriterException;
-import com.gemstone.gemfire.cache.TimeoutException;
-import com.gemstone.gemfire.internal.size.SingleObjectSizer;
-
-/**
- * Implementation of RegionMap that reads data from HDFS.
- * 
- */
-public class HDFSRegionMapImpl extends AbstractRegionMap implements HDFSRegionMap {
-
-  private final HDFSRegionMapDelegate delegate;
-
-  private static final boolean DEBUG = Boolean.getBoolean("hdfsRegionMap.DEBUG");
-
-  public HDFSRegionMapImpl(LocalRegion owner, Attributes attrs,
-      InternalRegionArguments internalRegionArgs) {
-    super(internalRegionArgs);
-    assert owner instanceof BucketRegion;
-    initialize(owner, attrs, internalRegionArgs, false);
-    this.delegate = new HDFSRegionMapDelegate(owner, attrs, internalRegionArgs, this);
-  }
-
-  @Override
-  public RegionEntry getEntry(Object key) {
-    return delegate.getEntry(key, null);
-  }
-
-  @Override
-  protected RegionEntry getEntry(EntryEventImpl event) {
-    return delegate.getEntry(event);
-  }
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public Collection<RegionEntry> regionEntries() {
-    return delegate.regionEntries();
-  }
-    
-  @Override
-  public int size() {
-    return delegate.size();
-  }
-    
-  @Override
-  public boolean isEmpty() {
-    return delegate.isEmpty();
-  }
-
-  @Override
-  public HDFSRegionMapDelegate getDelegate() {
-    return this.delegate;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalCache.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalCache.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalCache.java
index 36eee80..bda5a27 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalCache.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalCache.java
@@ -20,8 +20,6 @@ package com.gemstone.gemfire.internal.cache;
 import java.util.Collection;
 
 import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSStoreDirector;
 import com.gemstone.gemfire.cache.query.internal.cq.CqService;
 import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.internal.cache.extension.Extensible;
@@ -45,7 +43,5 @@ public interface InternalCache extends Cache, Extensible<Cache> {
 
   public CqService getCqService();
   
-  public Collection<HDFSStoreImpl> getHDFSStores() ;
-  
   public <T extends CacheService> T getService(Class<T> clazz);
 }
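
After this removal, InternalCache no longer exposes an HDFS store lookup; the generic getService hook shown in the context above remains. A hedged sketch of how a cache service would be looked up through it (the service interface here is purely hypothetical):

  package com.gemstone.gemfire.internal.cache;

  // Sketch only; placed in the same package for brevity, not part of the commit.
  public class ServiceLookupSketch {
    // Hypothetical service type used only to illustrate the generic lookup.
    interface MyCacheService extends CacheService {
      void doSomething();
    }

    static void useService(InternalCache cache) {
      MyCacheService service = cache.getService(MyCacheService.class);
      if (service != null) {
        service.doSomething();
      }
    }
  }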

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalDataView.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalDataView.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalDataView.java
index e506f2e..0885477 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalDataView.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalDataView.java
@@ -39,17 +39,22 @@ public interface InternalDataView {
    * @param keyInfo
    * @param localRegion
    * @param updateStats
-   * @param disableCopyOnRead 
-   * @param preferCD 
+   * @param disableCopyOnRead
+   * @param preferCD
    * @param clientEvent TODO
    * @param returnTombstones TODO
    * @param retainResult if true then the result may be a retained off-heap reference
    * @return the object associated with the key
    */
   @Retained
-  Object getDeserializedValue(KeyInfo keyInfo, LocalRegion localRegion,
-      boolean updateStats, boolean disableCopyOnRead, boolean preferCD, EntryEventImpl clientEvent, 
-      boolean returnTombstones, boolean allowReadFromHDFS, boolean retainResult);
+  Object getDeserializedValue(KeyInfo keyInfo,
+                              LocalRegion localRegion,
+                              boolean updateStats,
+                              boolean disableCopyOnRead,
+                              boolean preferCD,
+                              EntryEventImpl clientEvent,
+                              boolean returnTombstones,
+                              boolean retainResult);
 
   /**
    * @param event
@@ -182,8 +187,8 @@ public interface InternalDataView {
    * @return the Object associated with the key
    */
   Object findObject(KeyInfo key, LocalRegion r, boolean isCreate, boolean generateCallbacks,
-      Object value, boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient,
-      EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS);
+                    Object value, boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient,
+                    EntryEventImpl clientEvent, boolean returnTombstones);
 
 
   /**
@@ -224,13 +229,18 @@ public interface InternalDataView {
    * 
    * @param localRegion
    * @param key
-   * @param doNotLockEntry 
+   * @param doNotLockEntry
    * @param requestingClient the client that made the request, or null if not from a client
    * @param clientEvent the client event, if any
    * @param returnTombstones TODO
    * @return the serialized value from the cache
    */
-  Object getSerializedValue(LocalRegion localRegion, KeyInfo key, boolean doNotLockEntry, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) throws DataLocationException;
+  Object getSerializedValue(LocalRegion localRegion,
+                            KeyInfo key,
+                            boolean doNotLockEntry,
+                            ClientProxyMembershipID requestingClient,
+                            EntryEventImpl clientEvent,
+                            boolean returnTombstones) throws DataLocationException;
 
   abstract void checkSupportsRegionDestroy() throws UnsupportedOperationInTransactionException;
   abstract void checkSupportsRegionInvalidate() throws UnsupportedOperationInTransactionException;
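
For callers, the net effect of this hunk is that the long boolean parameter lists of getDeserializedValue, findObject and getSerializedValue each lose the allowReadFromHDFS flag. A hedged sketch of a call site against the new getDeserializedValue signature (argument values are illustrative only):

  package com.gemstone.gemfire.internal.cache;

  // Sketch only; not part of the commit. The view, keyInfo and region are
  // assumed to be supplied by the surrounding code.
  public class DataViewCallSketch {
    static Object readValue(InternalDataView view, KeyInfo keyInfo, LocalRegion region) {
      return view.getDeserializedValue(keyInfo,
                                       region,
                                       true,   // updateStats
                                       false,  // disableCopyOnRead
                                       false,  // preferCD
                                       null,   // clientEvent
                                       false,  // returnTombstones
                                       false); // retainResult
    }
  }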

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalRegionArguments.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalRegionArguments.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalRegionArguments.java
index 41e763d..f7d46fe 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalRegionArguments.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalRegionArguments.java
@@ -37,7 +37,6 @@ public final class InternalRegionArguments
   private boolean isUsedForPartitionedRegionAdmin;
   private boolean isUsedForSerialGatewaySenderQueue;
   private boolean isUsedForParallelGatewaySenderQueue;
-  private boolean isUsedForHDFSParallelGatewaySenderQueue = false;
   private int bucketRedundancy;
   private boolean isUsedForPartitionedRegionBucket;
   private RegionAdvisor partitionedRegionAdvisor;
@@ -273,26 +272,11 @@ public final class InternalRegionArguments
     this.isUsedForParallelGatewaySenderQueue = queueFlag;
     return this;
   }
-  public InternalRegionArguments setIsUsedForHDFSParallelGatewaySenderQueue(
-      boolean queueFlag) {
-    this.isUsedForHDFSParallelGatewaySenderQueue = queueFlag;
-    return this;
-  }
 
   public boolean isUsedForParallelGatewaySenderQueue() {
     return this.isUsedForParallelGatewaySenderQueue;
   }
   
-  public boolean isUsedForHDFSParallelGatewaySenderQueue() {
-    return this.isUsedForHDFSParallelGatewaySenderQueue;
-  }
-  
-  public boolean isReadWriteHDFSRegion() {
-    return isUsedForPartitionedRegionBucket()
-        && getPartitionedRegion().getHDFSStoreName() != null
-        && !getPartitionedRegion().getHDFSWriteOnly();
-  }
-
   public InternalRegionArguments setParallelGatewaySender(
       AbstractGatewaySender pgSender) {
     this.parallelGatewaySender = pgSender;

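The setters that remain in this class (for example setParallelGatewaySender above) keep the fluent, chainable shape that the removed HDFS setter also had: each mutator returns this so several arguments can be configured in one expression. A tiny stand-alone example of the idiom, with hypothetical names (RegionArgs is not a Geode class):

    class RegionArgs {
      private boolean usedForParallelQueue;
      private int bucketRedundancy;

      RegionArgs setUsedForParallelQueue(boolean flag) {
        this.usedForParallelQueue = flag;
        return this; // returning this is what enables chaining
      }

      RegionArgs setBucketRedundancy(int redundancy) {
        this.bucketRedundancy = redundancy;
        return this;
      }

      public static void main(String[] args) {
        RegionArgs args = new RegionArgs().setUsedForParallelQueue(true).setBucketRedundancy(1);
        System.out.println(args.usedForParallelQueue + " " + args.bucketRedundancy); // true 1
      }
    }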
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java
index b3de9b7..3ad294c 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java
@@ -116,11 +116,6 @@ import com.gemstone.gemfire.cache.client.internal.ServerRegionProxy;
 import com.gemstone.gemfire.cache.control.ResourceManager;
 import com.gemstone.gemfire.cache.execute.Function;
 import com.gemstone.gemfire.cache.execute.ResultCollector;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSIntegrationUtil;
-import com.gemstone.gemfire.cache.hdfs.internal.HoplogListenerForRegion;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
 import com.gemstone.gemfire.cache.partition.PartitionRegionHelper;
 import com.gemstone.gemfire.cache.query.FunctionDomainException;
 import com.gemstone.gemfire.cache.query.Index;
@@ -465,10 +460,6 @@ public class LocalRegion extends AbstractRegion
   // Lock for updating PR MetaData on client side 
   public final Lock clientMetaDataLock = new ReentrantLock();
   
-  
-  protected HdfsRegionManager hdfsManager;
-  protected HoplogListenerForRegion hoplogListener;
-
   /**
    * There seem to be cases where a region can be created and yet the
    * distributed system is not yet in place...
@@ -641,7 +632,6 @@ public class LocalRegion extends AbstractRegion
       }
     }
 
-    this.hdfsManager = initHDFSManager();
     this.dsi = findDiskStore(attrs, internalRegionArgs);
     this.diskRegion = createDiskRegion(internalRegionArgs);
     this.entries = createRegionMap(internalRegionArgs);
@@ -696,22 +686,8 @@ public class LocalRegion extends AbstractRegion
     
   }
 
-  private HdfsRegionManager initHDFSManager() {
-    HdfsRegionManager hdfsMgr = null;
-    if (this.getHDFSStoreName() != null) {
-      this.hoplogListener = new HoplogListenerForRegion();
-      HDFSRegionDirector.getInstance().setCache(cache);
-      hdfsMgr = HDFSRegionDirector.getInstance().manageRegion(this, 
-          this.getHDFSStoreName(), hoplogListener);
-    }
-    return hdfsMgr;
-  }
-
   private RegionMap createRegionMap(InternalRegionArguments internalRegionArgs) {
     RegionMap result = null;
-	if ((internalRegionArgs.isReadWriteHDFSRegion()) && this.diskRegion != null) {
-      this.diskRegion.setEntriesMapIncompatible(true);
-    }
     if (this.diskRegion != null) {
       result = this.diskRegion.useExistingRegionMap(this);
     }
@@ -977,11 +953,6 @@ public class LocalRegion extends AbstractRegion
           existing = (LocalRegion)this.subregions.get(subregionName);
 
           if (existing == null) {
-            // create the async queue for HDFS if required. 
-            HDFSIntegrationUtil.createAndAddAsyncQueue(regionPath,
-                regionAttributes, this.cache);
-            regionAttributes = cache.setEvictionAttributesForLargeRegion(
-                regionAttributes);
             if (regionAttributes.getScope().isDistributed()
                 && internalRegionArgs.isUsedForPartitionedRegionBucket()) {
               final PartitionedRegion pr = internalRegionArgs
@@ -991,15 +962,8 @@ public class LocalRegion extends AbstractRegion
               internalRegionArgs.setKeyRequiresRegionContext(pr
                   .keyRequiresRegionContext());
               if (pr.isShadowPR()) {
-                if (!pr.isShadowPRForHDFS()) {
-                    newRegion = new BucketRegionQueue(subregionName, regionAttributes,
-                      this, this.cache, internalRegionArgs);
-                }
-                else {
-                   newRegion = new HDFSBucketRegionQueue(subregionName, regionAttributes,
-                      this, this.cache, internalRegionArgs);
-                }
-                
+                newRegion = new BucketRegionQueue(subregionName, regionAttributes,
+                  this, this.cache, internalRegionArgs);
               } else {
                 newRegion = new BucketRegion(subregionName, regionAttributes,
                     this, this.cache, internalRegionArgs);  
@@ -1134,7 +1098,6 @@ public class LocalRegion extends AbstractRegion
       if (event.getEventId() == null && generateEventID()) {
         event.setNewEventId(cache.getDistributedSystem());
       }
-      assert event.isFetchFromHDFS() : "validatedPut() should have been called";
       // Fix for 42448 - Only make create with null a local invalidate for
       // normal regions. Otherwise, it will become a distributed invalidate.
       if (getDataPolicy() == DataPolicy.NORMAL) {
@@ -1261,18 +1224,20 @@ public class LocalRegion extends AbstractRegion
    * @param retainResult if true then the result may be a retained off-heap reference
    * @return the value for the given key
    */
-  public final Object getDeserializedValue(RegionEntry re, final KeyInfo keyInfo, final boolean updateStats, boolean disableCopyOnRead, 
-  boolean preferCD, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS, boolean retainResult) {
+  public final Object getDeserializedValue(RegionEntry re,
+                                           final KeyInfo keyInfo,
+                                           final boolean updateStats,
+                                           boolean disableCopyOnRead,
+                                           boolean preferCD,
+                                           EntryEventImpl clientEvent,
+                                           boolean returnTombstones,
+                                           boolean retainResult) {
     if (this.diskRegion != null) {
       this.diskRegion.setClearCountReference();
     }
     try {
       if (re == null) {
-        if (allowReadFromHDFS) {
-          re = this.entries.getEntry(keyInfo.getKey());
-        } else {
-          re = this.entries.getOperationalEntryInVM(keyInfo.getKey());
-        }
+        re = this.entries.getEntry(keyInfo.getKey());
       }
       //skip updating the stats if the value is null
       // TODO - We need to clean up the callers of the this class so that we can
@@ -1382,7 +1347,7 @@ public class LocalRegion extends AbstractRegion
   public Object get(Object key, Object aCallbackArgument,
       boolean generateCallbacks, EntryEventImpl clientEvent) throws TimeoutException, CacheLoaderException
   {
-    Object result = get(key, aCallbackArgument, generateCallbacks, false, false, null, clientEvent, false, true/*allowReadFromHDFS*/);
+    Object result = get(key, aCallbackArgument, generateCallbacks, false, false, null, clientEvent, false);
     if (Token.isInvalid(result)) {
       result = null;
     }
@@ -1392,11 +1357,16 @@ public class LocalRegion extends AbstractRegion
   /*
    * @see BucketRegion#getSerialized(KeyInfo, boolean, boolean)
    */
-  public Object get(Object key, Object aCallbackArgument,
-	      boolean generateCallbacks, boolean disableCopyOnRead, boolean preferCD,
-	      ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) throws TimeoutException, CacheLoaderException {
+  public Object get(Object key,
+                    Object aCallbackArgument,
+                    boolean generateCallbacks,
+                    boolean disableCopyOnRead,
+                    boolean preferCD,
+                    ClientProxyMembershipID requestingClient,
+                    EntryEventImpl clientEvent,
+                    boolean returnTombstones) throws TimeoutException, CacheLoaderException {
 	  return get(key, aCallbackArgument,
-		      generateCallbacks, disableCopyOnRead, preferCD,requestingClient, clientEvent, returnTombstones, false, allowReadFromHDFS, false);
+		      generateCallbacks, disableCopyOnRead, preferCD,requestingClient, clientEvent, returnTombstones, false, false);
   }
   
   /**
@@ -1418,16 +1388,17 @@ public class LocalRegion extends AbstractRegion
   public Object getRetained(Object key, Object aCallbackArgument,
       boolean generateCallbacks, boolean disableCopyOnRead,
       ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean opScopeIsLocal) throws TimeoutException, CacheLoaderException {
-    return get(key, aCallbackArgument, generateCallbacks, disableCopyOnRead, true, requestingClient, clientEvent, returnTombstones, opScopeIsLocal, true, false/* see GEODE-1291*/);
+    return get(key, aCallbackArgument, generateCallbacks, disableCopyOnRead, true, requestingClient, clientEvent, returnTombstones, opScopeIsLocal,
+      false /* see GEODE-1291*/);
   }
   /**
    * @param opScopeIsLocal if true then just check local storage for a value; if false then try to find the value if it is not local
    * @param retainResult if true then the result may be a retained off-heap reference.
    */
   public Object get(Object key, Object aCallbackArgument,
-      boolean generateCallbacks, boolean disableCopyOnRead, boolean preferCD,
-      ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, 
-	  boolean opScopeIsLocal, boolean allowReadFromHDFS, boolean retainResult) throws TimeoutException, CacheLoaderException
+                    boolean generateCallbacks, boolean disableCopyOnRead, boolean preferCD,
+                    ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones,
+                    boolean opScopeIsLocal, boolean retainResult) throws TimeoutException, CacheLoaderException
   {
     assert !retainResult || preferCD;
     validateKey(key);
@@ -1440,7 +1411,8 @@ public class LocalRegion extends AbstractRegion
     boolean isMiss = true;
     try {
       KeyInfo keyInfo = getKeyInfo(key, aCallbackArgument);
-      Object value = getDataView().getDeserializedValue(keyInfo, this, true, disableCopyOnRead, preferCD, clientEvent, returnTombstones, allowReadFromHDFS, retainResult);
+      Object value = getDataView().getDeserializedValue(keyInfo, this, true, disableCopyOnRead, preferCD, clientEvent, returnTombstones,
+        retainResult);
       final boolean isCreate = value == null;
       isMiss = value == null || Token.isInvalid(value)
           || (!returnTombstones && value == Token.TOMBSTONE);
@@ -1453,13 +1425,13 @@ public class LocalRegion extends AbstractRegion
         // if scope is local and there is no loader, then
         // don't go further to try and get value
         if (!opScopeIsLocal
-            && ((getScope().isDistributed() && !isHDFSRegion())
+            && ((getScope().isDistributed())
                 || hasServerProxy()
                 || basicGetLoader() != null)) { 
           // serialize search/load threads if not in txn
           value = getDataView().findObject(keyInfo,
               this, isCreate, generateCallbacks, value, disableCopyOnRead,
-              preferCD, requestingClient, clientEvent, returnTombstones, false/*allowReadFromHDFS*/);
+              preferCD, requestingClient, clientEvent, returnTombstones);
           if (!returnTombstones && value == Token.TOMBSTONE) {
             value = null;
           }
@@ -1485,7 +1457,7 @@ public class LocalRegion extends AbstractRegion
    */
   final public void recordMiss(final RegionEntry re, Object key) {
     final RegionEntry e;
-    if (re == null && !isTX() && !isHDFSRegion()) {
+    if (re == null && !isTX()) {
       e = basicGetEntry(key);
     } else {
       e = re;
@@ -1494,60 +1466,30 @@ public class LocalRegion extends AbstractRegion
   }
 
   /**
-   * @return true if this region has been configured for HDFS persistence
-   */
-  public boolean isHDFSRegion() {
-    return false;
-  }
-
-  /**
-   * @return true if this region is configured to read and write data from HDFS
-   */
-  public boolean isHDFSReadWriteRegion() {
-    return false;
-  }
-
-  /**
-   * @return true if this region is configured to only write to HDFS
-   */
-  protected boolean isHDFSWriteOnly() {
-    return false;
-  }
-
-  /**
-   * FOR TESTING ONLY
-   */
-  public HoplogListenerForRegion getHoplogListener() {
-    return hoplogListener;
-  }
-  
-  /**
-   * FOR TESTING ONLY
-   */
-  public HdfsRegionManager getHdfsRegionManager() {
-    return hdfsManager;
-  }
-  
-  /**
    * optimized to only allow one thread to do a search/load, other threads wait
    * on a future
-   *
-   * @param keyInfo
+   *  @param keyInfo
    * @param p_isCreate
    *                true if call found no entry; false if updating an existing
    *                entry
    * @param generateCallbacks
    * @param p_localValue
-   *                the value retrieved from the region for this object.
+*                the value retrieved from the region for this object.
    * @param disableCopyOnRead if true then do not make a copy
    * @param preferCD true if the preferred result form is CachedDeserializable
    * @param clientEvent the client event, if any
    * @param returnTombstones whether to return tombstones
    */
   @Retained
-  Object nonTxnFindObject(KeyInfo keyInfo, boolean p_isCreate,
-      boolean generateCallbacks, Object p_localValue, boolean disableCopyOnRead, boolean preferCD,
-      ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) 
+  Object nonTxnFindObject(KeyInfo keyInfo,
+                          boolean p_isCreate,
+                          boolean generateCallbacks,
+                          Object p_localValue,
+                          boolean disableCopyOnRead,
+                          boolean preferCD,
+                          ClientProxyMembershipID requestingClient,
+                          EntryEventImpl clientEvent,
+                          boolean returnTombstones)
       throws TimeoutException, CacheLoaderException
   {
     final Object key = keyInfo.getKey();
@@ -1606,7 +1548,8 @@ public class LocalRegion extends AbstractRegion
     try {
       boolean partitioned = this.getDataPolicy().withPartitioning();
       if (!partitioned) {
-        localValue = getDeserializedValue(null, keyInfo, isCreate, disableCopyOnRead, preferCD, clientEvent, false, false/*allowReadFromHDFS*/, false);
+        localValue = getDeserializedValue(null, keyInfo, isCreate, disableCopyOnRead, preferCD, clientEvent, false,
+          false);
 
         // stats have now been updated
         if (localValue != null && !Token.isInvalid(localValue)) {
@@ -1615,7 +1558,7 @@ public class LocalRegion extends AbstractRegion
         }
         isCreate = localValue == null;
         result = findObjectInSystem(keyInfo, isCreate, null, generateCallbacks,
-            localValue, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, false/*allowReadFromHDFS*/);
+            localValue, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones);
 
       } else {
         
@@ -1623,7 +1566,7 @@ public class LocalRegion extends AbstractRegion
         // For PRs we don't want to deserialize the value and we can't use findObjectInSystem because
         // it can invoke code that is transactional.
         result = getSharedDataView().findObject(keyInfo, this, true/*isCreate*/, generateCallbacks,
-            localValue, disableCopyOnRead, preferCD, null, null, false, allowReadFromHDFS);
+            localValue, disableCopyOnRead, preferCD, null, null, false);
          // TODO why are we not passing the client event or returnTombstones in the above invocation?
       }
 
@@ -1806,7 +1749,6 @@ public class LocalRegion extends AbstractRegion
   public final EntryEventImpl newPutEntryEvent(Object key, Object value,
       Object aCallbackArgument) {
     EntryEventImpl ev = newUpdateEntryEvent(key, value, aCallbackArgument);
-    ev.setFetchFromHDFS(false);
     ev.setPutDML(true);
     return ev;
   }
@@ -1938,23 +1880,11 @@ public class LocalRegion extends AbstractRegion
     }
   }
 
-  protected boolean includeHDFSResults() {
-    return isUsedForPartitionedRegionBucket() 
-        && isHDFSReadWriteRegion() 
-        && getPartitionedRegion().includeHDFSResults();
-  }
-  
-
   /** a fast estimate of total number of entries locally in the region */
   public long getEstimatedLocalSize() {
     RegionMap rm;
     if (!this.isDestroyed) {
       long size;
-      if (isHDFSReadWriteRegion() && this.initialized) {
-        // this size is not used by HDFS region iterators
-        // fixes bug 49239
-        return 0;
-      }
       // if region has not been initialized yet, then get the estimate from
       // disk region's recovery map if available
       if (!this.initialized && this.diskRegion != null
@@ -2266,9 +2196,6 @@ public class LocalRegion extends AbstractRegion
       if (this.imageState.isClient() && !this.concurrencyChecksEnabled) {
         return result - this.imageState.getDestroyedEntriesCount();
       }
-	if (includeHDFSResults()) {
-      return result;
-    }
       return result - this.tombstoneCount.get();
     }
   }
@@ -3004,11 +2931,18 @@ public class LocalRegion extends AbstractRegion
    * @param clientEvent the client's event, if any.  If not null, we set the version tag
    * @param returnTombstones TODO
    * @return the deserialized value
-   * @see DistributedRegion#findObjectInSystem(KeyInfo, boolean, TXStateInterface, boolean, Object, boolean, boolean, ClientProxyMembershipID, EntryEventImpl, boolean, boolean )
-   */
-  protected Object findObjectInSystem(KeyInfo keyInfo, boolean isCreate,
-      TXStateInterface tx, boolean generateCallbacks, Object localValue, boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient,
-      EntryEventImpl clientEvent, boolean returnTombstones,  boolean allowReadFromHDFS)
+   * @see LocalRegion#findObjectInSystem(KeyInfo, boolean, TXStateInterface, boolean, Object, boolean, boolean, ClientProxyMembershipID, EntryEventImpl, boolean)
+   */
+  protected Object findObjectInSystem(KeyInfo keyInfo,
+                                      boolean isCreate,
+                                      TXStateInterface tx,
+                                      boolean generateCallbacks,
+                                      Object localValue,
+                                      boolean disableCopyOnRead,
+                                      boolean preferCD,
+                                      ClientProxyMembershipID requestingClient,
+                                      EntryEventImpl clientEvent,
+                                      boolean returnTombstones)
       throws CacheLoaderException, TimeoutException
   {
     final Object key = keyInfo.getKey();
@@ -5383,9 +5317,6 @@ public class LocalRegion extends AbstractRegion
     // Notify bridge clients (if this is a BridgeServer)
     event.setEventType(eventType);
     notifyBridgeClients(event);
-  if (this.hdfsStoreName != null) {
-    notifyGatewaySender(eventType, event);
-    }
     if(callDispatchListenerEvent){
       dispatchListenerEvent(eventType, event);
     }
@@ -7271,24 +7202,8 @@ public class LocalRegion extends AbstractRegion
     if (generateEventID()) {
       event.setNewEventId(cache.getDistributedSystem());
     }
-    event.setFetchFromHDFS(false);
-    return event;
-  }
-  
-  @Retained
-  protected EntryEventImpl generateCustomEvictDestroyEvent(final Object key) {
-    @Retained EntryEventImpl event =  EntryEventImpl.create(
-        this, Operation.CUSTOM_EVICT_DESTROY, key, null/* newValue */,
-        null, false, getMyId());
-    
-    // Fix for bug#36963
-    if (generateEventID()) {
-      event.setNewEventId(cache.getDistributedSystem());
-    }
-    event.setFetchFromHDFS(false);
     return event;
   }
-  
   /**
    * @return true if the evict destroy was done; false if it was not needed
    */
@@ -9941,8 +9856,6 @@ public class LocalRegion extends AbstractRegion
       }
     }
     
-    clearHDFSData();
-    
     if (!isProxy()) {
       // Now we need to recreate all the indexes.
       //If the indexManager is null we don't have to worry
@@ -9981,11 +9894,6 @@ public class LocalRegion extends AbstractRegion
     }
   }
 
-  /**Clear HDFS data, if present */
-  protected void clearHDFSData() {
-    //do nothing, clear is implemented for subclasses like BucketRegion.
-  }
-
   @Override
   void basicLocalClear(RegionEventImpl rEvent)
   {
@@ -10762,7 +10670,6 @@ public class LocalRegion extends AbstractRegion
   }
     public final DistributedPutAllOperation newPutAllForPUTDmlOperation(Map<?, ?> map, Object callbackArg) {
     DistributedPutAllOperation dpao = newPutAllOperation(map, callbackArg);
-    dpao.getEvent().setFetchFromHDFS(false);
     dpao.getEvent().setPutDML(true);
     return dpao;
   }
@@ -10818,7 +10725,6 @@ public class LocalRegion extends AbstractRegion
         putallOp, this, Operation.PUTALL_CREATE, key, value);
 
     try {
-	event.setFetchFromHDFS(putallOp.getEvent().isFetchFromHDFS());
     event.setPutDML(putallOp.getEvent().isPutDML());
     
     if (tagHolder != null) {
@@ -12921,22 +12827,6 @@ public class LocalRegion extends AbstractRegion
   public Integer getCountNotFoundInLocal() {
     return countNotFoundInLocal.get();
   }
-  /// End of Variables and methods for test Hook for HDFS ///////
-  public void forceHDFSCompaction(boolean isMajor, Integer maxWaitTime) {
-    throw new UnsupportedOperationException(
-        LocalizedStrings.HOPLOG_DOES_NOT_USE_HDFSSTORE
-            .toLocalizedString(getName()));
-  }
-
-  public void flushHDFSQueue(int maxWaitTime) {
-    throw new UnsupportedOperationException(
-        LocalizedStrings.HOPLOG_DOES_NOT_USE_HDFSSTORE
-            .toLocalizedString(getName()));
-  }
-  
-  public long lastMajorHDFSCompaction() {
-    throw new UnsupportedOperationException();
-  }
 
   public static void simulateClearForTests(boolean flag) {
     simulateClearForTests = flag;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegionDataView.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegionDataView.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegionDataView.java
index 5193a17..c26ff10 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegionDataView.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegionDataView.java
@@ -17,7 +17,6 @@
 package com.gemstone.gemfire.internal.cache;
 
 import java.util.Collection;
-import java.util.Iterator;
 import java.util.Set;
 
 import com.gemstone.gemfire.cache.EntryNotFoundException;
@@ -36,9 +35,16 @@ public class LocalRegionDataView implements InternalDataView {
   /* (non-Javadoc)
    * @see com.gemstone.gemfire.internal.cache.InternalDataView#getDeserializedValue(java.lang.Object, com.gemstone.gemfire.internal.cache.LocalRegion, boolean)
    */
-  public Object getDeserializedValue(KeyInfo keyInfo, LocalRegion localRegion,
-      boolean updateStats, boolean disableCopyOnRead, boolean preferCD, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadsFromHDFS, boolean retainResult) {
-    return localRegion.getDeserializedValue(null, keyInfo, updateStats, disableCopyOnRead, preferCD, clientEvent, returnTombstones, allowReadsFromHDFS, retainResult);
+  public Object getDeserializedValue(KeyInfo keyInfo,
+                                     LocalRegion localRegion,
+                                     boolean updateStats,
+                                     boolean disableCopyOnRead,
+                                     boolean preferCD,
+                                     EntryEventImpl clientEvent,
+                                     boolean returnTombstones,
+                                     boolean retainResult) {
+    return localRegion.getDeserializedValue(null, keyInfo, updateStats, disableCopyOnRead, preferCD, clientEvent, returnTombstones,
+      retainResult);
   }
 
   /* (non-Javadoc)
@@ -136,9 +142,17 @@ public class LocalRegionDataView implements InternalDataView {
   /* (non-Javadoc)
    * @see com.gemstone.gemfire.internal.cache.InternalDataView#findObject(com.gemstone.gemfire.internal.cache.LocalRegion, java.lang.Object, java.lang.Object, boolean, boolean, java.lang.Object)
    */
-  public Object findObject(KeyInfo keyInfo, LocalRegion r, boolean isCreate,
-      boolean generateCallbacks, Object value, boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) {
-   return r.nonTxnFindObject(keyInfo, isCreate, generateCallbacks, value, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, allowReadFromHDFS);
+  public Object findObject(KeyInfo keyInfo,
+                           LocalRegion r,
+                           boolean isCreate,
+                           boolean generateCallbacks,
+                           Object value,
+                           boolean disableCopyOnRead,
+                           boolean preferCD,
+                           ClientProxyMembershipID requestingClient,
+                           EntryEventImpl clientEvent,
+                           boolean returnTombstones) {
+   return r.nonTxnFindObject(keyInfo, isCreate, generateCallbacks, value, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones);
   }
 
   /* (non-Javadoc)
@@ -180,7 +194,12 @@ public class LocalRegionDataView implements InternalDataView {
    * (non-Javadoc)
    * @see com.gemstone.gemfire.internal.cache.InternalDataView#getSerializedValue(com.gemstone.gemfire.internal.cache.BucketRegion, java.lang.Object, java.lang.Object)
    */
-  public Object getSerializedValue(LocalRegion localRegion, KeyInfo key, boolean doNotLockEntry, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) throws DataLocationException {
+  public Object getSerializedValue(LocalRegion localRegion,
+                                   KeyInfo key,
+                                   boolean doNotLockEntry,
+                                   ClientProxyMembershipID requestingClient,
+                                   EntryEventImpl clientEvent,
+                                   boolean returnTombstones) throws DataLocationException {
     throw new IllegalStateException();
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/NonLocalRegionEntry.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/NonLocalRegionEntry.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/NonLocalRegionEntry.java
index bb83383..4c1fa7f 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/NonLocalRegionEntry.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/NonLocalRegionEntry.java
@@ -461,26 +461,6 @@ public class NonLocalRegionEntry implements RegionEntry, VersionStamp {
   }
 
   @Override
-  public boolean isMarkedForEviction() {
-    throw new UnsupportedOperationException(LocalizedStrings
-        .PartitionedRegion_NOT_APPROPRIATE_FOR_PARTITIONEDREGIONNONLOCALREGIONENTRY
-            .toLocalizedString());
-  }
-  @Override
-  public void setMarkedForEviction() {
-    throw new UnsupportedOperationException(LocalizedStrings
-        .PartitionedRegion_NOT_APPROPRIATE_FOR_PARTITIONEDREGIONNONLOCALREGIONENTRY
-            .toLocalizedString());
-  }
-
-  @Override
-  public void clearMarkedForEviction() {
-    throw new UnsupportedOperationException(LocalizedStrings
-        .PartitionedRegion_NOT_APPROPRIATE_FOR_PARTITIONEDREGIONNONLOCALREGIONENTRY
-            .toLocalizedString());
-  }
-
-  @Override
   public boolean isValueNull() {
     return (null == getValueAsToken());
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/Oplog.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/Oplog.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/Oplog.java
index fe8813e..4728594 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/Oplog.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/Oplog.java
@@ -7384,19 +7384,6 @@ public final class Oplog implements CompactableOplog, Flushable {
       // TODO Auto-generated method stub
     }
     @Override
-    public boolean isMarkedForEviction() {
-      // TODO Auto-generated method stub
-      return false;
-    }
-    @Override
-    public void setMarkedForEviction() {
-      // TODO Auto-generated method stub
-    }
-    @Override
-    public void clearMarkedForEviction() {
-      // TODO Auto-generated method stub
-    }
-    @Override
     public boolean isInvalid() {
       // TODO Auto-generated method stub
       return false;


[09/63] [abbrv] incubator-geode git commit: GEODE-1072: Removing HDFS related code

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogOrganizer.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogOrganizer.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogOrganizer.java
deleted file mode 100644
index f7d746d..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogOrganizer.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.Iterator;
-import java.util.concurrent.Future;
-
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.QueuedPersistentEvent;
-import com.gemstone.gemfire.internal.cache.ForceReattemptException;
-
-/**
- * Manages bucket level operations on sorted oplog files including creation, reading, serde, bloom
- * buffering and compaction. Abstracts existence of multiple sorted oplog files
- */
-public interface HoplogOrganizer<T extends PersistedEventImpl> extends HoplogSetReader<byte[], T>,
-    HoplogListener, Closeable {
-
-  /**
-   * Iterates on the input buffer and persists it in a new sorted oplog. This invocation may block
-   * if there are too many outstanding write requests.
-   * 
-   * @param bufferIter
-   *          ordered iterator on a buffer of objects to be persisted
-   * @param count
-   *          number of K,V pairs expected to be part of flush, 0 if unknown
-   * @throws IOException
-   */
-  public void flush(Iterator<? extends QueuedPersistentEvent> bufferIter, int count) 
-      throws IOException, ForceReattemptException;
-  
-  
-  /**
-   * Clear the data in HDFS. This method assumes that the
-   * dispatcher thread has already been paused, so there should be
-   * no concurrent flushes to HDFS when this method is called.
-   * 
-   * @throws IOException
-   */
-  public void clear() throws IOException;
-
-  /**
-   * returns the compactor associated with this set
-   */
-  public Compactor getCompactor();
-  
-  /**
-   * Called to execute bucket maintenance activities, like purge expired files
-   * and create compaction task. Long running activities must be executed
-   * asynchronously, not on this thread, to avoid impact on other buckets
-   * @throws IOException 
-   */
-  public void performMaintenance() throws IOException;
-
-  /**
-   * Schedules a compaction task and returns immediately.
-   * 
-   * @param isMajor true for major compaction, false for minor compaction
-   * @return future for status of compaction request
-   */
-  public Future<CompactionStatus> forceCompaction(boolean isMajor);
-
-  /**
-   * Returns the timestamp of the last completed major compaction
-   * 
-   * @return the timestamp or 0 if a major compaction has not taken place yet
-   */
-  public long getLastMajorCompactionTimestamp();
-
-  public interface Compactor {
-    /**
-     * Requests a compaction operation be performed on this set of sorted oplogs.
-     *
-     * @param isMajor true for major compaction
-     * @param isForced true if the compaction should be carried out even if there
-     * is only one hoplog to compact
-     * 
-     * @return true if compaction was performed, false otherwise
-     * @throws IOException
-     */
-    boolean compact(boolean isMajor, boolean isForced) throws IOException;
-
-    /**
-     * Stop the current compaction operation in the middle and suspend
-     * compaction operations. The current compaction data
-     * will be thrown away, and no more compaction will be performed
-     * until resume is called. 
-     */
-    void suspend();
-    
-    /**
-     * Resume compaction operations. 
-     */
-    void resume();
-
-    /**
-     * @return true if the compactor is not ready or busy
-     */
-    boolean isBusy(boolean isMajor);
-
-    /**
-     * @return the hdfsStore configuration used by this compactor
-     */
-    public HDFSStore getHdfsStore();
-  }
-}

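The removed organizer let callers hand off a flush or compaction request and poll a Future for its outcome (see flush and forceCompaction above). A minimal, self-contained sketch of that request/Future pattern using plain java.util.concurrent types; CompactionService and Status are hypothetical names, not the removed Geode classes:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    class CompactionService {
      enum Status { COMPLETED, SKIPPED }

      private final ExecutorService pool = Executors.newSingleThreadExecutor();

      // schedules the work and returns immediately, like forceCompaction(boolean)
      Future<Status> forceCompaction(boolean isMajor) {
        return pool.submit(() -> isMajor ? Status.COMPLETED : Status.SKIPPED);
      }

      public static void main(String[] args) throws Exception {
        CompactionService svc = new CompactionService();
        Future<Status> result = svc.forceCompaction(true);
        System.out.println(result.get()); // the caller blocks only when it asks for the result
        svc.pool.shutdown();
      }
    }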
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogSetIterator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogSetIterator.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogSetIterator.java
deleted file mode 100644
index 16939db..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogSetIterator.java
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.NoSuchElementException;
-
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HFileSortedOplog.HFileReader.HFileSortedIterator;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetReader.HoplogIterator;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.ByteComparator;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.TrackedReference;
-
-/**
- * Provides a merged iterator on set of {@link HFileSortedOplog}
- */
-public class HoplogSetIterator implements HoplogIterator<ByteBuffer, ByteBuffer> {
-  private final List<HFileSortedIterator> iters;
-
-  // Number of entries remaining to be iterated by this scanner
-  private int entriesRemaining;
-
-  // points at the current iterator holding the next entry
-  private ByteBuffer currentKey;
-  private ByteBuffer currentValue;
-
-  public HoplogSetIterator(List<TrackedReference<Hoplog>> targets) throws IOException {
-    iters = new ArrayList<HFileSortedIterator>();
-    for (TrackedReference<Hoplog> oplog : targets) {
-      HFileSortedIterator iter = (HFileSortedIterator) oplog.get().getReader().scan();
-      if (!iter.hasNext()) {
-        // the oplog is empty, exclude from iterator
-        continue;
-      }
-
-      // initialize the iterator
-      iter.nextBB();
-      iters.add(iter);
-      entriesRemaining += oplog.get().getReader().getEntryCount();
-    }
-  }
-
-  public boolean hasNext() {
-    return entriesRemaining > 0;
-  }
-
-  @Override
-  public ByteBuffer next() throws IOException {
-    return nextBB();
-  }
-  public ByteBuffer nextBB() throws IOException {
-    if (!hasNext()) {
-      throw new NoSuchElementException();
-    }
-
-    seekToMinKeyIter();
-
-    return currentKey;
-  }
-
-  private void seekToMinKeyIter() throws IOException {
-    HFileSortedIterator currentIter = null;
-    ByteBuffer minKey = null;
-
-    // scan through all hoplog iterators to reach to the iterator with smallest
-    // key on the head and remove duplicate keys
-    for (Iterator<HFileSortedIterator> iterator = iters.iterator(); iterator.hasNext();) {
-      HFileSortedIterator iter = iterator.next();
-      
-      ByteBuffer tmpK = iter.getKeyBB();
-      ByteBuffer tmpV = iter.getValueBB();
-      if (minKey == null || ByteComparator.compareBytes(tmpK.array(), tmpK.arrayOffset(), tmpK.remaining(), minKey.array(), minKey.arrayOffset(), minKey.remaining()) < 0) {
-        minKey = tmpK;
-        currentKey = tmpK;
-        currentValue = tmpV;
-        currentIter = iter;
-      } else {
-        // remove possible duplicate key entries from iterator
-        if (seekHigherKeyInIter(minKey, iter) == null) {
-          // no more keys left in this iterator
-          iter.close();
-          iterator.remove();
-        }
-      }
-    }
-    
-    //seek next key in current iter
-    if (currentIter != null && seekHigherKeyInIter(minKey, currentIter) == null) {
-      // no more keys left in this iterator
-      currentIter.close();
-      iters.remove(currentIter);
-    }
-  }
-
-  private ByteBuffer seekHigherKeyInIter(ByteBuffer key, HFileSortedIterator iter) throws IOException {
-    ByteBuffer newK = iter.getKeyBB();
-
-    // remove all duplicates by incrementing iterator when a key is less than
-    // equal to current key
-    while (ByteComparator.compareBytes(newK.array(), newK.arrayOffset(), newK.remaining(), key.array(), key.arrayOffset(), key.remaining()) <= 0) {
-      entriesRemaining--;
-      if (iter.hasNext()) {
-        newK = iter.nextBB();
-      } else {
-        newK = null;
-        break;
-      }
-    }
-    return newK;
-  }
-
-  @Override
-  public ByteBuffer getKey() {
-    return getKeyBB();
-  }
-  public ByteBuffer getKeyBB() {
-    if (currentKey == null) {
-      throw new IllegalStateException();
-    }
-    return currentKey;
-  }
-
-  @Override
-  public ByteBuffer getValue() {
-    return getValueBB();
-  }
-  public ByteBuffer getValueBB() {
-    if (currentValue == null) {
-      throw new IllegalStateException();
-    }
-    return currentValue;
-  }
-
-  @Override
-  public void remove() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public void close() {
-    for (HoplogIterator<byte[], byte[]> iter : iters) {
-      iter.close();
-    }
-  }
-
-  public int getRemainingEntryCount() {
-    return entriesRemaining;
-  }
-}

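The class deleted above is essentially a k-way merge: it repeatedly advances the source whose head key is smallest and skips duplicate keys seen in the other sources. The same idea can be shown in a compact, self-contained form over in-memory sorted lists (purely illustrative; this is not the removed HoplogSetIterator):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;
    import java.util.PriorityQueue;

    class MergeSketch {
      // one source = an iterator plus its current head element
      private static final class Source {
        final Iterator<String> it;
        String head;
        Source(Iterator<String> it) { this.it = it; this.head = it.next(); }
        boolean advance() { head = it.hasNext() ? it.next() : null; return head != null; }
      }

      static List<String> merge(List<List<String>> sortedInputs) {
        PriorityQueue<Source> heap = new PriorityQueue<>((a, b) -> a.head.compareTo(b.head));
        for (List<String> in : sortedInputs) {
          if (!in.isEmpty()) heap.add(new Source(in.iterator()));
        }
        List<String> out = new ArrayList<>();
        while (!heap.isEmpty()) {
          Source min = heap.poll();
          if (out.isEmpty() || !out.get(out.size() - 1).equals(min.head)) {
            out.add(min.head); // duplicate keys across inputs are emitted only once
          }
          if (min.advance()) heap.add(min);
        }
        return out;
      }

      public static void main(String[] args) {
        System.out.println(merge(Arrays.asList(
            Arrays.asList("a", "c", "e"),
            Arrays.asList("b", "c", "d")))); // prints [a, b, c, d, e]
      }
    }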
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogSetReader.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogSetReader.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogSetReader.java
deleted file mode 100644
index 789a616..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogSetReader.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.Iterator;
-
-/**
- * Reads a sorted oplog file or a merged set of sorted oplogs.
- */
-public interface HoplogSetReader<K, V> {
-  /**
-   * Returns the value associated with the given key.
-   */
-  V read(K key) throws IOException;
-
-  /**
-   * Iterates over the entire contents of the sorted file.
-   * 
-   * @return the sorted iterator
-   * @throws IOException
-   */
-  HoplogIterator<K, V> scan() throws IOException;
-
-  /**
-   * Scans the available keys and allows iteration over the interval [from, to) where the starting
-   * key is included and the ending key is excluded from the results.
-   * 
-   * @param from
-   *          the start key
-   * @param to
-   *          the end key
-   * @return the sorted iterator
-   * @throws IOException
-   */
-  HoplogIterator<K, V> scan(K from, K to) throws IOException;
-
-  /**
-   * Scans the keys and allows iteration between the given keys.
-   * 
-   * @param from
-   *          the start key
-   * @param fromInclusive
-   *          true if the start key is included in the scan
-   * @param to
-   *          the end key
-   * @param toInclusive
-   *          true if the end key is included in the scan
-   * @return the sorted iterator
-   * @throws IOException
-   */
-  HoplogIterator<K, V> scan(K from, boolean fromInclusive, K to, boolean toInclusive) throws IOException;
-  
-  
-  /**
-   * Scans the available keys and allows iteration over the offset range
-   *          specified by the parameters
-   * 
-   * 
-   * @param startOffset
-   *          the start offset
-   * @param length
-   *          bytes to read
-   * @return the sorted iterator
-   * @throws IOException
-   */
-  HoplogIterator<K, V> scan(long startOffset, long length) throws IOException;
-
-  /**
-   * Uses a cardinality estimator to provide an approximate number of entries
-   * 
-   * @return the number of entries
-   */
-  long sizeEstimate();
-
-  /**
-   * Returns true if the reader has been closed.
-   * @return true if closed
-   */
-  boolean isClosed();
-
-  /**
-   * Allows sorted iteration through a set of keys and values.
-   */
-  public interface HoplogIterator<K, V> {
-    K getKey();
-
-    V getValue();
-
-    /** moves to next element and returns the key object */
-    K next() throws IOException;
-    
-    boolean hasNext();
-    
-    void close();
-    
-    void remove();
-  }
-}

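The scan overloads declared above describe key intervals that are half-open by default ([from, to)) plus a variant with explicit inclusive/exclusive bounds. The same interval semantics exist in the standard NavigableMap API, which makes for a small stand-alone illustration (unrelated to the removed reader):

    import java.util.NavigableMap;
    import java.util.TreeMap;

    class RangeScanSketch {
      public static void main(String[] args) {
        NavigableMap<String, Integer> map = new TreeMap<>();
        map.put("a", 1);
        map.put("b", 2);
        map.put("c", 3);

        // [from, to): start key included, end key excluded, like scan(K from, K to)
        System.out.println(map.subMap("a", true, "c", false));  // {a=1, b=2}

        // explicit bounds, like scan(K from, boolean fromInclusive, K to, boolean toInclusive)
        System.out.println(map.subMap("a", false, "c", true));  // {b=2, c=3}
      }
    }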
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/SequenceFileHoplog.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/SequenceFileHoplog.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/SequenceFileHoplog.java
deleted file mode 100644
index a2926ff..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/SequenceFileHoplog.java
+++ /dev/null
@@ -1,395 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-  
-import java.io.Closeable;
-import java.io.EOFException;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.EnumMap;
-
-import com.gemstone.gemfire.internal.hll.ICardinality;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.BytesWritable;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.Text;
-
-import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetReader.HoplogIterator;
-import com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile;
-import com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Reader;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
-import com.gemstone.gemfire.internal.Version;
-
-import org.apache.logging.log4j.Logger;
-
-/**
- * Implements Sequence file based {@link Hoplog}
- * 
- *
- */
-public class SequenceFileHoplog extends AbstractHoplog{
-  
-   public SequenceFileHoplog(FileSystem inputFS, Path filePath,  
-      SortedOplogStatistics stats)
-  throws IOException
-  {
-     super(inputFS, filePath, stats);
-  }
-  @Override
-  public void close() throws IOException {
-    // Nothing to do 
-  }
-
-  @Override
-  public HoplogReader getReader() throws IOException {
-    return new SequenceFileReader();
-  }
-
-  @Override
-  /**
-   * gets the writer for sequence file. 
-   * 
-   * @param keys is not used for SequenceFileHoplog class 
-   */
-  public HoplogWriter createWriter(int keys) throws IOException {
-    return new SequenceFileHoplogWriter();
-  }
-
-  @Override
-  public boolean isClosed() {
-    return false;
-  }
-  
-  @Override
-  public void close(boolean clearCache) throws IOException {
-    // Nothing to do 
-  }
-
-  /**
-   * Currently, hsync does not update the file size on the namenode. So, if the process
-   * previously died after calling hsync but before closing the file, the file is
-   * left with an inconsistent file size. The workaround is to open the file stream in append
-   * mode and close it. This fixes the file size on the namenode.
-   * 
-   * @throws IOException
-   * @return true if the file size was fixed 
-   */
-  public boolean fixFileSize() throws IOException {
-    // Try to fix the file size
-    // Loop so that the expected exceptions can be ignored
-    // three times
-    if (logger.isDebugEnabled())
-      logger.debug("{}Fixing size of hoplog " + path, logPrefix);
-    Exception e = null;
-    boolean exceptionThrown = false;
-    for (int i =0; i < 3; i++) {
-      try {
-        FSDataOutputStream stream = fsProvider.getFS().append(path);
-        stream.close();
-        stream = null;
-      } catch (IOException ie) {
-        exceptionThrown = true;
-        e = ie;
-        if (logger.isDebugEnabled())
-        logger.debug("{}Retry run " + (i + 1) + ": Hoplog " + path + " is still a temporary " +
-            "hoplog because the node managing it wasn't shutdown properly last time. Failed to " +
-            "fix the hoplog because an exception was thrown " + e, logPrefix );
-      }
-      // As either RecoveryInProgressException was thrown or 
-      // Already being created exception was thrown, wait for
-      // some time before the next retry.
-      if (exceptionThrown) {
-        try {
-          Thread.sleep(5000);
-        } catch (InterruptedException e1) {
-        } 
-        exceptionThrown = false;
-      } else {
-        // no exception was thrown, break;
-        return true;
-      }
-    }
-    logger.info (logPrefix, LocalizedMessage.create(LocalizedStrings.DEBUG, "Hoplog " + path + " is still a temporary " +
-        "hoplog because the node managing it wasn't shutdown properly last time. Failed to " +
-        "fix the hoplog because an exception was thrown " + e));
-    
-    return false;
-  }
-  
-  @Override
-  public String toString() {
-    return "SequenceFileHplog[" + getFileName() + "]";
-  }
-  
-  private class SequenceFileHoplogWriter implements HoplogWriter {
-    
-    private SequenceFile.Writer writer = null;
-    
-    public SequenceFileHoplogWriter() throws IOException{
-      writer = AbstractHoplog.getSequenceFileWriter(path, conf, logger);
-    }
-   
-    @Override
-    public void close() throws IOException {
-      writer.close();
-      if (logger.isDebugEnabled())
-        logger.debug("{}Completed creating hoplog " + path, logPrefix);
-    }
-    
-    @Override
-    public void hsync() throws IOException {
-      writer.hsyncWithSizeUpdate();
-      if (logger.isDebugEnabled())
-        logger.debug("{}hsync'ed a batch of data to hoplog " + path, logPrefix);
-    }
-    
-    @Override
-    public void append(byte[] key, byte[] value) throws IOException {
-      writer.append(new BytesWritable(key), new BytesWritable(value));
-    }
-
-    @Override
-    public void append(ByteBuffer key, ByteBuffer value) throws IOException {
-      throw new UnsupportedOperationException("Not supported for Sequence files");
-    }
-
-    @Override
-    public void close(EnumMap<Meta, byte[]> metadata) throws IOException {
-      throw new UnsupportedOperationException("Not supported for Sequence files");
-    }
-    @Override
-    public long getCurrentSize() throws IOException {
-      return writer.getLength();
-    }
-    
-  }
-  /**
-   * Sequence file reader. This is currently to be used only by MapReduce jobs and 
-   * test functions
-   * 
-   */
-  public class SequenceFileReader implements HoplogReader, Closeable {
-    @Override
-    public byte[] read(byte[] key) throws IOException {
-      throw new UnsupportedOperationException("Not supported for Sequence files");
-    }
-
-    @Override
-    public HoplogIterator<byte[], byte[]> scan()
-        throws IOException {
-      return  new SequenceFileIterator(fsProvider.getFS(), path, 0, Long.MAX_VALUE, conf, logger);
-    }
-
-    @Override
-    public HoplogIterator<byte[], byte[]> scan(
-        byte[] from, byte[] to) throws IOException {
-      throw new UnsupportedOperationException("Not supported for Sequence files");
-    }
-    
-    @Override
-    public HoplogIterator<byte[], byte[]> scan(
-        long startOffset, long length) throws IOException {
-      return  new SequenceFileIterator(fsProvider.getFS(), path, startOffset, length, conf, logger);
-    }
-    
-    @Override
-    public HoplogIterator<byte[], byte[]> scan(
-        byte[] from, boolean fromInclusive, byte[] to, boolean toInclusive)
-        throws IOException {
-      throw new UnsupportedOperationException("Not supported for Sequence files");
-    }
-
-    @Override
-    public boolean isClosed() {
-      throw new UnsupportedOperationException("Not supported for Sequence files.");
-    }
-    
-    @Override
-    public void close() throws IOException {
-      throw new UnsupportedOperationException("Not supported for Sequence files. Close the iterator instead.");
-    }
-
-    @Override
-    public ByteBuffer get(byte[] key) throws IOException {
-      throw new UnsupportedOperationException("Not supported for Sequence files");
-    }
-
-    @Override
-    public BloomFilter getBloomFilter() throws IOException {
-      throw new UnsupportedOperationException("Not supported for Sequence files");
-    }
-
-    @Override
-    public long getEntryCount() {
-      throw new UnsupportedOperationException("Not supported for Sequence files");
-    }
-
-    @Override
-    public ICardinality getCardinalityEstimator() {
-      throw new UnsupportedOperationException("Not supported for Sequence files");
-    }
-
-    @Override
-    public long sizeEstimate() {
-      throw new UnsupportedOperationException("Not supported for Sequence files");
-    }
-
-
-  }
-  
-  /**
-   * Sequence file iterator. This is currently to be used only by MapReduce jobs and 
-   * test functions
-   * 
-   */
-  public static class SequenceFileIterator implements HoplogIterator<byte[], byte[]> {
-    
-    SequenceFile.Reader reader = null;
-    private BytesWritable prefetchedKey = null;
-    private BytesWritable prefetchedValue = null;
-    private byte[] currentKey;
-    private byte[] currentValue;
-    boolean hasNext = false;
-    Logger logger; 
-    Path path;
-    private long start;
-    private long end;
-    
-    public SequenceFileIterator(FileSystem fs, Path path, long startOffset, 
-        long length, Configuration conf, Logger logger) 
-        throws IOException {
-      Reader.Option optPath = SequenceFile.Reader.file(path);
-      
-      // Hadoop has a configuration parameter io.serializations that is a list of serialization 
-      // classes which can be used for obtaining serializers and deserializers. This parameter 
-      // by default contains avro classes. When a sequence file is created, it calls 
-      // SerializationFactory.getSerializer(keyclass). This internally creates objects using 
-      // reflection of all the classes that were part of io.serializations. But since no avro
-      // class is available, it throws an exception.
-      // Before creating a sequenceFile, override the io.serializations parameter and pass only the classes 
-      // that are important to us. 
-      String serializations[] = conf.getStrings("io.serializations",
-          new String[]{"org.apache.hadoop.io.serializer.WritableSerialization"});
-      conf.setStrings("io.serializations",
-          new String[]{"org.apache.hadoop.io.serializer.WritableSerialization"});
-      // create reader
-      boolean emptyFile = false;
-      try {
-        reader = new SequenceFile.Reader(conf, optPath);
-      } catch (EOFException e) {
-        // This is OK; the file has ended and there are simply no more records to read.
-        emptyFile = true;
-      }
-      // reset the configuration to its original value 
-      conf.setStrings("io.serializations", serializations);
-      this.logger = logger;
-      this.path = path;
-      
-      if (emptyFile) {
-        hasNext = false;
-      } else {
-        // The file should be read from the first sync marker after the start position and 
-        // until the first sync marker after the end position is seen. 
-        this.end = startOffset + length;
-        if (startOffset > reader.getPosition()) {
-          reader.sync(startOffset);                  // sync to start
-        }
-        this.start = reader.getPosition();
-        this.hasNext = this.start < this.end;
-        if (hasNext)
-          readNext();
-      } 
-    }
-  
-
-    public Version getVersion(){
-      String version = reader.getMetadata().get(new Text(Meta.GEMFIRE_VERSION.name())).toString();
-      return Version.fromOrdinalOrCurrent(Short.parseShort(version)); 
-    }
-    @Override
-    public boolean hasNext() {
-      return hasNext;
-    }
-
-    @Override
-    public byte[] next() {
-      currentKey = prefetchedKey.getBytes();
-      currentValue = prefetchedValue.getBytes();
-      
-      readNext();
-
-      return currentKey;
-    }
-    
-    private void readNext() {
-      try {
-        long pos = reader.getPosition();
-        prefetchedKey = new BytesWritable();
-        prefetchedValue = new BytesWritable();
-        hasNext = reader.next(prefetchedKey, prefetchedValue);
-        // The file should be read from the first sync marker after the start position and 
-        // until the first sync marker after the end position is seen. 
-        if (pos >= end && reader.syncSeen()) {
-          hasNext = false;
-        }
-      } catch (EOFException e) {
-        // This is OK; the file has ended and there are no more records to read.
-        hasNext = false;
-      } 
-      catch (IOException e) {
-        hasNext = false;
-        logger.error(LocalizedMessage.create(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE, path), e);
-        throw new HDFSIOException(
-            LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(path), e);
-      }
-    }
-    @Override
-    public void remove() {
-      throw new UnsupportedOperationException("Not supported for Sequence files");
-    }
-
-    @Override
-    public void close() {
-      IOUtils.closeStream(reader);
-    }
-
-    @Override
-    public byte[] getKey() {
-      return currentKey;
-    }
-
-    @Override
-    public byte[] getValue() {
-      return currentValue;
-    }
-    
-    /** Returns true iff the previous call to next passed a sync mark.*/
-    public boolean syncSeen() { return reader.syncSeen(); }
-
-    /** Return the current byte position in the input file. */
-    public synchronized long getPosition() throws IOException {
-      return reader.getPosition();
-    }
-  }
-}
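
For reference, the io.serializations workaround described in the SequenceFileIterator constructor above can be seen in isolation in the following minimal sketch against the plain Hadoop SequenceFile API. The file path, key/value handling, and class name are illustrative assumptions, not part of the removed Geode code.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;

public class SequenceFileScanSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Remember the caller's serializer list, then restrict it to
    // WritableSerialization so that missing Avro classes cannot break the
    // reflective lookup performed by SerializationFactory.
    String[] saved = conf.getStrings("io.serializations",
        "org.apache.hadoop.io.serializer.WritableSerialization");
    conf.setStrings("io.serializations",
        "org.apache.hadoop.io.serializer.WritableSerialization");
    SequenceFile.Reader reader = new SequenceFile.Reader(conf,
        SequenceFile.Reader.file(new Path(args[0])));
    // Restore the original setting once the reader has been constructed.
    conf.setStrings("io.serializations", saved);
    try {
      BytesWritable key = new BytesWritable();
      BytesWritable value = new BytesWritable();
      while (reader.next(key, value)) {
        System.out.println("key bytes: " + key.getLength()
            + ", value bytes: " + value.getLength());
      }
    } finally {
      reader.close();
    }
  }
}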

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/AbstractGFRecordReader.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/AbstractGFRecordReader.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/AbstractGFRecordReader.java
deleted file mode 100644
index f5b63cc..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/AbstractGFRecordReader.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapred;
-
-import java.io.IOException;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.RecordReader;
-import org.apache.hadoop.mapred.lib.CombineFileSplit;
-
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.SortedHoplogPersistedEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.UnsortedHoplogPersistedEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.GFKey;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.HDFSSplitIterator;
-
-public class AbstractGFRecordReader
-    extends
-    com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.AbstractGFRecordReader
-    implements RecordReader<GFKey, PersistedEventImpl> {
-
-  /**
-   * Initializes instance of record reader using file split and job
-   * configuration
-   * 
-   * @param split
-   * @param conf
-   * @throws IOException
-   */
-  public void initialize(CombineFileSplit split, JobConf conf) throws IOException {
-    CombineFileSplit cSplit = (CombineFileSplit) split;
-    Path[] path = cSplit.getPaths();
-    long[] start = cSplit.getStartOffsets();
-    long[] len = cSplit.getLengths();
-
-    FileSystem fs = cSplit.getPath(0).getFileSystem(conf);
-    this.splitIterator = HDFSSplitIterator.newInstance(fs, path, start, len, 0l, 0l);
-  }
-
-  @Override
-  public boolean next(GFKey key, PersistedEventImpl value) throws IOException {
-    /*
-     * if there are more records in the hoplog, iterate to the next record. Set
-     * key object as is. 
-     */
-
-    if (!super.hasNext()) {
-      key.setKey(null);
-      // TODO make value null;
-      return false;
-    }
-
-    super.next();
-
-    key.setKey(super.getKey().getKey());
-    PersistedEventImpl usersValue = super.getValue();
-    value.copy(usersValue);
-    return true;
-  }
-
-  @Override
-  public GFKey createKey() {
-    return new GFKey();
-  }
-
-  @Override
-  public PersistedEventImpl createValue() {
-    if(this.isSequential) {
-      return new UnsortedHoplogPersistedEvent();
-    } else {
-      return new SortedHoplogPersistedEvent();
-    }
-  }
-
-  @Override
-  public long getPos() throws IOException {
-    // there is no efficient way to find the position of key in hoplog file.
-    return 0;
-  }
-
-  @Override
-  public void close() throws IOException {
-    super.close();
-  }
-
-  @Override
-  public float getProgress() throws IOException {
-    return super.getProgressRatio();
-  }
-}
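
The class above adapts the new-API reader to the old org.apache.hadoop.mapred RecordReader protocol, in which the caller obtains reusable key/value holders from createKey()/createValue() and drains the reader with next(). The hypothetical sketch below shows that same protocol with a stock LineRecordReader over a temporary local file standing in for the hoplog reader; it illustrates only the calling convention, not the removed Geode classes.

import java.io.IOException;
import java.nio.file.Files;
import java.util.Arrays;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.LineRecordReader;

public class OldApiRecordReaderSketch {
  public static void main(String[] args) throws IOException {
    // Write a tiny local file to read back through the old-API record reader.
    java.nio.file.Path file = Files.createTempFile("records", ".txt");
    Files.write(file, Arrays.asList("a,1", "b,2"));

    JobConf conf = new JobConf();
    FileSplit split = new FileSplit(
        new org.apache.hadoop.fs.Path(file.toString()), 0, Files.size(file),
        new String[0]);
    LineRecordReader reader = new LineRecordReader(conf, split);

    LongWritable key = reader.createKey();   // analogous to GFKey from createKey()
    Text value = reader.createValue();       // analogous to PersistedEventImpl
    while (reader.next(key, value)) {        // same next(key, value) contract
      System.out.println(key + " -> " + value);
    }
    reader.close();
  }
}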

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/GFInputFormat.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/GFInputFormat.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/GFInputFormat.java
deleted file mode 100644
index 0e0e455..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/GFInputFormat.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapred;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.List;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.mapred.InputFormat;
-import org.apache.hadoop.mapred.InputSplit;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.JobConfigurable;
-import org.apache.hadoop.mapred.RecordReader;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapred.lib.CombineFileSplit;
-
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.GFKey;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.HoplogUtil.HoplogOptimizedSplitter;
-
-public class GFInputFormat extends
-    com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.GFInputFormat
-    implements InputFormat<GFKey, PersistedEventImpl>, JobConfigurable {
-
-  @Override
-  public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
-    this.conf = job;
-
-    Collection<FileStatus> hoplogs = getHoplogs();
-    return createSplits(job, hoplogs);
-  }
-
-  /**
-   * Creates an input split for every block occupied by hoplogs of the input
-   * regions
-   * 
-   * @param job 
-   * @param hoplogs
-   * @return array of input splits of type file input split
-   * @throws IOException
-   */
-  private InputSplit[] createSplits(JobConf job, Collection<FileStatus> hoplogs)
-      throws IOException {
-    if (hoplogs == null || hoplogs.isEmpty()) {
-      return new InputSplit[0];
-    }
-
-    HoplogOptimizedSplitter splitter = new HoplogOptimizedSplitter(hoplogs);
-    List<org.apache.hadoop.mapreduce.InputSplit> mr2Splits = splitter.getOptimizedSplits(conf);
-    InputSplit[] splits = new InputSplit[mr2Splits.size()];
-    int i = 0;
-    for (org.apache.hadoop.mapreduce.InputSplit inputSplit : mr2Splits) {
-      org.apache.hadoop.mapreduce.lib.input.CombineFileSplit mr2Split;
-      mr2Split = (org.apache.hadoop.mapreduce.lib.input.CombineFileSplit) inputSplit;
-
-      CombineFileSplit split = new CombineFileSplit(job, mr2Split.getPaths(),
-          mr2Split.getStartOffsets(), mr2Split.getLengths(),
-          mr2Split.getLocations());
-      splits[i] = split;
-      i++;
-    }
-
-    return splits;
-  }
-
-  @Override
-  public RecordReader<GFKey, PersistedEventImpl> getRecordReader(
-      InputSplit split, JobConf job, Reporter reporter) throws IOException {
-
-    CombineFileSplit cSplit = (CombineFileSplit) split;
-    AbstractGFRecordReader reader = new AbstractGFRecordReader();
-    reader.initialize(cSplit, job);
-    return reader;
-  }
-
-  @Override
-  public void configure(JobConf job) {
-    this.conf = job;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/GFOutputFormat.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/GFOutputFormat.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/GFOutputFormat.java
deleted file mode 100644
index 1494e9f..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/GFOutputFormat.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapred;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.OutputFormat;
-import org.apache.hadoop.mapred.RecordWriter;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.util.Progressable;
-
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.client.ClientCache;
-
-/**
- * Output format for gemfire. The records provided to writers created by this
- * output format are PUT in a live gemfire cluster.
- * 
- */
-public class GFOutputFormat extends
-    com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.GFOutputFormat
-    implements OutputFormat<Object, Object> {
-
-  @Override
-  public RecordWriter<Object, Object> getRecordWriter(
-      FileSystem ignored, JobConf job, String name, Progressable progress)
-      throws IOException {
-    ClientCache cache = getClientCacheInstance(job);
-    return new GFRecordWriter(cache, job);
-  }
-  
-  @Override
-  public void checkOutputSpecs(FileSystem ignored, JobConf job)
-      throws IOException {
-    validateConfiguration(job);
-  }
-
-  public class GFRecordWriter implements RecordWriter<Object, Object> {
-    private ClientCache clientCache;
-    private Region<Object, Object> region;
-
-    public GFRecordWriter(ClientCache cache, Configuration conf) {
-      this.clientCache = cache;
-      region = getRegionInstance(conf, clientCache);
-    }
-    
-    @Override
-    public void write(Object key, Object value) throws IOException {
-      executePut(region, key, value);
-    }
-
-    @Override
-    public void close(Reporter reporter) throws IOException {
-      closeClientCache(clientCache);
-      // TODO update reporter
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/AbstractGFRecordReader.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/AbstractGFRecordReader.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/AbstractGFRecordReader.java
deleted file mode 100644
index 2c71b18..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/AbstractGFRecordReader.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.RecordReader;
-import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
-
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.SortedHoplogPersistedEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.UnsortedHoplogPersistedEvent;
-import com.gemstone.gemfire.internal.util.BlobHelper;
-
-public class AbstractGFRecordReader extends
-    RecordReader<GFKey, PersistedEventImpl> {
-
-  // constant overhead of each KV in hfile. This is used in computing the
-  // progress of record reader
-  protected long RECORD_OVERHEAD = 8;
-
-  // accounting for number of bytes already read from the hfile
-  private long bytesRead;
-  
-  protected boolean isSequential;
-  
-  protected HDFSSplitIterator splitIterator;
-
-  @Override
-  public void initialize(InputSplit split, TaskAttemptContext context)
-  throws IOException, InterruptedException {
-    CombineFileSplit cSplit = (CombineFileSplit) split;
-    Path[] path = cSplit.getPaths();
-    long[] start = cSplit.getStartOffsets();
-    long[] len = cSplit.getLengths();
-
-    Configuration conf = context.getConfiguration();
-    FileSystem fs = cSplit.getPath(0).getFileSystem(conf);
-    
-    this.splitIterator = HDFSSplitIterator.newInstance(fs, path, start, len, 0l, 0l);
-  }
-  
-  @Override
-  public boolean nextKeyValue() throws IOException, InterruptedException {
-    return next();
-  }
-
-  protected boolean next() throws IOException {
-    if (!hasNext()) {
-      return false;
-    }
-    
-    splitIterator.next();
-    bytesRead += (splitIterator.getKey().length + splitIterator.getValue().length);
-    bytesRead += RECORD_OVERHEAD;
-    return true;
-  }
-  
-  protected boolean hasNext() throws IOException {
-    return splitIterator.hasNext();
-  }
-
-  @Override
-  public GFKey getCurrentKey() throws IOException, InterruptedException {
-    return getKey();
-  }
-
-  protected GFKey getKey() throws IOException {
-    try {
-      GFKey key = new GFKey();
-      key.setKey(BlobHelper.deserializeBlob(splitIterator.getKey()));
-      return key;
-    } catch (ClassNotFoundException e) {
-      // TODO resolve logging
-      return null;
-    }
-  }
-
-  @Override
-  public PersistedEventImpl getCurrentValue() throws IOException,
-      InterruptedException {
-    return getValue();
-  }
-
-  protected PersistedEventImpl getValue() throws IOException {
-    try {
-      byte[] valueBytes = splitIterator.getValue();
-      if(isSequential) {
-        return UnsortedHoplogPersistedEvent.fromBytes(valueBytes);
-      } else {
-        return SortedHoplogPersistedEvent.fromBytes(valueBytes);
-      }
-    } catch (ClassNotFoundException e) {
-      // TODO resolve logging
-      return null;
-    }
-  }
-
-  @Override
-  public float getProgress() throws IOException, InterruptedException {
-    return getProgressRatio();
-  }
-
-  protected float getProgressRatio() throws IOException {
-    if (!splitIterator.hasNext()) {
-      return 1.0f;
-    } else if (bytesRead > splitIterator.getLength()) {
-      // The record reader is expected to read past the split length because it
-      // continues until the beginning of the next block. Once that extra reading
-      // has started, return a fixed value.
-      return 0.95f;
-    } else {
-      return Math.min(1.0f, bytesRead / (float) (splitIterator.getLength()));
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-    splitIterator.close();
-  }
-}
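
The progress calculation above reports bytesRead / splitLength, pinned to 0.95 once the reader has run past the end of its split and to 1.0 when the iterator is exhausted. A minimal, hypothetical restatement of that rule, with placeholder byte counts:

public class ProgressRatioSketch {
  // Mirrors getProgressRatio(): exhausted -> 1.0, overrun -> 0.95, else the ratio.
  static float progress(boolean hasNext, long bytesRead, long splitLength) {
    if (!hasNext) {
      return 1.0f;
    } else if (bytesRead > splitLength) {
      return 0.95f;
    }
    return Math.min(1.0f, bytesRead / (float) splitLength);
  }

  public static void main(String[] args) {
    System.out.println(progress(true, 32L << 20, 128L << 20));   // 0.25
    System.out.println(progress(true, 130L << 20, 128L << 20));  // 0.95
    System.out.println(progress(false, 130L << 20, 128L << 20)); // 1.0
  }
}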

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFInputFormat.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFInputFormat.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFInputFormat.java
deleted file mode 100644
index ff64ceb..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFInputFormat.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.InputFormat;
-import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.JobContext;
-import org.apache.hadoop.mapreduce.RecordReader;
-import org.apache.hadoop.mapreduce.TaskAttemptContext;
-
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.HoplogUtil.HoplogOptimizedSplitter;
-
-public class GFInputFormat extends InputFormat<GFKey, PersistedEventImpl>
-    implements Configurable {
-  public static final String HOME_DIR = "mapreduce.input.gfinputformat.homedir";
-  public static final String INPUT_REGION = "mapreduce.input.gfinputformat.inputregion";
-  public static final String START_TIME = "mapreduce.input.gfinputformat.starttime";
-  public static final String END_TIME = "mapreduce.input.gfinputformat.endtime";
-  public static final String CHECKPOINT = "mapreduce.input.gfinputformat.checkpoint";
-  
-  protected Configuration conf;
-
-  @Override
-  public List<InputSplit> getSplits(JobContext job) throws IOException {
-    this.conf = job.getConfiguration();
-    
-    Collection<FileStatus> hoplogs = getHoplogs();
-    return createSplits(hoplogs);
-  }
-
-  /**
-   * Identifies filters provided in the job configuration and creates a list of
-   * sorted hoplogs. If there are no sorted hoplogs, checks if the region has
-   * sequential hoplogs
-   * 
-   * @return list of hoplogs
-   * @throws IOException
-   */
-  protected Collection<FileStatus> getHoplogs() throws IOException {
-    String regionName = conf.get(INPUT_REGION);
-    System.out.println("GFInputFormat: Region Name is " + regionName);
-    if (regionName == null || regionName.trim().isEmpty()) {
-      // incomplete job configuration, region name must be provided
-      return new ArrayList<FileStatus>();
-    }
-
-    String home = conf.get(HOME_DIR, HDFSStore.DEFAULT_HOME_DIR);
-    regionName = HdfsRegionManager.getRegionFolder(regionName);
-    Path regionPath = new Path(home + "/" + regionName);
-    FileSystem fs = regionPath.getFileSystem(conf);
-
-    long start = conf.getLong(START_TIME, 0l);
-    long end = conf.getLong(END_TIME, 0l);
-    boolean checkpoint = conf.getBoolean(CHECKPOINT, true);
-
-    // if the region contains flush hoplogs then the region is of type RW.
-    Collection<FileStatus> hoplogs;
-    hoplogs = HoplogUtil.filterHoplogs(fs, regionPath, start, end, checkpoint);
-    return hoplogs == null ? new ArrayList<FileStatus>() : hoplogs;
-  }
-  
-  /**
-   * Creates an input split for every block occupied by hoplogs of the input
-   * regions
-   * 
-   * @param hoplogs
-   * @return list of input splits of type file input split
-   * @throws IOException
-   */
-  private List<InputSplit> createSplits(Collection<FileStatus> hoplogs)
-      throws IOException {
-    List<InputSplit> splits = new ArrayList<InputSplit>();
-    if (hoplogs == null || hoplogs.isEmpty()) {
-      return splits;
-    }
-    
-    HoplogOptimizedSplitter splitter = new HoplogOptimizedSplitter(hoplogs);
-    return splitter.getOptimizedSplits(conf);
-  }
-
-  @Override
-  public RecordReader<GFKey, PersistedEventImpl> createRecordReader(
-      InputSplit split, TaskAttemptContext context) throws IOException,
-      InterruptedException {
-    return new AbstractGFRecordReader();
-  }
-
-  @Override
-  public void setConf(Configuration conf) {
-    this.conf = conf;
-  }
-
-  @Override
-  public Configuration getConf() {
-    return conf;
-  }
-}
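
A map/reduce job consumed this input format purely through the configuration keys declared at the top of the class. The hypothetical driver sketch below shows how those keys were typically populated; the HDFS store home directory, region name, and job name are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class GFInputFormatDriverSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("mapreduce.input.gfinputformat.homedir", "/gemfire");      // HOME_DIR
    conf.set("mapreduce.input.gfinputformat.inputregion", "orders");    // INPUT_REGION
    conf.setLong("mapreduce.input.gfinputformat.starttime", 0L);        // START_TIME
    conf.setLong("mapreduce.input.gfinputformat.endtime", 0L);          // END_TIME
    conf.setBoolean("mapreduce.input.gfinputformat.checkpoint", true);  // CHECKPOINT

    Job job = Job.getInstance(conf, "gf-input-format-sketch");
    // The removed input format would then be wired in with
    // job.setInputFormatClass(GFInputFormat.class), along with the mapper
    // and output classes for the job.
    System.out.println("configured job: " + job.getJobName());
  }
}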

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFKey.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFKey.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFKey.java
deleted file mode 100644
index 5bba2c7..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFKey.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.io.WritableComparator;
-
-import com.gemstone.gemfire.internal.util.BlobHelper;
-
-public class GFKey implements WritableComparable<GFKey> {
-  private Object key;
-
-  public Object getKey() {
-    return key;
-  }
-
-  public void setKey(Object key) {
-    this.key = key;
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    byte[] bytes = BlobHelper.serializeToBlob(key);
-    out.writeInt(bytes.length);
-    out.write(bytes, 0, bytes.length);
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    int len = in.readInt();
-    byte[] bytes = new byte[len];
-    in.readFully(bytes, 0, len);
-    try {
-      key = BlobHelper.deserializeBlob(bytes);
-    } catch (ClassNotFoundException e) {
-      // Propagate deserialization failures instead of leaving the key silently null.
-      throw new IOException("Unable to deserialize GFKey", e);
-    }
-  }
-
-  @Override
-  public int compareTo(GFKey o) {
-    try {
-      byte[] b1 = BlobHelper.serializeToBlob(key);
-      byte[] b2 = BlobHelper.serializeToBlob(o.key);
-      return WritableComparator.compareBytes(b1, 0, b1.length, b2, 0, b2.length);
-    } catch (IOException e) {
-      // TODO Auto-generated catch block
-      e.printStackTrace();
-    }
-    
-    return 0;
-  }
-}
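
GFKey is a WritableComparable: write() serializes the wrapped key to a DataOutput and readFields() restores it from the matching DataInput, so Hadoop can shuffle and sort it by raw bytes. The hypothetical round trip below demonstrates that same Writable contract with Hadoop's own Text type standing in for GFKey; the key content is a placeholder.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.Text;

public class WritableRoundTripSketch {
  public static void main(String[] args) throws IOException {
    Text original = new Text("order-42");

    // Writable.write: serialize the object to a byte stream.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bytes));

    // Writable.readFields: rebuild an equal object from the same bytes.
    Text restored = new Text();
    restored.readFields(new DataInputStream(
        new ByteArrayInputStream(bytes.toByteArray())));

    System.out.println(original.equals(restored));  // prints true
  }
}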

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFOutputFormat.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFOutputFormat.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFOutputFormat.java
deleted file mode 100644
index 3be2ab0..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFOutputFormat.java
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapred.InvalidJobConfException;
-import org.apache.hadoop.mapreduce.JobContext;
-import org.apache.hadoop.mapreduce.OutputCommitter;
-import org.apache.hadoop.mapreduce.OutputFormat;
-import org.apache.hadoop.mapreduce.RecordWriter;
-import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionExistsException;
-import com.gemstone.gemfire.cache.client.ClientCache;
-import com.gemstone.gemfire.cache.client.ClientCacheFactory;
-import com.gemstone.gemfire.cache.client.ClientRegionFactory;
-import com.gemstone.gemfire.cache.client.ClientRegionShortcut;
-import com.gemstone.gemfire.cache.server.CacheServer;
-import com.gemstone.gemfire.management.internal.cli.converters.ConnectionEndpointConverter;
-
-/**
- * Output format for gemfire. The records provided to writers created by this
- * output format are PUT in a live gemfire cluster.
- * 
- */
-public class GFOutputFormat extends OutputFormat<Object, Object> {
-  public static final String REGION = "mapreduce.output.gfoutputformat.outputregion";
-  public static final String LOCATOR_HOST = "mapreduce.output.gfoutputformat.locatorhost";
-  public static final String LOCATOR_PORT = "mapreduce.output.gfoutputformat.locatorport";
-  public static final String SERVER_HOST = "mapreduce.output.gfoutputformat.serverhost";
-  public static final String SERVER_PORT = "mapreduce.output.gfoutputformat.serverport";
-
-  @Override
-  public RecordWriter<Object, Object> getRecordWriter(TaskAttemptContext context)
-      throws IOException, InterruptedException {
-    Configuration conf = context.getConfiguration();
-    ClientCache cache = getClientCacheInstance(conf);
-    return new GFRecordWriter(cache, context.getConfiguration());
-  }
-
-  public ClientCache getClientCacheInstance(Configuration conf) {
-    // if locator host is provided create a client cache instance using
-    // connection to locator. If locator is not provided and server host is also
-    // not provided, connect using default locator
-    ClientCache cache;
-    String serverHost = conf.get(SERVER_HOST);
-    if (serverHost == null || serverHost.isEmpty()) {
-      cache = createGFWriterUsingLocator(conf);
-    } else {
-      cache = createGFWriterUsingServer(conf);
-    }
-    return cache;
-  }
-
-  /**
-   * Creates instance of {@link ClientCache} by connecting to GF cluster through
-   * locator
-   */
-  public ClientCache createGFWriterUsingLocator(Configuration conf) {
-    // if locator host is not provided assume localhost
-    String locator = conf.get(LOCATOR_HOST,
-        ConnectionEndpointConverter.DEFAULT_LOCATOR_HOST);
-    // if locator port is not provided assume default locator port 10334
-    int port = conf.getInt(LOCATOR_PORT,
-        ConnectionEndpointConverter.DEFAULT_LOCATOR_PORT);
-
-    // create gemfire client cache instance
-    ClientCacheFactory ccf = new ClientCacheFactory();
-    ccf.addPoolLocator(locator, port);
-    ClientCache cache = ccf.create();
-    return cache;
-  }
-
-  /**
-   * Creates instance of {@link ClientCache} by connecting to GF cluster through
-   * GF server
-   */
-  public ClientCache createGFWriterUsingServer(Configuration conf) {
-    String server = conf.get(SERVER_HOST);
-    // if server port is not provided assume default server port, 40404
-    int port = conf.getInt(SERVER_PORT, CacheServer.DEFAULT_PORT);
-
-    // create gemfire client cache instance
-    ClientCacheFactory ccf = new ClientCacheFactory();
-    ccf.addPoolServer(server, port);
-    ClientCache cache = ccf.create();
-    return cache;
-  }
-
-  public Region<Object, Object> getRegionInstance(Configuration conf,
-      ClientCache cache) {
-    Region<Object, Object> region;
-
-    // create gemfire region in proxy mode
-    String regionName = conf.get(REGION);
-    ClientRegionFactory<Object, Object> regionFactory = cache
-        .createClientRegionFactory(ClientRegionShortcut.PROXY);
-    try {
-      region = regionFactory.create(regionName);
-    } catch (RegionExistsException e) {
-      region = cache.getRegion(regionName);
-    }
-
-    return region;
-  }
-
-  /**
-   * Puts a K-V pair in region
-   * @param region
-   * @param key
-   * @param value
-   */
-  public void executePut(Region<Object, Object> region, Object key, Object value) {
-    region.put(key, value);
-  }
-
-  /**
-   * Closes client cache instance
-   * @param clientCache
-   */
-  public void closeClientCache(ClientCache clientCache) {
-    if (clientCache != null && !clientCache.isClosed()) {
-      clientCache.close();
-    }
-  }
-
-  /**
-   * Validates correctness and completeness of job's output configuration
-   * 
-   * @param conf
-   * @throws InvalidJobConfException
-   */
-  protected void validateConfiguration(Configuration conf)
-      throws InvalidJobConfException {
-    // User must configure the output region name.
-    String region = conf.get(REGION);
-    if (region == null || region.trim().isEmpty()) {
-      throw new InvalidJobConfException("Output Region name not provided.");
-    }
-
-    // TODO validate if a client connected to gemfire cluster can be created
-  }
-  
-  @Override
-  public void checkOutputSpecs(JobContext context) throws IOException,
-      InterruptedException {
-    Configuration conf = context.getConfiguration();
-    validateConfiguration(conf);
-  }
-
-  @Override
-  public OutputCommitter getOutputCommitter(TaskAttemptContext context)
-      throws IOException, InterruptedException {
-    return new FileOutputCommitter(FileOutputFormat.getOutputPath(context),
-        context);
-  }
-
-  public class GFRecordWriter extends RecordWriter<Object, Object> {
-    private ClientCache clientCache;
-    private Region<Object, Object> region;
-
-    public GFRecordWriter(ClientCache cache, Configuration conf) {
-      this.clientCache = cache;
-      region = getRegionInstance(conf, clientCache);
-    }
-
-    @Override
-    public void write(Object key, Object value) throws IOException,
-        InterruptedException {
-      executePut(region, key, value);
-    }
-
-    @Override
-    public void close(TaskAttemptContext context) throws IOException,
-        InterruptedException {
-      closeClientCache(clientCache);
-    }
-  }
-}
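
Every record handed to the GFRecordWriter above ends up as a plain region.put() against a client cache connected through a locator or a server. The hypothetical stand-alone client below performs the same kind of put using the public GemFire client API; the locator address, region name, and key/value are placeholders.

import com.gemstone.gemfire.cache.Region;
import com.gemstone.gemfire.cache.client.ClientCache;
import com.gemstone.gemfire.cache.client.ClientCacheFactory;
import com.gemstone.gemfire.cache.client.ClientRegionShortcut;

public class GemFireClientPutSketch {
  public static void main(String[] args) {
    // Connect through a locator, as GFOutputFormat does when LOCATOR_HOST is set.
    ClientCache cache = new ClientCacheFactory()
        .addPoolLocator("localhost", 10334)
        .create();
    try {
      // A PROXY region holds no local state; every put goes straight to the servers.
      Region<String, String> region = cache
          .<String, String>createClientRegionFactory(ClientRegionShortcut.PROXY)
          .create("exampleRegion");
      region.put("key-1", "value-1");
    } finally {
      cache.close();
    }
  }
}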

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HDFSSplitIterator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HDFSSplitIterator.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HDFSSplitIterator.java
deleted file mode 100644
index 869ad0d..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HDFSSplitIterator.java
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
-
-import java.io.IOException;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
-
-import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplog;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetReader.HoplogIterator;
-import com.gemstone.gemfire.i18n.LogWriterI18n;
-
-import org.apache.logging.log4j.Logger;
-
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
-
-/**
- * Iterates over the records in part of a hoplog. This iterator
- * is passed from the map reduce job into the gemfirexd LanguageConnectionContext
- * for gemfirexd to use as the iterator during the map phase.
- *
- */
-public abstract class HDFSSplitIterator {
-  // data object for holding path, offset and length, of all the blocks this
-  // iterator needs to iterate on
-  private CombineFileSplit split;
-
-  // the following members are pointers to current hoplog which is being
-  // iterated upon
-  private int currentHopIndex = 0;
-  private AbstractHoplog hoplog;
-  protected HoplogIterator<byte[], byte[]> iterator;
-  byte[] key;
-  byte[] value;
-  
-  private long bytesRead;
-  protected long RECORD_OVERHEAD = 8;
-
-  private long startTime = 0l;
-  private long endTime = 0l;
-
-  protected FileSystem fs;
-  private static final Logger logger = LogService.getLogger();
-  protected final String logPrefix = "<" + "HDFSSplitIterator" + "> ";
-
-  public HDFSSplitIterator(FileSystem fs, Path[] paths, long[] offsets, long[] lengths, long startTime, long endTime) throws IOException {
-    this.fs = fs;
-    this.split = new CombineFileSplit(paths, offsets, lengths, null);
-    while(currentHopIndex < split.getNumPaths() && !fs.exists(split.getPath(currentHopIndex))){
-      logger.warn(LocalizedMessage.create(LocalizedStrings.HOPLOG_CLEANED_UP_BY_JANITOR, split.getPath(currentHopIndex)));
-      currentHopIndex++;
-    }
-    if(currentHopIndex == split.getNumPaths()){
-      this.hoplog = null;
-      iterator = null;
-    } else {
-      this.hoplog = getHoplog(fs,split.getPath(currentHopIndex));
-      iterator = hoplog.getReader().scan(split.getOffset(currentHopIndex), split.getLength(currentHopIndex));
-    }
-    this.startTime = startTime;
-    this.endTime = endTime;
-  }
-
-  /**
-   * Get the appropriate iterator for the file type.
-   */
-  public static HDFSSplitIterator newInstance(FileSystem fs, Path[] path,
-      long[] start, long[] len, long startTime, long endTime)
-      throws IOException {
-    String fileName = path[0].getName();
-    if (fileName.endsWith(AbstractHoplogOrganizer.SEQ_HOPLOG_EXTENSION)) {
-      return new StreamSplitIterator(fs, path, start, len, startTime, endTime);
-    } else {
-      return new RWSplitIterator(fs, path, start, len, startTime, endTime);
-    }
-  }
-
-  public final boolean hasNext() throws IOException {
-    while (currentHopIndex < split.getNumPaths()) {
-      if (iterator != null) {
-        if(iterator.hasNext()) {
-          return true;
-        } else {
-          iterator.close();
-          iterator = null;
-          hoplog.close();
-          hoplog = null;
-        }
-      }
-      
-      if (iterator == null) {
-        // Iterator is null if this is first read from this iterator or all the
-        // entries from the previous iterator have been read. create iterator on
-        // the next hoplog.
-        currentHopIndex++;
-        while (currentHopIndex < split.getNumPaths() && !fs.exists(split.getPath(currentHopIndex))){
-          logger.warn(LocalizedMessage.create(LocalizedStrings.HOPLOG_CLEANED_UP_BY_JANITOR, split.getPath(currentHopIndex).toString()));
-          currentHopIndex++;
-        }
-        if (currentHopIndex >= split.getNumPaths()) {
-          return false;
-        }
-        hoplog = getHoplog(fs, split.getPath(currentHopIndex));
-        iterator = hoplog.getReader().scan(split.getOffset(currentHopIndex), split.getLength(currentHopIndex));
-      }
-    }
-    
-    return false;
-  } 
-
-  public final boolean next() throws IOException {
-    while (hasNext()) {
-      key = iterator.next();
-      value = iterator.getValue();
-      bytesRead += (key.length + value.length);
-      bytesRead += RECORD_OVERHEAD;
-      
-      // If any filter is set, check whether the event's timestamp matches it.
-      // The events returned by the iterator may not be time ordered, so the
-      // filters must be checked for every record.
-      if (startTime > 0 || endTime > 0) {
-        try {
-          PersistedEventImpl event = getDeserializedValue();
-          long timestamp = event.getTimstamp();
-          if (startTime > 0l && timestamp < startTime) {
-            continue;
-          }
-          
-          if (endTime > 0l && timestamp > endTime) {
-            continue;
-          }
-        } catch (ClassNotFoundException e) {
-          throw new HDFSIOException("Error reading from HDFS", e);
-        } 
-      }
-        
-      return true;
-    }
-    
-    return false;
-  }
-
-  public final long getBytesRead() {
-    return this.bytesRead;
-  }
-
-  public final byte[] getKey() {
-    return key;
-  }
-
-  public abstract PersistedEventImpl getDeserializedValue()
-      throws ClassNotFoundException, IOException;
-
-  protected abstract AbstractHoplog getHoplog(FileSystem fs, Path path)
-      throws IOException;
-
-  public final byte[] getValue() {
-    return value;
-  }
-
-  public final long getLength() {
-    return split.getLength();
-  }
-
-  public void close() throws IOException {
-    if (iterator != null) {
-      iterator.close();
-      iterator = null;
-    }
-    
-    if (hoplog != null) {
-      hoplog.close();
-      hoplog = null;
-    }
-  }
-}
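
The iterator above wraps a CombineFileSplit, i.e. a list of (path, offset, length) block descriptors, and walks the hoplog records block by block. The hypothetical sketch below shows how such a split is assembled, which is what newInstance() receives as its work assignment; the paths and sizes are placeholders.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;

public class CombineSplitSketch {
  public static void main(String[] args) throws Exception {
    Path[] paths = { new Path("/gemfire/orders/0/a.hop"),
                     new Path("/gemfire/orders/1/b.hop") };
    long[] offsets = { 0L, 0L };
    long[] lengths = { 64L * 1024 * 1024, 8L * 1024 * 1024 };

    // Locations may be null, exactly as in the HDFSSplitIterator constructor.
    CombineFileSplit split = new CombineFileSplit(paths, offsets, lengths, null);
    System.out.println("blocks in split: " + split.getNumPaths()
        + ", total bytes: " + split.getLength());
  }
}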

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtil.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtil.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtil.java
deleted file mode 100644
index c4c0d1c..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtil.java
+++ /dev/null
@@ -1,463 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.JobContext;
-import org.apache.hadoop.mapreduce.RecordReader;
-import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
-import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
-
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer.HoplogComparator;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
-
-public class HoplogUtil {
-  /**
-   * @param regionPath
-   *          HDFS path of the region
-   * @param fs
-   *          file system associated with the region
-   * @param type
-   *          type of hoplog to be fetched; flush hoplog or sequence hoplog
-   * @return All hoplog file paths belonging to the region provided
-   * @throws IOException
-   */
-  public static Collection<FileStatus> getAllRegionHoplogs(Path regionPath,
-      FileSystem fs, String type) throws IOException {
-    return getRegionHoplogs(regionPath, fs, type, 0, 0);
-  }
-
-  /**
-   * @param regionPath
-   *          Region path
-   * @param fs
-   *          file system associated with the region
-   * @param type
-   *          type of hoplog to be fetched; flush hoplog or sequence hoplog
-   * @param start
-   *          Exclude files that do not contain records mutated after start time
-   * @param end
-   *          Exclude files that do not contain records mutated before end time
-   * @return All hoplog file paths belonging to the region provided
-   * @throws IOException
-   */
-  public static Collection<FileStatus> getRegionHoplogs(Path regionPath,
-      FileSystem fs, String type, long start, long end) throws IOException {
-    Collection<Collection<FileStatus>> allBuckets = getBucketHoplogs(
-        regionPath, fs, type, start, end);
-
-    ArrayList<FileStatus> hoplogs = new ArrayList<FileStatus>();
-    for (Collection<FileStatus> bucket : allBuckets) {
-      for (FileStatus file : bucket) {
-        hoplogs.add(file);
-      }
-    }
-    return hoplogs;
-  }
-
-  public static Collection<Collection<FileStatus>> getBucketHoplogs(Path regionPath,
-      FileSystem fs, String type, long start, long end) throws IOException {
-    Collection<Collection<FileStatus>> allBuckets = new ArrayList<Collection<FileStatus>>();
-
-    // hoplog files names follow this pattern
-    String HOPLOG_NAME_REGEX = AbstractHoplogOrganizer.HOPLOG_NAME_REGEX + type;
-    String EXPIRED_HOPLOG_NAME_REGEX = HOPLOG_NAME_REGEX + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION;
-    final Pattern pattern = Pattern.compile(HOPLOG_NAME_REGEX);
-    final Pattern expiredPattern = Pattern.compile(EXPIRED_HOPLOG_NAME_REGEX);
-    
-    Path cleanUpIntervalPath = new Path(regionPath.getParent(), HoplogConfig.CLEAN_UP_INTERVAL_FILE_NAME);
-    long intervalDurationMillis = readCleanUpIntervalMillis(fs, cleanUpIntervalPath);
-
-    // A region directory contains directories for individual buckets. A bucket
-    // has an integer name.
-    FileStatus[] bucketDirs = fs.listStatus(regionPath);
-    
-    for (FileStatus bucket : bucketDirs) {
-      if (!bucket.isDirectory()) {
-        continue;
-      }
-      try {
-        Integer.valueOf(bucket.getPath().getName());
-      } catch (NumberFormatException e) {
-        continue;
-      }
-
-      ArrayList<FileStatus> bucketHoplogs = new ArrayList<FileStatus>();
-
-      // identify all the flush hoplogs and seq hoplogs by visiting all the
-      // bucket directories
-      FileStatus[] bucketFiles = fs.listStatus(bucket.getPath());
-      
-      Map<String, Long> expiredHoplogs = getExpiredHoplogs(fs, bucketFiles, expiredPattern);
-      
-      FileStatus oldestHopAfterEndTS = null;
-      long oldestHopTS = Long.MAX_VALUE;
-      long currentTimeStamp = System.currentTimeMillis();
-      for (FileStatus file : bucketFiles) {
-        if (!file.isFile()) {
-          continue;
-        }
-
-        Matcher match = pattern.matcher(file.getPath().getName());
-        if (!match.matches()) {
-          continue;
-        }
-        
-        long timeStamp = AbstractHoplogOrganizer.getHoplogTimestamp(match);
-        if (start > 0 && timeStamp < start) {
-          // this hoplog contains records less than the start time stamp
-          continue;
-        }
-
-        if (end > 0 && timeStamp > end) {
-          // this hoplog contains records mutated after end time stamp. Ignore
-          // this hoplog if it is not the oldest.
-          if (oldestHopTS > timeStamp) {
-            oldestHopTS = timeStamp;
-            oldestHopAfterEndTS = file;
-          }
-          continue;
-        }
-        long expiredTimeStamp = expiredTime(file, expiredHoplogs);
-        if (expiredTimeStamp > 0 && intervalDurationMillis > 0) {
-          if ((currentTimeStamp - expiredTimeStamp) > 0.8 * intervalDurationMillis) {
-            continue;
-          }
-        }
-        bucketHoplogs.add(file);
-      }
-
-      if (oldestHopAfterEndTS != null) {
-        long expiredTimeStamp = expiredTime(oldestHopAfterEndTS, expiredHoplogs);
-        if (expiredTimeStamp <= 0 || intervalDurationMillis <=0  || 
-            (currentTimeStamp - expiredTimeStamp) <= 0.8 * intervalDurationMillis) {
-          bucketHoplogs.add(oldestHopAfterEndTS);
-        }
-      }
-
-      if (bucketHoplogs.size() > 0) {
-        allBuckets.add(bucketHoplogs);
-      }
-    }
-    
-    return allBuckets;
-  }
-  
-  private static Map<String, Long> getExpiredHoplogs(FileSystem fs, FileStatus[] bucketFiles, 
-      Pattern expiredPattern) throws IOException{
-    Map<String, Long> expiredHoplogs = new HashMap<String,Long>();
-    
-    for(FileStatus file : bucketFiles) {
-      if(!file.isFile()) {
-        continue;
-      }
-      String fileName = file.getPath().getName();
-      Matcher match = expiredPattern.matcher(fileName);
-      if (!match.matches()){
-        continue;
-      }
-      expiredHoplogs.put(fileName,file.getModificationTime());
-    }
-    return expiredHoplogs;
-  }
-  
-  private static long expiredTime(FileStatus file, Map<String, Long> expiredHoplogs){
-    String expiredMarkerName = file.getPath().getName() + 
-        AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION;
-    
-    long expiredTimeStamp = -1;
-    if (expiredHoplogs.containsKey(expiredMarkerName)) {
-      expiredTimeStamp = expiredHoplogs.get(expiredMarkerName);
-    }
-    return expiredTimeStamp;
-  }
-  
-  public static long readCleanUpIntervalMillis(FileSystem fs, Path cleanUpIntervalPath) throws IOException{
-    if (fs.exists(cleanUpIntervalPath)) {
-      FSDataInputStream input = new FSDataInputStream(fs.open(cleanUpIntervalPath));
-      long intervalDurationMillis = input.readLong();
-      input.close();
-      return intervalDurationMillis;
-    } else {
-      return -1l;
-    }
-  }
-  
-  public static void exposeCleanupIntervalMillis(FileSystem fs, Path path, long intervalDurationMillis){
-    FSDataInputStream input = null;
-    FSDataOutputStream output = null;
-    try {
-      if(fs.exists(path)){
-        input = new FSDataInputStream(fs.open(path));
-        if (intervalDurationMillis == input.readLong()) {
-          input.close();
-          return;
-        }
-        input.close();
-        fs.delete(path, true);
-      } 
-      output = fs.create(path);
-      output.writeLong(intervalDurationMillis);
-      output.close();
-    } catch (IOException e) {
-      return;
-    } finally {
-      try {
-        if (input != null){
-          input.close();
-        }
-        if (output != null) {
-          output.close();
-        }
-      } catch(IOException e2) {
-        
-      } 
-    }
-  }
-
-  /**
-   * @param regionPath
-   * @param fs
-   * @return list of latest checkpoint files of all buckets in the region
-   * @throws IOException
-   */
-  public static Collection<FileStatus> getCheckpointFiles(Path regionPath,
-      FileSystem fs) throws IOException {
-    ArrayList<FileStatus> latestSnapshots = new ArrayList<FileStatus>();
-
-    Collection<Collection<FileStatus>> allBuckets = getBucketHoplogs(
-        regionPath, fs, AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION, 0, 0);
-
-    // extract the latest major compacted hoplog from each bucket
-    for (Collection<FileStatus> bucket : allBuckets) {
-      FileStatus latestSnapshot = null;
-      for (FileStatus file : bucket) {
-        if (latestSnapshot == null) {
-          latestSnapshot = file;
-        } else {
-          String name1 = latestSnapshot.getPath().getName();
-          String name2 = file.getPath().getName();
-          
-          if (HoplogComparator.compareByName(name1, name2) > 0) {
-            latestSnapshot = file;
-          }
-        }
-      }
-      
-      if (latestSnapshot != null) {
-        latestSnapshots.add(latestSnapshot);
-      }
-    }
-
-    return latestSnapshots;
-  }
-  
-  /**
-   * Creates a mapping of hoplog to hdfs blocks on disk
-   * 
-   * @param files
-   *          list of hoplog file status objects
-   * @return array of hdfs block location objects associated with a hoplog
-   * @throws IOException
-   */
-  public static Map<FileStatus, BlockLocation[]> getBlocks(Configuration config,
-      Collection<FileStatus> files) throws IOException {
-    Map<FileStatus, BlockLocation[]> blocks = new HashMap<FileStatus, BlockLocation[]>();
-    if (files == null || files.isEmpty()) {
-      return blocks;
-    }
-
-    FileSystem fs = files.iterator().next().getPath().getFileSystem(config);
-
-    for (FileStatus hoplog : files) {
-      long length = hoplog.getLen();
-      BlockLocation[] fileBlocks = fs.getFileBlockLocations(hoplog, 0, length);
-      blocks.put(hoplog, fileBlocks);
-    }
-
-    return blocks;
-  }
-  
-  /**
-   * Filters out hoplogs of a region that do not match time filters and creates
-   * a list of hoplogs that may be used by hadoop jobs.
-   * 
-   * @param fs
-   *          file system instance
-   * @param path
-   *          region path
-   * @param start
-   *          start time in milliseconds
-   * @param end
-   *          end time in milliseconds
-   * @param snapshot
-   *          if true latest snapshot hoplog will be included in the final
-   *          return list
-   * @return filtered collection of hoplogs
-   * @throws IOException
-   */
-  public static Collection<FileStatus> filterHoplogs(FileSystem fs, Path path,
-      long start, long end, boolean snapshot) throws IOException {
-    ArrayList<FileStatus> hoplogs = new ArrayList<FileStatus>();
-
-    // if the region contains flush hoplogs or major compacted files then the
-    // region is of type RW.
-    // check if the intent is to operate on major compacted files only
-    if (snapshot) {
-      hoplogs.addAll(getCheckpointFiles(path, fs));
-    } else {
-      hoplogs.addAll(getRegionHoplogs(path, fs,
-          AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION, start, end));
-    }
-
-    if (hoplogs == null || hoplogs.isEmpty()) {
-      // there are no sorted hoplogs. Check if sequence hoplogs are present
-      // there is no checkpoint mode for write only tables
-      hoplogs.addAll(getRegionHoplogs(path, fs,
-          AbstractHoplogOrganizer.SEQ_HOPLOG_EXTENSION, start, end));
-    }
-
-    return hoplogs == null ? new ArrayList<FileStatus>() : hoplogs;
-  }
-  
-  private HoplogUtil() {
-    //static methods only.
-  }
-  
-  /**
-   * This class creates MR splits from hoplog files. This class leverages
-   * CombineFileInputFormat to create locality, node and rack, aware splits
-   * 
-   */
-  public static class HoplogOptimizedSplitter extends CombineFileInputFormat<Long, Long> {
-    private Collection<FileStatus> hoplogs;
-
-    public HoplogOptimizedSplitter(Collection<FileStatus> hoplogs) {
-      this.hoplogs = hoplogs;
-    }
-    
-    @Override
-    protected List<FileStatus> listStatus(JobContext job) throws IOException {
-      /**
-       * listStatus in super collects fileStatus for each file again. It also
-       * tries to recursively list files in subdirectories. None of this is
-       * applicable in this case. Splitter has already collected fileStatus for
-       * all files. So bypassing super's method will improve performance as NN
-       * chatter will be reduced. Especially helpful if the NN is not colocated.
-       */
-      return new ArrayList<FileStatus>(hoplogs);
-    }
-    
-    /**
-     * Creates an array of splits for the input list of hoplogs. Each split is
-     * roughly the size of an hdfs block. Hdfs blocks of a hoplog may be smaller
-     * than the hdfs block size, e.g. if the hoplog is very small. The method
-     * keeps adding hdfs blocks of a hoplog to a split till the split is less
-     * than hdfs block size and the block is local to the split.
-     */
-    public List<InputSplit> getOptimizedSplits(Configuration conf) throws IOException {
-      
-      if (hoplogs == null || hoplogs.isEmpty()) {
-        return null;
-      }
-      Path[] paths = new Path[hoplogs.size()];
-      int i = 0;
-      for (FileStatus file : hoplogs) {
-        paths[i] = file.getPath();
-        i++;
-      }
-
-      FileStatus hoplog = hoplogs.iterator().next();
-      long blockSize = hoplog.getBlockSize();
-      setMaxSplitSize(blockSize);
-
-      Job job = Job.getInstance(conf);
-      setInputPaths(job, paths);
-      List<InputSplit> splits = super.getSplits(job);
-      
-      // in some cases a split may not get populated with host location
-      // information. If such a split is created, fill location information of
-      // the first file in the split
-      ArrayList<CombineFileSplit> newSplits = new ArrayList<CombineFileSplit>();
-      for (Iterator<InputSplit> iter = splits.iterator(); iter.hasNext();) {
-        CombineFileSplit split = (CombineFileSplit) iter.next();
-        if (split.getLocations() != null && split.getLocations().length > 0) {
-          continue;
-        }
-        
-        paths = split.getPaths();
-        if (paths.length == 0) {
-          continue;
-        }
-        long[] starts = split.getStartOffsets();
-        long[] ends = split.getLengths();
-        
-        FileSystem fs = paths[0].getFileSystem(conf);
-        FileStatus file = fs.getFileStatus(paths[0]);
-        BlockLocation[] blks = fs.getFileBlockLocations(file, starts[0], ends[0]);
-        if (blks != null && blks.length > 0) {
-          // hosts found. Need to create a new split and replace the one missing
-          // hosts.
-          iter.remove();
-          String hosts[] = blks[0].getHosts();
-          split = new CombineFileSplit(paths, starts, ends, hosts);
-          newSplits.add(split);
-        }
-      }
-      splits.addAll(newSplits);
-      
-      return splits;
-    }
-    
-    @Override
-    public List<InputSplit> getSplits(JobContext job) throws IOException {
-      // a call to this method is invalid. This class is only meant to create
-      // optimized splits independent of the api type
-      throw new IllegalStateException();
-    }
-
-    @Override
-    public RecordReader<Long, Long> createRecordReader(InputSplit split,
-        TaskAttemptContext arg1) throws IOException {
-      // Record reader creation is managed by GFInputFormat. This method should
-      // not be called
-      throw new IllegalStateException();
-    }
-  }
-}
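
The cleanup-interval handling removed above amounts to persisting a single long in a marker file and reading it back later. A minimal sketch of that round trip against the standard Hadoop FileSystem API (the path and interval value below are illustrative only, not taken from the deleted code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CleanUpIntervalMarkerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path marker = new Path("/tmp/cleanUpInterval");   // hypothetical location
    FileSystem fs = marker.getFileSystem(conf);

    // Write (or overwrite) the interval, as exposeCleanupIntervalMillis did.
    try (FSDataOutputStream out = fs.create(marker, true)) {
      out.writeLong(600000L);                         // illustrative value
    }

    // Read it back, as readCleanUpIntervalMillis did; -1 means "not set".
    long intervalMillis = -1L;
    if (fs.exists(marker)) {
      try (FSDataInputStream in = fs.open(marker)) {
        intervalMillis = in.readLong();
      }
    }
    System.out.println("cleanup interval = " + intervalMillis + " ms");
  }
}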

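The only subtle step in getOptimizedSplits() above is back-filling host locations on splits that CombineFileInputFormat returns without any. Roughly the same fix-up, reduced to public Hadoop APIs (the helper name is made up for this sketch; the real code rebuilt the split in place while iterating):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;

public class SplitLocationBackfillSketch {
  /** Returns the split unchanged if it already carries hosts, otherwise a copy
   *  that borrows the hosts of the first block of its first file. */
  static CombineFileSplit withHosts(CombineFileSplit split, Configuration conf)
      throws Exception {
    if (split.getLocations() != null && split.getLocations().length > 0) {
      return split;
    }
    Path[] paths = split.getPaths();
    if (paths.length == 0) {
      return split;
    }
    FileSystem fs = paths[0].getFileSystem(conf);
    FileStatus file = fs.getFileStatus(paths[0]);
    BlockLocation[] blocks =
        fs.getFileBlockLocations(file, split.getOffset(0), split.getLength(0));
    if (blocks == null || blocks.length == 0) {
      return split;
    }
    return new CombineFileSplit(paths, split.getStartOffsets(),
        split.getLengths(), blocks[0].getHosts());
  }
}
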


[15/63] [abbrv] incubator-geode git commit: GEODE-1072: Removing HDFS related code

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSBucketRegionQueue.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSBucketRegionQueue.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSBucketRegionQueue.java
deleted file mode 100644
index 9127e4d..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSBucketRegionQueue.java
+++ /dev/null
@@ -1,1232 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Deque;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.NavigableSet;
-import java.util.NoSuchElementException;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.LinkedBlockingDeque;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicLong;
-
-import com.gemstone.gemfire.InternalGemFireError;
-import com.gemstone.gemfire.cache.CacheWriterException;
-import com.gemstone.gemfire.cache.EntryNotFoundException;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionAttributes;
-import com.gemstone.gemfire.cache.TimeoutException;
-import com.gemstone.gemfire.cache.hdfs.internal.FlushObserver.AsyncFlushResult;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue.SortedEventBuffer.BufferIterator;
-import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
-import com.gemstone.gemfire.internal.Assert;
-import com.gemstone.gemfire.internal.cache.AbstractBucketRegionQueue;
-import com.gemstone.gemfire.internal.cache.EntryEventImpl;
-import com.gemstone.gemfire.internal.cache.ForceReattemptException;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.InternalRegionArguments;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.internal.cache.RegionEventImpl;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.ByteComparator;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.CursorIterator;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
-import org.apache.hadoop.hbase.util.Bytes;
-
-
-/**
- * This class holds the sorted list required for HDFS. 
- * 
- * 
- */
-public class HDFSBucketRegionQueue extends AbstractBucketRegionQueue {
-     private static final boolean VERBOSE = Boolean.getBoolean("hdfsBucketRegionQueue.VERBOSE");
-     private final int batchSize;
-     volatile HDFSEventQueue hdfsEventQueue = null;
-     
-     // set before releasing the primary lock. 
-     private final AtomicBoolean releasingPrimaryLock = new AtomicBoolean(true);
-     
-     // This is used to keep track of the current size of the queue in bytes. 
-     final AtomicLong queueSizeInBytes =  new AtomicLong(0);
-     public boolean isBucketSorted = true;
-     /**
-     * @param regionName
-     * @param attrs
-     * @param parentRegion
-     * @param cache
-     * @param internalRegionArgs
-     */
-    public HDFSBucketRegionQueue(String regionName, RegionAttributes attrs,
-        LocalRegion parentRegion, GemFireCacheImpl cache,
-        InternalRegionArguments internalRegionArgs) {
-      super(regionName, attrs, parentRegion, cache, internalRegionArgs);
-      
-      this.isBucketSorted = internalRegionArgs.getPartitionedRegion().getParallelGatewaySender().getBucketSorted();
-      if (isBucketSorted)
-        hdfsEventQueue = new MultiRegionSortedQueue();
-      else
-        hdfsEventQueue = new EventQueue();
-      
-      batchSize = internalRegionArgs.getPartitionedRegion().
-          getParallelGatewaySender().getBatchSize() * 1024 *1024;
-      this.keySet();
-    }
-    @Override
-    protected void initialize(InputStream snapshotInputStream,
-        InternalDistributedMember imageTarget,
-        InternalRegionArguments internalRegionArgs) throws TimeoutException,
-        IOException, ClassNotFoundException {
-
-      super.initialize(snapshotInputStream, imageTarget, internalRegionArgs);
-
-      loadEventsFromTempQueue();
-      
-      this.initialized = true;
-      notifyEventProcessor();
-    }
-
-    private TreeSet<Long> createSkipListFromMap(Set keySet) {
-      TreeSet<Long> sortedKeys = null;
-      if (!hdfsEventQueue.isEmpty())
-        return sortedKeys;
-      
-      if (!keySet.isEmpty()) {
-        sortedKeys = new TreeSet<Long>(keySet);
-        if (!sortedKeys.isEmpty())
-        {
-          for (Long key : sortedKeys) {
-            if (this.isBucketSorted) {
-              Object hdfsevent = getNoLRU(key, true, false, false);
-              if (hdfsevent == null) { // this can happen when tombstones are recovered. 
-                if (logger.isDebugEnabled() || VERBOSE) {
-                  logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Discarding key " + key + ", no event recovered"));
-                }
-              } else {
-                int eventSize = ((HDFSGatewayEventImpl)hdfsevent).
-                    getSizeOnHDFSInBytes(!this.isBucketSorted);
-                hdfsEventQueue.put(key,(HDFSGatewayEventImpl)hdfsevent, eventSize );
-                queueSizeInBytes.getAndAdd(eventSize);
-              }
-            }
-            else {
-              Object hdfsevent = getNoLRU(key, true, false, false);
-              if (hdfsevent != null) { // hdfs event can be null when tombstones are recovered.
-                queueSizeInBytes.getAndAdd(((HDFSGatewayEventImpl)hdfsevent).
-                    getSizeOnHDFSInBytes(!this.isBucketSorted));
-              }
-              ((EventQueue)hdfsEventQueue).put(key);
-            }
-              
-          }
-          getEventSeqNum().setIfGreater(sortedKeys.last());
-        }
-      
-      }
-      if (logger.isDebugEnabled() || VERBOSE) {
-        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG,
-            "For bucket " + getId() + ", total keys recovered are : " + keySet.size()
-                + " and the seqNo is " + getEventSeqNum()));
-      }
-      return sortedKeys;
-    }
-    
-    @Override
-    protected void basicClear(RegionEventImpl ev) {
-      super.basicClear(ev);
-      queueSizeInBytes.set(0);
-      if ( this.getBucketAdvisor().isPrimary()) {
-        this.hdfsEventQueue.clear();
-      }
-    }
-    
-    protected void clearQueues(){
-      queueSizeInBytes.set(0);
-      if ( this.getBucketAdvisor().isPrimary()) {
-        this.hdfsEventQueue.clear();
-      }
-    }
-   
-    @Override
-    protected void basicDestroy(final EntryEventImpl event,
-        final boolean cacheWrite, Object expectedOldValue)
-        throws EntryNotFoundException, CacheWriterException, TimeoutException {
-      super.basicDestroy(event, cacheWrite, expectedOldValue);
-    }
-    
-    ArrayList peekABatch() {
-      ArrayList result = new ArrayList();
-      hdfsEventQueue.peek(result);
-      return result;
-    }
-    
-    @Override
-    protected void addToEventQueue(Object key, boolean didPut, EntryEventImpl event, int sizeOfHDFSEvent) {
-      if (didPut &&  this.getBucketAdvisor().isPrimary()) {
-        HDFSGatewayEventImpl hdfsEvent = (HDFSGatewayEventImpl)event.getValue();
-        if (sizeOfHDFSEvent == -1) { 
-          try {
-            // the size is calculated only on primary before event is inserted in the bucket. 
-            // If this node became primary after size was calculated, sizeOfHDFSEvent will be -1. 
-            // Try to get the size. #50016
-            sizeOfHDFSEvent = hdfsEvent.getSizeOnHDFSInBytes(!((HDFSBucketRegionQueue)this).isBucketSorted);
-          } catch (Throwable e) {
-           //   Ignore any exception while fetching the size.
-            sizeOfHDFSEvent = 0;
-          }
-        }
-        queueSizeInBytes.getAndAdd(sizeOfHDFSEvent);
-        if (this.initialized) {
-          Long longKey = (Long)key;
-          this.hdfsEventQueue.put(longKey, hdfsEvent, sizeOfHDFSEvent);
-        }
-        if (logger.isDebugEnabled()) {
-          logger.debug("Put successfully in the queue : " + hdfsEvent + " . Queue initialized: " 
-              + this.initialized);
-        }
-      }
-    }
-    
-    /**
-     * It removes the first key from the queue.
-     * 
-     * @return Returns the key for which value was destroyed.
-     * @throws ForceReattemptException
-     */
-    public Long remove() throws ForceReattemptException {
-      throw new UnsupportedOperationException("Individual entries cannot be removed in a HDFSBucketRegionQueue");
-    }
-    
-    /**
-     * It removes the first key from the queue.
-     * 
-     * @return Returns the value.
-     * @throws InterruptedException
-     * @throws ForceReattemptException
-     */
-    public Object take() throws InterruptedException, ForceReattemptException {
-      throw new UnsupportedOperationException("take() cannot be called for individual entries in a HDFSBucketRegionQueue");
-    }
-    
-    public void destroyKeys(ArrayList<HDFSGatewayEventImpl>  listToDestroy) {
-      
-      HashSet<Long> removedSeqNums = new HashSet<Long>();
-      
-      for (int index =0; index < listToDestroy.size(); index++) {
-        HDFSGatewayEventImpl entry = null;
-        if (this.isBucketSorted) {
-          // Remove the events in reverse order so that the events with higher sequence number
-          // are removed last to ensure consistency.
-          entry = listToDestroy.get(listToDestroy.size() - index -1);
-        } else {
-          entry = listToDestroy.get(index);
-        }
-       
-        try {
-          if (this.logger.isDebugEnabled())
-            logger.debug("destroying primary key " + entry.getShadowKey() + " bucket id: " + this.getId());
-          // removed from peeked list
-          boolean deleted = this.hdfsEventQueue.remove(entry);
-          if (deleted) {
-            // this is an onheap event so a call to size should be ok. 
-            long entrySize = entry.getSizeOnHDFSInBytes(!this.isBucketSorted);
-            destroyKey(entry.getShadowKey());
-            long queueSize = queueSizeInBytes.getAndAdd(-1*entrySize);
-            if (queueSize < 0) {
-              // In HA scenarios, queueSizeInBytes can go awry.
-              queueSizeInBytes.compareAndSet(queueSize, 0);
-            }
-            removedSeqNums.add(entry.getShadowKey());
-          }
-        }catch (ForceReattemptException e) {
-          if (logger.isDebugEnabled()) {
-            logger.debug("ParallelGatewaySenderQueue#remove->HDFSBucketRegionQueue#destroyKeys: " + "Got ForceReattemptException for " + this
-            + " for bucket = " + this.getId());
-          }
-        }
-        catch(EntryNotFoundException e) {
-          if (logger.isDebugEnabled()) {
-            logger.debug("ParallelGatewaySenderQueue#remove->HDFSBucketRegionQueue#destroyKeys: " + "Got EntryNotFoundException for " + this
-              + " for bucket = " + this.getId() + " and key " + entry.getShadowKey());
-          }
-        } finally {
-          entry.release();
-        }
-      }
-      
-      if (this.getBucketAdvisor().isPrimary()) {
-        hdfsEventQueue.handleRemainingElements(removedSeqNums);
-      }
-    }
-
-    
-    public boolean isReadyForPeek() {
-      return !this.isEmpty() && !this.hdfsEventQueue.isEmpty() && getBucketAdvisor().isPrimary();
-    }
-
-    public long getLastPeekTimeInMillis() {
-      return hdfsEventQueue.getLastPeekTimeInMillis();
-    }
-    
-    public long getQueueSizeInBytes() {
-      return queueSizeInBytes.get();
-    }
-    /*
-     * This function is called when the bucket takes as the role of primary.
-     */
-    @Override
-    public void beforeAcquiringPrimaryState() {
-      
-      queueSizeInBytes.set(0);
-      if (logger.isDebugEnabled() || VERBOSE) {
-        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG,
-            "This node has become primary for bucket " + this.getId()  +". " +
-            		"Creating sorted data structure for the async queue."));
-      }
-      releasingPrimaryLock.set(false);
-      
-      // clear the hdfs queue in case it has already elements left if it was a primary
-      // in the past
-      hdfsEventQueue.clear();
-      if (isBucketSorted)
-        hdfsEventQueue = new MultiRegionSortedQueue();
-      else
-        hdfsEventQueue = new EventQueue();
-      
-      TreeSet<Long> sortedKeys = createSkipListFromMap(this.keySet());
-      
-      if (sortedKeys != null && sortedKeys.size() > 0) {    
-        // Mark the events equal to batch size as duplicate. 
-        // calculate the batch size based on the number of events currently in the queue
-        // This is an approximation. 
-        long batchSizeMB =  this.getPartitionedRegion().getParallelGatewaySender().getBatchSize();
-        long batchSizeInBytes = batchSizeMB*1024*1024;
-        long totalBucketSize = queueSizeInBytes.get();
-        totalBucketSize = totalBucketSize >  0 ? totalBucketSize: 1;
-        long totalEntriesInBucket = this.entryCount();
-        totalEntriesInBucket =  totalEntriesInBucket > 0 ? totalEntriesInBucket: 1;
-        
-        long perEntryApproxSize = totalBucketSize/totalEntriesInBucket;
-        perEntryApproxSize = perEntryApproxSize >  0 ? perEntryApproxSize: 1;
-        
-        int batchSize  = (int)(batchSizeInBytes/perEntryApproxSize);
-        
-        if (logger.isDebugEnabled() || VERBOSE) {
-          logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG,
-              "Calculating batch size " +  " batchSizeMB: " + batchSizeMB + " batchSizeInBytes: " + batchSizeInBytes + 
-              " totalBucketSize: " + totalBucketSize + " totalEntriesInBucket: " + totalEntriesInBucket + 
-              " perEntryApproxSize: " + perEntryApproxSize + " batchSize: " + batchSize ));
-        }
-        
-        markEventsAsDuplicate(batchSize, sortedKeys.iterator());
-      }
-    }
-    
-    @Override
-    public void beforeReleasingPrimaryLockDuringDemotion() {
-      queueSizeInBytes.set(0);
-      releasingPrimaryLock.set(true);
-      // release memory in case of a clean transition
-      hdfsEventQueue.clear();
-    }
-
-    /**
-     * This function searches the skip list and the peeked skip list for a given region key
-     * @param region 
-     * 
-     */
-    public HDFSGatewayEventImpl getObjectForRegionKey(Region region, byte[] regionKey) {
-      // get can only be called for a sorted queue.
-      // Calling get with Long.MIN_VALUE seq number ensures that 
-      // the list will return the key which has highest seq number. 
-      return hdfsEventQueue.get(region, regionKey, Long.MIN_VALUE);
-    }
-
-    /**
-     * Get an iterator on the queue, passing in the partitioned region
-     * we want to iterate over the events from.
-     */
-    public SortedEventQueueIterator iterator(Region region) {
-      return hdfsEventQueue.iterator(region);
-    }
-
-    public long totalEntries() {
-      return entryCount();
-    }
-    
-    /**
-     * Ideally this function should be called from a thread periodically to 
-     * rollover the skip list when it is above a certain size. 
-     * 
-     */
-    public void rolloverSkipList() {
-      // rollover can only be called for a sorted queue.
-      hdfsEventQueue.rollover();
-    }
-    
-    public boolean shouldDrainImmediately() {
-      return hdfsEventQueue.getFlushObserver().shouldDrainImmediately();
-    }
-
-    public AsyncFlushResult flush() {
-      if (logger.isDebugEnabled() || VERBOSE) {
-        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Flush requested"));
-      }
-      return hdfsEventQueue.getFlushObserver().flush();
-    }
-    
-    /**
-     * This class keeps the regionkey and seqNum. The objects of this class are 
-     * kept in a concurrent skip list. The order of elements is decided based on the 
-     * comparison of regionKey + seqNum. This kind of comparison allows us to keep 
-     * multiple updates on a single key (because each update has a different seq num)
-     */
-    static class KeyToSeqNumObject implements Comparable<KeyToSeqNumObject>
-    {
-      private byte[] regionkey; 
-      private Long seqNum;
-      
-      KeyToSeqNumObject(byte[] regionkey, Long seqNum){
-        this.regionkey = regionkey;
-        this.seqNum = seqNum;
-      }
-      
-      /**
-       * This function compares the key first. If the keys are the same then the seq num is compared.
-       * This comparison is key because it ensures that the skip lists hold the elements 
-       * in the order we want and that, for multiple updates on a key, a get fetches the most recent one. 
-       * Currently we are comparing seq numbers but we will have to change it to version stamps. 
-       * The list can have elements in the following sequence 
-       * K1 Value1 version : 1 
-       * K2 Value2a version : 2
-       * K2 Value2 version : 1
-       * K3 Value3 version : 1
-       * For a get on K2, it should return K2 Value2a.  
-       */
-      @Override
-      public int compareTo(KeyToSeqNumObject o) {
-        int compareOutput = ByteComparator.compareBytes(
-            this.getRegionkey(), 0, this.getRegionkey().length, o.getRegionkey(), 0, o.getRegionkey().length);
-        if (compareOutput != 0 )
-          return compareOutput;
-        
-        // If the keys are the same and this is an object with a dummy seq number, 
-        // return -1. This ensures that the ceiling function on a skip list will enumerate 
-        // all the entries and return the last one.   
-        if (this.getSeqNum() == Long.MIN_VALUE) 
-          return -1;
-        
-        // this is to just maintain consistency with the above statement. 
-        if (o.getSeqNum() == Long.MIN_VALUE) 
-          return 1;
-       
-        // minus operator pushes entries with lower seq number in the end so that 
-        // the order as mentioned above is maintained. And the entries with 
-        // higher version are fetched on a get. 
-        return this.getSeqNum().compareTo(o.getSeqNum()) * -1;  
-      }
-      
-      @Override
-      public boolean equals (Object o) {
-    	KeyToSeqNumObject obj = null;
-      	if (o == null)
-    		return false; 
-    	
-    	if (o instanceof KeyToSeqNumObject) 
-    		obj = (KeyToSeqNumObject)o;
-    	else
-    		return false;
-    	
-    	if (this.compareTo(obj) != 0)
-          return false;
-        else
-          return true;
-      }
-      
-      public int hashCode() {
-    	assert false : "hashCode not designed";
-    	return -1;
-      }
-      
-      byte[] getRegionkey() {
-        return regionkey;
-      }
-
-      public Long getSeqNum() {
-        return seqNum;
-      }
-
-      public void setSeqNum(Long seqNum) {
-        this.seqNum = seqNum;
-      }
-      
-      @Override
-      public String toString() {
-        return EntryEventImpl.deserialize(regionkey) + " {" + seqNum + "}";
-      }
-    }
-    
-    public interface HDFSEventQueue {
-      FlushObserver getFlushObserver();
-      
-      /** puts an event in the queue. */ 
-      public void put (long key, HDFSGatewayEventImpl event, int size);
-      
-      public SortedEventQueueIterator iterator(Region region);
-
-      public void rollover();
-
-      /** Get a value from the queue
-       * @throws IllegalStateException if this queue doesn't support get  
-       **/
-      public HDFSGatewayEventImpl get(Region region, byte[] regionKey,
-          long minValue);
-
-      // Peeks a batch of size specified by batchSize
-      // And add the results to the array list
-      public void peek(ArrayList result);
-      
-      // Checks if there are elements to be peeked 
-      public boolean isEmpty();
-      
-      // removes the event if it has already been peeked. 
-      public boolean remove(HDFSGatewayEventImpl event);
-      
-      // take care of the elements that were peeked 
-      // but were not removed after a batch dispatch 
-      // due to concurrency effects. 
-      public void handleRemainingElements(HashSet<Long> listToBeremoved);
-      
-      // clears the list. 
-      public void clear();
-      
-      // get the time when the last peek was done. 
-      public long getLastPeekTimeInMillis();
-    }
-    
-    class MultiRegionSortedQueue implements HDFSEventQueue {
-      ConcurrentMap<String, SortedEventQueue> regionToEventQueue = new ConcurrentHashMap<String, SortedEventQueue>();
-      volatile Set<SortedEventQueue> peekedQueues = Collections.EMPTY_SET;
-      private final AtomicBoolean peeking = new AtomicBoolean(false);
-      long lastPeekTimeInMillis = System.currentTimeMillis();
-      
-      private final FlushObserver flush = new FlushObserver() {
-        @Override
-        public AsyncFlushResult flush() {
-          final Set<AsyncFlushResult> flushes = new HashSet<AsyncFlushResult>();
-          for (SortedEventQueue queue : regionToEventQueue.values()) {
-            flushes.add(queue.getFlushObserver().flush());
-          }
-          
-          return new AsyncFlushResult() {
-            @Override
-            public boolean waitForFlush(long timeout, TimeUnit unit) throws InterruptedException {
-              long start = System.nanoTime();
-              long remaining = unit.toNanos(timeout);
-              for (AsyncFlushResult afr : flushes) {
-                if (!afr.waitForFlush(remaining, TimeUnit.NANOSECONDS)) {
-                  return false;
-                }
-                remaining -= (System.nanoTime() - start);
-              }
-              return true;
-            }
-          };
-        }
-        
-        @Override
-        public boolean shouldDrainImmediately() {
-          for (SortedEventQueue queue : regionToEventQueue.values()) {
-            if (queue.getFlushObserver().shouldDrainImmediately()) {
-              return true;
-            }
-          }
-          return false;
-        }
-      };
-      
-      @Override
-      public FlushObserver getFlushObserver() {
-        return flush;
-      }
-
-      @Override
-      public void put(long key, HDFSGatewayEventImpl event, int size) {
-        
-        String region = event.getRegionPath();
-        SortedEventQueue regionQueue = regionToEventQueue.get(region);
-        if(regionQueue == null) {
-          regionToEventQueue.putIfAbsent(region, new SortedEventQueue());
-          regionQueue = regionToEventQueue.get(region);
-        }
-        regionQueue.put(key, event, size);
-      }
-
-      @Override
-      public void peek(ArrayList result) {
-        // The elements that were peeked last time, have not been persisted to HDFS 
-        // yet. You cannot take out next batch until that is done.
-        if (!peeking.compareAndSet(false, true)) {
-          if (logger.isTraceEnabled() || VERBOSE) {
-            logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Peek already in progress, aborting"));
-          }
-          return;
-        }
-        //Maintain a separate set of peeked queues.
-        //All of these queues are stateful, and expect
-        //handleRemainingElements and clear to be called on
-        //them iff peek was called on them. However, new queues
-        //may be created in that time.
-        peekedQueues = Collections.newSetFromMap(new ConcurrentHashMap<SortedEventQueue, Boolean>(regionToEventQueue.size()));
-        
-        //Peek from all of the existing queues
-        for(SortedEventQueue queue : regionToEventQueue.values()) {
-          if(!queue.isEmpty()) {
-            queue.peek(result);
-            peekedQueues.add(queue);
-          }
-        }
-        if (result.isEmpty()) 
-          peeking.set(false);
-        
-        
-        this.lastPeekTimeInMillis = System.currentTimeMillis();
-      }
-
-      @Override
-      public boolean isEmpty() {
-        for(SortedEventQueue queue : regionToEventQueue.values()) {
-          if(!queue.isEmpty()) {
-            return false;
-          }
-        }
-        return true;
-      }
-
-      @Override
-      public boolean remove(HDFSGatewayEventImpl event) {
-        String region = event.getRegionPath();
-        SortedEventQueue regionQueue = regionToEventQueue.get(region);
-        return regionQueue.remove(event);
-      }
-
-      @Override
-      public void handleRemainingElements(HashSet<Long> removedSeqNums){
-        for(SortedEventQueue queue : peekedQueues) {
-          queue.handleRemainingElements(removedSeqNums);
-        }
-        peekedQueues.clear();
-        peeking.set(false);
-      }
-
-      @Override
-      public void clear() {
-        for(SortedEventQueue queue : regionToEventQueue.values()) {
-          queue.clear();
-        }
-        peekedQueues.clear();
-        peeking.set(false);
-      }
-
-      @Override
-      public long getLastPeekTimeInMillis() {
-        return this.lastPeekTimeInMillis;
-      }
-
-      @Override
-      public HDFSGatewayEventImpl get(Region region, byte[] regionKey,
-          long minValue) {
-        SortedEventQueue queue = regionToEventQueue.get(region.getFullPath());
-        if(queue == null) {
-          return null;
-        }
-        return queue.get(region, regionKey, minValue);
-      }
-
-      @Override
-      public SortedEventQueueIterator iterator(Region region) {
-        SortedEventQueue queue = regionToEventQueue.get(region.getFullPath());
-        if(queue == null) {
-          return new SortedEventQueueIterator(new LinkedBlockingDeque<SortedEventBuffer>());
-        }
-        return queue.iterator(region);
-      }
-
-      @Override
-      public void rollover() {
-        for(SortedEventQueue queue : regionToEventQueue.values()) {
-          queue.rollover();
-        }
-      }
-    }
-    
-    class EventQueue implements HDFSEventQueue {
-      private final SignalledFlushObserver flush = new SignalledFlushObserver();
-      private final BlockingQueue<Long> eventSeqNumQueue = new LinkedBlockingQueue<Long>();
-      private final BlockingQueue<Long> peekedEvents = new LinkedBlockingQueue<Long>();
-      private long lastPeekTimeInMillis = System.currentTimeMillis(); 
-      
-      public EventQueue() {
-        
-      }
-      
-      @Override
-      public FlushObserver getFlushObserver() {
-        return flush;
-      }
-
-      @Override
-      public void put(long key, HDFSGatewayEventImpl event, int size) {
-        put(key);
-      }
-      public void put (long key) {
-        eventSeqNumQueue.add(key);
-        flush.push();
-        incQueueSize();
-      }
-      
-      
-      @Override
-      public HDFSGatewayEventImpl get(Region region, byte[] regionKey,
-          long minValue) {
-        throw new InternalGemFireError("Get not supported on unsorted queue");
-      }
-      
-      @Override
-      public void peek(ArrayList peekedEntries) {
-        if (peekedEvents.size() != 0) {
-          return;
-        }
-        
-        for(int size=0; size < batchSize; ) {
-          Long seqNum = eventSeqNumQueue.peek();
-          if (seqNum == null) {
-            // queue is now empty, return
-            break;
-          }
-          Object object = getNoLRU(seqNum, true, false, false);
-          if (object != null) {
-            peekedEvents.add(seqNum);
-            size += ((HDFSGatewayEventImpl)object).getSizeOnHDFSInBytes(!isBucketSorted);
-            peekedEntries.add(object);
-
-          } else {
-            logger.debug("The entry corresponding to the sequence number " + 
-               seqNum +  " is missing. This can happen when an entry is already" +
-               "dispatched before a bucket moved.");
-            // event is being ignored. Decrease the queue size
-            decQueueSize();
-            flush.pop(1);
-           
-          }
-          eventSeqNumQueue.poll();
-          
-        }
-        this.lastPeekTimeInMillis  = System.currentTimeMillis();
-      }
-
-      @Override
-      public boolean isEmpty() {
-        return eventSeqNumQueue.isEmpty();
-      }
-
-      
-      @Override
-      public boolean remove(HDFSGatewayEventImpl event) {
-        boolean deleted = peekedEvents.remove(event.getShadowKey());
-        if (deleted)
-         decQueueSize();
-        return deleted;
-      }
-
-      @Override
-      // It looks like that there is no need for this function 
-      // in EventQueue.
-      public void handleRemainingElements(HashSet<Long> removedSeqNums) {
-        flush.pop(removedSeqNums.size());
-        eventSeqNumQueue.addAll(peekedEvents);
-        peekedEvents.clear();
-      }
-
-      @Override
-      public void clear() {
-        flush.clear();
-        decQueueSize(eventSeqNumQueue.size());
-        eventSeqNumQueue.clear();
-        decQueueSize(peekedEvents.size());
-        peekedEvents.clear();
-      }
-
-      @Override
-      public long getLastPeekTimeInMillis() {
-        return this.lastPeekTimeInMillis;
-      }
-      @Override
-      public SortedEventQueueIterator iterator(Region region) {
-        throw new InternalGemFireError("not supported on unsorted queue");
-      }
-      @Override
-      public void rollover() {
-        throw new InternalGemFireError("not supported on unsorted queue");
-      }
-    }
-    
-    class SortedEventQueue implements HDFSEventQueue {
-      private final SignalledFlushObserver flush = new SignalledFlushObserver();
-
-      // List of all the skip lists that hold the data
-      final Deque<SortedEventBuffer> queueOfLists = 
-          new LinkedBlockingDeque<SortedEventBuffer>();
-      
-      // This points to the tail of the queue
-      volatile SortedEventBuffer currentSkipList = new SortedEventBuffer();
-      
-      private final AtomicBoolean peeking = new AtomicBoolean(false);
-      
-      private long lastPeekTimeInMillis = System.currentTimeMillis(); 
-      
-      public SortedEventQueue() {
-        queueOfLists.add(currentSkipList);
-      }
-      
-      @Override
-      public FlushObserver getFlushObserver() {
-        return flush;
-      }
-
-      public boolean remove(HDFSGatewayEventImpl event) {
-        SortedEventBuffer eventBuffer = queueOfLists.peek();
-        if (eventBuffer != null) {
-          return eventBuffer.copyToBuffer(event);
-        }
-        else {
-          // This can happen when the queue is cleared because of bucket movement 
-          // before the remove is called. 
-          return true;
-        }
-      } 
-
-      public void clear() {
-        flush.clear();
-        for (SortedEventBuffer buf : queueOfLists) {
-          decQueueSize(buf.size());
-          buf.clear();
-        }
-        
-        queueOfLists.clear();
-        rollList(false);
-
-        peeking.set(false);
-      }
-
-      public boolean isEmpty() {
-        if (queueOfLists.size() == 1)
-          return queueOfLists.peek().isEmpty();
-        return false;
-      }
-
-      public void put(long key, HDFSGatewayEventImpl event, int eventSize) {
-        if (logger.isTraceEnabled() || VERBOSE) {
-          logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Inserting key " + event + " into list " + System.identityHashCode(currentSkipList)));
-        }
-        putInList(new KeyToSeqNumObject(((HDFSGatewayEventImpl)event).getSerializedKey(), key), 
-            eventSize);
-      }
-
-      private void putInList(KeyToSeqNumObject entry, int sizeInBytes) {
-        // It was observed during testing that peek can start peeking 
-        // elements from a list to which a put is happening. This happens 
-        // when the peek changes the value of currentSkiplist to a new list 
-        // but the put continues to write to an older list. 
-        // So there is a possibility that an element is added to the list 
-        // that has already been peeked. To handle this case, in handleRemainingElements
-        // function we re-add the elements that were not peeked. 
-        if (currentSkipList.add(entry, sizeInBytes) == null) {
-          flush.push();
-          incQueueSize();
-        }
-      }
-
-      public void rollover(boolean forceRollover) {
-        if (currentSkipList.bufferSize() >= batchSize || forceRollover) {
-          rollList(forceRollover);
-        }
-      }
-      
-      /**
-       * Ideally this function should be called from a thread periodically to 
-       * rollover the skip list when it is above a certain size. 
-       * 
-       */
-      public void rollover() {
-        rollover(false);
-      }
-
-      public void peek(ArrayList peekedEntries) {
-        // The elements that were peeked last time, have not been persisted to HDFS 
-        // yet. You cannot take out next batch until that is done.
-        if (!peeking.compareAndSet(false, true)) {
-          if (logger.isTraceEnabled() || VERBOSE) {
-            logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Peek already in progress, aborting"));
-          }
-          return;
-        }
-
-        if (queueOfLists.size() == 1) {
-          rollList(false);
-        }
-        
-        Assert.assertTrue(queueOfLists.size() > 1, "Cannot peek from head of queue");
-        BufferIterator itr = queueOfLists.peek().iterator();
-        while (itr.hasNext()) {
-          KeyToSeqNumObject entry = itr.next();
-          if (logger.isTraceEnabled() || VERBOSE) {
-            logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Peeking key " + entry + " from list " + System.identityHashCode(queueOfLists.peek())));
-          }
-
-          HDFSGatewayEventImpl ev = itr.value();
-          ev.copyOffHeapValue();
-          peekedEntries.add(ev);
-        }
-        
-        // discard an empty batch as it is not processed and will plug up the
-        // queue
-        if (peekedEntries.isEmpty()) {
-          SortedEventBuffer empty = queueOfLists.remove();
-          if (logger.isTraceEnabled() || VERBOSE) {
-            logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Discarding empty batch " + empty));
-          }
-          peeking.set(false);
-        }
-        this.lastPeekTimeInMillis = System.currentTimeMillis();
-      }
-
-      public HDFSGatewayEventImpl get(Region region, byte[] regionKey, long key) {
-        KeyToSeqNumObject event = new KeyToSeqNumObject(regionKey, key);
-        Iterator<SortedEventBuffer> queueIterator = queueOfLists.descendingIterator();
-        while (queueIterator.hasNext()) {
-          HDFSGatewayEventImpl evt = queueIterator.next().getFromQueueOrBuffer(event);
-          if (evt != null) {
-            return evt;
-          }
-        }
-        return null;
-      }
-      
-      public void handleRemainingElements(HashSet<Long> removedSeqNums) {
-        if (!peeking.get()) {
-          if (logger.isTraceEnabled() || VERBOSE) {
-            logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Not peeked, just cleaning up empty batch; current list is " + currentSkipList));
-          }
-          return;
-        }
-
-        Assert.assertTrue(queueOfLists.size() > 1, "Cannot remove only event list");
-
-        // all done with the peeked elements, okay to throw away now
-        SortedEventBuffer buf = queueOfLists.remove();
-        SortedEventBuffer.BufferIterator bufIter = buf.iterator();
-        // Check if the removed buffer has any extra events. If yes, check if these extra 
-        // events are part of region. If yes, reinsert these as they were probably inserted 
-        // into this list while it was being peeked. 
-        while (bufIter.hasNext()) {
-          KeyToSeqNumObject key = bufIter.next();
-          if (!removedSeqNums.contains(key.getSeqNum())) {
-            HDFSGatewayEventImpl evt = (HDFSGatewayEventImpl) getNoLRU(key.getSeqNum(), true, false, false);
-            if (evt != null) {
-              flush.push();
-              incQueueSize();
-              queueOfLists.getFirst().add(key, evt.getSizeOnHDFSInBytes(!isBucketSorted));
-            }
-          }
-        }
-
-        decQueueSize(buf.size());
-        flush.pop(buf.size());
-        peeking.set(false);
-      }
-      
-      public long getLastPeekTimeInMillis(){
-        return this.lastPeekTimeInMillis;
-      }
-      
-      NavigableSet<KeyToSeqNumObject> getPeeked() {
-        assert peeking.get();
-        return queueOfLists.peek().keySet();
-      }
-      
-      private synchronized void rollList(boolean forceRollover) {
-        if (currentSkipList.bufferSize() < batchSize && queueOfLists.size() > 1 && !forceRollover)
-          return;
-        SortedEventBuffer tmp = new SortedEventBuffer();
-        queueOfLists.add(tmp);
-        if (logger.isTraceEnabled() || VERBOSE) {
-          logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Rolling over list from " + currentSkipList + " to list " + tmp));
-        }
-        currentSkipList = tmp;
-      }
-
-      @Override
-      public SortedEventQueueIterator iterator(Region region) {
-        return new SortedEventQueueIterator(queueOfLists);
-      }
-    }
-    
-    public class SortedEventBuffer {
-      private final HDFSGatewayEventImpl NULL = new HDFSGatewayEventImpl();
-  
-      private final ConcurrentSkipListMap<KeyToSeqNumObject, HDFSGatewayEventImpl> events = new ConcurrentSkipListMap<KeyToSeqNumObject, HDFSGatewayEventImpl>();
-      
-      private int bufferSize = 0;
-      
-      public boolean copyToBuffer(HDFSGatewayEventImpl event) {
-        KeyToSeqNumObject key = new KeyToSeqNumObject(event.getSerializedKey(), event.getShadowKey());
-        if (events.containsKey(key)) {
-          // After an event has been delivered in a batch, we copy it into the
-          // buffer so that it can be returned by an already in progress iterator.
-          // If we do not do this it is possible to miss events since the hoplog
-          // iterator uses a fixed set of files that are determined when the
-          // iterator is created.  The events will be GC'd once the buffer is no
-          // longer strongly referenced.
-          HDFSGatewayEventImpl oldVal = events.put(key, event);
-          assert oldVal == NULL;
-  
-          return true;
-        }
-        // If the primary lock is being relinquished, the events map is cleared and probably that is
-        // why we are here. Return true if the primary lock is being relinquished.
-        if (releasingPrimaryLock.get())
-          return true;
-        else 
-          return false;
-      }
-  
-      public HDFSGatewayEventImpl getFromQueueOrBuffer(KeyToSeqNumObject key) {
-        KeyToSeqNumObject result = events.ceilingKey(key);
-        if (result != null && Bytes.compareTo(key.getRegionkey(), result.getRegionkey()) == 0) {
-          
-          // first try to fetch the buffered event to make it fast. 
-          HDFSGatewayEventImpl evt = events.get(result);
-          if (evt != NULL) {
-            return evt;
-          }
-          // now try to fetch the event from the queue region
-          evt = (HDFSGatewayEventImpl) getNoLRU(result.getSeqNum(), true, false, false);
-          if (evt != null) {
-            return evt;
-          }
-          
-          // try to fetch again from the buffered events to avoid a race between 
-          // item deletion and the above two statements. 
-          evt = events.get(result);
-          if (evt != NULL) {
-            return evt;
-          }
-          
-        }
-        return null;
-      }
-  
-      public HDFSGatewayEventImpl add(KeyToSeqNumObject key, int sizeInBytes) {
-        bufferSize += sizeInBytes;
-        return events.put(key, NULL);
-      }
-  
-      public void clear() {
-        events.clear();
-      }
-  
-      public boolean isEmpty() {
-        return events.isEmpty();
-      }
-  
-      public int bufferSize() {
-        return bufferSize;
-      }
-      public int size() {
-        return events.size();
-      }
-      public NavigableSet<KeyToSeqNumObject> keySet() {
-        return events.keySet();
-      }
-  
-      public BufferIterator iterator() {
-        return new BufferIterator(events.keySet().iterator());
-      }
-  
-      public class BufferIterator implements Iterator<KeyToSeqNumObject> {
-        private final Iterator<KeyToSeqNumObject> src;
-
-        private KeyToSeqNumObject currentKey;
-        private HDFSGatewayEventImpl currentVal;
-
-        private KeyToSeqNumObject nextKey;
-        private HDFSGatewayEventImpl nextVal;
-        
-        public BufferIterator(Iterator<KeyToSeqNumObject> src) {
-          this.src = src;
-          moveNext();
-        }
-  
-        @Override
-        public void remove() {
-          throw new UnsupportedOperationException();
-        }
-        
-        @Override
-        public boolean hasNext() {
-          return nextVal != null;
-        }
-        
-        @Override
-        public KeyToSeqNumObject next() {
-          if (!hasNext()) {
-            throw new NoSuchElementException();
-          }
-          
-          currentKey = nextKey;
-          currentVal = nextVal;
-          
-          moveNext();
-          
-          return currentKey;
-        }
-  
-        public KeyToSeqNumObject key() {
-          assert currentKey != null;
-          return currentKey;
-        }
-        
-        public HDFSGatewayEventImpl value() {
-          assert currentVal != null;
-          return currentVal;
-        }
-        
-        private void moveNext() {
-          while (src.hasNext()) {
-            nextKey = src.next();
-            nextVal = getFromQueueOrBuffer(nextKey);
-            if (nextVal != null) {
-              return;
-            } else if (logger.isDebugEnabled() || VERBOSE) {
-              logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "The entry corresponding to"
-                  + " the sequence number " + nextKey.getSeqNum() 
-                  + " is missing. This can happen when an entry is already" 
-                  + " dispatched before a bucket moved."));
-            }
-          }
-          nextKey = null;
-          nextVal = null;
-        }
-      }
-    }
-  
-    public final class SortedEventQueueIterator implements CursorIterator<HDFSGatewayEventImpl> {
-      /** the iterators to merge */
-      private final List<SortedEventBuffer.BufferIterator> iters;
-  
-      /** the current iteration value */
-      private HDFSGatewayEventImpl value;
-  
-      public SortedEventQueueIterator(Deque<SortedEventBuffer> queueOfLists) {
-        iters = new ArrayList<SortedEventBuffer.BufferIterator>();
-        for (Iterator<SortedEventBuffer> iter = queueOfLists.descendingIterator(); iter.hasNext();) {
-          SortedEventBuffer.BufferIterator buf = iter.next().iterator();
-          if (buf.hasNext()) {
-            buf.next();
-            iters.add(buf);
-          }
-        }
-      }
-      
-      public void close() {
-        value = null;
-        iters.clear();
-      }
-
-      @Override
-      public boolean hasNext() {
-        return !iters.isEmpty();
-      }
-      
-      @Override
-      public HDFSGatewayEventImpl next() {
-        if (!hasNext()) {
-          throw new UnsupportedOperationException();
-        }
-        
-        int diff = 0;
-        KeyToSeqNumObject min = null;
-        SortedEventBuffer.BufferIterator cursor = null;
-        
-        for (Iterator<SortedEventBuffer.BufferIterator> merge = iters.iterator(); merge.hasNext(); ) {
-          SortedEventBuffer.BufferIterator buf = merge.next();
-          KeyToSeqNumObject tmp = buf.key();
-          if (min == null || (diff = Bytes.compareTo(tmp.regionkey, min.regionkey)) < 0) {
-            min = tmp;
-            cursor = buf;
-            
-          } else if (diff == 0 && !advance(buf, min)) {
-            merge.remove();
-          }
-        }
-        
-        value = cursor.value();
-        assert value != null;
-
-        if (!advance(cursor, min)) {
-          iters.remove(cursor);
-        }
-        return current();
-      }
-      
-      @Override
-      public final HDFSGatewayEventImpl current() {
-        return value;
-      }
-
-      @Override 
-      public void remove() {
-        throw new UnsupportedOperationException();
-      }
-      
-      private boolean advance(SortedEventBuffer.BufferIterator iter, KeyToSeqNumObject key) {
-        while (iter.hasNext()) {
-          if (Bytes.compareTo(iter.next().regionkey, key.regionkey) > 0) {
-            return true;
-          }
-        }
-        return false;
-      }
-    }
-}
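
The retrieval path removed above (getObjectForRegionKey -> getFromQueueOrBuffer) leans entirely on the KeyToSeqNumObject ordering: entries with the same region key sort together, higher sequence numbers sort first, and a Long.MIN_VALUE sentinel sorts ahead of every real sequence number, so a ceiling lookup lands on the newest update for a key. A toy version of that ordering, using String keys in place of the serialized byte[] keys of the real code (all names below are illustrative):

import java.util.Map;
import java.util.concurrent.ConcurrentSkipListMap;

public class NewestUpdateLookupSketch {
  static final class Key implements Comparable<Key> {
    final String regionKey;
    final long seqNum;
    Key(String regionKey, long seqNum) { this.regionKey = regionKey; this.seqNum = seqNum; }
    @Override public int compareTo(Key o) {
      int c = regionKey.compareTo(o.regionKey);
      if (c != 0) return c;
      if (seqNum == Long.MIN_VALUE) return -1;  // sentinel sorts first within its key
      if (o.seqNum == Long.MIN_VALUE) return 1;
      return -Long.compare(seqNum, o.seqNum);   // newer (higher) seq sorts first
    }
    @Override public String toString() { return regionKey + " {" + seqNum + "}"; }
  }

  public static void main(String[] args) {
    ConcurrentSkipListMap<Key, String> events = new ConcurrentSkipListMap<>();
    events.put(new Key("K1", 1), "Value1");
    events.put(new Key("K2", 1), "Value2");
    events.put(new Key("K2", 2), "Value2a");
    events.put(new Key("K3", 1), "Value3");

    // Ceiling on the sentinel finds the first entry for "K2", i.e. the newest update.
    Map.Entry<Key, String> hit = events.ceilingEntry(new Key("K2", Long.MIN_VALUE));
    if (hit != null && hit.getKey().regionKey.equals("K2")) {
      System.out.println(hit.getKey() + " -> " + hit.getValue());  // K2 {2} -> Value2a
    }
  }
}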

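MultiRegionSortedQueue.flush() above fans the flush out to every per-region queue and then waits for all of the resulting AsyncFlushResults inside one shared timeout. The same waiting pattern, reduced to a plain java.util.concurrent primitive (CountDownLatch stands in for AsyncFlushResult here and is not part of the removed API):

import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class MultiFlushWaitSketch {
  /** Waits for every latch, sharing a single deadline across all of them. */
  static boolean awaitAll(List<CountDownLatch> flushes, long timeout, TimeUnit unit)
      throws InterruptedException {
    long deadline = System.nanoTime() + unit.toNanos(timeout);
    for (CountDownLatch flush : flushes) {
      long remaining = deadline - System.nanoTime();
      if (remaining <= 0 || !flush.await(remaining, TimeUnit.NANOSECONDS)) {
        return false;   // ran out of time before every flush completed
      }
    }
    return true;
  }
}
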
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSet.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSet.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSet.java
deleted file mode 100644
index c8b7b28..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSet.java
+++ /dev/null
@@ -1,329 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.IOException;
-import java.lang.ref.ReferenceQueue;
-import java.lang.ref.WeakReference;
-import java.util.AbstractSet;
-import java.util.Iterator;
-import java.util.NoSuchElementException;
-
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue.SortedEventQueueIterator;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetReader.HoplogIterator;
-import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
-import com.gemstone.gemfire.internal.cache.BucketRegion;
-import com.gemstone.gemfire.internal.cache.EntryEventImpl;
-import com.gemstone.gemfire.internal.cache.HDFSRegionMap;
-import com.gemstone.gemfire.internal.cache.KeyWithRegionContext;
-import com.gemstone.gemfire.internal.cache.LocalRegion.IteratorType;
-import com.gemstone.gemfire.internal.cache.PrimaryBucketException;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import org.apache.hadoop.hbase.util.Bytes;
-
-@SuppressWarnings("rawtypes")
-public class HDFSEntriesSet extends AbstractSet {
-  private final IteratorType type;
-
-  private final HoplogOrganizer hoplogs;
-  private final HDFSBucketRegionQueue brq;
-  
-  private final BucketRegion region; 
-  private final ReferenceQueue<HDFSIterator> refs;
-  
-  public HDFSEntriesSet(BucketRegion region, HDFSBucketRegionQueue brq, 
-      HoplogOrganizer hoplogs, IteratorType type, ReferenceQueue<HDFSIterator> refs) {
-    this.region = region;
-    this.brq = brq;
-    this.hoplogs = hoplogs;
-    this.type = type;
-    this.refs = refs;
-  }
-  
-  @Override
-  public HDFSIterator iterator() {
-    HDFSIterator iter = new HDFSIterator(type, region.getPartitionedRegion(), true);
-    if (refs != null) {
-      // we can't rely on an explicit close but we need to free resources
-      //
-      // This approach has the potential to cause excessive memory load and/or
-      // GC problems if an app holds an iterator ref too long. A lease-based
-      // approach where iterators are automatically closed after X secs of inactivity is
-      // a potential alternative (but may require tuning for certain
-      // applications)
-      new WeakReference<HDFSEntriesSet.HDFSIterator>(iter, refs);
-    }
-    return iter;
-  }
-
-  @Override
-  public int size() {
-    // TODO this is the tortoise version, need a fast version for estimation
-    // note: more than 2^31-1 records will cause this counter to wrap
-    int size = 0;
-    HDFSIterator iter = new HDFSIterator(null, region.getPartitionedRegion(), false);
-    try {
-      while (iter.hasNext()) {
-        if (includeEntry(iter.next())) {
-          size++;
-        }
-      }
-    } finally {
-      iter.close();
-    }
-    return size;
-  }
-
-  @Override
-  public boolean isEmpty() {
-    HDFSIterator iter = new HDFSIterator(null, region.getPartitionedRegion(), false);
-    try {
-      while (iter.hasNext()) {
-        if (includeEntry(iter.next())) {
-          return false;
-        }
-      }
-    } finally {
-      iter.close();
-    }
-    return true;
-  }
-
-  private boolean includeEntry(Object val) {
-    if (val instanceof HDFSGatewayEventImpl) {
-      HDFSGatewayEventImpl evt = (HDFSGatewayEventImpl) val;
-      if (evt.getOperation().isDestroy()) {
-        return false;
-      }
-    } else if (val instanceof PersistedEventImpl) {
-      PersistedEventImpl evt = (PersistedEventImpl) val;
-      if (evt.getOperation().isDestroy()) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  public class HDFSIterator implements Iterator {
-    private final IteratorType type;
-    private final boolean deserialize;
-    
-    private final SortedEventQueueIterator queue;
-    private final HoplogIterator<byte[], SortedHoplogPersistedEvent> hdfs;
-    private Iterator txCreatedEntryIterator;
-    
-    private boolean queueNext;
-    private boolean hdfsNext;
-    private boolean forUpdate;
-    private boolean hasTxEntry;
-
-    private byte[] currentHdfsKey;
-
-    public HDFSIterator(IteratorType type, Region region, boolean deserialize) {
-      this.type = type;
-      this.deserialize = deserialize;
-
-      // Check whether the queue has become primary here.
-      // There could be some time between bucket becoming a primary 
-      // and underlying queue becoming a primary, so isPrimaryWithWait() 
-      // waits for some time for the queue to become a primary on this member
-      if (!brq.getBucketAdvisor().isPrimaryWithWait()) {
-        InternalDistributedMember primaryHolder = brq.getBucketAdvisor()
-            .basicGetPrimaryMember();
-        throw new PrimaryBucketException("Bucket " + brq.getName()
-            + " is not primary. Current primary holder is " + primaryHolder);
-      }
-      // We are deliberately NOT sync'ing while creating the iterators.  If done
-      // in the correct order, we may get duplicates (due to an in-progress
-      // flush) but we won't miss any entries.  The dupes will be eliminated
-      // during iteration.
-      queue = brq.iterator(region);
-      advanceQueue();
-      
-      HoplogIterator<byte[], SortedHoplogPersistedEvent> tmp = null;
-      try {
-        tmp = hoplogs.scan();
-      } catch (IOException e) {
-        HDFSEntriesSet.this.region.checkForPrimary();
-        throw new HDFSIOException(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(e.getMessage()), e);
-      }
-      
-      hdfs = tmp;
-      if (hdfs != null) {
-        advanceHdfs();
-      }
-    }
-    
-    @Override
-    public boolean hasNext() {
-      boolean nonTxHasNext = hdfsNext || queueNext;
-      if (!nonTxHasNext && this.txCreatedEntryIterator != null) {
-        this.hasTxEntry = this.txCreatedEntryIterator.hasNext();
-        return this.hasTxEntry;
-      }
-      return nonTxHasNext;
-    }
-    
-    @Override
-    public Object next() {
-      if (!hasNext()) {
-        throw new NoSuchElementException();
-      }
-      if (hasTxEntry) {
-        hasTxEntry = false;
-        return this.txCreatedEntryIterator.next();
-      }
-
-      Object val;
-      if (!queueNext) {
-        val = getFromHdfs();
-        advanceHdfs();
-        
-      } else if (!hdfsNext) {
-        val = getFromQueue();
-        advanceQueue();
-        
-      } else {
-        byte[] qKey = queue.current().getSerializedKey();
-        byte[] hKey = this.currentHdfsKey;
-        
-        int diff = Bytes.compareTo(qKey, hKey);
-        if (diff < 0) {
-          val = getFromQueue();
-          advanceQueue();
-          
-        } else if (diff == 0) {
-          val = getFromQueue();
-          advanceQueue();
-
-          // ignore the duplicate
-          advanceHdfs();
-
-        } else {
-          val = getFromHdfs();
-          advanceHdfs();
-        }
-      }
-      return val;
-    }
-    
-    @Override
-    public void remove() {
-      throw new UnsupportedOperationException();
-    }
-    
-    public void close() {
-      if (queueNext) {
-        queue.close();
-      }
-
-      if (hdfsNext) {
-        hdfs.close();
-      }
-    }
-
-    private Object getFromQueue() {
-      HDFSGatewayEventImpl evt = queue.current();
-      if (type == null) {
-        return evt;
-      }
-      
-      switch (type) {
-      case KEYS:
-        byte[] key = evt.getSerializedKey();
-        return deserialize ? EntryEventImpl.deserialize(key) : key;
-        
-      case VALUES:
-        return evt.getValue();
-        
-      default:
-        Object keyObj = EntryEventImpl.deserialize(evt.getSerializedKey());
-        if(keyObj instanceof KeyWithRegionContext) {
-          ((KeyWithRegionContext)keyObj).setRegionContext(region.getPartitionedRegion());
-        }
-        return ((HDFSRegionMap) region.getRegionMap()).getDelegate().getEntryFromEvent(keyObj, evt, true, forUpdate);
-      }
-    }
-
-    private Object getFromHdfs() {
-      if (type == null) {
-        return hdfs.getValue();
-      }
-      
-      switch (type) {
-      case KEYS:
-        byte[] key = this.currentHdfsKey;
-        return deserialize ? EntryEventImpl.deserialize(key) : key;
-        
-      case VALUES:
-        PersistedEventImpl evt = hdfs.getValue();
-        return evt.getValue();
-        
-      default:
-        Object keyObj = EntryEventImpl.deserialize(this.currentHdfsKey);
-        if(keyObj instanceof KeyWithRegionContext) {
-          ((KeyWithRegionContext)keyObj).setRegionContext(region.getPartitionedRegion());
-        }
-        return ((HDFSRegionMap) region.getRegionMap()).getDelegate().getEntryFromEvent(keyObj, hdfs.getValue(), true, forUpdate);
-      }
-    }
-    
-    private void advanceHdfs() {
-      if (hdfsNext = hdfs.hasNext()) {
-        try {
-          this.currentHdfsKey = hdfs.next();
-        } catch (IOException e) {
-          region.checkForPrimary();
-          throw new HDFSIOException(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(e.getMessage()), e);
-        }
-      } else {
-        this.currentHdfsKey = null;
-        hdfs.close();
-      }
-    }
-    
-    private void advanceQueue() {
-      if (queueNext = queue.hasNext()) {
-        queue.next();
-      } else {
-        brq.checkForPrimary();
-        queue.close();
-      }
-    }
-    
-    public void setForUpdate(){
-      this.forUpdate = true;
-    }
-    
-    /**MergeGemXDHDFSToGFE not sure if this function is required */ 
-    /*public void setTXState(TXState txState) {
-      TXRegionState txr = txState.getTXRegionState(region);
-      if (txr != null) {
-        txr.lock();
-        try {
-          this.txCreatedEntryIterator = txr.getCreatedEntryKeys().iterator();
-        }
-        finally{
-          txr.unlock();
-        }
-      }
-    }*/
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEventListener.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEventListener.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEventListener.java
deleted file mode 100644
index 607650f..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEventListener.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import com.gemstone.gemfire.cache.CacheClosedException;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.asyncqueue.AsyncEvent;
-import com.gemstone.gemfire.cache.asyncqueue.AsyncEventListener;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer;
-import com.gemstone.gemfire.i18n.LogWriterI18n;
-import com.gemstone.gemfire.i18n.StringId;
-import com.gemstone.gemfire.internal.cache.BucketRegion;
-import com.gemstone.gemfire.internal.cache.ForceReattemptException;
-import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-import com.gemstone.gemfire.internal.cache.PrimaryBucketException;
-import com.gemstone.gemfire.internal.cache.execute.BucketMovedException;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-
-
-/**
- * Listener that persists events to HDFS
- *
- */
-public class HDFSEventListener implements AsyncEventListener {
-  private final LogWriterI18n logger;
-  private volatile boolean senderStopped = false;
-
-  private final FailureTracker failureTracker = new FailureTracker(10L, 60 * 1000L, 1.5f);
-
-  public HDFSEventListener(LogWriterI18n logger) {
-    this.logger = logger;
-  }
-  
-  @Override
-  public void close() {
-    senderStopped = true;
-  }
-  
-  @Override
-  public boolean processEvents(List<AsyncEvent> events) {
-    if (Hoplog.NOP_WRITE) {
-      return true;
-    }
-    
-    // The list of events that async queue receives are sorted at the
-    // bucket level. Events for multiple regions are concatenated together.
-    // Events for multiple buckets are sent which are concatenated
-    // one after the other for e.g.
-    //
-    // <Region1, Key1, bucket1>, <Region1, Key19, bucket1>, 
-    // <Region1, Key4, bucket2>, <Region1, Key6, bucket2>
-    // <Region2, Key1, bucket1>, <Region2, Key4, bucket1>
-    // ..
-    
-    Region previousRegion = null;
-    int prevBucketId = -1;
-    ArrayList<QueuedPersistentEvent> list = null;
-    boolean success = false;
-    try {
-      //Back off if we are experiencing failures
-      failureTracker.sleepIfRetry();
-      
-      HoplogOrganizer bucketOrganizer = null; 
-      for (AsyncEvent asyncEvent : events) {
-        if (senderStopped){
-          failureTracker.failure();
-          if (logger.fineEnabled()) {
-            logger.fine("HDFSEventListener.processEvents: Cache is closing down. Ignoring the batch of data.");
-          }
-          return false;
-        }
-        HDFSGatewayEventImpl hdfsEvent = (HDFSGatewayEventImpl)asyncEvent;
-        Region region = hdfsEvent.getRegion();
-        
-        if (prevBucketId != hdfsEvent.getBucketId() || region != previousRegion){
-          if (prevBucketId != -1) {
-            bucketOrganizer.flush(list.iterator(), list.size());
-            success=true;
-            if (logger.fineEnabled()) {
-              logger.fine("Batch written to HDFS of size " + list.size() + " for region " + previousRegion);
-            }
-          }
-          bucketOrganizer = getOrganizer((PartitionedRegion) region, hdfsEvent.getBucketId());
-          // Bucket organizer can be null only when the bucket has moved. Throw an exception so that the 
-          // batch is discarded. 
-          if (bucketOrganizer == null)
-            throw new BucketMovedException("Bucket moved. BucketId: " + hdfsEvent.getBucketId() +  " HDFSRegion: " + region.getName());
-          list = new  ArrayList<QueuedPersistentEvent>();
-        }
-        try {
-          //TODO:HDFS check if there is any serialization overhead
-          list.add(new SortedHDFSQueuePersistedEvent(hdfsEvent));
-        } catch (ClassNotFoundException e) {
-          //TODO:HDFS add localized string
-          logger.warning(new StringId(0, "Error while converting HDFSGatewayEvent to PersistedEventImpl."), e);
-          return false;
-        }
-        prevBucketId = hdfsEvent.getBucketId();
-        previousRegion = region;
-        
-      }
-      if (bucketOrganizer != null) {
-        bucketOrganizer.flush(list.iterator(), list.size());
-        success = true;
-        
-        if (logger.fineEnabled()) {
-          logger.fine("Batch written to HDFS of size " + list.size() + " for region " + previousRegion);
-        }
-      }
-    } catch (IOException e) {
-      logger.warning(LocalizedStrings.HOPLOG_FLUSH_FOR_BATCH_FAILED, e);
-      return false;
-    }
-    catch (ForceReattemptException e) {
-      if (logger.fineEnabled())
-        logger.fine(e);
-      return false;
-    }
-    catch(PrimaryBucketException e) {
-      //do nothing, the bucket is no longer primary so we shouldn't get the same
-      //batch next time.
-      if (logger.fineEnabled())
-        logger.fine(e);
-      return false;
-    }
-    catch(BucketMovedException e) {
-      //do nothing, the bucket is no longer primary so we shouldn't get the same
-      //batch next time.
-      if (logger.fineEnabled())
-        logger.fine(e);
-      return false;
-    }
-    catch (CacheClosedException e) {
-      if (logger.fineEnabled())
-        logger.fine(e);
-      // exit silently
-      return false;
-    } catch (InterruptedException e1) {
-      if (logger.fineEnabled())
-        logger.fine(e1);
-      return false;
-    } finally {
-      failureTracker.record(success);
-    }
-
-    return true;
-  }
-  
-  private HoplogOrganizer getOrganizer(PartitionedRegion region, int bucketId) {
-    BucketRegion br = region.getDataStore().getLocalBucketById(bucketId);
-    if (br == null) {
-      // got rebalanced or something
-      throw new PrimaryBucketException("Bucket region is no longer available " + bucketId + region);
-    }
-
-    return br.getHoplogOrganizer();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEventQueueFilter.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEventQueueFilter.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEventQueueFilter.java
deleted file mode 100644
index 0860e75..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEventQueueFilter.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import com.gemstone.gemfire.cache.Operation;
-import com.gemstone.gemfire.cache.wan.GatewayEventFilter;
-import com.gemstone.gemfire.cache.wan.GatewayQueueEvent;
-import com.gemstone.gemfire.i18n.LogWriterI18n;
-
-/**
- * Current use of this class is limited to ignoring the Bulk DML operations. 
- * 
- *
- */
-public class HDFSEventQueueFilter implements GatewayEventFilter{
-  private LogWriterI18n logger;
-  
-  public HDFSEventQueueFilter(LogWriterI18n logger) {
-    this.logger = logger; 
-  }
-  @Override
-  public void close() {
-    
-  }
-
-  @Override
-  public boolean beforeEnqueue(GatewayQueueEvent event) {
-    Operation op = event.getOperation();
-    
-    
-    /* MergeGemXDHDFSToGFE - Disabled as it is gemxd specific 
-    if (op == Operation.BULK_DML_OP) {
-     // On accessors there are no parallel queues, so with the 
-     // current logic, isSerialWanEnabled function in LocalRegion 
-     // always returns true on an accessor. So when a bulk dml 
-     // op is fired on accessor, this behavior results in distribution 
-     // of the bulk dml operation to other members. To avoid putting 
-     // this bulk dml in parallel queues, this filter was added. This 
-     // is not the most efficient way as the filters are used before inserting 
-     // in the queue. The bulk dmls should be blocked before they are distributed.
-     if (logger.fineEnabled())
-       logger.fine( "HDFSEventQueueFilter:beforeEnqueue: Disallowing insertion of a bulk DML in HDFS queue.");
-      return false;
-    }*/
-    
-    return true;
-  }
-
-  @Override
-  public boolean beforeTransmit(GatewayQueueEvent event) {
-   // No op
-   return true;
-  }
-
-  @Override
-  public void afterAcknowledgement(GatewayQueueEvent event) {
-    // No op
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSGatewayEventImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSGatewayEventImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSGatewayEventImpl.java
deleted file mode 100644
index db99e7e..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSGatewayEventImpl.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import com.gemstone.gemfire.DataSerializer;
-import com.gemstone.gemfire.cache.EntryEvent;
-import com.gemstone.gemfire.internal.InternalDataSerializer;
-import com.gemstone.gemfire.internal.cache.EntryEventImpl;
-import com.gemstone.gemfire.internal.cache.EnumListenerEvent;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.internal.cache.lru.Sizeable;
-import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerHelper;
-import com.gemstone.gemfire.internal.cache.versions.VersionTag;
-import com.gemstone.gemfire.internal.cache.wan.GatewaySenderEventImpl;
-import com.gemstone.gemfire.internal.offheap.StoredObject;
-import com.gemstone.gemfire.internal.offheap.annotations.Retained;
-import com.gemstone.gemfire.internal.util.BlobHelper;
-
-
-/**
- * Gateway event extended for HDFS functionality 
- *
- */
-public class HDFSGatewayEventImpl extends GatewaySenderEventImpl {
-  
-  private static final long serialVersionUID = 4642852957292192406L;
-  protected transient boolean keyIsSerialized = false;
-  protected byte[] serializedKey = null; 
-  protected VersionTag versionTag; 
-  
-  public HDFSGatewayEventImpl(){
-  }
-  
-  @Retained
-  public HDFSGatewayEventImpl(EnumListenerEvent operation, EntryEvent event,
-      Object substituteValue)
-      throws IOException {
-    super(operation, event, substituteValue);
-    initializeHDFSGatewayEventObject(event);
-  }
-
-  @Retained
-  public HDFSGatewayEventImpl(EnumListenerEvent operation, EntryEvent event,
-      Object substituteValue, boolean initialize, int bucketId) throws IOException {
-    super(operation, event,substituteValue, initialize, bucketId);
-    initializeHDFSGatewayEventObject(event);
-  }
-
-  @Retained
-  public HDFSGatewayEventImpl(EnumListenerEvent operation, EntryEvent event,
-      Object substituteValue, boolean initialize) throws IOException {
-    super(operation, event, substituteValue, initialize);
-    initializeHDFSGatewayEventObject(event);
-  }
-
-  protected HDFSGatewayEventImpl(HDFSGatewayEventImpl offHeapEvent) {
-    super(offHeapEvent);
-    this.keyIsSerialized = offHeapEvent.keyIsSerialized;
-    this.serializedKey = offHeapEvent.serializedKey;
-    this.versionTag = offHeapEvent.versionTag;
-  }
-  
-  @Override
-  protected GatewaySenderEventImpl makeCopy() {
-    return new HDFSGatewayEventImpl(this);
-  }
-
-  private void initializeHDFSGatewayEventObject(EntryEvent event)
-      throws IOException {
-
-    serializeKey();
-    versionTag = ((EntryEventImpl)event).getVersionTag();
-    if (versionTag != null && versionTag.getMemberID() == null) {
-      versionTag.setMemberID(((LocalRegion)getRegion()).getVersionMember());
-    }
-  }
-
-  private void serializeKey() throws IOException {
-    if (!keyIsSerialized && isInitialized())
-    {
-      this.serializedKey = CacheServerHelper.serialize(this.key);
-      keyIsSerialized = true;
-    } 
-  }
-  /**MergeGemXDHDFSToGFE This function needs to be enabled if similar functionality is added to gatewaysendereventimpl*/
-  /*@Override
-  protected StoredObject obtainOffHeapValueBasedOnOp(EntryEventImpl event,
-      boolean hasNonWanDispatcher) {
-    return  event.getOffHeapNewValue();
-  }*/
-  
-  /**MergeGemXDHDFSToGFE This function needs to be enabled if similar functionality is added to gatewaysendereventimpl*/
-  /*@Override
-  protected Object obtainHeapValueBasedOnOp(EntryEventImpl event,
-      boolean hasNonWanDispatcher) {
-    return   event.getRawNewValue(shouldApplyDelta());
-  }*/
-  
-  @Override
-  protected boolean shouldApplyDelta() {
-    return true;
-  }
-
-  
-  @Override
-  public void toData(DataOutput out) throws IOException {
-    super.toData(out);
-    DataSerializer.writeObject(this.versionTag, out);
-    
-  }
-  
-  @Override
-  protected void serializeKey(DataOutput out) throws IOException {
-    DataSerializer.writeByteArray((byte[])this.serializedKey, out);
-  }
-  
-  @Override
-  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
-    super.fromData(in);
-    this.versionTag = (VersionTag)DataSerializer.readObject(in);
-  }
-  
-  @Override
-  protected void deserializeKey(DataInput in) throws IOException,
-    ClassNotFoundException {
-    this.serializedKey = DataSerializer.readByteArray(in);
-    this.key = BlobHelper.deserializeBlob(this.serializedKey,
-        InternalDataSerializer.getVersionForDataStreamOrNull(in), null);
-    keyIsSerialized = true;
-  }
-
-  @Override
-  public int getDSFID() {
-    
-    return HDFS_GATEWAY_EVENT_IMPL;
-  }
-  public byte[] getSerializedKey() {
-    
-    return this.serializedKey;
-  }
-  
-  public VersionTag getVersionTag() {
-    
-    return this.versionTag;
-  }
-  
-  /**
-   * Returns the size on HDFS of this event  
-   * @param writeOnly
-   */
-  public int getSizeOnHDFSInBytes(boolean writeOnly) {
-  
-    if (writeOnly)
-      return UnsortedHDFSQueuePersistedEvent.getSizeInBytes(this.serializedKey.length,  
-          getSerializedValueSize(), this.versionTag);
-    else
-      return SortedHDFSQueuePersistedEvent.getSizeInBytes(this.serializedKey.length,  
-          getSerializedValueSize(), this.versionTag);
-  
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSIntegrationUtil.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSIntegrationUtil.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSIntegrationUtil.java
deleted file mode 100644
index 740a607..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSIntegrationUtil.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionAttributes;
-import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueue;
-import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueueFactory;
-import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueFactoryImpl;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.i18n.LogWriterI18n;
-import com.gemstone.gemfire.internal.Assert;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-
-/**
- * Contains utility functions
- *
- *
- */
-public class HDFSIntegrationUtil {
-
-  public static <K, V> AsyncEventQueue createDefaultAsyncQueueForHDFS(Cache cache, boolean writeOnly, String regionPath) {
-    return createAsyncQueueForHDFS(cache, regionPath, writeOnly, null);
-  }
-
-  private static AsyncEventQueue createAsyncQueueForHDFS(Cache cache, String regionPath, boolean writeOnly,
-      HDFSStore configView) {
-    LogWriterI18n logger = cache.getLoggerI18n();
-    String defaultAsyncQueueName = HDFSStoreFactoryImpl.getEventQueueName(regionPath);
-
-    if (configView == null) {
-      configView = new HDFSStoreFactoryImpl(cache).getConfigView();
-    }
-    
-
-    AsyncEventQueueFactory factory = cache.createAsyncEventQueueFactory();
-    factory.setBatchSize(configView.getBatchSize());
-    factory.setPersistent(configView.getBufferPersistent());
-    factory.setDiskStoreName(configView.getDiskStoreName());
-    factory.setMaximumQueueMemory(configView.getMaxMemory());
-    factory.setBatchTimeInterval(configView.getBatchInterval());
-    factory.setDiskSynchronous(configView.getSynchronousDiskWrite());
-    factory.setDispatcherThreads(configView.getDispatcherThreads());
-    factory.setParallel(true);
-    factory.addGatewayEventFilter(new HDFSEventQueueFilter(logger));
-    ((AsyncEventQueueFactoryImpl) factory).setBucketSorted(!writeOnly);
-    ((AsyncEventQueueFactoryImpl) factory).setIsHDFSQueue(true);
-
-    AsyncEventQueue asyncQ = null;
-
-    if (!writeOnly)
-      asyncQ = factory.create(defaultAsyncQueueName, new HDFSEventListener(cache.getLoggerI18n()));
-    else
-      asyncQ = factory.create(defaultAsyncQueueName, new HDFSWriteOnlyStoreEventListener(cache.getLoggerI18n()));
-
-    logger.fine("HDFS: async queue created for HDFS. Id: " + asyncQ.getId() + ". Disk store: "
-        + asyncQ.getDiskStoreName() + ". Batch size: " + asyncQ.getBatchSize() + ". bucket sorted:  " + !writeOnly);
-    return asyncQ;
-
-  }
-
-  public static void createAndAddAsyncQueue(String regionPath, RegionAttributes regionAttributes, Cache cache) {
-    if (!regionAttributes.getDataPolicy().withHDFS()) {
-      return;
-    }
-
-    String leaderRegionPath = getLeaderRegionPath(regionPath, regionAttributes, cache);
-
-    String defaultAsyncQueueName = HDFSStoreFactoryImpl.getEventQueueName(leaderRegionPath);
-    if (cache.getAsyncEventQueue(defaultAsyncQueueName) == null) {
-      if (regionAttributes.getHDFSStoreName() != null && regionAttributes.getPartitionAttributes() != null
-          && !(regionAttributes.getPartitionAttributes().getLocalMaxMemory() == 0)) {
-        HDFSStore store = ((GemFireCacheImpl) cache).findHDFSStore(regionAttributes.getHDFSStoreName());
-        if (store == null) {
-          throw new IllegalStateException(
-              LocalizedStrings.HOPLOG_HDFS_STORE_NOT_FOUND.toLocalizedString(regionAttributes.getHDFSStoreName()));
-        }
-        HDFSIntegrationUtil
-            .createAsyncQueueForHDFS(cache, leaderRegionPath, regionAttributes.getHDFSWriteOnly(), store);
-      }
-    }
-  }
-
-  private static String getLeaderRegionPath(String regionPath, RegionAttributes regionAttributes, Cache cache) {
-    String colocated;
-    while (regionAttributes.getPartitionAttributes() != null
-        && (colocated = regionAttributes.getPartitionAttributes().getColocatedWith()) != null) {
-      // Do not waitOnInitialization() for PR
-      GemFireCacheImpl gfc = (GemFireCacheImpl) cache;
-      Region colocatedRegion = gfc.getPartitionedRegion(colocated, false);
-      if (colocatedRegion == null) {
-        Assert.fail("Could not find parent region " + colocated + " for " + regionPath);
-      }
-      regionAttributes = colocatedRegion.getAttributes();
-      regionPath = colocatedRegion.getFullPath();
-    }
-    return regionPath;
-  }
-
-}


[42/63] [abbrv] incubator-geode git commit: GEODE-1324: Convert SessionReplicationIntegrationJUnitTest to use TemporaryFolder

Posted by kl...@apache.org.
GEODE-1324: Convert SessionReplicationIntegrationJUnitTest to use TemporaryFolder
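
For readers unfamiliar with the JUnit rule this commit adopts, here is a minimal,
self-contained sketch of the TemporaryFolder pattern (not part of the diff below;
the class and file names are illustrative):

    import java.io.File;

    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.TemporaryFolder;

    public class TemporaryFolderSketchTest {

      // JUnit creates a fresh scratch directory before each test method and
      // deletes it afterwards, replacing hand-rolled java.io.tmpdir handling
      // and deleteOnExit() bookkeeping.
      @Rule
      public TemporaryFolder tmpdir = new TemporaryFolder();

      @Test
      public void writesLogIntoScratchDirectory() throws Exception {
        // newFolder() returns a unique, empty sub-directory under the rule's root.
        File logFile = new File(tmpdir.newFolder(), "gemfire_modules.log");
        // ... point the component under test at logFile.getAbsolutePath() ...
      }
    }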


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/b2f8e594
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/b2f8e594
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/b2f8e594

Branch: refs/heads/feature/GEODE-1276
Commit: b2f8e594cab721755c7c47cf1e609775a81c3340
Parents: a20efb9
Author: Jens Deppe <jd...@pivotal.io>
Authored: Fri Apr 29 09:34:07 2016 -0700
Committer: Jens Deppe <jd...@pivotal.io>
Committed: Fri Apr 29 10:47:20 2016 -0700

----------------------------------------------------------------------
 .../SessionReplicationIntegrationJUnitTest.java | 30 +++++++-------------
 1 file changed, 10 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b2f8e594/extensions/geode-modules-session/src/test/java/com/gemstone/gemfire/modules/session/internal/filter/SessionReplicationIntegrationJUnitTest.java
----------------------------------------------------------------------
diff --git a/extensions/geode-modules-session/src/test/java/com/gemstone/gemfire/modules/session/internal/filter/SessionReplicationIntegrationJUnitTest.java b/extensions/geode-modules-session/src/test/java/com/gemstone/gemfire/modules/session/internal/filter/SessionReplicationIntegrationJUnitTest.java
index 004f9fe..ef89a37 100644
--- a/extensions/geode-modules-session/src/test/java/com/gemstone/gemfire/modules/session/internal/filter/SessionReplicationIntegrationJUnitTest.java
+++ b/extensions/geode-modules-session/src/test/java/com/gemstone/gemfire/modules/session/internal/filter/SessionReplicationIntegrationJUnitTest.java
@@ -41,8 +41,12 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 import org.apache.jasper.servlet.JspServlet;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.TemporaryFolder;
 import org.junit.runner.RunWith;
 import org.eclipse.jetty.servlet.FilterHolder;
 import org.eclipse.jetty.servlet.ServletHolder;
@@ -68,23 +72,13 @@ public class SessionReplicationIntegrationJUnitTest {
 
   private FilterHolder filterHolder;
 
-  private static final File tmpdir;
-
-  private static final String gemfire_log;
-
-  static {
-    // Create a per-user scratch directory
-    tmpdir = new File(System.getProperty("java.io.tmpdir"), // TODO: use junit rule TemporaryFolder
-        "gemfire_modules-" + System.getProperty("user.name"));
-    tmpdir.mkdirs();
-    tmpdir.deleteOnExit();
-
-    gemfire_log = tmpdir.getPath() +
-        System.getProperty("file.separator") + "gemfire_modules.log";
-  }
+  @Rule
+  public TemporaryFolder tmpdir = new TemporaryFolder();
 
   @Before
   public void setUp() throws Exception {
+    File gemfireLogFile = new File(tmpdir.newFolder(), "gemfire_modules.log");
+
     request = HttpTester.newRequest();
 
     tester = new MyServletTester();
@@ -92,7 +86,7 @@ public class SessionReplicationIntegrationJUnitTest {
 
     filterHolder = tester.addFilter(SessionCachingFilter.class, "/*", EnumSet.of(DispatcherType.REQUEST));
     filterHolder.setInitParameter("gemfire.property.mcast-port", "0");
-    filterHolder.setInitParameter("gemfire.property.log-file", gemfire_log);
+    filterHolder.setInitParameter("gemfire.property.log-file", gemfireLogFile.getAbsolutePath());
     filterHolder.setInitParameter("cache-type", "peer-to-peer");
 
     servletHolder = tester.addServlet(BasicServlet.class, "/hello");
@@ -107,10 +101,6 @@ public class SessionReplicationIntegrationJUnitTest {
 
   @After
   public void tearDown() throws Exception {
-//    if (tester.isStarted()) {
-//      ContextManager.getInstance().removeContext(
-//          servletHolder.getServlet().getServletConfig().getServletContext());
-//    }
     tester.stop();
   }
 
@@ -1467,7 +1457,7 @@ public class SessionReplicationIntegrationJUnitTest {
     ServletHolder jspHolder = tester.addServlet(JspServlet.class, "/test/*");
     jspHolder.setInitOrder(1);
 
-    jspHolder.setInitParameter("scratchdir", tmpdir.getPath());
+    jspHolder.setInitParameter("scratchdir", tmpdir.toString());
 
     Callback c_1 = new Callback() {
       @Override


[56/63] [abbrv] incubator-geode git commit: GEODE-1240: Changed the test to use Awaitility with a maximum timeout period. This might work better than the time sensitive conditionals that this test uses.

Posted by kl...@apache.org.
GEODE-1240: Changed the test to use Awaitility with a maximum timeout period. This might work better than the time sensitive conditionals that this test uses.
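
As a rough sketch of the polling idiom this commit switches to (not taken from the
diff; the flag, the timings, and the Awaitility 1.x import path are assumptions):

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicBoolean;

    import com.jayway.awaitility.Awaitility;

    public class AwaitilityPollingSketch {

      public static void main(String[] args) {
        AtomicBoolean fired = new AtomicBoolean(false);

        // Some asynchronous activity eventually sets the flag.
        new Thread(() -> fired.set(true)).start();

        // Poll every 100 ms with an upper bound of 300 s, instead of a fixed
        // listener.wait(2000) that can give up too early on a slow machine.
        Awaitility.await()
            .pollInterval(100, TimeUnit.MILLISECONDS)
            .pollDelay(100, TimeUnit.MILLISECONDS)
            .timeout(300, TimeUnit.SECONDS)
            .until(() -> {
              return fired.get();
            });
      }
    }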


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/1aa08cd5
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/1aa08cd5
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/1aa08cd5

Branch: refs/heads/feature/GEODE-1276
Commit: 1aa08cd54d2023ac9101c779c1e52024a2aa7967
Parents: b893abe
Author: Udo Kohlmeyer <uk...@pivotal.io>
Authored: Mon May 2 14:51:32 2016 +1000
Committer: Udo Kohlmeyer <uk...@pivotal.io>
Committed: Tue May 3 10:38:07 2016 +1000

----------------------------------------------------------------------
 .../cache30/ClientMembershipDUnitTest.java      | 347 ++++++++-----------
 1 file changed, 141 insertions(+), 206 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1aa08cd5/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java
index f8e036b..4652e74 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java
@@ -81,23 +81,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
 
   private void waitForAcceptsInProgressToBe(final int target)
       throws Exception {
-//    WaitCriterion ev = new WaitCriterion() {
-//      String excuse;
-//
-//      public boolean done() {
-//        int actual = getAcceptsInProgress();
-//        if (actual == getAcceptsInProgress()) {
-//          return true;
-//        }
-//        excuse = "accepts in progress (" + actual + ") never became " + target;
-//        return false;
-//      }
-//
-//      public String description() {
-//        return excuse;
-//      }
-//    };
-    Awaitility.await().pollInterval(100, TimeUnit.MILLISECONDS).pollDelay(100,TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
+    Awaitility.await().pollInterval(100, TimeUnit.MILLISECONDS).pollDelay(100, TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
         .until(() -> {
           int actual = getAcceptsInProgress();
           if (actual == getAcceptsInProgress()) {
@@ -105,7 +89,6 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
           }
           return false;
         });
-//    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
   }
 
   protected int getAcceptsInProgress() {
@@ -156,20 +139,20 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
         assertEquals(0, getAcceptsInProgress());
 
         System.out.println("creating mean socket");
-        vm0.invoke(createMeanSocket);
+        vm0.invoke("Connect to server with socket", () -> createMeanSocket);
         try {
           System.out.println("waiting to see it connect on server");
           waitForAcceptsInProgressToBe(1);
         } finally {
           System.out.println("closing mean socket");
-          vm0.invoke(closeMeanSocket);
+          vm0.invoke("close mean socket", () -> closeMeanSocket);
         }
         System.out.println("waiting to see accept to go away on server");
         waitForAcceptsInProgressToBe(0);
 
         // now try it without a close. Server should timeout the mean connect
         System.out.println("creating mean socket 2");
-        vm0.invoke(createMeanSocket);
+        vm0.invoke("Connect to server with socket", () -> createMeanSocket);
         try {
           System.out.println("waiting to see it connect on server 2");
           waitForAcceptsInProgressToBe(1);
@@ -177,7 +160,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
           waitForAcceptsInProgressToBe(0);
         } finally {
           System.out.println("closing mean socket 2");
-          vm0.invoke(closeMeanSocket);
+          vm0.invoke("close mean socket", () -> closeMeanSocket);
         }
 
         //       SerializableRunnable denialOfService = new CacheSerializableRunnable("Do lots of connects") {
@@ -247,28 +230,25 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     final boolean[] isClient = new boolean[3];
 
     ClientMembershipListener listener = new ClientMembershipListener() {
-      public synchronized void memberJoined(ClientMembershipEvent event) {
+      public void memberJoined(ClientMembershipEvent event) {
         fired[JOINED] = true;
         member[JOINED] = event.getMember();
         memberId[JOINED] = event.getMemberId();
         isClient[JOINED] = event.isClient();
-        notify();
       }
 
-      public synchronized void memberLeft(ClientMembershipEvent event) {
+      public void memberLeft(ClientMembershipEvent event) {
         fired[LEFT] = true;
         member[LEFT] = event.getMember();
         memberId[LEFT] = event.getMemberId();
         isClient[LEFT] = event.isClient();
-        notify();
       }
 
-      public synchronized void memberCrashed(ClientMembershipEvent event) {
+      public void memberCrashed(ClientMembershipEvent event) {
         fired[CRASHED] = true;
         member[CRASHED] = event.getMember();
         memberId[CRASHED] = event.getMemberId();
         isClient[CRASHED] = event.isClient();
-        notify();
       }
     };
     ClientMembership.registerClientMembershipListener(listener);
@@ -276,11 +256,12 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     // test JOIN for server
     DistributedMember serverJoined = new TestDistributedMember("serverJoined");
     InternalClientMembership.notifyJoined(serverJoined, SERVER);
-    synchronized (listener) {
-      if (!fired[JOINED]) {
-        listener.wait(2000);
-      }
-    }
+
+    Awaitility.await().pollInterval(100, TimeUnit.MILLISECONDS).pollDelay(100, TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
+        .until(() -> {
+          return fired[JOINED];
+        });
+
     assertTrue(fired[JOINED]);
     assertEquals(serverJoined, member[JOINED]);
     assertEquals(serverJoined.getId(), memberId[JOINED]);
@@ -296,11 +277,11 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     // test JOIN for client
     DistributedMember clientJoined = new TestDistributedMember("clientJoined");
     InternalClientMembership.notifyJoined(clientJoined, CLIENT);
-    synchronized (listener) {
-      if (!fired[JOINED]) {
-        listener.wait(2000);
-      }
-    }
+    Awaitility.await().pollInterval(100, TimeUnit.MILLISECONDS).pollDelay(100, TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
+        .until(() -> {
+          return fired[JOINED];
+        });
+
     assertTrue(fired[JOINED]);
     assertEquals(clientJoined, member[JOINED]);
     assertEquals(clientJoined.getId(), memberId[JOINED]);
@@ -316,11 +297,11 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     // test LEFT for server
     DistributedMember serverLeft = new TestDistributedMember("serverLeft");
     InternalClientMembership.notifyLeft(serverLeft, SERVER);
-    synchronized (listener) {
-      if (!fired[LEFT]) {
-        listener.wait(2000);
-      }
-    }
+    Awaitility.await().pollInterval(100, TimeUnit.MILLISECONDS).pollDelay(100, TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
+        .until(() -> {
+          return fired[LEFT];
+        });
+
     assertFalse(fired[JOINED]);
     assertNull(memberId[JOINED]);
     assertFalse(isClient[JOINED]);
@@ -336,11 +317,11 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     // test LEFT for client
     DistributedMember clientLeft = new TestDistributedMember("clientLeft");
     InternalClientMembership.notifyLeft(clientLeft, CLIENT);
-    synchronized (listener) {
-      if (!fired[LEFT]) {
-        listener.wait(2000);
-      }
-    }
+    Awaitility.await().pollInterval(100, TimeUnit.MILLISECONDS).pollDelay(100, TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
+        .until(() -> {
+          return fired[LEFT];
+        });
+
     assertFalse(fired[JOINED]);
     assertNull(memberId[JOINED]);
     assertFalse(isClient[JOINED]);
@@ -356,11 +337,11 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     // test CRASHED for server
     DistributedMember serverCrashed = new TestDistributedMember("serverCrashed");
     InternalClientMembership.notifyCrashed(serverCrashed, SERVER);
-    synchronized (listener) {
-      if (!fired[CRASHED]) {
-        listener.wait(2000);
-      }
-    }
+    Awaitility.await().pollInterval(100, TimeUnit.MILLISECONDS).pollDelay(100, TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
+        .until(() -> {
+          return fired[CRASHED];
+        });
+
     assertFalse(fired[JOINED]);
     assertNull(memberId[JOINED]);
     assertFalse(isClient[JOINED]);
@@ -376,11 +357,11 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     // test CRASHED for client
     DistributedMember clientCrashed = new TestDistributedMember("clientCrashed");
     InternalClientMembership.notifyCrashed(clientCrashed, CLIENT);
-    synchronized (listener) {
-      if (!fired[CRASHED]) {
-        listener.wait(2000);
-      }
-    }
+    Awaitility.await().pollInterval(100, TimeUnit.MILLISECONDS).pollDelay(100, TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
+        .until(() -> {
+          return fired[CRASHED];
+        });
+
     assertFalse(fired[JOINED]);
     assertNull(memberId[JOINED]);
     assertFalse(isClient[JOINED]);
@@ -423,12 +404,11 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     getSystem();
 
     ClientMembershipListener listener = new ClientMembershipListener() {
-      public synchronized void memberJoined(ClientMembershipEvent event) {
+      public void memberJoined(ClientMembershipEvent event) {
         fired[0] = true;
         member[0] = event.getMember();
         memberId[0] = event.getMemberId();
         isClient[0] = event.isClient();
-        notify();
       }
 
       public void memberLeft(ClientMembershipEvent event) {
@@ -442,11 +422,11 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     // fire event to make sure listener is registered
     DistributedMember clientJoined = new TestDistributedMember("clientJoined");
     InternalClientMembership.notifyJoined(clientJoined, true);
-    synchronized (listener) {
-      if (!fired[0]) {
-        listener.wait(2000);
-      }
-    }
+    Awaitility.await().pollInterval(100, TimeUnit.MILLISECONDS).pollDelay(100, TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
+        .until(() -> {
+          return fired[JOINED];
+        });
+
     assertTrue(fired[0]);
     assertEquals(clientJoined, member[0]);
     assertEquals(clientJoined.getId(), memberId[0]);
@@ -460,9 +440,11 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     // unregister and verify listener is not notified
     ClientMembership.unregisterClientMembershipListener(listener);
     InternalClientMembership.notifyJoined(clientJoined, true);
-    synchronized (listener) {
-      listener.wait(20);
-    }
+    Awaitility.await().pollInterval(100, TimeUnit.MILLISECONDS).pollDelay(100, TimeUnit.MILLISECONDS)
+        .until(() -> {
+          return true;
+        });
+
     assertFalse(fired[0]);
     assertNull(member[0]);
     assertNull(memberId[0]);
@@ -482,7 +464,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     for (int i = 0; i < NUM_LISTENERS; i++) {
       final int whichListener = i;
       listeners[i] = new ClientMembershipListener() {
-        public synchronized void memberJoined(ClientMembershipEvent event) {
+        public void memberJoined(ClientMembershipEvent event) {
           assertFalse(fired[whichListener]);
           assertNull(member[whichListener]);
           assertNull(memberId[whichListener]);
@@ -491,7 +473,6 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
           member[whichListener] = event.getMember();
           memberId[whichListener] = event.getMemberId();
           isClient[whichListener] = event.isClient();
-          notify();
         }
 
         public void memberLeft(ClientMembershipEvent event) {
@@ -823,13 +804,8 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     DistributedMember test = new TestDistributedMember("test");
     InternalClientMembership.notifyJoined(test, SERVER);
 
-    Awaitility.await().pollInterval(50,TimeUnit.MILLISECONDS).timeout(300,TimeUnit.SECONDS)
-        .pollDelay(50,TimeUnit.MILLISECONDS).until(()->fired[JOINED] || fired[CRASHED]);
-//    synchronized (listener) {
-//      if (!fired[JOINED] && !fired[CRASHED]) {
-//        listener.wait(2000);
-//      }
-//    }
+    Awaitility.await().pollInterval(50, TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
+        .pollDelay(50, TimeUnit.MILLISECONDS).until(() -> fired[JOINED] || fired[CRASHED]);
 
     assertTrue(fired[JOINED]);
     assertEquals(test, member[JOINED]);
@@ -863,13 +839,8 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       Assert.fail("While creating Region on Edge", ex);
     }
 
-    Awaitility.await().pollInterval(50,TimeUnit.MILLISECONDS).timeout(300,TimeUnit.SECONDS)
-        .pollDelay(50,TimeUnit.MILLISECONDS).until(()->fired[JOINED] || fired[CRASHED]);
-//    synchronized(listener) {
-//      if (!fired[JOINED] && !fired[CRASHED]) {
-//        listener.wait(60 * 1000);
-//      }
-//    }
+    Awaitility.await().pollInterval(50, TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
+        .pollDelay(50, TimeUnit.MILLISECONDS).until(() -> fired[JOINED] || fired[CRASHED]);
 
     System.out.println("[testClientMembershipEventsInClient] assert client detected server join");
 
@@ -900,14 +871,8 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
 
     vm0.invoke("Stop BridgeServer", () -> stopBridgeServers(getCache()));
 
-//    synchronized (listener) {
-//      if (!fired[JOINED] && !fired[CRASHED]) {
-//        listener.wait(60 * 1000);
-//      }
-//    }
-
-    Awaitility.await().pollInterval(50,TimeUnit.MILLISECONDS).timeout(300,TimeUnit.SECONDS)
-        .pollDelay(50,TimeUnit.MILLISECONDS).until(()->fired[JOINED] || fired[CRASHED]);
+    Awaitility.await().pollInterval(50, TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
+        .pollDelay(50, TimeUnit.MILLISECONDS).until(() -> fired[JOINED] || fired[CRASHED]);
 
     System.out.println("[testClientMembershipEventsInClient] assert client detected server departure");
     assertFalse(fired[JOINED]);
@@ -936,14 +901,9 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
         fail("Failed to start CacheServer on VM1: " + e.getMessage());
       }
     });
-//    synchronized (listener) {
-//      if (!fired[JOINED] && !fired[CRASHED]) {
-//        listener.wait(60 * 1000);
-//      }
-//    }
 
-    Awaitility.await().pollInterval(50,TimeUnit.MILLISECONDS).timeout(300,TimeUnit.SECONDS)
-        .pollDelay(50,TimeUnit.MILLISECONDS).until(()->fired[JOINED] || fired[CRASHED]);
+    Awaitility.await().pollInterval(50, TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
+        .pollDelay(50, TimeUnit.MILLISECONDS).until(() -> fired[JOINED] || fired[CRASHED]);
 
     System.out.println("[testClientMembershipEventsInClient] assert client detected server recovery");
     assertTrue(fired[JOINED]);
@@ -1041,11 +1001,11 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     System.out.println("[testClientMembershipEventsInServer] sanity check");
     DistributedMember test = new TestDistributedMember("test");
     InternalClientMembership.notifyJoined(test, CLIENT);
-    synchronized (listener) {
-      if (!fired[JOINED] && !fired[LEFT] && !fired[CRASHED]) {
-        listener.wait(2000);
-      }
-    }
+    Awaitility.await().pollInterval(100, TimeUnit.MILLISECONDS).pollDelay(100, TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
+        .until(() -> {
+          return fired[JOINED] || fired[LEFT] || fired[CRASHED];
+        });
+
     assertTrue(fired[JOINED]);
     assertEquals(test, member[JOINED]);
     assertEquals(test.getId(), memberId[JOINED]);
@@ -1083,11 +1043,10 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     DistributedMember clientMember = (DistributedMember) vm0.invoke(createConnectionPool);
     String clientMemberId = clientMember.toString();
 
-    synchronized (listener) {
-      if (!fired[JOINED] && !fired[LEFT] && !fired[CRASHED]) {
-        listener.wait(60000);
-      }
-    }
+    Awaitility.await().pollInterval(100, TimeUnit.MILLISECONDS).pollDelay(100, TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
+        .until(() -> {
+          return fired[JOINED] || fired[LEFT] || fired[CRASHED];
+        });
 
     System.out.println("[testClientMembershipEventsInServer] assert server detected client join");
     assertTrue(fired[JOINED]);
@@ -1121,11 +1080,10 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       }
     });
 
-    synchronized (listener) {
-      if (!fired[JOINED] && !fired[LEFT] && !fired[CRASHED]) {
-        listener.wait(60000);
-      }
-    }
+    Awaitility.await().pollInterval(100, TimeUnit.MILLISECONDS).pollDelay(100, TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
+        .until(() -> {
+          return fired[JOINED] || fired[LEFT] || fired[CRASHED];
+        });
 
     System.out.println("[testClientMembershipEventsInServer] assert server detected client left");
     assertFalse(fired[JOINED]);
@@ -1145,11 +1103,10 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     // reconnect bridge client to test for crashed event
     clientMemberId = vm0.invoke(createConnectionPool).toString();
 
-    synchronized (listener) {
-      if (!fired[JOINED] && !fired[LEFT] && !fired[CRASHED]) {
-        listener.wait(60000);
-      }
-    }
+    Awaitility.await().pollInterval(100, TimeUnit.MILLISECONDS).pollDelay(100, TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
+        .until(() -> {
+          return fired[JOINED] || fired[LEFT] || fired[CRASHED];
+        });
 
     System.out.println("[testClientMembershipEventsInServer] assert server detected client re-join");
     assertTrue(fired[JOINED]);
@@ -1183,11 +1140,10 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
         }
       });
 
-      synchronized (listener) {
-        if (!fired[JOINED] && !fired[LEFT] && !fired[CRASHED]) {
-          listener.wait(60000);
-        }
-      }
+      Awaitility.await().pollInterval(100, TimeUnit.MILLISECONDS).pollDelay(100, TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
+          .until(() -> {
+            return fired[JOINED] || fired[LEFT] || fired[CRASHED];
+          });
 
       System.out.println("[testClientMembershipEventsInServer] assert server detected client crashed");
       assertFalse(fired[JOINED]);
@@ -1232,7 +1188,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
 
     // create and register ClientMembershipListener in controller vm...
     ClientMembershipListener listener = new ClientMembershipListener() {
-      public synchronized void memberJoined(ClientMembershipEvent event) {
+      public void memberJoined(ClientMembershipEvent event) {
         assertFalse(fired[JOINED]);
         assertNull(member[JOINED]);
         assertNull(memberId[JOINED]);
@@ -1241,13 +1197,12 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
         member[JOINED] = event.getMember();
         memberId[JOINED] = event.getMemberId();
         isClient[JOINED] = event.isClient();
-        notifyAll();
       }
 
-      public synchronized void memberLeft(ClientMembershipEvent event) {
+      public void memberLeft(ClientMembershipEvent event) {
       }
 
-      public synchronized void memberCrashed(ClientMembershipEvent event) {
+      public void memberCrashed(ClientMembershipEvent event) {
       }
     };
     ClientMembership.registerClientMembershipListener(listener);
@@ -1262,11 +1217,10 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     // assert that event is fired while connected
     DistributedMember serverJoined = new TestDistributedMember("serverJoined");
     InternalClientMembership.notifyJoined(serverJoined, SERVER);
-    synchronized (listener) {
-      if (!fired[JOINED]) {
-        listener.wait(2000);
-      }
-    }
+    Awaitility.await().pollInterval(100, TimeUnit.MILLISECONDS).pollDelay(100, TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
+        .until(() -> {
+          return fired[JOINED];
+        });
     assertTrue(fired[JOINED]);
     assertEquals(serverJoined, member[JOINED]);
     assertEquals(serverJoined.getId(), memberId[JOINED]);
@@ -1277,9 +1231,11 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     disconnectFromDS();
 
     InternalClientMembership.notifyJoined(serverJoined, SERVER);
-    synchronized (listener) {
-      listener.wait(20);
-    }
+    Awaitility.await().pollInterval(100, TimeUnit.MILLISECONDS).pollDelay(100, TimeUnit.MILLISECONDS)
+        .until(() -> {
+          return true;
+        });
+
     assertFalse(fired[JOINED]);
     assertNull(member[JOINED]);
     assertNull(memberId[JOINED]);
@@ -1292,11 +1248,11 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertTrue(sys.isConnected());
 
     InternalClientMembership.notifyJoined(serverJoined, SERVER);
-    synchronized (listener) {
-      if (!fired[JOINED]) {
-        listener.wait(2000);
-      }
-    }
+    Awaitility.await().pollInterval(100, TimeUnit.MILLISECONDS).pollDelay(100, TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
+        .until(() -> {
+          return fired[JOINED];
+        });
+
     assertTrue(fired[JOINED]);
     assertEquals(serverJoined, member[JOINED]);
     assertEquals(serverJoined.getId(), memberId[JOINED]);
@@ -1360,23 +1316,17 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
 
     {
       final int expectedClientCount = clientMemberIds.size();
-      WaitCriterion wc = new WaitCriterion() {
-        public String description() {
-          return "wait for clients";
-        }
-
-        public boolean done() {
-          Map connectedClients = InternalClientMembership.getConnectedClients(false);
-          if (connectedClients == null) {
-            return false;
-          }
-          if (connectedClients.size() != expectedClientCount) {
-            return false;
-          }
-          return true;
-        }
-      };
-      Wait.waitForCriterion(wc, 30000, 100, false);
+      Awaitility.await().pollInterval(100, TimeUnit.MILLISECONDS).pollDelay(100, TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
+          .until(() -> {
+            Map connectedClients = InternalClientMembership.getConnectedClients(false);
+            if (connectedClients == null) {
+              return false;
+            }
+            if (connectedClients.size() != expectedClientCount) {
+              return false;
+            }
+            return true;
+          });
     }
 
     Map connectedClients = InternalClientMembership.getConnectedClients(false);
@@ -1406,32 +1356,30 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     for (int i = 0; i < host.getVMCount(); i++) {
       final int whichVM = i;
       final VM vm = Host.getHost(0).getVM(i);
-      vm.invoke(new CacheSerializableRunnable("Create bridge server") {
-        public void run2() throws CacheException {
-          // create BridgeServer in controller vm...
-          System.out.println("[testGetConnectedServers] Create BridgeServer");
-          getSystem();
-          AttributesFactory factory = new AttributesFactory();
-          factory.setScope(Scope.LOCAL);
-          Region region = createRegion(name + "_" + whichVM, factory.create());
-          assertNotNull(region);
-          assertNotNull(getRootRegion().getSubregion(name + "_" + whichVM));
-          region.put("KEY-1", "VAL-1");
+      vm.invoke("Create bridge server", () -> {
+        // create BridgeServer in controller vm...
+        System.out.println("[testGetConnectedServers] Create BridgeServer");
+        getSystem();
+        AttributesFactory factory = new AttributesFactory();
+        factory.setScope(Scope.LOCAL);
+        Region region = createRegion(name + "_" + whichVM, factory.create());
+        assertNotNull(region);
+        assertNotNull(getRootRegion().getSubregion(name + "_" + whichVM));
+        region.put("KEY-1", "VAL-1");
 
-          try {
-            testGetConnectedServers_port = startBridgeServer(0);
-          } catch (IOException e) {
-            com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().error("startBridgeServer threw IOException", e);
-            fail("startBridgeServer threw IOException " + e.getMessage());
-          }
+        try {
+          testGetConnectedServers_port = startBridgeServer(0);
+        } catch (IOException e) {
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().error("startBridgeServer threw IOException", e);
+          fail("startBridgeServer threw IOException " + e.getMessage());
+        }
 
-          assertTrue(testGetConnectedServers_port != 0);
+        assertTrue(testGetConnectedServers_port != 0);
 
-          System.out.println("[testGetConnectedServers] port=" +
-              ports[whichVM]);
-          System.out.println("[testGetConnectedServers] serverMemberId=" +
-              getDistributedMember());
-        }
+        System.out.println("[testGetConnectedServers] port=" +
+            ports[whichVM]);
+        System.out.println("[testGetConnectedServers] serverMemberId=" +
+            getDistributedMember());
       });
       ports[whichVM] = vm.invoke("getTestGetConnectedServers_port",
           () -> ClientMembershipDUnitTest.getTestGetConnectedServers_port());
@@ -1459,14 +1407,9 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       region.get("KEY-1");
     }
 
-    {
-      final int expectedVMCount = host.getVMCount();
-      WaitCriterion wc = new WaitCriterion() {
-        public String description() {
-          return "wait for pools and servers";
-        }
-
-        public boolean done() {
+    final int expectedVMCount = host.getVMCount();
+    Awaitility.await().pollInterval(100, TimeUnit.MILLISECONDS).pollDelay(100, TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
+        .until(() -> {
           if (PoolManager.getAll().size() != expectedVMCount) {
             return false;
           }
@@ -1478,15 +1421,9 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
             return false;
           }
           return true;
-        }
-      };
-      Wait.waitForCriterion(wc, 60000, 100, false);
-    }
-
-    {
-      assertEquals(host.getVMCount(), PoolManager.getAll().size());
+        });
 
-    }
+    assertEquals(host.getVMCount(), PoolManager.getAll().size());
 
     Map connectedServers = InternalClientMembership.getConnectedServers();
     assertNotNull(connectedServers);
@@ -1583,15 +1520,13 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     for (int i = 0; i < host.getVMCount(); i++) {
       final int whichVM = i;
       final VM vm = Host.getHost(0).getVM(i);
-      vm.invoke(new CacheSerializableRunnable("Create bridge server") {
-        public void run2() throws CacheException {
-          Map clients = InternalClientMembership.getConnectedClients(true);
-          assertNotNull(clients);
-          testGetNotifiedClients_clientCount = clients.size();
-          if (testGetNotifiedClients_clientCount > 0) {
-            // assert that the clientMemberId matches
-            assertEquals(clientMemberId, clients.keySet().iterator().next());
-          }
+      vm.invoke("Create bridge server", () -> {
+        Map clients = InternalClientMembership.getConnectedClients(true);
+        assertNotNull(clients);
+        testGetNotifiedClients_clientCount = clients.size();
+        if (testGetNotifiedClients_clientCount > 0) {
+          // assert that the clientMemberId matches
+          assertEquals(clientMemberId, clients.keySet().iterator().next());
         }
       });
       clientCounts[whichVM] = vm.invoke("getTestGetNotifiedClients_clientCount",


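The hunks above replace two hand-rolled wait idioms (synchronized blocks calling wait()/notifyAll() on the listener, and WaitCriterion with Wait.waitForCriterion) with Awaitility polling. A minimal, self-contained sketch of the pattern being adopted follows; the class and flag names are illustrative only, and the import assumes the pre-2.0 com.jayway.awaitility package, since the converted test's own import is not visible in these hunks:

    import static java.util.concurrent.TimeUnit.MILLISECONDS;
    import static java.util.concurrent.TimeUnit.SECONDS;

    import com.jayway.awaitility.Awaitility;

    public class AwaitilityPollingSketch {
      // illustrative flag; the real tests poll the fired[]/member[] arrays instead
      private static volatile boolean eventFired;

      public static void main(String[] args) {
        new Thread(() -> eventFired = true).start();

        // same parameters used throughout the converted test:
        // poll every 100 ms, first check after 100 ms, give up after 300 s
        Awaitility.await()
            .pollInterval(100, MILLISECONDS)
            .pollDelay(100, MILLISECONDS)
            .timeout(300, SECONDS)
            .until(() -> eventFired);

        System.out.println("condition met");
      }
    }

Unlike listener.wait(60000), the Awaitility call throws a timeout exception if the condition never becomes true, instead of silently falling through to the assertions once the wait expires.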
[37/63] [abbrv] incubator-geode git commit: GEODE-17: enhance the GeodeSecurityUtil and review changes

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GeodeSecurityUtilCustomRealmJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GeodeSecurityUtilCustomRealmJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GeodeSecurityUtilCustomRealmJUnitTest.java
new file mode 100644
index 0000000..cc6af0e
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GeodeSecurityUtilCustomRealmJUnitTest.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.management.internal.security;
+
+import java.util.Properties;
+
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.security.CustomAuthRealm;
+import com.gemstone.gemfire.test.junit.categories.UnitTest;
+
+import org.apache.shiro.SecurityUtils;
+import org.apache.shiro.mgt.DefaultSecurityManager;
+import org.apache.shiro.mgt.SecurityManager;
+import org.apache.shiro.realm.Realm;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+
+/**
+ * This test and GeodeSecurityUtilWithIniFileJUnitTest use the same test body, but they initialize the SecurityUtils differently.
+ * If you change shiro-ini.json, remember to change shiro.ini to match as well.
+ */
+
+@Category(UnitTest.class)
+public class GeodeSecurityUtilCustomRealmJUnitTest extends GeodeSecurityUtilWithIniFileJUnitTest {
+  @BeforeClass
+  public static void beforeClass() throws Exception{
+    Properties properties = new Properties();
+    properties.put(DistributionConfig.SECURITY_CLIENT_AUTHENTICATOR_NAME, JSONAuthorization.class.getName() + ".create");
+    properties.put(DistributionConfig.SECURITY_CLIENT_ACCESSOR_NAME, JSONAuthorization.class.getName() + ".create");
+    JSONAuthorization.setUpWithJsonFile("shiro-ini.json");
+
+    Realm realm = new CustomAuthRealm(properties);
+    SecurityManager securityManager = new DefaultSecurityManager(realm);
+    SecurityUtils.setSecurityManager(securityManager);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GeodeSecurityUtilWithIniFileJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GeodeSecurityUtilWithIniFileJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GeodeSecurityUtilWithIniFileJUnitTest.java
new file mode 100644
index 0000000..4ad390d
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GeodeSecurityUtilWithIniFileJUnitTest.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.management.internal.security;
+
+import static org.assertj.core.api.Assertions.*;
+
+import com.gemstone.gemfire.cache.operations.OperationContext;
+import com.gemstone.gemfire.security.GemFireSecurityException;
+import com.gemstone.gemfire.security.GeodeSecurityUtil;
+import com.gemstone.gemfire.test.junit.categories.UnitTest;
+
+import org.apache.shiro.SecurityUtils;
+import org.apache.shiro.config.IniSecurityManagerFactory;
+import org.apache.shiro.mgt.SecurityManager;
+import org.apache.shiro.util.ThreadContext;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * This test and GeodeSecurityUtilCustomRealmJUnitTest use the same test body, but they initialize the SecurityUtils differently.
+ * If you change shiro.ini, remember to change shiro-ini.json to match as well.
+ */
+@Category(UnitTest.class)
+public class GeodeSecurityUtilWithIniFileJUnitTest {
+  @BeforeClass
+  public static void beforeClass() throws Exception{
+    ThreadContext.remove();
+    IniSecurityManagerFactory factory = new IniSecurityManagerFactory("classpath:shiro.ini");
+    SecurityManager securityManager = factory.getInstance();
+    SecurityUtils.setSecurityManager(securityManager);
+  }
+
+  @AfterClass
+  public static void afterClass(){
+    ThreadContext.remove();
+  }
+
+  @Test
+  public void testRoot(){
+    GeodeSecurityUtil.login("root", "secret");
+    GeodeSecurityUtil.authorize(TestCommand.none);
+    GeodeSecurityUtil.authorize(TestCommand.everyOneAllowed);
+    GeodeSecurityUtil.authorize(TestCommand.dataRead);
+    GeodeSecurityUtil.authorize(TestCommand.dataWrite);
+    GeodeSecurityUtil.authorize(TestCommand.regionARead);
+    GeodeSecurityUtil.authorize(TestCommand.regionAWrite);
+    GeodeSecurityUtil.authorize(TestCommand.clusterWrite);
+    GeodeSecurityUtil.authorize(TestCommand.clusterRead);
+  }
+
+  @Test
+  public void testGuest(){
+    GeodeSecurityUtil.login("guest", "guest");
+    GeodeSecurityUtil.authorize(TestCommand.none);
+    GeodeSecurityUtil.authorize(TestCommand.everyOneAllowed);
+
+    assertNotAuthorized(TestCommand.dataRead);
+    assertNotAuthorized(TestCommand.dataWrite);
+    assertNotAuthorized(TestCommand.regionARead);
+    assertNotAuthorized(TestCommand.regionAWrite);
+    assertNotAuthorized(TestCommand.clusterRead);
+    assertNotAuthorized(TestCommand.clusterWrite);
+    GeodeSecurityUtil.logout();
+  }
+
+  @Test
+  public void testRegionAReader(){
+    GeodeSecurityUtil.login("regionAReader", "password");
+    GeodeSecurityUtil.authorize(TestCommand.none);
+    GeodeSecurityUtil.authorize(TestCommand.everyOneAllowed);
+    GeodeSecurityUtil.authorize(TestCommand.regionARead);
+
+    assertNotAuthorized(TestCommand.regionAWrite);
+    assertNotAuthorized(TestCommand.dataRead);
+    assertNotAuthorized(TestCommand.dataWrite);
+    assertNotAuthorized(TestCommand.clusterRead);
+    assertNotAuthorized(TestCommand.clusterWrite);
+    GeodeSecurityUtil.logout();
+  }
+
+  @Test
+  public void testRegionAUser(){
+    GeodeSecurityUtil.login("regionAUser", "password");
+    GeodeSecurityUtil.authorize(TestCommand.none);
+    GeodeSecurityUtil.authorize(TestCommand.everyOneAllowed);
+    GeodeSecurityUtil.authorize(TestCommand.regionAWrite);
+    GeodeSecurityUtil.authorize(TestCommand.regionARead);
+
+    assertNotAuthorized(TestCommand.dataRead);
+    assertNotAuthorized(TestCommand.dataWrite);
+    assertNotAuthorized(TestCommand.clusterRead);
+    assertNotAuthorized(TestCommand.clusterWrite);
+    GeodeSecurityUtil.logout();
+  }
+
+  @Test
+  public void testDataReader(){
+    GeodeSecurityUtil.login("dataReader", "12345");
+    GeodeSecurityUtil.authorize(TestCommand.none);
+    GeodeSecurityUtil.authorize(TestCommand.everyOneAllowed);
+    GeodeSecurityUtil.authorize(TestCommand.regionARead);
+    GeodeSecurityUtil.authorize(TestCommand.dataRead);
+
+    assertNotAuthorized(TestCommand.regionAWrite);
+    assertNotAuthorized(TestCommand.dataWrite);
+    assertNotAuthorized(TestCommand.clusterRead);
+    assertNotAuthorized(TestCommand.clusterWrite);
+    GeodeSecurityUtil.logout();
+  }
+
+  @Test
+  public void testReader(){
+    GeodeSecurityUtil.login("reader", "12345");
+    GeodeSecurityUtil.authorize(TestCommand.none);
+    GeodeSecurityUtil.authorize(TestCommand.everyOneAllowed);
+    GeodeSecurityUtil.authorize(TestCommand.regionARead);
+    GeodeSecurityUtil.authorize(TestCommand.dataRead);
+    GeodeSecurityUtil.authorize(TestCommand.clusterRead);
+
+    assertNotAuthorized(TestCommand.regionAWrite);
+    assertNotAuthorized(TestCommand.dataWrite);
+    assertNotAuthorized(TestCommand.clusterWrite);
+    GeodeSecurityUtil.logout();
+  }
+
+  private void assertNotAuthorized(OperationContext context){
+    assertThatThrownBy(()-> GeodeSecurityUtil.authorize(context)).isInstanceOf(GemFireSecurityException.class).hasMessageContaining(context.toString());
+  }
+
+}

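GeodeSecurityUtilWithIniFileJUnitTest boots Shiro from classpath:shiro.ini, which is not included in this diff. Purely as a hypothetical sketch, an INI file equivalent to the shiro-ini.json added further below (same users, passwords, and wildcard permissions) could look roughly like this; the real shiro.ini in the repository may differ:

    # hypothetical shiro.ini mirroring shiro-ini.json; not the file shipped with Geode
    [users]
    # username = password, role1, role2, ...
    root = secret, admin
    guest = guest
    regionAReader = password, readRegionA
    regionAUser = password, useRegionA
    dataReader = 12345, readData
    reader = 12345, readAll

    [roles]
    # role = comma-separated wildcard permissions
    admin = cluster:manage, cluster:write, cluster:read, data:manage, data:write, data:read
    readRegionA = data:read:RegionA
    useRegionA = data:manage:RegionA, data:write:RegionA, data:read:RegionA
    readData = data:read
    readAll = cluster:read, data:read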
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GfshCommandsSecurityTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GfshCommandsSecurityTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GfshCommandsSecurityTest.java
index 56d7030..b5ef0a6 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GfshCommandsSecurityTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GfshCommandsSecurityTest.java
@@ -31,6 +31,8 @@ import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.management.internal.cli.result.ErrorResultData;
 import com.gemstone.gemfire.management.internal.cli.result.ResultBuilder;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+import org.apache.shiro.authz.permission.WildcardPermission;
 import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.Rule;
@@ -122,7 +124,7 @@ public class GfshCommandsSecurityTest {
 
 
   private void runCommandsWithAndWithout(String permission) throws Exception{
-    List<TestCommand> permitted = TestCommand.getCommandsOfPermission(permission);
+    List<TestCommand> permitted = TestCommand.getPermittedCommands(new WildcardPermission(permission));
     for(TestCommand clusterRead:permitted) {
       LogService.getLogger().info("Processing authorized command: "+clusterRead.getCommand());gfsh.executeCommand(clusterRead.getCommand());
       CommandResult result = (CommandResult) gfsh.getResult();
@@ -155,7 +157,7 @@ public class GfshCommandsSecurityTest {
       }
 
       assertEquals(ResultBuilder.ERRORCODE_UNAUTHORIZED, ((ErrorResultData) result.getResultData()).getErrorCode());
-      assertTrue(result.getContent().toString().contains(other.getPermission()));
+      assertTrue(result.getContent().toString().contains(other.getPermission().toString()));
     }
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java
index 48e0a39..83f4876 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java
@@ -32,8 +32,6 @@ import javax.management.remote.JMXPrincipal;
 import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.operations.OperationContext;
-import com.gemstone.gemfire.cache.operations.OperationContext.OperationCode;
-import com.gemstone.gemfire.cache.operations.OperationContext.Resource;
 import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.internal.logging.LogService;
 import com.gemstone.gemfire.security.AccessControl;
@@ -41,41 +39,16 @@ import com.gemstone.gemfire.security.AuthenticationFailedException;
 import com.gemstone.gemfire.security.Authenticator;
 import com.gemstone.gemfire.security.NotAuthorizedException;
 import com.gemstone.gemfire.util.test.TestUtil;
+
 import org.json.JSONArray;
 import org.json.JSONException;
 import org.json.JSONObject;
 
 public class JSONAuthorization implements AccessControl, Authenticator {
 
-  static class Permission {
-
-    private final Resource resource;
-    private final OperationCode operationCode;
-
-    Permission(Resource resource, OperationCode operationCode) {
-      this.resource = resource;
-      this.operationCode = operationCode;
-    }
-
-    public Resource getResource() {
-      return resource;
-    }
-
-    public OperationCode getOperationCode() {
-      return operationCode;
-    }
-
-    @Override
-    public String toString() {
-      String result = resource.toString() + ":" + operationCode.toString();
-      return result;
-    }
-  }
-
   public static class Role {
-    List<Permission> permissions = new ArrayList<>();
+    List<OperationContext> permissions = new ArrayList<>();
     String name;
-    List<String> regionNames = null; // when checking, if regionNames is null, that means all regions are allowed.
     String serverGroup;
   }
 
@@ -139,29 +112,18 @@ public class JSONAuthorization implements AccessControl, Authenticator {
       JSONObject obj = array.getJSONObject(i);
       Role role = new Role();
       role.name = obj.getString("name");
+      String regionNames = null;
+      if(obj.has("regions")) {
+        regionNames = obj.getString("regions");
+      }
       JSONArray ops = obj.getJSONArray("operationsAllowed");
       for (int j = 0; j < ops.length(); j++) {
         String[] parts = ops.getString(j).split(":");
-        Resource r = Resource.valueOf(parts[0]);
-        OperationCode op = parts.length > 1 ? OperationCode.valueOf(parts[1]) : OperationCode.READ;
-        role.permissions.add(new Permission(r, op));
-      }
-
-      if(obj.has("region")) {
-        if (role.regionNames == null) {
-          role.regionNames = new ArrayList<>();
-        }
-        role.regionNames.add(obj.getString("region"));
-      }
-
-      if(obj.has("regions")) {
-        JSONArray regions = obj.getJSONArray("regions");
-        if (role.regionNames == null) {
-          role.regionNames = new ArrayList<>();
-        }
-        for (int j = 0; j < regions.length(); j++) {
-          role.regionNames.add(regions.getString(j));
+        if(regionNames!=null) {
+          role.permissions.add(new ResourceOperationContext(parts[0], parts[1], regionNames));
         }
+        else
+          role.permissions.add(new ResourceOperationContext(parts[0], parts[1], "*"));
       }
 
       roleMap.put(role.name, role);
@@ -194,28 +156,15 @@ public class JSONAuthorization implements AccessControl, Authenticator {
     if(user == null)
       return false; // this user is not authorized to do anything
 
-    LogService.getLogger().info("Checking for permission " + context.getResource() + ":" + context.getOperationCode());
-
     // check if the user has this permission defined in the context
     for(Role role:acl.get(user.name).roles) {
-      for (Permission perm : role.permissions) {
-        if (context.getResource() == perm.getResource() && context.getOperationCode() == perm.getOperationCode()) {
-          LogService.getLogger().info("Found permission " + perm);
-
-          //no need to further check the rgionName
-          if(context.getRegionName()==null){
-            return true;
-          }
-
-          if(role.regionNames == null || role.regionNames.contains(context.getRegionName())){
-            // if regionName is null, i.e. all regions are allowed
-            return true;
-          }
+      for (OperationContext permitted : role.permissions) {
+        if (permitted.implies(context)) {
+          return true;
         }
       }
     }
 
-    LogService.getLogger().info("Did not find code " + context);
     return false;
   }
 

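The net effect of the JSONAuthorization rewrite above is that a role's granted permissions and the OperationContext demanded by an operation both behave as Shiro wildcard permissions (the new ResourceOperationContextJUnitTest below asserts that the context is a WildcardPermission), so the whole authorization check collapses to permitted.implies(context). A small runnable illustration of the wildcard semantics this relies on, using plain WildcardPermission so it needs nothing from the Geode test classpath:

    import org.apache.shiro.authz.permission.WildcardPermission;

    public class ImpliesSketch {
      public static void main(String[] args) {
        // a granted permission with no region part covers every region
        WildcardPermission dataRead = new WildcardPermission("DATA:READ");
        // the permission a region-scoped operation would demand
        WildcardPermission readRegionA = new WildcardPermission("DATA:READ:RegionA");

        System.out.println(dataRead.implies(readRegionA));   // true: missing trailing parts imply everything
        System.out.println(readRegionA.implies(dataRead));   // false: a region-scoped grant does not cover all regions
        System.out.println(new WildcardPermission("CLUSTER:READ").implies(readRegionA)); // false: different resource
      }
    }

This is also why the old regionNames bookkeeping could be deleted: a role granted DATA:READ with no region part already implies DATA:READ on any particular region.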
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/LockServiceMBeanAuthorizationJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/LockServiceMBeanAuthorizationJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/LockServiceMBeanAuthorizationJUnitTest.java
index b4b3f72..f07358b 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/LockServiceMBeanAuthorizationJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/LockServiceMBeanAuthorizationJUnitTest.java
@@ -81,10 +81,10 @@ public class LockServiceMBeanAuthorizationJUnitTest {
   @Test
   @JMXConnectionConfiguration(user = "data-user", password = "1234567")
   public void testNoAccess() throws Exception {
-    assertThatThrownBy(() -> lockServiceMBean.becomeLockGrantor()).hasMessageContaining("DATA:MANAGE");
-    assertThatThrownBy(() -> lockServiceMBean.fetchGrantorMember()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> lockServiceMBean.getMemberCount()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> lockServiceMBean.isDistributed()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> lockServiceMBean.listThreadsHoldingLock()).hasMessageContaining("CLUSTER:READ");
+    assertThatThrownBy(() -> lockServiceMBean.becomeLockGrantor()).hasMessageContaining(TestCommand.dataManage.toString());
+    assertThatThrownBy(() -> lockServiceMBean.fetchGrantorMember()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> lockServiceMBean.getMemberCount()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> lockServiceMBean.isDistributed()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> lockServiceMBean.listThreadsHoldingLock()).hasMessageContaining(TestCommand.clusterRead.toString());
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/ManagerMBeanAuthorizationJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/ManagerMBeanAuthorizationJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/ManagerMBeanAuthorizationJUnitTest.java
index 2548d21..425c467 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/ManagerMBeanAuthorizationJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/ManagerMBeanAuthorizationJUnitTest.java
@@ -71,8 +71,8 @@ public class ManagerMBeanAuthorizationJUnitTest {
   @Test
   @JMXConnectionConfiguration(user = "data-admin", password = "1234567")
   public void testSomeAccess() throws Exception {
-    assertThatThrownBy(() -> managerMXBean.start()).hasMessageContaining("CLUSTER:MANAGE");
-    assertThatThrownBy(() -> managerMXBean.getPulseURL()).hasMessageContaining("CLUSTER:WRITE");
+    assertThatThrownBy(() -> managerMXBean.start()).hasMessageContaining(TestCommand.clusterManage.toString());
+    assertThatThrownBy(() -> managerMXBean.getPulseURL()).hasMessageContaining(TestCommand.clusterWrite.toString());
     managerMXBean.isRunning();
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/MemberMBeanSecurityJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/MemberMBeanSecurityJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/MemberMBeanSecurityJUnitTest.java
index c5ff369..8261d09 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/MemberMBeanSecurityJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/MemberMBeanSecurityJUnitTest.java
@@ -66,7 +66,7 @@ public class MemberMBeanSecurityJUnitTest {
   @Test
   @JMXConnectionConfiguration(user = "cluster-admin", password = "1234567")
   public void testClusterAdmin() throws Exception {
-    assertThatThrownBy(() -> bean.compactAllDiskStores()).hasMessageContaining("DATA:MANAGE");
+    assertThatThrownBy(() -> bean.compactAllDiskStores()).hasMessageContaining(TestCommand.dataManage.toString());
     bean.shutDownMember();
     bean.createManager();
     bean.fetchJvmThreads();
@@ -84,8 +84,8 @@ public class MemberMBeanSecurityJUnitTest {
   @JMXConnectionConfiguration(user = "data-admin", password = "1234567")
   public void testDataAdmin() throws Exception {
     bean.compactAllDiskStores();
-    assertThatThrownBy(() -> bean.shutDownMember()).hasMessageContaining("CLUSTER:MANAGE");
-    assertThatThrownBy(() -> bean.createManager()).hasMessageContaining("CLUSTER:MANAGE");
+    assertThatThrownBy(() -> bean.shutDownMember()).hasMessageContaining(TestCommand.clusterManage.toString());
+    assertThatThrownBy(() -> bean.createManager()).hasMessageContaining(TestCommand.clusterManage.toString());
     bean.showJVMMetrics();
     bean.status();
   }
@@ -93,18 +93,18 @@ public class MemberMBeanSecurityJUnitTest {
   @Test
   @JMXConnectionConfiguration(user = "data-user", password = "1234567")
   public void testDataUser() throws Exception {
-    assertThatThrownBy(() -> bean.shutDownMember()).hasMessageContaining("CLUSTER:MANAGE");
-    assertThatThrownBy(() -> bean.createManager()).hasMessageContaining("CLUSTER:MANAGE");
-    assertThatThrownBy(() -> bean.compactAllDiskStores()).hasMessageContaining("DATA:MANAGE");
-    assertThatThrownBy(() -> bean.fetchJvmThreads()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.getName()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.getDiskStores()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.hasGatewayReceiver()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.isCacheServer()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.isServer()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.listConnectedGatewayReceivers()).hasMessageContaining("CLUSTER:READ");
+    assertThatThrownBy(() -> bean.shutDownMember()).hasMessageContaining(TestCommand.clusterManage.toString());
+    assertThatThrownBy(() -> bean.createManager()).hasMessageContaining(TestCommand.clusterManage.toString());
+    assertThatThrownBy(() -> bean.compactAllDiskStores()).hasMessageContaining(TestCommand.dataManage.toString());
+    assertThatThrownBy(() -> bean.fetchJvmThreads()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.getName()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.getDiskStores()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.hasGatewayReceiver()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.isCacheServer()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.isServer()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.listConnectedGatewayReceivers()).hasMessageContaining(TestCommand.clusterRead.toString());
     //assertThatThrownBy(() -> bean.processCommand("create region --name=Region_A")).hasMessageContaining("DATA:MANAGE");
-    assertThatThrownBy(() -> bean.showJVMMetrics()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.status()).hasMessageContaining("CLUSTER:READ");
+    assertThatThrownBy(() -> bean.showJVMMetrics()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.status()).hasMessageContaining(TestCommand.clusterRead.toString());
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContextJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContextJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContextJUnitTest.java
new file mode 100644
index 0000000..318d327
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContextJUnitTest.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.management.internal.security;
+
+import static org.junit.Assert.*;
+
+import com.gemstone.gemfire.cache.operations.OperationContext.OperationCode;
+import com.gemstone.gemfire.cache.operations.OperationContext.Resource;
+import com.gemstone.gemfire.test.junit.categories.UnitTest;
+
+import org.apache.shiro.authz.permission.WildcardPermission;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(UnitTest.class)
+public class ResourceOperationContextJUnitTest {
+
+  private ResourceOperationContext context;
+
+  @Test
+  public void testEmptyConstructor(){
+    context = new ResourceOperationContext();
+    assertEquals(Resource.NULL, context.getResource());
+    assertEquals(OperationCode.NULL, context.getOperationCode());
+    assertEquals("NULL", context.getRegionName());
+  }
+
+  @Test
+  public void testIsPermission(){
+    context = new ResourceOperationContext();
+    assertTrue(context instanceof WildcardPermission);
+  }
+
+  @Test
+  public void testConstructor(){
+    context = new ResourceOperationContext(null, null, null);
+    assertEquals(Resource.NULL, context.getResource());
+    assertEquals(OperationCode.NULL, context.getOperationCode());
+    assertEquals("NULL", context.getRegionName());
+
+    context = new ResourceOperationContext(null, null);
+    assertEquals(Resource.NULL, context.getResource());
+    assertEquals(OperationCode.NULL, context.getOperationCode());
+    assertEquals("NULL", context.getRegionName());
+
+    context = new ResourceOperationContext("DATA", null, null);
+    assertEquals(Resource.DATA, context.getResource());
+    assertEquals(OperationCode.NULL, context.getOperationCode());
+    assertEquals("NULL", context.getRegionName());
+
+    context = new ResourceOperationContext(null, "MANAGE", "REGIONA");
+    assertEquals(Resource.NULL, context.getResource());
+    assertEquals(OperationCode.MANAGE, context.getOperationCode());
+    assertEquals("REGIONA", context.getRegionName());
+
+    context = new ResourceOperationContext("DATA", "MANAGE", "REGIONA");
+    assertEquals(Resource.DATA, context.getResource());
+    assertEquals(OperationCode.MANAGE, context.getOperationCode());
+    assertEquals("REGIONA", context.getRegionName());
+  }
+
+  @Test
+  public void testToString(){
+    context = new ResourceOperationContext();
+    assertEquals("[null]:[null]:[null]", context.toString());
+
+    context = new ResourceOperationContext("DATA", "MANAGE");
+    assertEquals("[data]:[manage]:[null]", context.toString());
+
+    context = new ResourceOperationContext("DATA", "MANAGE", "REGIONA");
+    assertEquals("[data]:[manage]:[regiona]", context.toString());
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/TestCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/TestCommand.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/TestCommand.java
index c25044d..56eeeec 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/TestCommand.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/TestCommand.java
@@ -20,8 +20,24 @@ package com.gemstone.gemfire.management.internal.security;
 import java.util.ArrayList;
 import java.util.List;
 
+import com.gemstone.gemfire.cache.operations.OperationContext;
+
+import org.apache.shiro.authz.Permission;
+
 public class TestCommand {
-  
+  public static OperationContext none = null;
+  public static OperationContext everyOneAllowed = new ResourceOperationContext();
+  public static OperationContext dataRead = new ResourceOperationContext("DATA", "READ");
+  public static OperationContext dataWrite = new ResourceOperationContext("DATA", "WRITE");
+  public static OperationContext dataManage = new ResourceOperationContext("DATA", "MANAGE");
+
+  public static OperationContext regionARead = new ResourceOperationContext("DATA", "READ", "RegionA");
+  public static OperationContext regionAWrite = new ResourceOperationContext("DATA", "WRITE", "RegionA");
+
+  public static OperationContext clusterRead = new ResourceOperationContext("CLUSTER", "READ");
+  public static OperationContext clusterWrite = new ResourceOperationContext("CLUSTER", "WRITE");
+  public static OperationContext clusterManage = new ResourceOperationContext("CLUSTER", "MANAGE");
+
   private static List<TestCommand> testCommands = new ArrayList<>();
 
   static{
@@ -29,14 +45,14 @@ public class TestCommand {
   }
   
   private final String command;
-  private final String permission;
+  private final OperationContext permission;
   
-  public TestCommand(String command, String permission) {
+  public TestCommand(String command, OperationContext permission) {
     this.command = command;
     this.permission = permission;
   }
   
-  private static void createTestCommand(String command, String permission) {
+  private static void createTestCommand(String command, OperationContext permission) {
     TestCommand instance = new TestCommand(command, permission);
     testCommands.add(instance);
   }
@@ -45,7 +61,7 @@ public class TestCommand {
     return this.command;
   }
 
-  public String getPermission() {
+  public OperationContext getPermission() {
     return this.permission;
   }
 
@@ -53,11 +69,11 @@ public class TestCommand {
     return testCommands;
   }
 
-  public static List<TestCommand> getCommandsOfPermission(String permission){
+  public static List<TestCommand> getPermittedCommands(Permission permission){
     List<TestCommand> result = new ArrayList<>();
     for(TestCommand testCommand:testCommands){
-      String cPerm = testCommand.getPermission();
-      if(cPerm!=null && cPerm.startsWith(permission)){
+      OperationContext cPerm = testCommand.getPermission();
+      if(cPerm!=null && permission.implies(cPerm)){
         result.add(testCommand);
       }
     }
@@ -66,75 +82,75 @@ public class TestCommand {
 
   private static void init() {
     // ClientCommands
-    createTestCommand("list clients", "CLUSTER:READ");
-    createTestCommand("describe client --clientID=172.16.196.144", "CLUSTER:READ");
+    createTestCommand("list clients", clusterRead);
+    createTestCommand("describe client --clientID=172.16.196.144", clusterRead);
 
     // ConfigCommands
-    createTestCommand("alter runtime", "CLUSTER:MANAGE");
-    createTestCommand("describe config --member=Member1", "CLUSTER:READ");
-    createTestCommand("export config --member=member1", "CLUSTER:READ");
+    createTestCommand("alter runtime", clusterManage);
+    createTestCommand("describe config --member=Member1", clusterRead);
+    createTestCommand("export config --member=member1", clusterRead);
 
     //CreateAlterDestroyRegionCommands
-    createTestCommand("alter region --name=region1 --eviction-max=5000", "DATA:MANAGE");
-    createTestCommand("create region --name=region12 --type=REPLICATE", "DATA:MANAGE");
-    createTestCommand("destroy region --name=value", "DATA:MANAGE");
+    createTestCommand("alter region --name=region1 --eviction-max=5000", dataManage);
+    createTestCommand("create region --name=region12 --type=REPLICATE", dataManage);
+    createTestCommand("destroy region --name=value", dataManage);
 
     //Data Commands
-    createTestCommand("rebalance --include-region=region1", "DATA:MANAGE");
-    createTestCommand("export data --region=region1 --file=export.txt --member=exportMember", "DATA:READ");
-    createTestCommand("import data --region=region1 --file=import.txt --member=importMember", "DATA:WRITE");
-    createTestCommand("put --key=key1 --value=value1 --region=region1", "DATA:WRITE");
-    createTestCommand("get --key=key1 --region=region1", "DATA:READ");
-    createTestCommand("remove --region=region1", "DATA:MANAGE");
-    createTestCommand("query --query='SELECT * FROM /region1'", "DATA:READ");
-    createTestCommand("locate entry --key=k1 --region=secureRegion", "DATA:READ");
+    createTestCommand("rebalance --include-region=regionA", dataManage);
+    createTestCommand("export data --region=regionA --file=export.txt --member=exportMember", regionARead);
+    createTestCommand("import data --region=regionA --file=import.txt --member=importMember", regionAWrite);
+    createTestCommand("put --key=key1 --value=value1 --region=regionA", regionAWrite);
+    createTestCommand("get --key=key1 --region=regionA", regionARead);
+    createTestCommand("remove --region=regionA", dataManage);
+    createTestCommand("query --query='SELECT * FROM /region1'", dataRead);
+    createTestCommand("locate entry --key=k1 --region=regionA", regionARead);
 
     // Deploy commands
-    //createTestCommand("deploy --jar=group1_functions.jar --group=Group1", "DATA:MANAGE"); // TODO: this command will fail in GfshCommandsSecurityTest at interceptor for jar file checking
-    createTestCommand("undeploy --group=Group1", "DATA:MANAGE");
+    //createTestCommand("deploy --jar=group1_functions.jar --group=Group1", dataManage); // TODO: this command will fail in GfshCommandsSecurityTest at interceptor for jar file checking
+    createTestCommand("undeploy --group=Group1", dataManage);
 
     // Diskstore Commands
-    createTestCommand("backup disk-store --dir=foo", "DATA:READ");
-    createTestCommand("list disk-stores", "CLUSTER:READ");
-    createTestCommand("create disk-store --name=foo --dir=bar", "DATA:MANAGE");
-    createTestCommand("compact disk-store --name=foo", "DATA:MANAGE");
+    createTestCommand("backup disk-store --dir=foo", dataRead);
+    createTestCommand("list disk-stores", clusterRead);
+    createTestCommand("create disk-store --name=foo --dir=bar", dataManage);
+    createTestCommand("compact disk-store --name=foo", dataManage);
     createTestCommand("compact offline-disk-store --name=foo --disk-dirs=bar", null);
     createTestCommand("upgrade offline-disk-store --name=foo --disk-dirs=bar", null);
-    createTestCommand("describe disk-store --name=foo --member=baz", "CLUSTER:READ");
-    createTestCommand("revoke missing-disk-store --id=foo", "DATA:MANAGE");
-    createTestCommand("show missing-disk-stores", "CLUSTER:READ");
+    createTestCommand("describe disk-store --name=foo --member=baz", clusterRead);
+    createTestCommand("revoke missing-disk-store --id=foo", dataManage);
+    createTestCommand("show missing-disk-stores", clusterRead);
     createTestCommand("describe offline-disk-store --name=foo --disk-dirs=bar", null);
     createTestCommand("export offline-disk-store --name=foo --disk-dirs=bar --dir=baz", null);
     createTestCommand("validate offline-disk-store --name=foo --disk-dirs=bar", null);
     createTestCommand("alter disk-store --name=foo --region=xyz --disk-dirs=bar", null);
-    createTestCommand("destroy disk-store --name=foo", "DATA:MANAGE");
+    createTestCommand("destroy disk-store --name=foo", dataManage);
 
     // DurableClientCommands
-    createTestCommand("close durable-client --durable-client-id=client1", "DATA:MANAGE");
-    createTestCommand("close durable-cq --durable-client-id=client1 --durable-cq-name=cq1", "DATA:MANAGE");
-    createTestCommand("show subscription-queue-size --durable-client-id=client1", "CLUSTER:READ");
-    createTestCommand("list durable-cqs --durable-client-id=client1", "CLUSTER:READ");
+    createTestCommand("close durable-client --durable-client-id=client1", dataManage);
+    createTestCommand("close durable-cq --durable-client-id=client1 --durable-cq-name=cq1", dataManage);
+    createTestCommand("show subscription-queue-size --durable-client-id=client1", clusterRead);
+    createTestCommand("list durable-cqs --durable-client-id=client1", clusterRead);
 
     //ExportIMportSharedConfigurationCommands
-    createTestCommand("export cluster-configuration --zip-file-name=mySharedConfig.zip", "CLUSTER:READ");
-    createTestCommand("import cluster-configuration --zip-file-name=value.zip", "CLUSTER:MANAGE");
+    createTestCommand("export cluster-configuration --zip-file-name=mySharedConfig.zip", clusterRead);
+    createTestCommand("import cluster-configuration --zip-file-name=value.zip", clusterManage);
 
     //FunctionCommands
-    //createTestCommand("destroy function --id=InterestCalculations", "DATA:MANAGE");
-    createTestCommand("execute function --id=InterestCalculations --group=Group1", "DATA:WRITE");
-    createTestCommand("list functions", "CLUSTER:READ");
+    //createTestCommand("destroy function --id=InterestCalculations", dataManage);
+    createTestCommand("execute function --id=InterestCalculations --group=Group1", dataWrite);
+    createTestCommand("list functions", clusterRead);
 
     //GfshHelpCommands
     createTestCommand("hint", null);
     createTestCommand("help", null);
 
     //IndexCommands
-    createTestCommand("clear defined indexes", "DATA:MANAGE");
-    createTestCommand("create defined indexes", "DATA:MANAGE");
-    createTestCommand("create index --name=myKeyIndex --expression=region1.Id --region=region1 --type=key", "DATA:MANAGE");
-    createTestCommand("define index --name=myIndex1 --expression=exp1 --region=/exampleRegion", "DATA:MANAGE");
-    createTestCommand("destroy index --member=server2", "DATA:MANAGE");
-    createTestCommand("list indexes", "CLUSTER:READ");
+    createTestCommand("clear defined indexes", dataManage);
+    createTestCommand("create defined indexes", dataManage);
+    createTestCommand("create index --name=myKeyIndex --expression=region1.Id --region=region1 --type=key", dataManage);
+    createTestCommand("define index --name=myIndex1 --expression=exp1 --region=/exampleRegion", dataManage);
+    createTestCommand("destroy index --member=server2", dataManage);
+    createTestCommand("list indexes", clusterRead);
 
     //LauncherLifecycleCommands
     createTestCommand("start jconsole", null);
@@ -145,38 +161,38 @@ public class TestCommand {
     createTestCommand("start vsd", null);
     createTestCommand("status locator", null);
     createTestCommand("status server", null);
-    //createTestCommand("stop locator --name=locator1", "CLUSTER:MANAGE");
-    //createTestCommand("stop server --name=server1", "CLUSTER:MANAGE");
+    //createTestCommand("stop locator --name=locator1", clusterManage);
+    //createTestCommand("stop server --name=server1", clusterManage);
 
     //MemberCommands
-    createTestCommand("describe member --name=server1", "CLUSTER:READ");
-    createTestCommand("list members", "CLUSTER:READ");
+    createTestCommand("describe member --name=server1", clusterRead);
+    createTestCommand("list members", clusterRead);
 
     // Misc Commands
-    createTestCommand("change loglevel --loglevel=severe --member=server1", "CLUSTER:WRITE");
-    createTestCommand("export logs --dir=data/logs", "CLUSTER:READ");
-    createTestCommand("export stack-traces --file=stack.txt", "CLUSTER:READ");
-    createTestCommand("gc", "CLUSTER:MANAGE");
-    createTestCommand("netstat --member=server1", "CLUSTER:READ");
-    createTestCommand("show dead-locks --file=deadlocks.txt", "CLUSTER:READ");
-    createTestCommand("show log --member=locator1 --lines=5", "CLUSTER:READ");
-    createTestCommand("show metrics", "CLUSTER:READ");
+    createTestCommand("change loglevel --loglevel=severe --member=server1", clusterWrite);
+    createTestCommand("export logs --dir=data/logs", clusterRead);
+    createTestCommand("export stack-traces --file=stack.txt", clusterRead);
+    createTestCommand("gc", clusterManage);
+    createTestCommand("netstat --member=server1", clusterRead);
+    createTestCommand("show dead-locks --file=deadlocks.txt", clusterRead);
+    createTestCommand("show log --member=locator1 --lines=5", clusterRead);
+    createTestCommand("show metrics", clusterRead);
 
 
     // PDX Commands
-    createTestCommand("configure pdx --read-serialized=true", "DATA:MANAGE");
-    //createTestCommand("pdx rename --old=com.gemstone --new=com.pivotal --disk-store=ds1 --disk-dirs=/diskDir1", "DATA:MANAGE");
+    createTestCommand("configure pdx --read-serialized=true", dataManage);
+    //createTestCommand("pdx rename --old=com.gemstone --new=com.pivotal --disk-store=ds1 --disk-dirs=/diskDir1", dataManage);
 
     // Queue Commands
-    createTestCommand("create async-event-queue --id=myAEQ --listener=myApp.myListener", "DATA:MANAGE");
-    createTestCommand("list async-event-queues", "CLUSTER:READ");
+    createTestCommand("create async-event-queue --id=myAEQ --listener=myApp.myListener", dataManage);
+    createTestCommand("list async-event-queues", clusterRead);
 
     //RegionCommands
-    createTestCommand("describe region --name=value", "CLUSTER:READ");
-    createTestCommand("list regions", "CLUSTER:READ");
+    createTestCommand("describe region --name=value", clusterRead);
+    createTestCommand("list regions", clusterRead);
 
     // StatusCommands
-    createTestCommand("status cluster-config-service", "CLUSTER:READ");
+    createTestCommand("status cluster-config-service", clusterRead);
 
     // Shell Commands
     createTestCommand("connect", null);
@@ -190,22 +206,22 @@ public class TestCommand {
 
 
     // WAN Commands
-    createTestCommand("create gateway-sender --id=sender1 --remote-distributed-system-id=2", "DATA:MANAGE");
-    createTestCommand("start gateway-sender --id=sender1", "DATA:MANAGE");
-    createTestCommand("pause gateway-sender --id=sender1", "DATA:MANAGE");
-    createTestCommand("resume gateway-sender --id=sender1", "DATA:MANAGE");
-    createTestCommand("stop gateway-sender --id=sender1", "DATA:MANAGE");
-    createTestCommand("load-balance gateway-sender --id=sender1", "DATA:MANAGE");
-    createTestCommand("list gateways", "CLUSTER:READ");
-    createTestCommand("create gateway-receiver", "DATA:MANAGE");
-    createTestCommand("start gateway-receiver", "DATA:MANAGE");
-    createTestCommand("stop gateway-receiver", "DATA:MANAGE");
-    createTestCommand("status gateway-receiver", "CLUSTER:READ");
-    createTestCommand("status gateway-sender --id=sender1", "CLUSTER:READ");
+    createTestCommand("create gateway-sender --id=sender1 --remote-distributed-system-id=2", dataManage);
+    createTestCommand("start gateway-sender --id=sender1", dataManage);
+    createTestCommand("pause gateway-sender --id=sender1", dataManage);
+    createTestCommand("resume gateway-sender --id=sender1", dataManage);
+    createTestCommand("stop gateway-sender --id=sender1", dataManage);
+    createTestCommand("load-balance gateway-sender --id=sender1", dataManage);
+    createTestCommand("list gateways", clusterRead);
+    createTestCommand("create gateway-receiver", dataManage);
+    createTestCommand("start gateway-receiver", dataManage);
+    createTestCommand("stop gateway-receiver", dataManage);
+    createTestCommand("status gateway-receiver", clusterRead);
+    createTestCommand("status gateway-sender --id=sender1", clusterRead);
 
     //ShellCommand
     createTestCommand("disconnect", null);
     //Misc commands
-    //createTestCommand("shutdown", "CLUSTER:MANAGE");
+    //createTestCommand("shutdown", clusterManage);
   };
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/auth3.json
----------------------------------------------------------------------
diff --git a/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/auth3.json b/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/auth3.json
index cfd43f5..635cff5 100644
--- a/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/auth3.json
+++ b/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/auth3.json
@@ -11,7 +11,7 @@
       "operationsAllowed": [
         "REGION:GET"
       ],
-      "region": "secureRegion"
+      "regions": "secureRegion"
     }
   ],
   "users": [

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/cacheServer.json
----------------------------------------------------------------------
diff --git a/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/cacheServer.json b/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/cacheServer.json
index 01c9fd6..638ae07 100644
--- a/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/cacheServer.json
+++ b/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/cacheServer.json
@@ -76,16 +76,18 @@
     {
       "name": "region1-use",
       "operationsAllowed": [
-        "DATA"
+        "DATA:READ",
+        "DATA:WRITE"
       ],
-      "region": "region1"
+      "regions": "null,region1"
     },
     {
       "name": "secure-use",
       "operationsAllowed": [
-        "DATA"
+        "DATA:READ",
+        "DATA:WRITE"
       ],
-      "regions": ["region1", "secureRegion"]
+      "regions": "null,region1,secureRegion"
     }
   ],
   "users": [

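The cacheServer.json change above also turns "regions" from a JSON array into a single comma-separated string such as "null,region1". Assuming that string ends up verbatim as the region part of the wildcard permission (which is how the rewritten JSONAuthorization passes it to ResourceOperationContext), commas inside one part act as alternatives in Shiro's matching; a hedged illustration with plain WildcardPermission:

    import org.apache.shiro.authz.permission.WildcardPermission;

    public class CommaPartSketch {
      public static void main(String[] args) {
        // one wildcard part containing comma-separated alternatives
        WildcardPermission grant = new WildcardPermission("DATA:READ:null,region1");

        System.out.println(grant.implies(new WildcardPermission("DATA:READ:region1"))); // true
        System.out.println(grant.implies(new WildcardPermission("DATA:READ:null")));    // true
        System.out.println(grant.implies(new WildcardPermission("DATA:READ:other")));   // false
      }
    }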
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/shiro-ini.json
----------------------------------------------------------------------
diff --git a/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/shiro-ini.json b/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/shiro-ini.json
new file mode 100644
index 0000000..d586fa1
--- /dev/null
+++ b/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/shiro-ini.json
@@ -0,0 +1,87 @@
+{
+  "roles": [
+    {
+      "name": "admin",
+      "operationsAllowed": [
+        "CLUSTER:MANAGE",
+        "CLUSTER:WRITE",
+        "CLUSTER:READ",
+        "DATA:MANAGE",
+        "DATA:WRITE",
+        "DATA:READ"
+      ]
+    },
+    {
+      "name": "readRegionA",
+      "operationsAllowed": [
+        "DATA:READ"
+      ],
+      "regions": "RegionA"
+    },
+    {
+      "name": "useRegionA",
+      "operationsAllowed": [
+        "DATA:MANAGE",
+        "DATA:WRITE",
+        "DATA:READ"
+      ],
+      "regions": "RegionA"
+    },
+    {
+      "name": "readData",
+      "operationsAllowed": [
+        "DATA:READ"
+      ]
+    },
+    {
+      "name": "readAll",
+      "operationsAllowed": [
+        "CLUSTER:READ",
+        "DATA:READ"
+      ]
+    }
+  ],
+  "users": [
+    {
+      "name": "root",
+      "password": "secret",
+      "roles": [
+        "admin"
+      ]
+    },
+    {
+      "name": "guest",
+      "password": "guest",
+      "roles": [
+      ]
+    },
+    {
+      "name": "regionAReader",
+      "password": "password",
+      "roles": [
+        "readRegionA"
+      ]
+    },
+    {
+      "name": "regionAUser",
+      "password": "password",
+      "roles": [
+        "useRegionA"
+      ]
+    },
+    {
+      "name": "dataReader",
+      "password": "12345",
+      "roles": [
+        "readData"
+      ]
+    },
+    {
+      "name": "reader",
+      "password": "12345",
+      "roles": [
+        "readAll"
+      ]
+    }
+  ]
+}

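The role entries above pair an "operationsAllowed" list with an optional "regions" string, which together act like resource:operation[:region] permissions. As a rough illustration only (not Geode's actual parser, which lives in the test-only JSONAuthorization class), one role entry could be expanded into Shiro-style permission strings as sketched below; the class and helper names are hypothetical:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Hypothetical sketch: expand one role entry from the JSON above into
// Shiro-style permission strings such as "DATA:READ:RegionA". The real
// parsing in Geode's test-only JSONAuthorization class may differ.
public class RolePermissionSketch {

  static List<String> expand(List<String> operationsAllowed, String regions) {
    List<String> permissions = new ArrayList<>();
    // An absent "regions" field means the permission is not region-scoped.
    List<String> regionList = (regions == null || regions.isEmpty())
        ? Arrays.asList((String) null)
        : Arrays.asList(regions.split(","));
    for (String op : operationsAllowed) {        // e.g. "DATA:READ"
      for (String region : regionList) {         // e.g. "RegionA"
        permissions.add(region == null ? op : op + ":" + region);
      }
    }
    return permissions;
  }

  public static void main(String[] args) {
    // The "readRegionA" role defined above.
    System.out.println(expand(Arrays.asList("DATA:READ"), "RegionA"));
    // prints [DATA:READ:RegionA]
  }
}
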
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/testInheritRole.json
----------------------------------------------------------------------
diff --git a/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/testInheritRole.json b/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/testInheritRole.json
deleted file mode 100644
index 3053a92..0000000
--- a/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/testInheritRole.json
+++ /dev/null
@@ -1,40 +0,0 @@
-{
-"roles" : [	
-			{
-				"name" : "jmxReader",
-				"operationsAllowed" : ["QUERY"]				
-			},
-			{
-				"name" : "jmxWriter",
-				"operationsAllowed" : ["CHANGE_LOG_LEVEL"]				
-			},
-			{
-				"name" : "admin",
-				"operationsAllowed" : ["CMD_SHUTDOWN"]	
-			},
-			{
-				"name" : "adminSG1",
-				"inherit" : [ "admin" ],
-				"serverGroup" : "SG1"
-			},
-			{
-				"name" : "adminSG2",
-				"inherit" : [ "admin" , "jmxWriter"],
-				"serverGroup" : "SG2"
-			}
-		],
-users : [
-	 		{
-	 			"name" : "tushark",
-	 			"roles" : ["jmxReader"]
-	 		},
-	 		{
-	 			"name" : "admin1",
-	 			"roles" : ["adminSG1"]
-	 		},
-	 		{
-	 			"name" : "admin2",
-	 			"roles" : ["adminSG2"]
-	 		}
-		]
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/testSimpleUserAndRole.json
----------------------------------------------------------------------
diff --git a/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/testSimpleUserAndRole.json b/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/testSimpleUserAndRole.json
deleted file mode 100644
index 0542cf4..0000000
--- a/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/testSimpleUserAndRole.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
-  "roles": [
-    {
-      "name": "jmxReader",
-      "operationsAllowed": [
-        "QUERY:EXECUTE"
-      ]
-    }
-  ],
-  "users": [
-    {
-      "name": "tushark",
-      "roles": [
-        "jmxReader"
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/testUserAndRoleRegionServerGroup.json
----------------------------------------------------------------------
diff --git a/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/testUserAndRoleRegionServerGroup.json b/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/testUserAndRoleRegionServerGroup.json
deleted file mode 100644
index 6bb28bf..0000000
--- a/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/testUserAndRoleRegionServerGroup.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
-  "roles": [
-    {
-      "name": "jmxReader",
-      "operationsAllowed": [
-        "QUERY:EXECUTE"
-      ],
-      "serverGroup": "SG2",
-      "region": "secureRegion"
-    }
-  ],
-  "users": [
-    {
-      "name": "tushark",
-      "roles": [
-        "jmxReader"
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/testUserMultipleRole.json
----------------------------------------------------------------------
diff --git a/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/testUserMultipleRole.json b/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/testUserMultipleRole.json
deleted file mode 100644
index 7a07a21..0000000
--- a/geode-core/src/test/resources/com/gemstone/gemfire/management/internal/security/testUserMultipleRole.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "roles": [
-    {
-      "name": "jmxReader",
-      "operationsAllowed": [
-        "QUERY:EXECUTE"
-      ]
-    },
-    {
-      "name": "sysMonitors",
-      "operationsAllowed": [
-        "MEMBER:EXPORT_LOGS",
-        "MEMBER:GC"
-      ]
-    }
-  ],
-  "users": [
-    {
-      "name": "tushark",
-      "roles": [
-        "jmxReader",
-        "sysMonitors"
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/resources/shiro.ini
----------------------------------------------------------------------
diff --git a/geode-core/src/test/resources/shiro.ini b/geode-core/src/test/resources/shiro.ini
index 37b81b2..a9746a5 100644
--- a/geode-core/src/test/resources/shiro.ini
+++ b/geode-core/src/test/resources/shiro.ini
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# the users and roles in this file need to be kept in sync with shiro-ini.json
+# since they are used by the same test to test ShiroUtil
 # -----------------------------------------------------------------------------
 # Users and their (optional) assigned roles
 # username = password, role1, role2, ..., roleN
@@ -20,7 +22,10 @@
 [users]
 root = secret, admin
 guest = guest, guest
-stranger = 12345, none
+regionAReader = password, readRegionA
+regionAUser = password, useRegionA
+dataReader = 12345, readData
+reader = 12345, readAll
 
 # -----------------------------------------------------------------------------
 # Roles with assigned permissions
@@ -28,4 +33,8 @@ stranger = 12345, none
 # -----------------------------------------------------------------------------
 [roles]
 admin = *
-guest = none
\ No newline at end of file
+guest = none
+readRegionA = DATA:READ:RegionA
+useRegionA = *:*:RegionA
+readData = DATA:READ
+readAll = *:READ
\ No newline at end of file

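For readers unfamiliar with the permission strings above: Shiro's wildcard permissions read DATA:READ:RegionA as the DATA:READ operation scoped to RegionA, and *:READ as read access to every resource. A minimal, self-contained sketch of how this shiro.ini resolves a check, assuming Apache Shiro 1.x on the classpath (IniSecurityManagerFactory was the usual bootstrap at the time):

import org.apache.shiro.SecurityUtils;
import org.apache.shiro.authc.UsernamePasswordToken;
import org.apache.shiro.config.IniSecurityManagerFactory;
import org.apache.shiro.mgt.SecurityManager;
import org.apache.shiro.subject.Subject;

// Minimal sketch (Shiro 1.x): load the test shiro.ini and check the
// wildcard permissions it defines, e.g. readRegionA = DATA:READ:RegionA.
public class ShiroIniSketch {
  public static void main(String[] args) {
    SecurityManager securityManager =
        new IniSecurityManagerFactory("classpath:shiro.ini").getInstance();
    SecurityUtils.setSecurityManager(securityManager);

    Subject user = SecurityUtils.getSubject();
    user.login(new UsernamePasswordToken("regionAReader", "password"));

    // DATA:READ is granted for RegionA only, so the second check fails.
    System.out.println(user.isPermitted("DATA:READ:RegionA"));  // true
    System.out.println(user.isPermitted("DATA:WRITE:RegionA")); // false

    user.logout();
  }
}
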
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-junit/src/main/java/com/gemstone/gemfire/test/junit/rules/DescribedExternalResource.java
----------------------------------------------------------------------
diff --git a/geode-junit/src/main/java/com/gemstone/gemfire/test/junit/rules/DescribedExternalResource.java b/geode-junit/src/main/java/com/gemstone/gemfire/test/junit/rules/DescribedExternalResource.java
index 543b7fc..b12bab1 100644
--- a/geode-junit/src/main/java/com/gemstone/gemfire/test/junit/rules/DescribedExternalResource.java
+++ b/geode-junit/src/main/java/com/gemstone/gemfire/test/junit/rules/DescribedExternalResource.java
@@ -14,18 +14,17 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
-/**
- * this class extends the capability of JUnit's ExternalResource in that
- * it provides a Description object in the before and after methods, so that
- * the implementation would have access to the annotation of the test methods
- */
 package com.gemstone.gemfire.test.junit.rules;
 
 import org.junit.rules.TestRule;
 import org.junit.runner.Description;
 import org.junit.runners.model.Statement;
 
+/**
+ * This class extends the capability of JUnit's ExternalResource in that
+ * it provides a Description object to the before and after methods, so that
+ * the implementation has access to the annotations of the test method.
+ */
 public class DescribedExternalResource implements TestRule {
   public Statement apply(Statement base, Description description) {
     return statement(base, description);

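The javadoc above is the whole point of the rule: unlike ExternalResource, the before/after hooks receive the test Description, so a rule can read the test method's annotations. A sketch of a subclass follows; it assumes the protected hooks are before(Description) and after(Description), mirroring ExternalResource, since this hunk does not show them, and the annotation and rule names are made up for illustration.

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

import org.junit.runner.Description;

import com.gemstone.gemfire.test.junit.rules.DescribedExternalResource;

// Sketch only: assumes DescribedExternalResource exposes protected
// before(Description)/after(Description) hooks analogous to ExternalResource.
public class ConnectAsRule extends DescribedExternalResource {

  @Retention(RetentionPolicy.RUNTIME)
  @Target(ElementType.METHOD)
  public @interface ConnectAs {
    String user();
  }

  protected void before(Description description) throws Throwable {
    ConnectAs connectAs = description.getAnnotation(ConnectAs.class);
    if (connectAs != null) {
      // connect with connectAs.user() before the test runs (details omitted)
    }
  }

  protected void after(Description description) {
    // disconnect after the test runs (details omitted)
  }
}
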
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/security/GemFireAuthentication.java
----------------------------------------------------------------------
diff --git a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/security/GemFireAuthentication.java b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/security/GemFireAuthentication.java
index 5253f2f..a7a611d 100644
--- a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/security/GemFireAuthentication.java
+++ b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/security/GemFireAuthentication.java
@@ -14,9 +14,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package com.vmware.gemfire.tools.pulse.internal.security;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import javax.management.MBeanServerConnection;
@@ -33,87 +33,59 @@ import org.springframework.security.core.authority.SimpleGrantedAuthority;
 
 /**
  * Spring security authentication object for GemFire
- * 
+ * <p>
  * To use GemFire Integrated Security Model set Spring Application Profile to pulse.authentication.gemfire
- * 
- * 1. Authentication : 
- *    1.a GemFire profile creates JMX connection with given credentials at the login time. 
- *    1.b Successful connect is considered as Successful Authentication for Pulse WebApp
- *    
- *    
+ * <p>
+ * 1. Authentication :
+ * 1.a GemFire profile creates a JMX connection with the given credentials at login time.
+ * 1.b A successful connect is treated as successful authentication for the Pulse WebApp
+ * <p>
+ * <p>
  * 2. Authorization :
- *    2.a Using newly created authenticated connection AccessControlMXBean is called to get authentication
- *      levels. See @See {@link #populateAuthorities(JMXConnector)}. This sets Spring Security Authorities
- *    2.b DataBrowser end-points are required to be authorized against Spring Granted Authority
- *      @See spring-security.xml
- *    2.c When executing Data-Browser query, user-level jmx connection is used so at to put access-control
- *      over the resources query is accessing. 
- *      @See #com.vmware.gemfire.tools.pulse.internal.data.JMXDataUpdater#executeQuery
- *         
- * 3. Connection Management - Spring Security LogoutHandler closes session level connection
- *
- * TODO : Better model would be to maintain background connection map for Databrowser instead
- * of each web session creating rmi connection and map user to correct entry in the connection map
- *
+ * 2.a Using the newly created authenticated connection, AccessControlMXBean is called to get the authorization
+ * levels. See {@link #populateAuthorities(JMXConnector)}. This sets the Spring Security authorities
+ * 2.b DataBrowser end-points are required to be authorized against the Spring granted authorities
  * @since version 9.0
  */
-public class GemFireAuthentication extends UsernamePasswordAuthenticationToken {	
+public class GemFireAuthentication extends UsernamePasswordAuthenticationToken {
 
   private final static PulseLogWriter logger = PulseLogWriter.getLogger();
-  
-	private JMXConnector jmxc=null;	
-	
-	public GemFireAuthentication(Object principal, Object credentials, Collection<GrantedAuthority> list, JMXConnector jmxc) {
-		super(principal, credentials, list);
-		this.jmxc = jmxc;
-	}
 
-	private static final long serialVersionUID = SpringSecurityCoreVersion.SERIAL_VERSION_UID;
-		
-	
-	public void closeJMXConnection(){
-		try {
-			jmxc.close();
-		} catch (IOException e) {
-			throw new RuntimeException(e);
-		}
-	}
-	
-	public MBeanServerConnection getRemoteMBeanServer() {
-		try {
-			return jmxc.getMBeanServerConnection();
-		} catch (IOException e) {
-			throw new RuntimeException(e);
-		}
-	}
+  private JMXConnector jmxc = null;
+
+  public GemFireAuthentication(Object principal, Object credentials, Collection<GrantedAuthority> list, JMXConnector jmxc) {
+    super(principal, credentials, list);
+    this.jmxc = jmxc;
+  }
+
+  private static final long serialVersionUID = SpringSecurityCoreVersion.SERIAL_VERSION_UID;
 
-	public static ArrayList<GrantedAuthority> populateAuthorities(JMXConnector jmxc) {
-		ObjectName name;
-		ArrayList<GrantedAuthority> authorities = new ArrayList<>();
-		try {
-			name = new ObjectName(PulseConstants.OBJECT_NAME_ACCESSCONTROL_MBEAN);
-			MBeanServerConnection mbeanServer = jmxc.getMBeanServerConnection();
+  public static ArrayList<GrantedAuthority> populateAuthorities(JMXConnector jmxc) {
+    ObjectName name;
+    ArrayList<GrantedAuthority> authorities = new ArrayList<>();
+    try {
+      name = new ObjectName(PulseConstants.OBJECT_NAME_ACCESSCONTROL_MBEAN);
+      MBeanServerConnection mbeanServer = jmxc.getMBeanServerConnection();
 
-			for(String role : PulseConstants.PULSE_ROLES){
-				Object[] params = role.split(":");
-				String[] signature = new String[] {String.class.getCanonicalName(), String.class.getCanonicalName()};
-				boolean result = (Boolean)mbeanServer.invoke(name, "authorize", params, signature);
-				if(result){
-					authorities.add(new SimpleGrantedAuthority(role));
-				}
-			}
-		}catch (Exception e){
-			throw new RuntimeException(e.getMessage(), e);
-		}
+      for (String role : PulseConstants.PULSE_ROLES) {
+        Object[] params = role.split(":");
+        String[] signature = new String[] { String.class.getCanonicalName(), String.class.getCanonicalName() };
+        boolean result = (Boolean) mbeanServer.invoke(name, "authorize", params, signature);
+        if (result) {
+          authorities.add(new SimpleGrantedAuthority(role));
+        }
+      }
+    }
+    catch (Exception e) {
+      throw new RuntimeException(e.getMessage(), e);
+    }
 
-		return authorities;
+    return authorities;
 
-	}
+  }
 
-	public JMXConnector getJmxc() {
-		return jmxc;
-	}
-	
-	
+  public JMXConnector getJmxc() {
+    return jmxc;
+  }
 
 }

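Per the javadoc above, authentication for Pulse is simply a successful JMX connect with the submitted credentials, and authorization is whatever the AccessControl MBean grants to that connection. A sketch of how a login could assemble a GemFireAuthentication, using only the standard JMX remote API plus the constructor and populateAuthorities shown in this hunk; the service URL and credentials are illustrative:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;

import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

import org.springframework.security.core.GrantedAuthority;

import com.vmware.gemfire.tools.pulse.internal.security.GemFireAuthentication;

// Sketch of the login flow described in the javadoc: connect to the manager's
// JMX endpoint with the submitted credentials (authentication), then ask the
// AccessControl MBean which Pulse roles the user holds (authorization).
public class GemFireAuthenticationSketch {

  static GemFireAuthentication authenticate(String user, String password) throws Exception {
    JMXServiceURL url =
        new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:1099/jmxrmi");

    Map<String, Object> env = new HashMap<>();
    env.put(JMXConnector.CREDENTIALS, new String[] { user, password });

    // A failed connect throws, which the provider treats as failed authentication.
    JMXConnector jmxc = JMXConnectorFactory.connect(url, env);

    ArrayList<GrantedAuthority> authorities = GemFireAuthentication.populateAuthorities(jmxc);
    return new GemFireAuthentication(user, password, authorities, jmxc);
  }
}
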
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/security/GemFireAuthenticationProvider.java
----------------------------------------------------------------------
diff --git a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/security/GemFireAuthenticationProvider.java b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/security/GemFireAuthenticationProvider.java
index 548c3a5..ee263b1 100644
--- a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/security/GemFireAuthenticationProvider.java
+++ b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/security/GemFireAuthenticationProvider.java
@@ -16,8 +16,12 @@
  */
 package com.vmware.gemfire.tools.pulse.internal.security;
 
+import java.util.Collection;
+import javax.management.remote.JMXConnector;
+
 import com.vmware.gemfire.tools.pulse.internal.data.Repository;
 import com.vmware.gemfire.tools.pulse.internal.log.PulseLogWriter;
+
 import org.springframework.security.authentication.AuthenticationProvider;
 import org.springframework.security.authentication.AuthenticationServiceException;
 import org.springframework.security.authentication.BadCredentialsException;
@@ -26,14 +30,9 @@ import org.springframework.security.core.Authentication;
 import org.springframework.security.core.AuthenticationException;
 import org.springframework.security.core.GrantedAuthority;
 
-import javax.management.remote.JMXConnector;
-import java.util.Collection;
-
 /**
  * Spring security AuthenticationProvider for GemFire. It connects to gemfire manager using given credentials.
  * Successful connect is treated as successful authentication and web user is authenticated
- *
- * @author Tushar Khairnar
  * @since version 9.0
  */
 public class GemFireAuthenticationProvider implements AuthenticationProvider {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/security/LogoutHandler.java
----------------------------------------------------------------------
diff --git a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/security/LogoutHandler.java b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/security/LogoutHandler.java
index a70925d..7309f90 100644
--- a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/security/LogoutHandler.java
+++ b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/security/LogoutHandler.java
@@ -16,20 +16,20 @@
  */
 package com.vmware.gemfire.tools.pulse.internal.security;
 
+import java.io.IOException;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
 import com.vmware.gemfire.tools.pulse.internal.data.Repository;
 import com.vmware.gemfire.tools.pulse.internal.log.PulseLogWriter;
+
 import org.springframework.security.core.Authentication;
 import org.springframework.security.web.authentication.logout.LogoutSuccessHandler;
 import org.springframework.security.web.authentication.logout.SimpleUrlLogoutSuccessHandler;
 
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
-
 /**
  * Handler is used to close jmx connection maintained at user-level
- * @author tushark
  *
  */
 public class LogoutHandler extends SimpleUrlLogoutSuccessHandler implements LogoutSuccessHandler {


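As the javadoc says, the handler's job is to tear down the user-level JMX connection at logout. A hedged sketch of what that can look like with Spring Security's LogoutSuccessHandler contract and the getJmxc() accessor shown earlier; the actual Geode handler may also clear per-user state in Repository, which this hunk does not show:

import java.io.IOException;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.springframework.security.core.Authentication;
import org.springframework.security.web.authentication.logout.SimpleUrlLogoutSuccessHandler;

import com.vmware.gemfire.tools.pulse.internal.security.GemFireAuthentication;

// Sketch of the logout flow the javadoc describes: close the user-level JMX
// connection opened at login, then let the default handler do the redirect.
public class LogoutHandlerSketch extends SimpleUrlLogoutSuccessHandler {

  @Override
  public void onLogoutSuccess(HttpServletRequest request, HttpServletResponse response,
      Authentication authentication) throws IOException, ServletException {
    if (authentication instanceof GemFireAuthentication) {
      ((GemFireAuthentication) authentication).getJmxc().close();
    }
    super.onLogoutSuccess(request, response, authentication);
  }
}
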
[63/63] [abbrv] incubator-geode git commit: Merge remote-tracking branch 'origin/develop' into feature/GEODE-1276

Posted by kl...@apache.org.
Merge remote-tracking branch 'origin/develop' into feature/GEODE-1276


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/9bdd0d59
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/9bdd0d59
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/9bdd0d59

Branch: refs/heads/feature/GEODE-1276
Commit: 9bdd0d59591fa5c3a18d516ed05c2a20cdcef83c
Parents: d754e70 8dc2d30
Author: Kirk Lund <kl...@pivotal.io>
Authored: Wed May 4 15:24:49 2016 -0700
Committer: Kirk Lund <kl...@pivotal.io>
Committed: Wed May 4 15:24:49 2016 -0700

----------------------------------------------------------------------
 BUILDING.md                                     |    2 +-
 build.gradle                                    |    4 +
 .../SessionReplicationIntegrationJUnitTest.java |   30 +-
 .../session/junit/PerTestClassLoaderRunner.java |  138 +-
 geode-assembly/build.gradle                     |    5 +-
 .../LauncherLifecycleCommandsDUnitTest.java     |    3 +
 .../SharedConfigurationEndToEndDUnitTest.java   |    1 -
 .../src/test/resources/expected_jars.txt        |    1 +
 geode-core/build.gradle                         |   26 +-
 .../gemfire/cache/AttributesFactory.java        |   58 -
 .../gemfire/cache/AttributesMutator.java        |   14 -
 .../gemfire/cache/CustomEvictionAttributes.java |   78 -
 .../com/gemstone/gemfire/cache/DataPolicy.java  |   11 -
 .../gemfire/cache/EvictionCriteria.java         |   57 -
 .../com/gemstone/gemfire/cache/Operation.java   |   13 -
 .../gemfire/cache/RegionAttributes.java         |   23 -
 .../gemstone/gemfire/cache/RegionFactory.java   |   24 -
 .../internal/AsyncEventQueueFactoryImpl.java    |    5 -
 .../gemfire/cache/hdfs/HDFSIOException.java     |   52 -
 .../gemstone/gemfire/cache/hdfs/HDFSStore.java  |  341 --
 .../gemfire/cache/hdfs/HDFSStoreFactory.java    |  203 -
 .../gemfire/cache/hdfs/HDFSStoreMutator.java    |  196 -
 .../cache/hdfs/StoreExistsException.java        |   32 -
 .../cache/hdfs/internal/FailureTracker.java     |   96 -
 .../cache/hdfs/internal/FlushObserver.java      |   53 -
 .../hdfs/internal/HDFSBucketRegionQueue.java    | 1232 ------
 .../cache/hdfs/internal/HDFSEntriesSet.java     |  329 --
 .../cache/hdfs/internal/HDFSEventListener.java  |  179 -
 .../hdfs/internal/HDFSEventQueueFilter.java     |   73 -
 .../hdfs/internal/HDFSGatewayEventImpl.java     |  180 -
 .../hdfs/internal/HDFSIntegrationUtil.java      |  117 -
 .../HDFSParallelGatewaySenderQueue.java         |  471 ---
 .../hdfs/internal/HDFSStoreConfigHolder.java    |  559 ---
 .../cache/hdfs/internal/HDFSStoreCreation.java  |  198 -
 .../hdfs/internal/HDFSStoreFactoryImpl.java     |   77 -
 .../cache/hdfs/internal/HDFSStoreImpl.java      |  638 ---
 .../hdfs/internal/HDFSStoreMutatorImpl.java     |  200 -
 .../HDFSWriteOnlyStoreEventListener.java        |  184 -
 .../hdfs/internal/HoplogListenerForRegion.java  |   72 -
 .../cache/hdfs/internal/PersistedEventImpl.java |  202 -
 .../hdfs/internal/QueuedPersistentEvent.java    |   27 -
 .../hdfs/internal/SignalledFlushObserver.java   |  122 -
 .../internal/SortedHDFSQueuePersistedEvent.java |   86 -
 .../internal/SortedHoplogPersistedEvent.java    |  114 -
 .../UnsortedHDFSQueuePersistedEvent.java        |   76 -
 .../internal/UnsortedHoplogPersistedEvent.java  |   92 -
 .../hdfs/internal/hoplog/AbstractHoplog.java    |  357 --
 .../hoplog/AbstractHoplogOrganizer.java         |  430 --
 .../cache/hdfs/internal/hoplog/BloomFilter.java |   36 -
 .../hoplog/CloseTmpHoplogsTimerTask.java        |  108 -
 .../hdfs/internal/hoplog/CompactionStatus.java  |   72 -
 .../cache/hdfs/internal/hoplog/FlushStatus.java |   72 -
 .../internal/hoplog/HDFSCompactionManager.java  |  330 --
 .../internal/hoplog/HDFSFlushQueueArgs.java     |   93 -
 .../internal/hoplog/HDFSFlushQueueFunction.java |  287 --
 .../hoplog/HDFSForceCompactionArgs.java         |  107 -
 .../hoplog/HDFSForceCompactionFunction.java     |  129 -
 .../HDFSForceCompactionResultCollector.java     |  131 -
 .../hoplog/HDFSLastCompactionTimeFunction.java  |   56 -
 .../internal/hoplog/HDFSRegionDirector.java     |  480 ---
 .../hdfs/internal/hoplog/HDFSStoreDirector.java |   78 -
 .../hoplog/HDFSUnsortedHoplogOrganizer.java     |  447 ---
 .../hdfs/internal/hoplog/HFileSortedOplog.java  |  853 ----
 .../hoplog/HdfsSortedOplogOrganizer.java        | 2004 ----------
 .../cache/hdfs/internal/hoplog/Hoplog.java      |  263 --
 .../hdfs/internal/hoplog/HoplogConfig.java      |   74 -
 .../hdfs/internal/hoplog/HoplogListener.java    |   47 -
 .../hdfs/internal/hoplog/HoplogOrganizer.java   |  123 -
 .../hdfs/internal/hoplog/HoplogSetIterator.java |  166 -
 .../hdfs/internal/hoplog/HoplogSetReader.java   |  114 -
 .../internal/hoplog/SequenceFileHoplog.java     |  395 --
 .../hoplog/mapred/AbstractGFRecordReader.java   |  106 -
 .../internal/hoplog/mapred/GFInputFormat.java   |   95 -
 .../internal/hoplog/mapred/GFOutputFormat.java  |   75 -
 .../mapreduce/AbstractGFRecordReader.java       |  140 -
 .../hoplog/mapreduce/GFInputFormat.java         |  124 -
 .../hdfs/internal/hoplog/mapreduce/GFKey.java   |   72 -
 .../hoplog/mapreduce/GFOutputFormat.java        |  198 -
 .../hoplog/mapreduce/HDFSSplitIterator.java     |  197 -
 .../internal/hoplog/mapreduce/HoplogUtil.java   |  463 ---
 .../hoplog/mapreduce/RWSplitIterator.java       |   48 -
 .../hoplog/mapreduce/StreamSplitIterator.java   |   46 -
 .../org/apache/hadoop/io/SequenceFile.java      | 3726 ------------------
 .../cache/operations/OperationContext.java      |  556 +--
 .../query/internal/index/DummyQRegion.java      |    3 -
 .../cache/query/internal/index/HashIndex.java   |    1 -
 .../query/internal/index/IndexManager.java      |    8 -
 .../gemfire/cache/wan/GatewaySender.java        |    2 -
 .../gemfire/distributed/DistributedSystem.java  |    7 +-
 .../internal/AbstractDistributionConfig.java    |   48 +-
 .../distributed/internal/ConfigAttribute.java   |    1 -
 .../internal/ConfigAttributeChecker.java        |    1 -
 .../internal/ConfigAttributeDesc.java           |    3 -
 .../internal/ConfigAttributeGetter.java         |    3 -
 .../internal/ConfigAttributeSetter.java         |    3 -
 .../internal/DistributionConfig.java            |    7 +
 .../internal/DistributionConfigImpl.java        |   38 +-
 .../internal/InternalDistributedSystem.java     |  255 +-
 .../distributed/internal/InternalLocator.java   |    6 +-
 .../membership/InternalDistributedMember.java   |   68 +-
 .../internal/membership/NetMember.java          |    4 +-
 .../internal/membership/gms/GMSMember.java      |   36 +-
 .../internal/membership/gms/ServiceConfig.java  |   18 +
 .../membership/gms/fd/GMSHealthMonitor.java     |    7 +-
 .../membership/gms/locator/GMSLocator.java      |    2 +-
 .../membership/gms/membership/GMSJoinLeave.java |  152 +-
 .../gms/messenger/JGroupsMessenger.java         |    7 +-
 .../gemfire/internal/AbstractConfig.java        |   28 +-
 .../gemstone/gemfire/internal/DSFIDFactory.java |    3 -
 .../internal/DataSerializableFixedID.java       |    1 -
 .../gemfire/internal/HeapDataOutputStream.java  |    3 -
 .../admin/remote/RemoteRegionAttributes.java    |   25 -
 .../cache/AbstractBucketRegionQueue.java        |   18 +-
 .../gemfire/internal/cache/AbstractRegion.java  |  147 -
 .../internal/cache/AbstractRegionEntry.java     |   36 +-
 .../internal/cache/AbstractRegionMap.java       |   86 +-
 .../gemfire/internal/cache/BucketAdvisor.java   |    1 -
 .../gemfire/internal/cache/BucketRegion.java    |  212 +-
 .../internal/cache/BucketRegionQueue.java       |    6 +-
 .../cache/CacheDistributionAdvisor.java         |   22 +-
 .../gemfire/internal/cache/CachePerfStats.java  |   75 -
 .../internal/cache/ColocationHelper.java        |    3 -
 .../cache/CustomEvictionAttributesImpl.java     |   35 -
 .../gemfire/internal/cache/DiskEntry.java       |    1 -
 .../gemfire/internal/cache/DistTXState.java     |    2 +-
 .../cache/DistributedCacheOperation.java        |    7 +-
 .../cache/DistributedPutAllOperation.java       |   33 +-
 .../internal/cache/DistributedRegion.java       |   42 +-
 .../cache/DistributedRemoveAllOperation.java    |   19 +-
 .../gemfire/internal/cache/EntryEventImpl.java  |   55 +-
 .../gemfire/internal/cache/EvictorService.java  |  284 --
 .../gemfire/internal/cache/FilterProfile.java   |   13 +-
 .../internal/cache/GemFireCacheImpl.java        |  119 +-
 .../gemfire/internal/cache/HARegion.java        |   15 +-
 .../internal/cache/HDFSLRURegionMap.java        |  111 -
 .../gemfire/internal/cache/HDFSRegionMap.java   |   32 -
 .../internal/cache/HDFSRegionMapDelegate.java   |  540 ---
 .../internal/cache/HDFSRegionMapImpl.java       |   74 -
 .../gemfire/internal/cache/InternalCache.java   |    4 -
 .../internal/cache/InternalDataView.java        |   28 +-
 .../internal/cache/InternalRegionArguments.java |   16 -
 .../gemfire/internal/cache/LocalRegion.java     |  288 +-
 .../internal/cache/LocalRegionDataView.java     |   35 +-
 .../internal/cache/NonLocalRegionEntry.java     |   20 -
 .../gemstone/gemfire/internal/cache/Oplog.java  |   14 -
 .../gemfire/internal/cache/OverflowOplog.java   |    1 -
 .../internal/cache/PartitionedRegion.java       |  482 +--
 .../cache/PartitionedRegionDataStore.java       |   49 +-
 .../cache/PartitionedRegionDataView.java        |   27 +-
 .../gemfire/internal/cache/ProxyRegionMap.java  |   21 -
 .../gemfire/internal/cache/RegionEntry.java     |   20 -
 .../internal/cache/RegionMapFactory.java        |    6 -
 .../internal/cache/RemoteDestroyMessage.java    |    2 +-
 .../internal/cache/RemoteGetMessage.java        |    2 +-
 .../internal/cache/RemotePutMessage.java        |    2 +-
 .../gemfire/internal/cache/TXEntry.java         |    3 +-
 .../gemfire/internal/cache/TXEntryState.java    |   14 -
 .../gemfire/internal/cache/TXState.java         |   38 +-
 .../internal/cache/TXStateInterface.java        |   10 +-
 .../internal/cache/TXStateProxyImpl.java        |   30 +-
 .../gemfire/internal/cache/TXStateStub.java     |   32 +-
 .../gemfire/internal/cache/UpdateOperation.java |    3 -
 .../cache/UserSpecifiedRegionAttributes.java    |   24 +-
 .../cache/VMStatsDiskLRURegionEntryHeap.java    |    2 +-
 .../cache/VMStatsDiskLRURegionEntryOffHeap.java |    2 +-
 .../cache/VMStatsDiskRegionEntryHeap.java       |    2 +-
 .../cache/VMStatsDiskRegionEntryOffHeap.java    |    2 +-
 .../cache/VMStatsLRURegionEntryHeap.java        |    2 +-
 .../cache/VMStatsLRURegionEntryOffHeap.java     |    2 +-
 .../internal/cache/VMStatsRegionEntryHeap.java  |    2 +-
 .../cache/VMStatsRegionEntryOffHeap.java        |    2 +-
 .../cache/VMThinDiskLRURegionEntryHeap.java     |    2 +-
 .../cache/VMThinDiskLRURegionEntryOffHeap.java  |    2 +-
 .../cache/VMThinDiskRegionEntryHeap.java        |    2 +-
 .../cache/VMThinDiskRegionEntryOffHeap.java     |    2 +-
 .../cache/VMThinLRURegionEntryHeap.java         |    2 +-
 .../cache/VMThinLRURegionEntryOffHeap.java      |    2 +-
 .../internal/cache/VMThinRegionEntryHeap.java   |    2 +-
 .../cache/VMThinRegionEntryOffHeap.java         |    2 +-
 .../internal/cache/ValidatingDiskRegion.java    |   13 -
 .../VersionedStatsDiskLRURegionEntryHeap.java   |    2 +-
 ...VersionedStatsDiskLRURegionEntryOffHeap.java |    2 +-
 .../VersionedStatsDiskRegionEntryHeap.java      |    2 +-
 .../VersionedStatsDiskRegionEntryOffHeap.java   |    2 +-
 .../cache/VersionedStatsLRURegionEntryHeap.java |    2 +-
 .../VersionedStatsLRURegionEntryOffHeap.java    |    2 +-
 .../cache/VersionedStatsRegionEntryHeap.java    |    2 +-
 .../cache/VersionedStatsRegionEntryOffHeap.java |    2 +-
 .../VersionedThinDiskLRURegionEntryHeap.java    |    2 +-
 .../VersionedThinDiskLRURegionEntryOffHeap.java |    2 +-
 .../cache/VersionedThinDiskRegionEntryHeap.java |    2 +-
 .../VersionedThinDiskRegionEntryOffHeap.java    |    2 +-
 .../cache/VersionedThinLRURegionEntryHeap.java  |    2 +-
 .../VersionedThinLRURegionEntryOffHeap.java     |    2 +-
 .../cache/VersionedThinRegionEntryHeap.java     |    2 +-
 .../cache/VersionedThinRegionEntryOffHeap.java  |    2 +-
 .../cache/control/InternalResourceManager.java  |   10 -
 .../cache/partitioned/DestroyMessage.java       |    2 +-
 .../partitioned/FetchBulkEntriesMessage.java    |    2 +-
 .../internal/cache/partitioned/GetMessage.java  |   22 +-
 .../cache/partitioned/PutAllPRMessage.java      |   16 +-
 .../internal/cache/partitioned/PutMessage.java  |   12 +-
 .../persistence/soplog/ByteComparator.java      |   55 -
 .../persistence/soplog/CursorIterator.java      |   81 -
 .../soplog/DelegatingSerializedComparator.java  |   37 -
 .../soplog/HFileStoreStatistics.java            |  205 -
 .../persistence/soplog/KeyValueIterator.java    |   42 -
 .../soplog/SortedOplogStatistics.java           |  505 ---
 .../cache/persistence/soplog/SortedReader.java  |  255 --
 .../persistence/soplog/TrackedReference.java    |  153 -
 .../cache/tier/sockets/BaseCommand.java         |   11 +-
 .../cache/tier/sockets/CacheClientNotifier.java |   36 +-
 .../internal/cache/tier/sockets/Message.java    |   16 -
 .../cache/tier/sockets/command/Destroy65.java   |    2 +-
 .../cache/tier/sockets/command/Get70.java       |    4 +-
 .../cache/tier/sockets/command/Request.java     |    4 +-
 .../internal/cache/tx/ClientTXRegionStub.java   |    4 +-
 .../cache/tx/DistributedTXRegionStub.java       |   14 +-
 .../cache/tx/PartitionedTXRegionStub.java       |    8 +-
 .../gemfire/internal/cache/tx/TXRegionStub.java |    4 +-
 .../cache/wan/AbstractGatewaySender.java        |   22 +-
 .../cache/wan/GatewaySenderAttributes.java      |    5 -
 .../cache/wan/GatewaySenderEventImpl.java       |    5 -
 ...rentParallelGatewaySenderEventProcessor.java |    3 -
 .../ConcurrentParallelGatewaySenderQueue.java   |   12 -
 .../ParallelGatewaySenderEventProcessor.java    |   22 +-
 .../parallel/ParallelGatewaySenderQueue.java    |   21 +-
 .../cache/xmlcache/AsyncEventQueueCreation.java |    9 -
 .../internal/cache/xmlcache/CacheCreation.java  |   39 +-
 .../internal/cache/xmlcache/CacheXml.java       |   31 -
 .../internal/cache/xmlcache/CacheXmlParser.java |  170 -
 .../xmlcache/RegionAttributesCreation.java      |   55 +-
 .../gemfire/internal/i18n/LocalizedStrings.java |   32 +-
 .../internal/i18n/ParentLocalizedStrings.java   |    2 +-
 .../internal/offheap/AbstractStoredObject.java  |    1 -
 .../internal/offheap/FreeListManager.java       |    7 -
 .../internal/offheap/OffHeapStoredObject.java   |    6 -
 .../internal/offheap/TinyStoredObject.java      |    4 -
 .../internal/security/AuthorizeRequest.java     |    5 -
 .../internal/security/GeodeSecurityUtil.java    |  167 +
 .../security/shiro/CustomAuthRealm.java         |  176 +
 .../security/shiro/JMXShiroAuthenticator.java   |   69 +
 .../gemfire/internal/tcp/MsgStreamer.java       |   11 -
 .../management/AsyncEventQueueMXBean.java       |    5 +
 .../gemfire/management/CacheServerMXBean.java   |   12 +-
 .../gemfire/management/DiskStoreMXBean.java     |    9 +
 .../DistributedLockServiceMXBean.java           |    8 +-
 .../management/DistributedRegionMXBean.java     |   16 +-
 .../management/DistributedSystemMXBean.java     |   38 +-
 .../management/GatewayReceiverMXBean.java       |    8 +-
 .../gemfire/management/GatewaySenderMXBean.java |   12 +-
 .../gemfire/management/LocatorMXBean.java       |    5 +
 .../gemfire/management/LockServiceMXBean.java   |   10 +-
 .../gemfire/management/ManagerMXBean.java       |   12 +-
 .../gemfire/management/MemberMXBean.java        |   22 +-
 .../gemfire/management/RegionMXBean.java        |   14 +-
 .../management/internal/ManagementAgent.java    |  126 +-
 .../internal/SystemManagementService.java       |   39 +-
 .../internal/beans/DistributedRegionBridge.java |    5 -
 .../internal/beans/DistributedRegionMBean.java  |    5 -
 .../internal/beans/DistributedSystemBridge.java |   19 -
 .../internal/beans/DistributedSystemMBean.java  |    7 -
 .../internal/beans/GatewaySenderMBean.java      |    8 +-
 .../internal/beans/HDFSRegionBridge.java        |  173 -
 .../management/internal/beans/MemberMBean.java  |    5 -
 .../internal/beans/MemberMBeanBridge.java       |   44 +-
 .../internal/beans/PartitionedRegionBridge.java |   13 +-
 .../management/internal/beans/RegionMBean.java  |    5 -
 .../internal/beans/RegionMBeanBridge.java       |    5 -
 .../beans/stats/RegionClusterStatsMonitor.java  |    7 -
 .../management/internal/cli/CommandManager.java |    3 -
 .../internal/cli/commands/ClientCommands.java   |   50 +-
 .../internal/cli/commands/ConfigCommands.java   |   44 +-
 .../CreateAlterDestroyRegionCommands.java       |   42 +-
 .../internal/cli/commands/DataCommands.java     |   36 +-
 .../internal/cli/commands/DeployCommands.java   |   21 +-
 .../cli/commands/DiskStoreCommands.java         |   64 +-
 .../cli/commands/DurableClientCommands.java     |   45 +-
 ...ExportImportSharedConfigurationCommands.java |   23 +-
 .../internal/cli/commands/FunctionCommands.java |   33 +-
 .../internal/cli/commands/GfshHelpCommands.java |   13 +-
 .../internal/cli/commands/IndexCommands.java    |   39 +-
 .../cli/commands/LauncherLifecycleCommands.java |   81 +-
 .../internal/cli/commands/MemberCommands.java   |   27 +-
 .../cli/commands/MiscellaneousCommands.java     |   99 +-
 .../internal/cli/commands/PDXCommands.java      |   33 +-
 .../internal/cli/commands/QueueCommands.java    |   25 +-
 .../internal/cli/commands/RegionCommands.java   |   17 +-
 .../internal/cli/commands/ShellCommands.java    |  106 +-
 .../internal/cli/commands/StatusCommands.java   |   24 +-
 .../internal/cli/commands/WanCommands.java      |  336 +-
 .../cli/domain/RegionAttributesInfo.java        |   21 +-
 .../functions/DescribeHDFSStoreFunction.java    |   86 -
 .../internal/cli/remote/CommandProcessor.java   |   24 +-
 .../internal/cli/result/ErrorResultData.java    |   10 +-
 .../internal/cli/result/ResultBuilder.java      |    7 +-
 .../internal/cli/shell/JmxOperationInvoker.java |   70 +-
 .../cli/util/HDFSStoreNotFoundException.java    |   47 -
 .../cli/util/RegionAttributesNames.java         |    4 +-
 .../internal/security/AccessControl.java        |   51 -
 .../internal/security/AccessControlContext.java |   37 -
 .../internal/security/AccessControlMBean.java   |   41 +
 .../internal/security/AccessControlMXBean.java  |    6 +-
 .../internal/security/CLIOperationContext.java  |  138 -
 .../internal/security/JMXOperationContext.java  |  177 -
 .../internal/security/JSONAuthorization.java    |  308 --
 .../internal/security/MBeanServerWrapper.java   |  168 +-
 .../security/ManagementInterceptor.java         |  271 --
 .../management/internal/security/Resource.java  |   26 -
 .../internal/security/ResourceConstants.java    |   91 +-
 .../internal/security/ResourceOperation.java    |   13 +-
 .../security/ResourceOperationContext.java      |  221 +-
 .../controllers/AbstractCommandsController.java |   82 +-
 .../controllers/ConfigCommandsController.java   |   20 +-
 .../web/controllers/DataCommandsController.java |   26 +-
 .../DiskStoreCommandsController.java            |   14 +-
 .../controllers/FunctionCommandsController.java |    9 +-
 .../MiscellaneousCommandsController.java        |    8 +-
 .../web/controllers/WanCommandsController.java  |    2 +-
 .../EnvironmentVariablesHandlerInterceptor.java |   92 -
 .../support/LoginHandlerInterceptor.java        |  122 +
 .../support/MemberMXBeanAdapter.java            |    5 -
 .../web/http/support/SimpleHttpRequester.java   |  105 +-
 .../web/shell/AbstractHttpOperationInvoker.java |   28 +-
 .../web/shell/RestHttpOperationInvoker.java     |   26 +-
 .../web/shell/SimpleHttpOperationInvoker.java   |   11 +-
 .../gemfire/pdx/internal/PdxReaderImpl.java     |    3 -
 .../gemfire/security/AccessControl.java         |   16 +-
 .../gemfire/security/Authenticator.java         |   18 +-
 .../geode.apache.org/schema/cache/cache-1.0.xsd |   31 -
 .../gemfire/cache/ConnectionPoolDUnitTest.java  |   21 -
 .../SignalledFlushObserverJUnitTest.java        |   97 -
 .../SortedListForAsyncQueueJUnitTest.java       |  564 ---
 .../GetOperationContextImplJUnitTest.java       |    1 -
 .../com/gemstone/gemfire/cache/query/Utils.java |   38 +
 .../dunit/QueryDataInconsistencyDUnitTest.java  |    2 -
 .../QueryUsingFunctionContextDUnitTest.java     |    8 +-
 .../QueryREUpdateInProgressJUnitTest.java       |   12 +-
 ...rrentIndexInitOnOverflowRegionDUnitTest.java |   27 +-
 ...ndexOperationsOnOverflowRegionDUnitTest.java |   97 +-
 ...pdateWithInplaceObjectModFalseDUnitTest.java |   46 +-
 ...ConcurrentIndexUpdateWithoutWLDUnitTest.java |   48 +-
 ...itializeIndexEntryDestroyQueryDUnitTest.java |   96 +-
 .../PRBasicIndexCreationDUnitTest.java          |  315 +-
 .../PRBasicIndexCreationDeadlockDUnitTest.java  |   42 +-
 .../PRBasicMultiIndexCreationDUnitTest.java     |  276 +-
 .../partitioned/PRBasicQueryDUnitTest.java      |   36 +-
 .../PRBasicRemoveIndexDUnitTest.java            |   37 +-
 .../PRColocatedEquiJoinDUnitTest.java           |  106 +-
 .../partitioned/PRInvalidQueryDUnitTest.java    |   26 +-
 .../partitioned/PRQueryCacheCloseDUnitTest.java |   81 +-
 .../query/partitioned/PRQueryDUnitHelper.java   |  780 +---
 .../query/partitioned/PRQueryDUnitTest.java     |   87 +-
 .../query/partitioned/PRQueryPerfDUnitTest.java |  504 ---
 .../PRQueryRegionCloseDUnitTest.java            |   28 +-
 .../PRQueryRegionDestroyedDUnitTest.java        |   36 +-
 .../PRQueryRemoteNodeExceptionDUnitTest.java    |   48 +-
 .../gemfire/cache30/Bug38741DUnitTest.java      |    2 +-
 .../cache30/ClientMembershipDUnitTest.java      | 1016 +++--
 .../gemfire/cache30/ReconnectDUnitTest.java     |  235 +-
 .../gemfire/distributed/LocatorDUnitTest.java   |  169 +-
 .../internal/DistributionConfigJUnitTest.java   |   51 +-
 .../gms/membership/GMSJoinLeaveJUnitTest.java   |    7 +-
 .../gms/membership/GMSJoinLeaveTestHelper.java  |    7 +-
 ...hreadPoolExecutorWithKeepAliveJUnitTest.java |   10 +-
 .../cache/FixedPRSinglehopDUnitTest.java        |   16 +
 .../extension/mock/MockExtensionCommands.java   |   23 +-
 .../internal/cache/ha/Bug48571DUnitTest.java    |   34 +-
 .../cache/wan/AsyncEventQueueTestBase.java      |   12 -
 .../asyncqueue/AsyncEventListenerDUnitTest.java | 1112 +++---
 .../AsyncEventQueueStatsDUnitTest.java          |  186 +-
 .../ConcurrentAsyncEventQueueDUnitTest.java     |  168 +-
 .../CommonParallelAsyncEventQueueDUnitTest.java |    8 +-
 .../ParallelGatewaySenderQueueJUnitTest.java    |    2 +-
 .../gemfire/management/QueryDataDUnitTest.java  |    6 +-
 .../internal/cli/CommandManagerJUnitTest.java   |    8 +
 .../internal/cli/GfshParserJUnitTest.java       |    9 +-
 .../management/internal/cli/HeadlessGfsh.java   |    2 +-
 .../cli/commands/CliCommandTestBase.java        |  134 +-
 .../cli/commands/ConfigCommandsDUnitTest.java   |   26 +-
 ...eateAlterDestroyRegionCommandsDUnitTest.java |   34 +-
 .../cli/commands/DeployCommandsDUnitTest.java   |   14 +-
 .../commands/DiskStoreCommandsDUnitTest.java    |   30 +-
 .../commands/DiskStoreCommandsJUnitTest.java    |    1 +
 .../cli/commands/FunctionCommandsDUnitTest.java |  112 +-
 .../commands/GemfireDataCommandsDUnitTest.java  |   28 +-
 ...WithCacheLoaderDuringCacheMissDUnitTest.java |   15 +-
 .../cli/commands/IndexCommandsDUnitTest.java    |   21 +-
 ...stAndDescribeDiskStoreCommandsDUnitTest.java |   14 +-
 .../ListAndDescribeRegionDUnitTest.java         |   13 +-
 .../cli/commands/ListIndexCommandDUnitTest.java |   14 +-
 .../MiscellaneousCommandsDUnitTest.java         |   40 +-
 ...laneousCommandsExportLogsPart1DUnitTest.java |   15 +-
 ...laneousCommandsExportLogsPart2DUnitTest.java |   16 +-
 ...laneousCommandsExportLogsPart3DUnitTest.java |   24 +-
 ...laneousCommandsExportLogsPart4DUnitTest.java |   15 +-
 .../cli/commands/QueueCommandsDUnitTest.java    |   16 +-
 .../SharedConfigurationCommandsDUnitTest.java   |   16 +-
 .../cli/commands/ShellCommandsDUnitTest.java    |   12 +-
 .../cli/commands/ShowMetricsDUnitTest.java      |   16 +-
 .../cli/commands/ShowStackTraceDUnitTest.java   |   14 +-
 .../cli/commands/ToUpperResultCollector.java    |   65 +
 .../cli/commands/UserCommandsDUnitTest.java     |   16 +-
 .../shell/GfshExecutionStrategyJUnitTest.java   |    8 +-
 .../domain/CacheElementJUnitTest.java           |    1 -
 .../security/AccessControlMBeanJUnitTest.java   |   58 +
 ...rDistributedSystemMXBeanIntegrationTest.java |   50 -
 ...horizeOperationForMBeansIntegrationTest.java |  323 --
 ...erationForRegionCommandsIntegrationTest.java |  136 -
 ...CacheServerMBeanAuthenticationJUnitTest.java |   58 +
 .../CacheServerMBeanAuthorizationJUnitTest.java |   90 +
 .../CacheServerMBeanShiroJUnitTest.java         |   93 +
 .../security/CliCommandsSecurityTest.java       |   83 +
 .../security/DataCommandsSecurityTest.java      |   83 +
 .../DiskStoreMXBeanSecurityJUnitTest.java       |   83 +
 .../security/ExampleJSONAuthorization.java      |  197 +
 .../GatewayReceiverMBeanSecurityTest.java       |   90 +
 .../GatewaySenderMBeanSecurityTest.java         |  105 +
 .../GeodeSecurityUtilCustomRealmJUnitTest.java  |   52 +
 .../GeodeSecurityUtilWithIniFileJUnitTest.java  |  147 +
 .../security/GfshCommandsSecurityTest.java      |  165 +
 .../security/GfshShellConnectionRule.java       |  109 +
 .../security/JMXConnectionConfiguration.java    |   33 +
 .../internal/security/JSONAuthorization.java    |  201 +
 ...JSONAuthorizationDetailsIntegrationTest.java |  163 -
 .../JsonAuthorizationCacheStartRule.java        |   83 +
 .../LockServiceMBeanAuthorizationJUnitTest.java |   90 +
 .../security/MBeanSecurityJUnitTest.java        |  117 +
 .../security/MBeanServerConnectionRule.java     |  130 +
 .../ManagerMBeanAuthorizationJUnitTest.java     |   78 +
 .../security/MemberMBeanSecurityJUnitTest.java  |  110 +
 ...tionCodesForDataCommandsIntegrationTest.java |  101 -
 ...tionCodesForDistributedSystemMXBeanTest.java |   77 -
 .../ResourceOperationContextJUnitTest.java      |   88 +
 .../internal/security/ShiroCacheStartRule.java  |   63 +
 .../internal/security/TestCommand.java          |  227 ++
 .../ReadOpFileAccessControllerJUnitTest.java    |   19 +-
 .../security/ClientAuthorizationDUnitTest.java  |    7 +-
 .../security/ClientAuthorizationTestCase.java   |   20 +-
 .../DeltaClientPostAuthorizationDUnitTest.java  |   21 +-
 .../security/templates/XmlAuthorization.java    |    2 +-
 .../com/gemstone/gemfire/test/dunit/VM.java     |   62 +-
 .../internal/JUnit4DistributedTestCase.java     |    3 -
 .../gemstone/gemfire/util/test/TestUtil.java    |    8 +-
 .../gemfire/codeAnalysis/excludedClasses.txt    |   28 +-
 .../sanctionedDataSerializables.txt             |   92 +-
 .../codeAnalysis/sanctionedSerializables.txt    |   29 +-
 .../management/internal/security/auth1.json     |   28 +-
 .../management/internal/security/auth3.json     |   55 +-
 .../internal/security/cacheServer.json          |  188 +
 .../management/internal/security/shiro-ini.json |   87 +
 .../internal/security/testInheritRole.json      |   40 -
 .../security/testSimpleUserAndRole.json         |   14 -
 .../testUserAndRoleRegionServerGroup.json       |   16 -
 .../internal/security/testUserMultipleRole.json |   20 -
 geode-core/src/test/resources/shiro.ini         |   40 +
 .../cache/query/internal/cq/CqServiceImpl.java  |    2 +-
 .../cli/commands/ClientCommandsDUnitTest.java   |   53 +-
 .../DurableClientCommandsDUnitTest.java         |    2 +-
 .../junit/rules/DescribedExternalResource.java  |   63 +
 geode-lucene/build.gradle                       |    4 -
 .../gemfire/cache/lucene/LuceneIndex.java       |    2 +-
 .../LuceneIndexForReplicatedRegion.java         |    2 +-
 .../cache/lucene/internal/LuceneIndexImpl.java  |   14 +-
 .../lucene/internal/LuceneServiceImpl.java      |   21 +-
 .../internal/distributed/CollectorManager.java  |    3 +-
 .../internal/distributed/LuceneFunction.java    |   31 +-
 .../distributed/TopEntriesCollectorManager.java |    2 +-
 .../TopEntriesFunctionCollector.java            |    9 +-
 .../internal/xml/LuceneIndexCreation.java       |   42 +-
 .../internal/xml/LuceneIndexXmlGenerator.java   |    5 +
 .../lucene/internal/xml/LuceneXmlConstants.java |    1 +
 .../lucene/internal/xml/LuceneXmlParser.java    |   27 +-
 .../geode.apache.org/lucene/lucene-1.0.xsd      |    1 +
 .../gemfire/cache/lucene/LuceneQueriesBase.java |  148 +
 .../lucene/LuceneQueriesIntegrationTest.java    |   88 +
 .../cache/lucene/LuceneQueriesPRBase.java       |   75 +
 .../lucene/LuceneQueriesPeerPRDUnitTest.java    |   36 +
 .../LuceneQueriesPeerPROverflowDUnitTest.java   |   41 +
 .../distributed/LuceneFunctionJUnitTest.java    |  451 +--
 .../LuceneFunctionReadPathDUnitTest.java        |  238 --
 .../TopEntriesFunctionCollectorJUnitTest.java   |   35 +-
 ...neIndexXmlGeneratorIntegrationJUnitTest.java |   10 +-
 ...uceneIndexXmlParserIntegrationJUnitTest.java |   52 +-
 .../xml/LuceneIndexXmlParserJUnitTest.java      |  100 +-
 ...erIntegrationJUnitTest.createIndex.cache.xml |    7 +-
 ...nJUnitTest.parseIndexWithAnalyzers.cache.xml |   36 +
 geode-pulse/build.gradle                        |    1 +
 .../tools/pulse/internal/PulseAppListener.java  |   68 +-
 .../internal/controllers/PulseController.java   |   28 +-
 .../tools/pulse/internal/data/Cluster.java      |   23 +-
 .../pulse/internal/data/JMXDataUpdater.java     |  217 +-
 .../pulse/internal/data/PulseConstants.java     |   15 +-
 .../tools/pulse/internal/data/Repository.java   |   54 +-
 .../pulse/internal/log/PulseLogWriter.java      |    4 -
 .../security/GemFireAuthentication.java         |   91 +
 .../security/GemFireAuthenticationProvider.java |   80 +
 .../pulse/internal/security/LogoutHandler.java  |   55 +
 .../internal/service/ClusterRegionService.java  |   11 -
 .../internal/service/ClusterRegionsService.java |   11 -
 .../service/ClusterSelectedRegionService.java   |    6 -
 geode-pulse/src/main/resources/pulse.properties |   12 +-
 .../src/main/webapp/WEB-INF/spring-security.xml |   59 +-
 .../scripts/pulsescript/PulseCallbacks.js       |    2 -
 .../webapp/scripts/pulsescript/clusterDetail.js |    7 +-
 .../controllers/PulseControllerJUnitTest.java   |   23 +-
 .../tools/pulse/testbed/driver/PulseUITest.java |    2 +-
 .../pulse/tests/DataBrowserResultLoader.java    |   14 +-
 .../tools/pulse/tests/PulseAbstractTest.java    | 1047 +++++
 .../tools/pulse/tests/PulseAuthTest.java        |   33 +
 .../tools/pulse/tests/PulseAutomatedTest.java   |   17 +-
 .../tools/pulse/tests/PulseBaseTest.java        |    4 +-
 .../tools/pulse/tests/PulseNoAuthTest.java      |   33 +
 .../gemfire/tools/pulse/tests/PulseTest.java    | 1039 -----
 .../gemfire/tools/pulse/tests/Region.java       |   11 +-
 .../gemfire/tools/pulse/tests/Server.java       |  156 +-
 geode-pulse/src/test/resources/pulse-auth.json  |   21 +
 geode-pulse/src/test/resources/test.properties  |    6 +-
 geode-rebalancer/build.gradle                   |    7 -
 geode-site/website/content/docs/index.html      |    2 +-
 geode-site/website/content/releases/index.html  |   52 +
 .../cache/wan/GatewaySenderFactoryImpl.java     |    4 -
 .../internal/cache/UpdateVersionDUnitTest.java  |    6 +-
 .../cache/wan/CacheClientNotifierDUnitTest.java |  106 +-
 .../cache/wan/Simple2CacheServerDUnitTest.java  |  157 +
 .../gemfire/internal/cache/wan/WANTestBase.java |   17 +
 .../wan/misc/NewWanAuthenticationDUnitTest.java |  309 ++
 .../serial/SerialWANPropogationDUnitTest.java   |    4 -
 .../wan/wancommand/WANCommandTestBase.java      |  104 +-
 ...anCommandCreateGatewayReceiverDUnitTest.java |  174 +-
 .../WanCommandCreateGatewaySenderDUnitTest.java |  192 +-
 ...WanCommandGatewayReceiverStartDUnitTest.java |  120 +-
 .../WanCommandGatewayReceiverStopDUnitTest.java |  120 +-
 .../WanCommandGatewaySenderStartDUnitTest.java  |  177 +-
 .../WanCommandGatewaySenderStopDUnitTest.java   |  158 +-
 .../wan/wancommand/WanCommandListDUnitTest.java |  135 +-
 .../WanCommandPauseResumeDUnitTest.java         |  370 +-
 .../wancommand/WanCommandStatusDUnitTest.java   |  174 +-
 .../src/main/webapp/WEB-INF/gemfire-servlet.xml |    2 +-
 ...entVariablesHandlerInterceptorJUnitTest.java |  267 --
 .../LoginHandlerInterceptorJUnitTest.java       |  273 ++
 gradle.properties                               |    1 +
 gradle/dependency-versions.properties           |    1 +
 gradle/publish.gradle                           |    4 +-
 gradle/wrapper/gradle-wrapper.jar               |  Bin 53637 -> 53639 bytes
 gradle/wrapper/gradle-wrapper.properties        |    2 +-
 settings.gradle                                 |    1 -
 547 files changed, 13681 insertions(+), 35913 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/extensions/geode-modules-session/src/test/java/com/gemstone/gemfire/modules/session/internal/filter/SessionReplicationIntegrationJUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/geode-assembly/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/LauncherLifecycleCommandsDUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/geode-core/src/test/java/com/gemstone/gemfire/cache/ConnectionPoolDUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/geode-core/src/test/java/com/gemstone/gemfire/cache/operations/internal/GetOperationContextImplJUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDUnitTest.java
----------------------------------------------------------------------
diff --cc geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDUnitTest.java
index 1532d2a,2cf8c3c..bc901f4
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDUnitTest.java
@@@ -16,6 -16,9 +16,8 @@@
   */
  package com.gemstone.gemfire.cache.query.partitioned;
  
+ import static com.gemstone.gemfire.cache.query.Utils.*;
+ 
 -
  import java.util.Collection;
  
  import com.gemstone.gemfire.cache.Cache;
@@@ -33,18 -39,18 +38,7 @@@ import com.gemstone.gemfire.test.dunit.
  import com.gemstone.gemfire.test.dunit.ThreadUtils;
  import com.gemstone.gemfire.test.dunit.VM;
  
--/**
-- * 
-- */
--public class PRBasicIndexCreationDUnitTest extends
--    PartitionedRegionDUnitTestCase
--
--{
--  /**
--   * constructor
--   * 
--   * @param name
--   */
++public class PRBasicIndexCreationDUnitTest extends PartitionedRegionDUnitTestCase {
  
    public PRBasicIndexCreationDUnitTest(String name) {
      super(name);
@@@ -412,42 -400,8 +388,9 @@@
          cnt, cntDest));
      vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
          "PrIndexOnStatus", "status",null, ""));
-     /*
-     vm1.invoke(new CacheSerializableRunnable("IndexCreationOnPosition") {
-       public void run2(){
-         try {
-           Cache cache = getCache();
-           QueryService qs = cache.getQueryService();
-           Region region = cache.getRegion(name);
-           LogWriter logger = cache.getLogger();
-          // logger.info("Test Creating index with Name : [ "+indexName+" ] " +
-          //               "IndexedExpression : [ "+indexedExpression+" ] Alias : [ "+alias+" ] FromClause : [ "+region.getFullPath() + " " + alias+" ] " );
-           Index parIndex = qs.createIndex("IndexOnPotionMktValue", IndexType.FUNCTIONAL, "pVal.mktValue"
-               ,region.getFullPath()+" pf, pf.positions pVal TYPE Position", "import parReg.\"query\".Position;");
-           logger.info(
-               "Index creted on partitioned region : " + parIndex);
-           logger.info(
-               "Number of buckets indexed in the partitioned region locally : "
-                   + "" + ((PartitionedIndex)parIndex).getNumberOfIndexedBucket()
-                   + " and remote buckets indexed : "
-                   + ((PartitionedIndex)parIndex).getNumRemoteBucketsIndexed());
-           /*
-            * assertIndexDetailsEquals("Max num of buckets in the partiotion regions and
-            * the " + "buckets indexed should be equal",
-            * ((PartitionedRegion)region).getTotalNumberOfBuckets(),
-            * (((PartionedIndex)parIndex).getNumberOfIndexedBucket()+((PartionedIndex)parIndex).getNumRemtoeBucketsIndexed()));
-            * should put all the assetion in a seperate function.
-            */ 
-        /* } 
-         catch (Exception ex) {
-           fail("Creating Index in this vm failed : ", ex);
-         }
-       
-       }
-     });*/
++
      vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
      vm1.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
-     // vm2.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
      vm3.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
    } 
    

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicMultiIndexCreationDUnitTest.java
----------------------------------------------------------------------
diff --cc geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicMultiIndexCreationDUnitTest.java
index 8ce3949,7b93734..fcc5c49
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicMultiIndexCreationDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicMultiIndexCreationDUnitTest.java
@@@ -32,26 -34,26 +34,19 @@@ import com.gemstone.gemfire.test.dunit.
  import com.gemstone.gemfire.test.dunit.SerializableRunnable;
  import com.gemstone.gemfire.test.dunit.VM;
  
--
--public class PRBasicMultiIndexCreationDUnitTest extends
--    PartitionedRegionDUnitTestCase
--
--{
--  /**
--   * constructor
--   * 
--   * @param name
--   */
++public class PRBasicMultiIndexCreationDUnitTest extends PartitionedRegionDUnitTestCase {
  
    public PRBasicMultiIndexCreationDUnitTest(String name) {
      super(name);
    }
 +
-   // int totalNumBuckets = 131;
- 
-   int queryTestCycle = 10;
+   public void setCacheInVMs(VM... vms) {
+     for (VM vm : vms) {
+       vm.invoke(() -> PRQueryDUnitHelper.setCache(getCache()));
+     }
+   }
 +
-   PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
+   PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper();
  
    final String name = "PartionedPortfolios";
  

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitHelper.java
----------------------------------------------------------------------
diff --cc geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitHelper.java
index 45d91c9,84ef866..241aeb6
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitHelper.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitHelper.java
@@@ -16,11 -16,12 +16,9 @@@
   */
  package com.gemstone.gemfire.cache.query.partitioned;
  
- import java.io.BufferedWriter;
+ import static org.junit.Assert.*;
+ 
 -import java.io.BufferedWriter;
  import java.io.File;
--import java.io.FileWriter;
--import java.io.IOException;
- import java.io.PrintWriter;
  import java.io.Serializable;
  import java.util.ArrayList;
  import java.util.Collection;
@@@ -31,6 -32,6 +29,8 @@@ import java.util.Random
  import java.util.Set;
  import java.util.concurrent.CountDownLatch;
  
++import util.TestException;
++
  import com.gemstone.gemfire.CancelException;
  import com.gemstone.gemfire.LogWriter;
  import com.gemstone.gemfire.cache.AttributesFactory;
@@@ -85,12 -81,12 +80,10 @@@ import com.gemstone.gemfire.internal.ca
  import com.gemstone.gemfire.test.dunit.Assert;
  import com.gemstone.gemfire.test.dunit.SerializableRunnable;
  import com.gemstone.gemfire.test.dunit.SerializableRunnableIF;
+ import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
+ import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
  import com.gemstone.gemfire.util.test.TestUtil;
  
- import parReg.query.unittest.NewPortfolio;
--import util.TestException;
--
  /**
   * This is a helper class for the various Partitioned Query DUnit Test Cases
   * 
@@@ -1334,44 -1230,8 +1230,43 @@@ public class PRQueryDUnitHelper impleme
        
            Collection indexes = qs.getIndexes();
            Iterator it = indexes.iterator();
-           while(it.hasNext()) {         
-             //logger.info("Following indexes found : " + it.next());
+           while(it.hasNext()) {
              PartitionedIndex ind = (PartitionedIndex)it.next();
 +            /*List bucketIndex = ind.getBucketIndexes();
 +            int k = 0;
 +            logger.info("Total number of bucket index : "+bucketIndex.size());
 +            while ( k < bucketIndex.size() ){
 +              Index bukInd = (Index)bucketIndex.get(k);
 +              logger.info("Buket Index "+bukInd+"  usage : "+bukInd.getStatistics().getTotalUses());
 +              // if number of quries on pr change in getCacheSerializableRunnableForPRQueryAndCompareResults
 +              // literal 6  should change.
 +              //Asif :  With the optmization of Range Queries a where clause
 +              // containing something like ID > 4 AND ID < 9 will be evaluated 
 +              //using a single index lookup, so accordingly modifying the 
 +              //assert value from 7 to 6
 +              // Anil : With aquiringReadLock during Index.getSizeEstimate(), the
 +              // Index usage in case of "ID = 0 OR ID = 1" is increased by 3.
 +              int indexUsageWithSizeEstimation = 3;
 +              int expectedUse = 6;
 +              long indexUse = bukInd.getStatistics().getTotalUses();
 +              // Anil : With chnages to use single index for PR query evaluation, once the index
 +              // is identified the same index is used on other PR buckets, the sieEstimation is
 +              // done only once, which adds additional index use for only one bucket index.
 +              if (!(indexUse == expectedUse || indexUse == (expectedUse + indexUsageWithSizeEstimation))){
 +                fail ("Index usage is not as expected, expected it to be either " + 
 +                    expectedUse + " or " + (expectedUse + indexUsageWithSizeEstimation) + 
 +                    " it is: " + indexUse);
 +                //assertIndexDetailsEquals(6 + indexUsageWithSizeEstimation, bukInd.getStatistics().getTotalUses());
 +              }
 +              k++;
 +            }*/
 +            //Shobhit: Now we dont need to check stats per bucket index,
 +            //stats are accumulated in single pr index stats.
 +            
 +            // Anil : With aquiringReadLock during Index.getSizeEstimate(), the
 +            // Index usage in case of "ID = 0 OR ID = 1" is increased by 3.
 +            int indexUsageWithSizeEstimation = 3;
 +            
              logger.info("index uses for "+ind.getNumberOfIndexedBuckets()+" index "+ind.getName()+": "+ind.getStatistics().getTotalUses());
              assertEquals(6, ind.getStatistics().getTotalUses());
            }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java
----------------------------------------------------------------------
diff --cc geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java
index 3055228,4652e74..8530c0f
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java
@@@ -1611,9 -1539,11 +1539,11 @@@ public class ClientMembershipDUnitTest 
        totalClientCounts += clientCounts[i];
      }
      // this assertion fails because the count is 4
 -    //assertEquals(1, totalClientCounts);
 +    //assertIndexDetailsEquals(1, totalClientCounts);
    }
+ 
    protected static int testGetNotifiedClients_port;
+ 
    private static int getTestGetNotifiedClients_port() {
      return testGetNotifiedClients_port;
    }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/geode-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionConfigJUnitTest.java
----------------------------------------------------------------------
diff --cc geode-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionConfigJUnitTest.java
index cc6310f,31acc47..8e1031f
--- a/geode-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionConfigJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionConfigJUnitTest.java
@@@ -28,42 -29,30 +29,41 @@@ import java.util.List
  import java.util.Map;
  import java.util.Properties;
  
- import org.junit.AfterClass;
 +import org.junit.Before;
- import org.junit.BeforeClass;
 +import org.junit.Test;
 +import org.junit.experimental.categories.Category;
 +
  import com.gemstone.gemfire.InternalGemFireException;
  import com.gemstone.gemfire.UnmodifiableException;
  import com.gemstone.gemfire.internal.ConfigSource;
+ import com.gemstone.gemfire.management.internal.security.JSONAuthorization;
  import com.gemstone.gemfire.test.junit.categories.UnitTest;
  
 -import org.junit.Before;
 -import org.junit.BeforeClass;
 -import org.junit.Test;
 -import org.junit.experimental.categories.Category;
 -
 -
  @Category(UnitTest.class)
  public class DistributionConfigJUnitTest {
 -  static Map<String, ConfigAttribute> attributes;
 -  static Map<String, Method> setters;
 -  static Map<String, Method> getters;
 -  static Map<String, Method> isModifiables;
 -  static Map<String, Method> checkers;
 -  static String[] attNames;
 -  DistributionConfigImpl config;
 -
 -  @BeforeClass
 -  public static void beforeClass() {
 +
 +  private Map<Class<?>, Class<?>> classMap;
 +
 +  private Map<String, ConfigAttribute> attributes;
 +  private Map<String, Method> setters;
 +  private Map<String, Method> getters;
 +  private Map<String, Method> checkers;
 +  private String[] attNames;
 +
 +  private DistributionConfigImpl config;
 +
 +  @Before
 +  public void before() {
 +    classMap = new HashMap<Class<?>, Class<?>>();
 +    classMap.put(boolean.class, Boolean.class);
 +    classMap.put(byte.class, Byte.class);
 +    classMap.put(short.class, Short.class);
 +    classMap.put(char.class, Character.class);
 +    classMap.put(int.class, Integer.class);
 +    classMap.put(long.class, Long.class);
 +    classMap.put(float.class, Float.class);
 +    classMap.put(double.class, Double.class);
 +
      attributes = DistributionConfig.attributes;
      setters = DistributionConfig.setters;
      getters = DistributionConfig.getters;
@@@ -107,10 -99,9 +107,10 @@@
      System.out.println("filelList: " + fileList);
      System.out.println();
      System.out.println("otherList: " + otherList);
 +
      assertEquals(boolList.size(), 30);
      assertEquals(intList.size(), 33);
-     assertEquals(stringList.size(), 69);
+     assertEquals(stringList.size(), 70);
      assertEquals(fileList.size(), 5);
      assertEquals(otherList.size(), 3);
    }
@@@ -309,4 -315,45 +324,32 @@@
      assertTrue(config.isAttributeModifiable(DistributionConfig.HTTP_SERVICE_PORT_NAME));
      assertTrue(config.isAttributeModifiable("jmx-manager-http-port"));
    }
+ 
+ 
+   @Test
+   public void testSecurityProps(){
+     Properties props = new Properties();
+     props.put(DistributionConfig.SECURITY_CLIENT_AUTHENTICATOR_NAME, JSONAuthorization.class.getName() + ".create");
+     props.put(DistributionConfig.SECURITY_CLIENT_ACCESSOR_NAME, JSONAuthorization.class.getName() + ".create");
+     props.put(DistributionConfig.SECURITY_LOG_LEVEL_NAME, "config");
+     // add another non-security property to verify it won't get put in the security properties
+     props.put(DistributionConfig.ACK_WAIT_THRESHOLD_NAME, 2);
+ 
+     DistributionConfig config = new DistributionConfigImpl(props);
+     assertEquals(config.getSecurityProps().size(), 3);
+   }
+ 
+   @Test
+   public void testSecurityPropsWithNoSetter(){
+     Properties props = new Properties();
+     props.put(DistributionConfig.SECURITY_CLIENT_AUTHENTICATOR_NAME, JSONAuthorization.class.getName() + ".create");
+     props.put(DistributionConfig.SECURITY_CLIENT_ACCESSOR_NAME, JSONAuthorization.class.getName() + ".create");
+     props.put(DistributionConfig.SECURITY_LOG_LEVEL_NAME, "config");
+     // add another non-security property to verify it won't get put in the security properties
+     props.put(DistributionConfig.ACK_WAIT_THRESHOLD_NAME, 2);
+     props.put("security-username", "testName");
+ 
+     DistributionConfig config = new DistributionConfigImpl(props);
+     assertEquals(config.getSecurityProps().size(), 4);
+   }
 -
 -  public final static Map<Class<?>, Class<?>> classMap = new HashMap<Class<?>, Class<?>>();
 -
 -  static {
 -    classMap.put(boolean.class, Boolean.class);
 -    classMap.put(byte.class, Byte.class);
 -    classMap.put(short.class, Short.class);
 -    classMap.put(char.class, Character.class);
 -    classMap.put(int.class, Integer.class);
 -    classMap.put(long.class, Long.class);
 -    classMap.put(float.class, Float.class);
 -    classMap.put(double.class, Double.class);
 -  }
  }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderQueueJUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/CommandManagerJUnitTest.java
----------------------------------------------------------------------
diff --cc geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/CommandManagerJUnitTest.java
index fb9450b,66decd6..f026a52
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/CommandManagerJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/CommandManagerJUnitTest.java
@@@ -16,14 -16,16 +16,15 @@@
   */
  package com.gemstone.gemfire.management.internal.cli;
  
 -import com.gemstone.gemfire.management.cli.CliMetaData;
 -import com.gemstone.gemfire.management.cli.ConverterHint;
 -import com.gemstone.gemfire.management.cli.Result;
 -import com.gemstone.gemfire.management.internal.cli.annotation.CliArgument;
 -import com.gemstone.gemfire.management.internal.cli.parser.Argument;
 -import com.gemstone.gemfire.management.internal.cli.parser.AvailabilityTarget;
 -import com.gemstone.gemfire.management.internal.cli.parser.CommandTarget;
 -import com.gemstone.gemfire.management.internal.cli.parser.Option;
 -import com.gemstone.gemfire.management.internal.security.ResourceOperation;
 -import com.gemstone.gemfire.test.junit.categories.UnitTest;
++import static com.gemstone.gemfire.cache.operations.OperationContext.*;
 +import static org.junit.Assert.*;
 +
 +import java.lang.annotation.Annotation;
 +import java.lang.reflect.Method;
 +import java.util.ArrayList;
 +import java.util.List;
 +import java.util.Map;
 +
  import org.junit.After;
  import org.junit.Test;
  import org.junit.experimental.categories.Category;
@@@ -35,15 -37,15 +36,16 @@@ import org.springframework.shell.core.a
  import org.springframework.shell.core.annotation.CliCommand;
  import org.springframework.shell.core.annotation.CliOption;
  
 -import java.lang.annotation.Annotation;
 -import java.lang.reflect.Method;
 -import java.util.ArrayList;
 -import java.util.List;
 -import java.util.Map;
 -
 -import static com.gemstone.gemfire.cache.operations.OperationContext.OperationCode;
 -import static com.gemstone.gemfire.cache.operations.OperationContext.Resource;
 -import static org.junit.Assert.*;
 +import com.gemstone.gemfire.management.cli.CliMetaData;
 +import com.gemstone.gemfire.management.cli.ConverterHint;
 +import com.gemstone.gemfire.management.cli.Result;
 +import com.gemstone.gemfire.management.internal.cli.annotation.CliArgument;
 +import com.gemstone.gemfire.management.internal.cli.parser.Argument;
 +import com.gemstone.gemfire.management.internal.cli.parser.AvailabilityTarget;
 +import com.gemstone.gemfire.management.internal.cli.parser.CommandTarget;
 +import com.gemstone.gemfire.management.internal.cli.parser.Option;
++import com.gemstone.gemfire.management.internal.security.ResourceOperation;
 +import com.gemstone.gemfire.test.junit.categories.UnitTest;
  
  /**
   * CommandManagerTest - Includes tests to check the CommandManager functions
@@@ -230,13 -222,12 +232,14 @@@ public class CommandManagerJUnitTest 
      assertTrue("Should not find unlisted plugin.", !commandManager.getCommands().containsKey("mock plugin command unlisted"));
    }
  
 +  /**
 +   * class that represents dummy commands
 +   */
 +  public static class Commands implements CommandMarker {
  
 -  // class that represents dummy commands
 -  static public class Commands implements CommandMarker {
      @CliCommand(value = { COMMAND1_NAME, COMMAND1_NAME_ALIAS }, help = COMMAND1_HELP)
      @CliMetaData(shellOnly = true, relatedTopic = { "relatedTopicOfCommand1" })
+     @ResourceOperation(resource = Resource.CLUSTER, operation = OperationCode.READ)
      public static String command1(
          @CliArgument(name = ARGUMENT1_NAME, argumentContext = ARGUMENT1_CONTEXT, help = ARGUMENT1_HELP, mandatory = true)
          String argument1,

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/GfshParserJUnitTest.java
----------------------------------------------------------------------
diff --cc geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/GfshParserJUnitTest.java
index 468039f,d51df2a..c94d8d5
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/GfshParserJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/GfshParserJUnitTest.java
@@@ -16,12 -16,18 +16,13 @@@
   */
  package com.gemstone.gemfire.management.internal.cli;
  
 -import com.gemstone.gemfire.management.cli.CliMetaData;
 -import com.gemstone.gemfire.management.cli.CommandProcessingException;
 -import com.gemstone.gemfire.management.cli.ConverterHint;
 -import com.gemstone.gemfire.management.cli.Result;
 -import com.gemstone.gemfire.management.internal.cli.annotation.CliArgument;
 -import com.gemstone.gemfire.management.internal.cli.converters.StringArrayConverter;
 -import com.gemstone.gemfire.management.internal.cli.converters.StringListConverter;
 -import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
 -import com.gemstone.gemfire.management.internal.cli.parser.SyntaxConstants;
 -import com.gemstone.gemfire.management.internal.cli.result.ResultBuilder;
 -import com.gemstone.gemfire.management.internal.security.ResourceOperation;
 -import com.gemstone.gemfire.test.junit.categories.UnitTest;
++import static com.gemstone.gemfire.cache.operations.OperationContext.*;
 +import static org.junit.Assert.*;
 +
 +import java.lang.reflect.Method;
 +import java.util.ArrayList;
 +import java.util.List;
 +
  import org.junit.After;
  import org.junit.Before;
  import org.junit.Test;
@@@ -36,17 -42,13 +37,18 @@@ import org.springframework.shell.core.a
  import org.springframework.shell.core.annotation.CliOption;
  import org.springframework.shell.event.ParseResult;
  
 -import java.lang.reflect.Method;
 -import java.util.ArrayList;
 -import java.util.List;
 -
 -import static com.gemstone.gemfire.cache.operations.OperationContext.OperationCode;
 -import static com.gemstone.gemfire.cache.operations.OperationContext.Resource;
 -import static org.junit.Assert.*;
 +import com.gemstone.gemfire.management.cli.CliMetaData;
 +import com.gemstone.gemfire.management.cli.CommandProcessingException;
 +import com.gemstone.gemfire.management.cli.ConverterHint;
 +import com.gemstone.gemfire.management.cli.Result;
 +import com.gemstone.gemfire.management.internal.cli.annotation.CliArgument;
 +import com.gemstone.gemfire.management.internal.cli.converters.StringArrayConverter;
 +import com.gemstone.gemfire.management.internal.cli.converters.StringListConverter;
 +import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
 +import com.gemstone.gemfire.management.internal.cli.parser.SyntaxConstants;
 +import com.gemstone.gemfire.management.internal.cli.result.ResultBuilder;
++import com.gemstone.gemfire.management.internal.security.ResourceOperation;
 +import com.gemstone.gemfire.test.junit.categories.UnitTest;
  
  /**
   * GfshParserJUnitTest - Includes tests to check the parsing and auto-completion

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DiskStoreCommandsJUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/GemfireDataCommandsDUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/shell/GfshExecutionStrategyJUnitTest.java
----------------------------------------------------------------------
diff --cc geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/shell/GfshExecutionStrategyJUnitTest.java
index dde1ba9,4579178..088a6a1
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/shell/GfshExecutionStrategyJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/shell/GfshExecutionStrategyJUnitTest.java
@@@ -16,18 -16,6 +16,19 @@@
   */
  package com.gemstone.gemfire.management.internal.cli.shell;
  
++import static com.gemstone.gemfire.cache.operations.OperationContext.*;
 +import static org.junit.Assert.*;
 +
 +import java.util.List;
 +
 +import org.junit.After;
 +import org.junit.Test;
 +import org.junit.experimental.categories.Category;
 +import org.springframework.shell.core.CommandMarker;
 +import org.springframework.shell.core.annotation.CliCommand;
 +import org.springframework.shell.core.annotation.CliOption;
 +import org.springframework.shell.event.ParseResult;
 +
  import com.gemstone.gemfire.management.cli.CliMetaData;
  import com.gemstone.gemfire.management.cli.ConverterHint;
  import com.gemstone.gemfire.management.cli.Result;
@@@ -35,7 -23,22 +36,8 @@@ import com.gemstone.gemfire.management.
  import com.gemstone.gemfire.management.internal.cli.GfshParser;
  import com.gemstone.gemfire.management.internal.cli.annotation.CliArgument;
  import com.gemstone.gemfire.management.internal.cli.result.ResultBuilder;
+ import com.gemstone.gemfire.management.internal.security.ResourceOperation;
  import com.gemstone.gemfire.test.junit.categories.UnitTest;
 -import org.junit.After;
 -import org.junit.Test;
 -import org.junit.experimental.categories.Category;
 -import org.springframework.shell.core.CommandMarker;
 -import org.springframework.shell.core.annotation.CliCommand;
 -import org.springframework.shell.core.annotation.CliOption;
 -import org.springframework.shell.event.ParseResult;
 -
 -import java.util.List;
 -
 -import static com.gemstone.gemfire.cache.operations.OperationContext.OperationCode;
 -import static com.gemstone.gemfire.cache.operations.OperationContext.Resource;
 -import static org.junit.Assert.assertNotNull;
 -import static org.junit.Assert.assertTrue;
  
  /**
   * GfshExecutionStrategyTest - Includes tests to for GfshExecutionStrategyTest

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/geode-core/src/test/java/com/gemstone/gemfire/management/internal/configuration/domain/CacheElementJUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/geode-core/src/test/java/com/gemstone/gemfire/security/ClientAuthorizationTestCase.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/controllers/PulseControllerJUnitTest.java
----------------------------------------------------------------------
diff --cc geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/controllers/PulseControllerJUnitTest.java
index 90bbd04,be2fa62..acb6c2d
--- a/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/controllers/PulseControllerJUnitTest.java
+++ b/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/controllers/PulseControllerJUnitTest.java
@@@ -31,11 -30,9 +31,14 @@@ import java.io.File
  import java.security.Principal;
  import java.util.ArrayList;
  import java.util.HashMap;
 +import java.util.UUID;
 +import javax.servlet.ServletContextListener;
 +
 +import com.fasterxml.jackson.databind.ObjectMapper;
 +import com.vmware.gemfire.tools.pulse.internal.PulseAppListener;
+ 
+ import com.fasterxml.jackson.databind.ObjectMapper;
+ import com.gemstone.gemfire.test.junit.categories.UnitTest;
  import com.vmware.gemfire.tools.pulse.internal.controllers.PulseController;
  import com.vmware.gemfire.tools.pulse.internal.data.Cluster;
  import com.vmware.gemfire.tools.pulse.internal.data.PulseConfig;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/testbed/driver/PulseUITest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/wancommand/WanCommandStatusDUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/geode-web/src/test/java/com/gemstone/gemfire/management/internal/web/controllers/support/LoginHandlerInterceptorJUnitTest.java
----------------------------------------------------------------------
diff --cc geode-web/src/test/java/com/gemstone/gemfire/management/internal/web/controllers/support/LoginHandlerInterceptorJUnitTest.java
index 0000000,ef405db..f9d9d35
mode 000000,100644..100644
--- a/geode-web/src/test/java/com/gemstone/gemfire/management/internal/web/controllers/support/LoginHandlerInterceptorJUnitTest.java
+++ b/geode-web/src/test/java/com/gemstone/gemfire/management/internal/web/controllers/support/LoginHandlerInterceptorJUnitTest.java
@@@ -1,0 -1,274 +1,273 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  *      http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package com.gemstone.gemfire.management.internal.web.controllers.support;
+ 
+ import static org.junit.Assert.*;
+ 
+ import java.util.Enumeration;
+ import java.util.HashMap;
+ import java.util.Iterator;
+ import java.util.Map;
+ import javax.servlet.http.HttpServletRequest;
+ 
 -import com.gemstone.gemfire.test.junit.categories.UnitTest;
 -
+ import edu.umd.cs.mtc.MultithreadedTestCase;
+ import edu.umd.cs.mtc.TestFramework;
 -
+ import org.jmock.Expectations;
+ import org.jmock.Mockery;
+ import org.jmock.lib.concurrent.Synchroniser;
+ import org.jmock.lib.legacy.ClassImposteriser;
+ import org.junit.After;
+ import org.junit.Before;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ 
++import com.gemstone.gemfire.test.junit.categories.UnitTest;
++
+ /**
+  * The LoginHandlerInterceptorJUnitTest class is a test suite of test cases to test the contract
+  * and functionality of the Spring HandlerInterceptor, LoginHandlerInterceptor class.
+  * 
+  * @see org.jmock.Mockery
+  * @see org.junit.Assert
+  * @see org.junit.Test
+  * @since 8.0
+  */
+ @Category(UnitTest.class)
+ public class LoginHandlerInterceptorJUnitTest {
+ 
+   private Mockery mockContext;
+ 
+   @Before
+   public void setUp() {
+     mockContext = new Mockery();
+     mockContext.setImposteriser(ClassImposteriser.INSTANCE);
+     mockContext.setThreadingPolicy(new Synchroniser());
+   }
+ 
+   @After
+   public void tearDown() {
+     mockContext.assertIsSatisfied();
+     mockContext = null;
+   }
+ 
 -  protected String createEnvironmentVariable(final String name) {
++  private String createEnvironmentVariable(final String name) {
+     return (LoginHandlerInterceptor.ENVIRONMENT_VARIABLE_REQUEST_PARAMETER_PREFIX + name);
+   }
+ 
 -  protected <T> Enumeration<T> enumeration(final Iterator<T> iterator) {
++  private <T> Enumeration<T> enumeration(final Iterator<T> iterator) {
+     return new Enumeration<T>() {
+       public boolean hasMoreElements() {
+         return iterator.hasNext();
+       }
+       public T nextElement() {
+         return iterator.next();
+       }
+     };
+   }
+ 
+   @Test
+   public void testPreHandleAfterCompletion() throws Exception {
+     final Map<String, String> requestParameters = new HashMap<>(2);
+     final Map<String, String> requestHeaders = new HashMap<>();
+ 
+     requestParameters.put("parameter", "one");
+     requestParameters.put(createEnvironmentVariable("variable"), "two");
+ 
+     final HttpServletRequest mockHttpRequest = mockContext.mock(HttpServletRequest.class, "testPreHandleAfterCompletion.HttpServletRequest");
+ 
+     mockContext.checking(new Expectations() {{
+       oneOf(mockHttpRequest).getParameterNames();
+       will(returnValue(enumeration(requestParameters.keySet().iterator())));
+       oneOf(mockHttpRequest).getHeaderNames();
+       will(returnValue(enumeration(requestHeaders.keySet().iterator())));
+       oneOf(mockHttpRequest).getParameter(with(equal(createEnvironmentVariable("variable"))));
+       will(returnValue(requestParameters.get(createEnvironmentVariable("variable"))));
+     }});
+ 
+     LoginHandlerInterceptor handlerInterceptor = new LoginHandlerInterceptor();
+ 
+     Map<String, String> envBefore = LoginHandlerInterceptor.getEnvironment();
+ 
+     assertNotNull(envBefore);
+     assertTrue(envBefore.isEmpty());
+     assertTrue(handlerInterceptor.preHandle(mockHttpRequest, null, null));
+ 
+     Map<String, String> envSet = LoginHandlerInterceptor.getEnvironment();
+ 
+     assertNotNull(envSet);
+     assertNotSame(envBefore, envSet);
+     assertEquals(1, envSet.size());
+     assertTrue(envSet.containsKey("variable"));
+     assertEquals("two", envSet.get("variable"));
+ 
+     handlerInterceptor.afterCompletion(mockHttpRequest, null, null, null);
+ 
+     Map<String, String> envAfter = LoginHandlerInterceptor.getEnvironment();
+ 
+     assertNotNull(envAfter);
+     assertTrue(envAfter.isEmpty());
+   }
+ 
+   @Test
+   public void testHandlerInterceptorThreadSafety() throws Throwable {
+     TestFramework.runOnce(new HandlerInterceptorThreadSafetyMultiThreadedTestCase());
+   }
+ 
 -  protected final class HandlerInterceptorThreadSafetyMultiThreadedTestCase extends MultithreadedTestCase {
++  private final class HandlerInterceptorThreadSafetyMultiThreadedTestCase extends MultithreadedTestCase {
+ 
+     private LoginHandlerInterceptor handlerInterceptor;
+ 
+     private HttpServletRequest mockHttpRequestOne;
+     private HttpServletRequest mockHttpRequestTwo;
+ 
+     @Override
+     public void initialize() {
+       super.initialize();
+ 
+       final Map<String, String> requestParametersOne = new HashMap<>(3);
+       final Map<String, String> requestHeaders = new HashMap<>();
+ 
+       requestParametersOne.put("param", "one");
+       requestParametersOne.put(createEnvironmentVariable("STAGE"), "test");
+       requestParametersOne.put(createEnvironmentVariable("GEMFIRE"), "/path/to/gemfire/700");
+ 
+       mockHttpRequestOne = mockContext.mock(HttpServletRequest.class, "testHandlerInterceptorThreadSafety.HttpServletRequest.1");
+ 
+       mockContext.checking(new Expectations() {{
+         oneOf(mockHttpRequestOne).getParameterNames();
+         will(returnValue(enumeration(requestParametersOne.keySet().iterator())));
+         oneOf(mockHttpRequestOne).getHeaderNames();
+         will(returnValue(enumeration(requestHeaders.keySet().iterator())));
+         oneOf(mockHttpRequestOne).getParameter(with(equal(createEnvironmentVariable("STAGE"))));
+         will(returnValue(requestParametersOne.get(createEnvironmentVariable("STAGE"))));
+         oneOf(mockHttpRequestOne).getParameter(with(equal(createEnvironmentVariable("GEMFIRE"))));
+         will(returnValue(requestParametersOne.get(createEnvironmentVariable("GEMFIRE"))));
+       }});
+ 
+       mockHttpRequestTwo = mockContext.mock(HttpServletRequest.class, "testHandlerInterceptorThreadSafety.HttpServletRequest.2");
+ 
+       final Map<String, String> requestParametersTwo = new HashMap<>(3);
+ 
+       requestParametersTwo.put("parameter", "two");
+       requestParametersTwo.put(createEnvironmentVariable("HOST"), "localhost");
+       requestParametersTwo.put(createEnvironmentVariable("GEMFIRE"), "/path/to/gemfire/75");
+ 
+       mockContext.checking(new Expectations() {{
+         oneOf(mockHttpRequestTwo).getParameterNames();
+         will(returnValue(enumeration(requestParametersTwo.keySet().iterator())));
+         oneOf(mockHttpRequestTwo).getHeaderNames();
+         will(returnValue(enumeration(requestHeaders.keySet().iterator())));
+         oneOf(mockHttpRequestTwo).getParameter(with(equal(createEnvironmentVariable("HOST"))));
+         will(returnValue(requestParametersTwo.get(createEnvironmentVariable("HOST"))));
+         oneOf(mockHttpRequestTwo).getParameter(with(equal(createEnvironmentVariable("GEMFIRE"))));
+         will(returnValue(requestParametersTwo.get(createEnvironmentVariable("GEMFIRE"))));
+       }});
+ 
+       handlerInterceptor =  new LoginHandlerInterceptor();
+     }
+ 
+     public void thread1() throws Exception {
+       assertTick(0);
+       Thread.currentThread().setName("HTTP Request Processing Thread 1");
+ 
+       Map<String, String> env = LoginHandlerInterceptor.getEnvironment();
+ 
+       assertNotNull(env);
+       assertTrue(env.isEmpty());
+       assertTrue(handlerInterceptor.preHandle(mockHttpRequestOne, null, null));
+ 
+       env = LoginHandlerInterceptor.getEnvironment();
+ 
+       assertNotNull(env);
+       assertEquals(2, env.size());
+       assertFalse(env.containsKey("param"));
+       assertFalse(env.containsKey("parameter"));
+       assertFalse(env.containsKey("HOST"));
+       assertEquals("test", env.get("STAGE"));
+       assertEquals("/path/to/gemfire/700", env.get("GEMFIRE"));
+ 
+       waitForTick(2);
+ 
+       env = LoginHandlerInterceptor.getEnvironment();
+ 
+       assertNotNull(env);
+       assertEquals(2, env.size());
+       assertFalse(env.containsKey("param"));
+       assertFalse(env.containsKey("parameter"));
+       assertFalse(env.containsKey("HOST"));
+       assertEquals("test", env.get("STAGE"));
+       assertEquals("/path/to/gemfire/700", env.get("GEMFIRE"));
+ 
+       waitForTick(4);
+ 
+       env = LoginHandlerInterceptor.getEnvironment();
+ 
+       assertNotNull(env);
+       assertEquals(2, env.size());
+       assertFalse(env.containsKey("param"));
+       assertFalse(env.containsKey("parameter"));
+       assertFalse(env.containsKey("HOST"));
+       assertEquals("test", env.get("STAGE"));
+       assertEquals("/path/to/gemfire/700", env.get("GEMFIRE"));
+ 
+       handlerInterceptor.afterCompletion(mockHttpRequestOne, null, null, null);
+ 
+       env = LoginHandlerInterceptor.getEnvironment();
+ 
+       assertNotNull(env);
+       assertTrue(env.isEmpty());
+     }
+ 
+     public void thread2() throws Exception {
+       assertTick(0);
+       Thread.currentThread().setName("HTTP Request Processing Thread 2");
+       waitForTick(1);
+ 
+       Map<String, String> env = LoginHandlerInterceptor.getEnvironment();
+ 
+       assertNotNull(env);
+       assertTrue(env.isEmpty());
+       assertTrue(handlerInterceptor.preHandle(mockHttpRequestTwo, null, null));
+ 
+       env = LoginHandlerInterceptor.getEnvironment();
+ 
+       assertNotNull(env);
+       assertEquals(2, env.size());
+       assertFalse(env.containsKey("parameter"));
+       assertFalse(env.containsKey("param"));
+       assertFalse(env.containsKey("STAGE"));
+       assertEquals("localhost", env.get("HOST"));
+       assertEquals("/path/to/gemfire/75", env.get("GEMFIRE"));
+ 
+       waitForTick(3);
+ 
+       handlerInterceptor.afterCompletion(mockHttpRequestTwo, null, null, null);
+ 
+       env = LoginHandlerInterceptor.getEnvironment();
+ 
+       assertNotNull(env);
+       assertTrue(env.isEmpty());
+     }
+ 
+     @Override
+     public void finish() {
+       super.finish();
+       handlerInterceptor = null;
+     }
+   }
+ 
+ }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9bdd0d59/gradle/dependency-versions.properties
----------------------------------------------------------------------
diff --cc gradle/dependency-versions.properties
index fe6fd91,b3e004e..f262a0c
--- a/gradle/dependency-versions.properties
+++ b/gradle/dependency-versions.properties
@@@ -100,5 -100,6 +100,6 @@@ tempus-fugit.version = 1.
  tomcat6.version = 6.0.37
  tomcat7.version = 7.0.30
  mortbay-jetty-servlet-api.version=2.5-20081211
 -selenium.version=2.52.0
 +selenium.version=2.53.0
  google-gson.version=2.3.1
+ shiro.version=1.2.4


[28/63] [abbrv] incubator-geode git commit: GEODE-1262: Removed VM5-VM7 in AsyncEventQueueTestBase

Posted by kl...@apache.org.
GEODE-1262: Removed VM5-VM7 in AsyncEventQueueTestBase

* Refactored all the test cases to use lower-numbered VMs (a usage sketch follows below)
* VM5-VM7 were unused and have therefore been removed from AsyncEventQueueTestBase
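
For reference, a minimal sketch (not part of the commit; the class name is hypothetical) of the DUnit pattern the refactored tests follow, assuming the standard Host/VM test helpers used elsewhere in this change:

import com.gemstone.gemfire.test.dunit.Host;
import com.gemstone.gemfire.test.dunit.VM;

public class LowerNumberedVmUsageSketch {

  // Hypothetical helper: fetch only VM0-VM4, since VM5-VM7 are no longer
  // created by AsyncEventQueueTestBase after this change.
  static void runOnLowerNumberedVms() {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm4 = host.getVM(4);

    // Work is shipped to the remote test VMs as serializable lambdas.
    vm0.invoke(() -> System.out.println("running in VM 0"));
    vm4.invoke(() -> System.out.println("running in VM 4"));
  }
}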

This closes #137


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/ecbbf766
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/ecbbf766
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/ecbbf766

Branch: refs/heads/feature/GEODE-1276
Commit: ecbbf7660f35e563c75f2c9f3364c26a581cf636
Parents: 20117a8
Author: nabarun <nn...@pivotal.io>
Authored: Wed Apr 20 13:50:48 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Wed Apr 27 16:48:21 2016 -0700

----------------------------------------------------------------------
 .../cache/wan/AsyncEventQueueTestBase.java      |   12 -
 .../asyncqueue/AsyncEventListenerDUnitTest.java | 1112 +++++++++---------
 .../AsyncEventQueueStatsDUnitTest.java          |  186 +--
 .../ConcurrentAsyncEventQueueDUnitTest.java     |  168 +--
 .../CommonParallelAsyncEventQueueDUnitTest.java |    8 +-
 5 files changed, 732 insertions(+), 754 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ecbbf766/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/AsyncEventQueueTestBase.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/AsyncEventQueueTestBase.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/AsyncEventQueueTestBase.java
index c719538..7da8d90 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/AsyncEventQueueTestBase.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/AsyncEventQueueTestBase.java
@@ -101,12 +101,6 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
 
   protected static VM vm4;
 
-  protected static VM vm5;
-
-  protected static VM vm6;
-
-  protected static VM vm7;
-
   protected static AsyncEventListener eventListener1;
 
   private static final long MAX_WAIT = 10000;
@@ -134,9 +128,6 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
     vm2 = host.getVM(2);
     vm3 = host.getVM(3);
     vm4 = host.getVM(4);
-    vm5 = host.getVM(5);
-    vm6 = host.getVM(6);
-    vm7 = host.getVM(7);
   }
 
   @Override
@@ -1549,9 +1540,6 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
     vm2.invoke(() -> AsyncEventQueueTestBase.cleanupVM());
     vm3.invoke(() -> AsyncEventQueueTestBase.cleanupVM());
     vm4.invoke(() -> AsyncEventQueueTestBase.cleanupVM());
-    vm5.invoke(() -> AsyncEventQueueTestBase.cleanupVM());
-    vm6.invoke(() -> AsyncEventQueueTestBase.cleanupVM());
-    vm7.invoke(() -> AsyncEventQueueTestBase.cleanupVM());
   }
 
   public static void cleanupVM() throws IOException {


[61/63] [abbrv] incubator-geode git commit: GEODE-11: Fixing a class cast exception when LuceneFunction has an error

Posted by kl...@apache.org.
GEODE-11: Fixing a class cast exception when LuceneFunction has an error

LuceneFunction was using sendException to return exceptions to the caller.
But sendException actually passes the exception to the collector's
addResult method, which is not what we want in this case.

Added an integration test for this behavior. Changed LuceneFunctionJUnitTest
to use Mockito and updated the expectations of what LuceneFunction
will do after an exception.
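
For context, a minimal sketch (a hypothetical function class, not the actual LuceneFunction code) of the distinction this commit relies on: throwing FunctionException from execute() surfaces the failure from ResultCollector.getResult() on the caller, whereas ResultSender.sendException() feeds the exception into the collector's addResult():

import com.gemstone.gemfire.cache.execute.FunctionAdapter;
import com.gemstone.gemfire.cache.execute.FunctionContext;
import com.gemstone.gemfire.cache.execute.FunctionException;
import com.gemstone.gemfire.cache.execute.ResultSender;

public class ErrorPropagationSketch extends FunctionAdapter {

  @Override
  public void execute(FunctionContext ctx) {
    ResultSender<Object> sender = ctx.getResultSender();
    try {
      // Placeholder work; a real function would compute a result here.
      sender.lastResult(ctx.getArguments());
    } catch (RuntimeException e) {
      // sender.sendException(e) would hand the exception to the result
      // collector's addResult(), so callers iterating the results could hit
      // an unexpected object type. Throwing FunctionException instead makes
      // the failure surface when the caller invokes ResultCollector.getResult().
      throw new FunctionException(e);
    }
  }

  @Override
  public String getId() {
    return getClass().getName();
  }
}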


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/b3ef7913
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/b3ef7913
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/b3ef7913

Branch: refs/heads/feature/GEODE-1276
Commit: b3ef791346b248ea81b4da989ba7759c75c7d92d
Parents: 0481732
Author: Dan Smith <up...@apache.org>
Authored: Mon May 2 14:54:07 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Tue May 3 16:41:37 2016 -0700

----------------------------------------------------------------------
 .../internal/distributed/CollectorManager.java  |   3 +-
 .../internal/distributed/LuceneFunction.java    |  31 +-
 .../distributed/TopEntriesCollectorManager.java |   2 +-
 .../TopEntriesFunctionCollector.java            |   9 +-
 .../lucene/LuceneQueriesIntegrationTest.java    |  88 ++++
 .../distributed/LuceneFunctionJUnitTest.java    | 406 ++++++-------------
 .../TopEntriesFunctionCollectorJUnitTest.java   |   4 +-
 7 files changed, 236 insertions(+), 307 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3ef7913/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/distributed/CollectorManager.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/distributed/CollectorManager.java b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/distributed/CollectorManager.java
index 45750d1..4d1d1c2 100644
--- a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/distributed/CollectorManager.java
+++ b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/distributed/CollectorManager.java
@@ -49,7 +49,6 @@ public interface CollectorManager<C extends IndexResultCollector> {
    * Reduce the results of individual collectors into a meaningful result. This method must be called after collection
    * is finished on all provided collectors.
    * 
-   * @throws IOException
    */
-  C reduce(Collection<C> results) throws IOException;
+  C reduce(Collection<C> results);
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3ef7913/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/distributed/LuceneFunction.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/distributed/LuceneFunction.java b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/distributed/LuceneFunction.java
index 199b698..9567305 100644
--- a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/distributed/LuceneFunction.java
+++ b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/distributed/LuceneFunction.java
@@ -29,6 +29,7 @@ import org.apache.lucene.search.Query;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.execute.FunctionAdapter;
 import com.gemstone.gemfire.cache.execute.FunctionContext;
+import com.gemstone.gemfire.cache.execute.FunctionException;
 import com.gemstone.gemfire.cache.execute.RegionFunctionContext;
 import com.gemstone.gemfire.cache.execute.ResultSender;
 import com.gemstone.gemfire.cache.lucene.LuceneQueryProvider;
@@ -41,6 +42,7 @@ import com.gemstone.gemfire.cache.lucene.internal.repository.RepositoryManager;
 import com.gemstone.gemfire.cache.query.QueryException;
 import com.gemstone.gemfire.internal.InternalEntity;
 import com.gemstone.gemfire.internal.cache.BucketNotFoundException;
+import com.gemstone.gemfire.internal.cache.execute.BucketMovedException;
 import com.gemstone.gemfire.internal.logging.LogService;
 
 /**
@@ -63,14 +65,12 @@ public class LuceneFunction extends FunctionAdapter implements InternalEntity {
 
     LuceneFunctionContext<IndexResultCollector> searchContext = (LuceneFunctionContext) ctx.getArguments();
     if (searchContext == null) {
-      resultSender.sendException(new IllegalArgumentException("Missing search context"));
-      return;
+      throw new IllegalArgumentException("Missing search context");
     }
 
     LuceneQueryProvider queryProvider = searchContext.getQueryProvider();
     if (queryProvider == null) {
-      resultSender.sendException(new IllegalArgumentException("Missing query provider"));
-      return;
+      throw new IllegalArgumentException("Missing query provider");
     }
     
     LuceneService service = LuceneServiceProvider.get(region.getCache());
@@ -81,8 +81,8 @@ public class LuceneFunction extends FunctionAdapter implements InternalEntity {
     try {
       query = queryProvider.getQuery(index);
     } catch (QueryException e) {
-      resultSender.sendException(e);
-      return;
+      logger.warn("", e);
+      throw new FunctionException(e);
     }
 
     if (logger.isDebugEnabled()) {
@@ -104,24 +104,11 @@ public class LuceneFunction extends FunctionAdapter implements InternalEntity {
         repo.query(query, resultLimit, collector);
         results.add(collector);
       }
-    } catch (IOException e) {
-      logger.warn("", e);
-      resultSender.sendException(e);
-      return;
-    } catch (BucketNotFoundException e) {
-      logger.warn("", e);
-      resultSender.sendException(e);
-      return;
-    }
-
-    TopEntriesCollector mergedResult;
-    try {
-      mergedResult = (TopEntriesCollector) manager.reduce(results);
+      TopEntriesCollector mergedResult = (TopEntriesCollector) manager.reduce(results);
       resultSender.lastResult(mergedResult);
-    } catch (IOException e) {
+    } catch (IOException|BucketNotFoundException e) {
       logger.warn("", e);
-      resultSender.sendException(e);
-      return;
+      throw new FunctionException(e);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3ef7913/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/distributed/TopEntriesCollectorManager.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/distributed/TopEntriesCollectorManager.java b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/distributed/TopEntriesCollectorManager.java
index b19e104..cf6e420 100644
--- a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/distributed/TopEntriesCollectorManager.java
+++ b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/distributed/TopEntriesCollectorManager.java
@@ -68,7 +68,7 @@ public class TopEntriesCollectorManager implements CollectorManager<TopEntriesCo
   }
 
   @Override
-  public TopEntriesCollector reduce(Collection<TopEntriesCollector> collectors) throws IOException {
+  public TopEntriesCollector reduce(Collection<TopEntriesCollector> collectors) {
     TopEntriesCollector mergedResult = new TopEntriesCollector(id, limit);
     if (collectors.isEmpty()) {
       return mergedResult;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3ef7913/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/distributed/TopEntriesFunctionCollector.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/distributed/TopEntriesFunctionCollector.java b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/distributed/TopEntriesFunctionCollector.java
index 2e8f2dc..4a99bf8 100644
--- a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/distributed/TopEntriesFunctionCollector.java
+++ b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/distributed/TopEntriesFunctionCollector.java
@@ -123,13 +123,8 @@ public class TopEntriesFunctionCollector implements ResultCollector<TopEntriesCo
         return mergedResults.getEntries();
       }
       
-      try {
-        mergedResults = manager.reduce(subResults);
-        return mergedResults.getEntries();
-      } catch (IOException e) {
-        logger.debug("Error while merging function execution results", e);
-        throw new FunctionException(e);
-      }
+      mergedResults = manager.reduce(subResults);
+      return mergedResults.getEntries();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3ef7913/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/LuceneQueriesIntegrationTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/LuceneQueriesIntegrationTest.java b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/LuceneQueriesIntegrationTest.java
new file mode 100644
index 0000000..9009e3d
--- /dev/null
+++ b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/LuceneQueriesIntegrationTest.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.lucene;
+
+import static org.hamcrest.Matchers.isA;
+import static org.junit.Assert.*;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.cache.execute.Function;
+import com.gemstone.gemfire.cache.execute.FunctionException;
+import com.gemstone.gemfire.cache.query.QueryException;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.ExpectedException;
+
+/**
+ * This class contains tests of lucene queries that can fit
+ */
+@Category(IntegrationTest.class)
+public class LuceneQueriesIntegrationTest {
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+  private static final String INDEX_NAME = "index";
+  protected static final String REGION_NAME = "index";
+  Cache cache;
+
+  @Before
+  public void createCache() {
+    cache = new CacheFactory()
+      .set("mcast-port", "0")
+      .set("locators", "")
+      .set("log-level", "warning").create();
+  }
+
+  @After
+  public void closeCache() {
+    cache.close();
+  }
+
+  @Test()
+  public void throwFunctionExceptionWhenGivenBadQuery() {
+    LuceneService luceneService = LuceneServiceProvider.get(cache);
+    luceneService.createIndex(INDEX_NAME, REGION_NAME, "text");
+    Region region = cache.createRegionFactory(RegionShortcut.PARTITION)
+      .create(REGION_NAME);
+
+    //Create a query that throws an exception
+    final LuceneQuery<Object, Object> query = luceneService.createLuceneQueryFactory().create(INDEX_NAME, REGION_NAME,
+      index -> {
+        throw new QueryException("Bad query");
+      });
+
+
+    thrown.expect(FunctionException.class);
+    thrown.expectCause(isA(QueryException.class));
+    try {
+      query.search();
+    } catch(FunctionException e) {
+      assertEquals(QueryException.class, e.getCause().getClass());
+      throw e;
+    }
+
+  }
+
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3ef7913/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/distributed/LuceneFunctionJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/distributed/LuceneFunctionJUnitTest.java b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/distributed/LuceneFunctionJUnitTest.java
index 750ec0f..70ec434 100644
--- a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/distributed/LuceneFunctionJUnitTest.java
+++ b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/distributed/LuceneFunctionJUnitTest.java
@@ -19,27 +19,16 @@
 
 package com.gemstone.gemfire.cache.lucene.internal.distributed;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.*;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.lucene.search.Query;
-import org.jmock.Expectations;
-import org.jmock.Mockery;
-import org.jmock.api.Invocation;
-import org.jmock.lib.action.CustomAction;
-import org.jmock.lib.concurrent.Synchroniser;
-import org.jmock.lib.legacy.ClassImposteriser;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
 
 import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.execute.FunctionException;
 import com.gemstone.gemfire.cache.execute.ResultSender;
 import com.gemstone.gemfire.cache.lucene.LuceneQueryFactory;
 import com.gemstone.gemfire.cache.lucene.LuceneQueryProvider;
@@ -55,9 +44,14 @@ import com.gemstone.gemfire.internal.cache.InternalCache;
 import com.gemstone.gemfire.internal.cache.execute.InternalRegionFunctionContext;
 import com.gemstone.gemfire.test.junit.categories.UnitTest;
 
+import org.apache.lucene.search.Query;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.ArgumentCaptor;
+
 @Category(UnitTest.class)
 public class LuceneFunctionJUnitTest {
-  Mockery mocker;
 
   String regionPath = "/region";
   String indexName = "index";
@@ -87,275 +81,162 @@ public class LuceneFunctionJUnitTest {
 
   @Test
   public void testRepoQueryAndMerge() throws Exception {
-    final AtomicReference<TopEntriesCollector> result = new AtomicReference<>();
-    mocker.checking(new Expectations() {
-      {
-        oneOf(mockContext).getDataSet();
-        will(returnValue(mockRegion));
-        oneOf(mockContext).getArguments();
-        will(returnValue(searchArgs));
-
-        oneOf(mockRepoManager).getRepositories(mockContext);
-        will(returnValue(repos));
-
-        oneOf(mockContext).getResultSender();
-        will(returnValue(mockResultSender));
-
-        oneOf(mockRepository1).query(with(query), with(equal(LuceneQueryFactory.DEFAULT_LIMIT)), with(any(IndexResultCollector.class)));
-        will(new CustomAction("streamSearchResults") {
-          @Override
-          public Object invoke(Invocation invocation) throws Throwable {
-            IndexResultCollector collector = (IndexResultCollector) invocation.getParameter(2);
-            collector.collect(r1_1.getKey(), r1_1.getScore());
-            collector.collect(r1_2.getKey(), r1_2.getScore());
-            collector.collect(r1_3.getKey(), r1_3.getScore());
-            return null;
-          }
-        });
-
-        oneOf(mockRepository2).query(with(query), with(equal(LuceneQueryFactory.DEFAULT_LIMIT)), with(any(IndexResultCollector.class)));
-        will(new CustomAction("streamSearchResults") {
-          @Override
-          public Object invoke(Invocation invocation) throws Throwable {
-            IndexResultCollector collector = (IndexResultCollector) invocation.getParameter(2);
-            collector.collect(r2_1.getKey(), r2_1.getScore());
-            collector.collect(r2_2.getKey(), r2_2.getScore());
-            return null;
-          }
-        });
-
-        oneOf(mockResultSender).lastResult(with(any(TopEntriesCollector.class)));
-        will(new CustomAction("collectResult") {
-          @Override
-          public Object invoke(Invocation invocation) throws Throwable {
-            result.set((TopEntriesCollector) invocation.getParameter(0));
-            return null;
-          }
-        });
-      }
-    });
+    when(mockContext.getDataSet()).thenReturn(mockRegion);
+    when(mockContext.getArguments()).thenReturn(searchArgs);
+    when(mockContext.<TopEntriesCollector>getResultSender()).thenReturn(mockResultSender);
+    when(mockRepoManager.getRepositories(eq(mockContext))).thenReturn(repos);
+    doAnswer(invocation -> {
+      IndexResultCollector collector = invocation.getArgumentAt(2, IndexResultCollector.class);
+      collector.collect(r1_1.getKey(), r1_1.getScore());
+      collector.collect(r1_2.getKey(), r1_2.getScore());
+      collector.collect(r1_3.getKey(), r1_3.getScore());
+      return null;
+    }).when(mockRepository1).query(eq(query), eq(LuceneQueryFactory.DEFAULT_LIMIT), any(IndexResultCollector.class));
+
+    doAnswer(invocation -> {
+      IndexResultCollector collector = invocation.getArgumentAt(2, IndexResultCollector.class);
+      collector.collect(r2_1.getKey(), r2_1.getScore());
+      collector.collect(r2_2.getKey(), r2_2.getScore());
+      return null;
+    }).when(mockRepository2).query(eq(query), eq(LuceneQueryFactory.DEFAULT_LIMIT), any(IndexResultCollector.class));
 
     LuceneFunction function = new LuceneFunction();
 
     function.execute(mockContext);
-    List<EntryScore> hits = result.get().getEntries().getHits();
+
+    ArgumentCaptor<TopEntriesCollector> resultCaptor  = ArgumentCaptor.forClass(TopEntriesCollector.class);
+    verify(mockResultSender).lastResult(resultCaptor.capture());
+    TopEntriesCollector result = resultCaptor.getValue();
+
+
+    List<EntryScore> hits = result.getEntries().getHits();
     assertEquals(5, hits.size());
-    TopEntriesJUnitTest.verifyResultOrder(result.get().getEntries().getHits(), r1_1, r2_1, r1_2, r2_2, r1_3);
+    TopEntriesJUnitTest.verifyResultOrder(result.getEntries().getHits(), r1_1, r2_1, r1_2, r2_2, r1_3);
   }
 
   @Test
   public void testResultLimitClause() throws Exception {
-    final AtomicReference<TopEntriesCollector> result = new AtomicReference<>();
 
     searchArgs = new LuceneFunctionContext<IndexResultCollector>(queryProvider, "indexName", null, 3);
+    when(mockContext.getDataSet()).thenReturn(mockRegion);
+    when(mockContext.getArguments()).thenReturn(searchArgs);
+    when(mockContext.<TopEntriesCollector>getResultSender()).thenReturn(mockResultSender);
+    when(mockRepoManager.getRepositories(eq(mockContext))).thenReturn(repos);
+
+    doAnswer(invocation -> {
+      IndexResultCollector collector = invocation.getArgumentAt(2, IndexResultCollector.class);
+      collector.collect(r1_1.getKey(), r1_1.getScore());
+      collector.collect(r1_2.getKey(), r1_2.getScore());
+      collector.collect(r1_3.getKey(), r1_3.getScore());
+      return null;
+    }).when(mockRepository1).query(eq(query), eq(3), any(IndexResultCollector.class));
+
+    doAnswer(invocation -> {
+      IndexResultCollector collector = invocation.getArgumentAt(2, IndexResultCollector.class);
+      collector.collect(r2_1.getKey(), r2_1.getScore());
+      collector.collect(r2_2.getKey(), r2_2.getScore());
+      return null;
+    }).when(mockRepository2).query(eq(query), eq(3), any(IndexResultCollector.class));
 
-    mocker.checking(new Expectations() {
-      {
-        oneOf(mockContext).getDataSet();
-        will(returnValue(mockRegion));
-        oneOf(mockContext).getArguments();
-        will(returnValue(searchArgs));
-
-        oneOf(mockContext).getResultSender();
-        will(returnValue(mockResultSender));
-
-        oneOf(mockRepoManager).getRepositories(mockContext);
-        will(returnValue(repos));
-
-        oneOf(mockRepository1).query(with(query), with(equal(3)), with(any(IndexResultCollector.class)));
-        will(new CustomAction("streamSearchResults") {
-          @Override
-          public Object invoke(Invocation invocation) throws Throwable {
-            IndexResultCollector collector = (IndexResultCollector) invocation.getParameter(2);
-            collector.collect(r1_1.getKey(), r1_1.getScore());
-            collector.collect(r1_2.getKey(), r1_2.getScore());
-            collector.collect(r1_3.getKey(), r1_3.getScore());
-            return null;
-          }
-        });
-
-        oneOf(mockRepository2).query(with(query), with(equal(3)), with(any(IndexResultCollector.class)));
-        will(new CustomAction("streamSearchResults") {
-          @Override
-          public Object invoke(Invocation invocation) throws Throwable {
-            IndexResultCollector collector = (IndexResultCollector) invocation.getParameter(2);
-            collector.collect(r2_1.getKey(), r2_1.getScore());
-            collector.collect(r2_2.getKey(), r2_2.getScore());
-            return null;
-          }
-        });
-
-        oneOf(mockResultSender).lastResult(with(any(TopEntriesCollector.class)));
-        will(new CustomAction("collectResult") {
-          @Override
-          public Object invoke(Invocation invocation) throws Throwable {
-            result.set((TopEntriesCollector) invocation.getParameter(0));
-            return null;
-          }
-        });
-      }
-    });
 
     LuceneFunction function = new LuceneFunction();
 
     function.execute(mockContext);
-    List<EntryScore> hits = result.get().getEntries().getHits();
+
+    ArgumentCaptor<TopEntriesCollector> resultCaptor  = ArgumentCaptor.forClass(TopEntriesCollector.class);
+    verify(mockResultSender).lastResult(resultCaptor.capture());
+    TopEntriesCollector result = resultCaptor.getValue();
+
+    List<EntryScore> hits = result.getEntries().getHits();
     assertEquals(3, hits.size());
-    TopEntriesJUnitTest.verifyResultOrder(result.get().getEntries().getHits(), r1_1, r2_1, r1_2);
+    TopEntriesJUnitTest.verifyResultOrder(result.getEntries().getHits(), r1_1, r2_1, r1_2);
   }
 
   @Test
   public void injectCustomCollectorManager() throws Exception {
-    final CollectorManager mockManager = mocker.mock(CollectorManager.class);
+    final CollectorManager mockManager = mock(CollectorManager.class);
     searchArgs = new LuceneFunctionContext<IndexResultCollector>(queryProvider, "indexName", mockManager);
-    mocker.checking(new Expectations() {
-      {
-        oneOf(mockContext).getDataSet();
-        will(returnValue(mockRegion));
-        oneOf(mockContext).getArguments();
-        will(returnValue(searchArgs));
-        oneOf(mockContext).getResultSender();
-        will(returnValue(mockResultSender));
-
-        oneOf(mockRepoManager).getRepositories(mockContext);
-        repos.remove(0);
-        will(returnValue(repos));
-
-        oneOf(mockManager).newCollector("repo2");
-        will(returnValue(mockCollector));
-        oneOf(mockManager).reduce(with(any(Collection.class)));
-        will(new CustomAction("reduce") {
-          @Override
-          public Object invoke(Invocation invocation) throws Throwable {
-            Collection<IndexResultCollector> collectors = (Collection<IndexResultCollector>) invocation.getParameter(0);
-            assertEquals(1, collectors.size());
-            assertEquals(mockCollector, collectors.iterator().next());
-            return new TopEntriesCollector(null);
-          }
-        });
-
-        oneOf(mockCollector).collect("key-2-1", .45f);
-
-        oneOf(mockRepository2).query(with(query), with(equal(LuceneQueryFactory.DEFAULT_LIMIT)), with(any(IndexResultCollector.class)));
-        will(new CustomAction("streamSearchResults") {
-          @Override
-          public Object invoke(Invocation invocation) throws Throwable {
-            IndexResultCollector collector = (IndexResultCollector) invocation.getParameter(2);
-            collector.collect(r2_1.getKey(), r2_1.getScore());
-            return null;
-          }
-        });
-
-        oneOf(mockResultSender).lastResult(with(any(TopEntriesCollector.class)));
-      }
-    });
+    when(mockContext.getDataSet()).thenReturn(mockRegion);
+    when(mockContext.getArguments()).thenReturn(searchArgs);
+    when(mockContext.<TopEntriesCollector>getResultSender()).thenReturn(mockResultSender);
+    repos.remove(0);
+    when(mockRepoManager.getRepositories(eq(mockContext))).thenReturn(repos);
+    when(mockManager.newCollector(eq("repo2"))).thenReturn(mockCollector);
+    when(mockManager.reduce(any(Collection.class))).thenAnswer(invocation -> {
+      Collection<IndexResultCollector> collectors = invocation.getArgumentAt(0, Collection.class);
+      assertEquals(1, collectors.size());
+      assertEquals(mockCollector, collectors.iterator().next());
+      return new TopEntriesCollector(null);
+    });
+
+    doAnswer(invocation -> {
+      IndexResultCollector collector = invocation.getArgumentAt(2, IndexResultCollector.class);
+      collector.collect(r2_1.getKey(), r2_1.getScore());
+      return null;
+    }).when(mockRepository2).query(eq(query), eq(LuceneQueryFactory.DEFAULT_LIMIT), any(IndexResultCollector.class));
+
 
     LuceneFunction function = new LuceneFunction();
 
     function.execute(mockContext);
+
+    verify(mockCollector).collect(eq("key-2-1"), eq(.45f));
+    verify(mockResultSender).lastResult(any(TopEntriesCollector.class));
   }
 
-  @Test
+  @Test(expected = FunctionException.class)
   public void testIndexRepoQueryFails() throws Exception {
-    mocker.checking(new Expectations() {
-      {
-        oneOf(mockContext).getDataSet();
-        will(returnValue(mockRegion));
-        oneOf(mockContext).getArguments();
-        will(returnValue(searchArgs));
-
-        oneOf(mockRepoManager).getRepositories(mockContext);
-        will(returnValue(repos));
-
-        oneOf(mockContext).getResultSender();
-        will(returnValue(mockResultSender));
-        oneOf(mockResultSender).sendException(with(any(IOException.class)));
-
-        oneOf(mockRepository1).query(with(query), with(equal(LuceneQueryFactory.DEFAULT_LIMIT)), with(any(IndexResultCollector.class)));
-        will(throwException(new IOException()));
-      }
-    });
+    when(mockContext.getDataSet()).thenReturn(mockRegion);
+    when(mockContext.getArguments()).thenReturn(searchArgs);
+    when(mockContext.<TopEntriesCollector>getResultSender()).thenReturn(mockResultSender);
+    when(mockRepoManager.getRepositories(eq(mockContext))).thenReturn(repos);
+    doThrow(IOException.class).when(mockRepository1).query(eq(query), eq(LuceneQueryFactory.DEFAULT_LIMIT), any(IndexResultCollector.class));
 
     LuceneFunction function = new LuceneFunction();
 
     function.execute(mockContext);
   }
 
-  @Test
+  @Test(expected = FunctionException.class)
   public void testBucketNotFound() throws Exception {
-    mocker.checking(new Expectations() {
-      {
-        oneOf(mockContext).getDataSet();
-        will(returnValue(mockRegion));
-        oneOf(mockContext).getArguments();
-        will(returnValue(searchArgs));
-
-        oneOf(mockRepoManager).getRepositories(mockContext);
-        will(throwException(new BucketNotFoundException("")));
-
-        oneOf(mockContext).getResultSender();
-        will(returnValue(mockResultSender));
-        oneOf(mockResultSender).sendException(with(any(BucketNotFoundException.class)));
-      }
-    });
-
+    when(mockContext.getDataSet()).thenReturn(mockRegion);
+    when(mockContext.getArguments()).thenReturn(searchArgs);
+    when(mockContext.<TopEntriesCollector>getResultSender()).thenReturn(mockResultSender);
+    when(mockRepoManager.getRepositories(eq(mockContext))).thenThrow(new BucketNotFoundException(""));
     LuceneFunction function = new LuceneFunction();
 
     function.execute(mockContext);
+
+    verify(mockResultSender).sendException(any(BucketNotFoundException.class));
   }
 
-  @Test
+  @Test(expected = FunctionException.class)
   public void testReduceError() throws Exception {
-    final CollectorManager mockManager = mocker.mock(CollectorManager.class);
+    final CollectorManager mockManager = mock(CollectorManager.class);
     searchArgs = new LuceneFunctionContext<IndexResultCollector>(queryProvider, "indexName", mockManager);
-    mocker.checking(new Expectations() {
-      {
-        oneOf(mockContext).getDataSet();
-        will(returnValue(mockRegion));
-        oneOf(mockContext).getResultSender();
-        will(returnValue(mockResultSender));
-        oneOf(mockContext).getArguments();
-        will(returnValue(searchArgs));
-
-        oneOf(mockManager).newCollector("repo1");
-        will(returnValue(mockCollector));
-        oneOf(mockManager).reduce(with(any(Collection.class)));
-        will(throwException(new IOException()));
-
-        oneOf(mockRepoManager).getRepositories(mockContext);
-        repos.remove(1);
-        will(returnValue(repos));
-
-        oneOf(mockRepository1).query(query, LuceneQueryFactory.DEFAULT_LIMIT, mockCollector);
-        oneOf(mockResultSender).sendException(with(any(IOException.class)));
-      }
-    });
+
+    when(mockContext.getDataSet()).thenReturn(mockRegion);
+    when(mockContext.getArguments()).thenReturn(searchArgs);
+    when(mockContext.<TopEntriesCollector>getResultSender()).thenReturn(mockResultSender);
+    repos.remove(1);
+    when(mockRepoManager.getRepositories(eq(mockContext))).thenReturn(repos);
+    when(mockManager.newCollector(eq("repo1"))).thenReturn(mockCollector);
+    when(mockManager.reduce(any(Collection.class))).thenThrow(IOException.class);
 
     LuceneFunction function = new LuceneFunction();
 
     function.execute(mockContext);
   }
 
-  @Test
+  @Test(expected = FunctionException.class)
   public void queryProviderErrorIsHandled() throws Exception {
-    queryProvider = mocker.mock(LuceneQueryProvider.class);
+    queryProvider = mock(LuceneQueryProvider.class);
     searchArgs = new LuceneFunctionContext<IndexResultCollector>(queryProvider, "indexName");
-    mocker.checking(new Expectations() {
-      {
-        oneOf(mockContext).getDataSet();
-        will(returnValue(mockRegion));
-        oneOf(mockContext).getResultSender();
-        will(returnValue(mockResultSender));
-        oneOf(mockContext).getArguments();
-        will(returnValue(searchArgs));
-
-        oneOf(queryProvider).getQuery(mockIndex);
-        will(throwException(new QueryException()));
-
-        oneOf(mockResultSender).sendException(with(any(QueryException.class)));
-      }
-    });
-
+    when(mockContext.getDataSet()).thenReturn(mockRegion);
+    when(mockContext.getArguments()).thenReturn(searchArgs);
+    when(mockContext.<TopEntriesCollector>getResultSender()).thenReturn(mockResultSender);
+    when(queryProvider.getQuery(eq(mockIndex))).thenThrow(QueryException.class);
     LuceneFunction function = new LuceneFunction();
 
     function.execute(mockContext);
@@ -369,55 +250,34 @@ public class LuceneFunctionJUnitTest {
 
   @Before
   public void createMocksAndCommonObjects() throws Exception {
-    mocker = new Mockery() {
-      {
-        setImposteriser(ClassImposteriser.INSTANCE);
-        setThreadingPolicy(new Synchroniser());
-      }
-    };
-
-    mockContext = mocker.mock(InternalRegionFunctionContext.class);
-    mockResultSender = mocker.mock(ResultSender.class);
-    mockRegion = mocker.mock(Region.class);
-
-    mockRepoManager = mocker.mock(RepositoryManager.class);
-    mockRepository1 = mocker.mock(IndexRepository.class, "repo1");
-    mockRepository2 = mocker.mock(IndexRepository.class, "repo2");
-    mockCollector = mocker.mock(IndexResultCollector.class);
+    mockContext = mock(InternalRegionFunctionContext.class);
+    mockResultSender = mock(ResultSender.class);
+    mockRegion = mock(Region.class);
+
+    mockRepoManager = mock(RepositoryManager.class);
+    mockRepository1 = mock(IndexRepository.class, "repo1");
+    mockRepository2 = mock(IndexRepository.class, "repo2");
+    mockCollector = mock(IndexResultCollector.class);
 
     repos = new ArrayList<IndexRepository>();
     repos.add(mockRepository1);
     repos.add(mockRepository2);
     
-    mockIndex = mocker.mock(InternalLuceneIndex.class);
-    mockService = mocker.mock(InternalLuceneService.class);
-    mockCache = mocker.mock(InternalCache.class);
+    mockIndex = mock(InternalLuceneIndex.class);
+    mockService = mock(InternalLuceneService.class);
+    mockCache = mock(InternalCache.class);
 
     queryProvider = new StringQueryProvider("gemfire:lucene");
     
     searchArgs = new LuceneFunctionContext<IndexResultCollector>(queryProvider, "indexName");
-    
-    mocker.checking(new Expectations() {{
-      allowing(mockRegion).getCache();
-      will(returnValue(mockCache));
-      allowing(mockRegion).getFullPath();
-      will(returnValue(regionPath));
-      allowing(mockCache).getService(InternalLuceneService.class);
-      will(returnValue(mockService));
-      allowing(mockService).getIndex(with("indexName"), with(regionPath));
-      will(returnValue(mockIndex));
-      allowing(mockIndex).getRepositoryManager();
-      will(returnValue(mockRepoManager));
-      allowing(mockIndex).getFieldNames();
-      will(returnValue(new String[] {"gemfire"}));
-    }});
-    
-    query = queryProvider.getQuery(mockIndex);
-  }
 
-  @After
-  public void validateMock() {
-    mocker.assertIsSatisfied();
-    mocker = null;
+    when(mockRegion.getCache()).thenReturn(mockCache);
+    when(mockRegion.getFullPath()).thenReturn(regionPath);
+    when(mockCache.getService(any())).thenReturn(mockService);
+    when(mockService.getIndex(eq("indexName"), eq(regionPath))).thenReturn(mockIndex);
+    when(mockIndex.getRepositoryManager()).thenReturn(mockRepoManager);
+    when(mockIndex.getFieldNames()).thenReturn(new String[] {"gemfire"});
+
+    query = queryProvider.getQuery(mockIndex);
   }
 }
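
The converted test above leans on two Mockito idioms in place of JMock's Expectations and CustomAction: doAnswer to feed results into the collector passed as an argument, and ArgumentCaptor to grab what lastResult() received. A minimal, self-contained sketch of the same idioms, using hypothetical Callback and Worker interfaces rather than Geode types:

import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

import org.mockito.ArgumentCaptor;

public class MockitoIdiomSketch {
  interface Callback { void collect(String key, float score); }
  interface Worker { void query(String text, int limit, Callback cb); }

  public static void main(String[] args) {
    Worker worker = mock(Worker.class);
    Callback sink = mock(Callback.class);

    // doAnswer plays the role of JMock's CustomAction: when query() is called,
    // stream a result into the callback that arrived as the third argument.
    doAnswer(invocation -> {
      Callback cb = (Callback) invocation.getArguments()[2];
      cb.collect("key-1", 0.9f);
      return null;
    }).when(worker).query(eq("text"), eq(10), any(Callback.class));

    worker.query("text", 10, sink);

    // ArgumentCaptor replaces the AtomicReference the JMock version used to
    // capture what was handed to the result sender.
    ArgumentCaptor<String> keyCaptor = ArgumentCaptor.forClass(String.class);
    verify(sink).collect(keyCaptor.capture(), eq(0.9f));
    assertEquals("key-1", keyCaptor.getValue());
  }
}

Capturing after the call keeps the assertions out of the stubbing block, which is why the converted tests read top-down: arrange, act, then verify.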

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3ef7913/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/distributed/TopEntriesFunctionCollectorJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/distributed/TopEntriesFunctionCollectorJUnitTest.java b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/distributed/TopEntriesFunctionCollectorJUnitTest.java
index 4f93587..b7709bc 100644
--- a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/distributed/TopEntriesFunctionCollectorJUnitTest.java
+++ b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/distributed/TopEntriesFunctionCollectorJUnitTest.java
@@ -287,10 +287,10 @@ public class TopEntriesFunctionCollectorJUnitTest {
     TopEntriesJUnitTest.verifyResultOrder(merged.getHits(), r2_1, r2_2);
   }
 
-  @Test(expected = FunctionException.class)
+  @Test(expected = RuntimeException.class)
   public void testExceptionDuringMerge() throws Exception {
     TopEntriesCollectorManager mockManager = mock(TopEntriesCollectorManager.class);
-    Mockito.doThrow(new IOException()).when(mockManager).reduce(any(Collection.class));
+    Mockito.doThrow(new RuntimeException()).when(mockManager).reduce(any(Collection.class));
 
     LuceneFunctionContext<TopEntriesCollector> context = new LuceneFunctionContext<>(null, null, mockManager);
     TopEntriesFunctionCollector collector = new TopEntriesFunctionCollector(context);


[50/63] [abbrv] incubator-geode git commit: GEODE-17 - fix the nightly build error by clearing out security manager if not configured.

Posted by kl...@apache.org.
GEODE-17 - fix the nightly build error by clearing out security manager if not configured.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/9681329d
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/9681329d
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/9681329d

Branch: refs/heads/feature/GEODE-1276
Commit: 9681329d8dbcec8d70011db13381c1dcf23ee9a5
Parents: 15b1e70
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Mon May 2 08:56:20 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Mon May 2 08:56:20 2016 -0700

----------------------------------------------------------------------
 .../gemfire/management/internal/SystemManagementService.java      | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9681329d/geode-core/src/main/java/com/gemstone/gemfire/management/internal/SystemManagementService.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/SystemManagementService.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/SystemManagementService.java
index dac016e..fd2a834 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/SystemManagementService.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/SystemManagementService.java
@@ -172,6 +172,9 @@ public final class SystemManagementService extends BaseManagementService {
       SecurityManager securityManager = new DefaultSecurityManager(realm);
       SecurityUtils.setSecurityManager(securityManager);
     }
+    else {
+      SecurityUtils.setSecurityManager(null);
+    }
 
     this.notificationHub = new NotificationHub(repo);
     if (system.getConfig().getJmxManager()) {
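
The three added lines above amount to the following Shiro pattern; this is a standalone sketch with a hypothetical helper method, not the SystemManagementService code itself:

import org.apache.shiro.SecurityUtils;
import org.apache.shiro.mgt.DefaultSecurityManager;
import org.apache.shiro.realm.Realm;

public class ShiroSetupSketch {
  // Hypothetical helper illustrating the pattern.
  static void installSecurityManager(Realm realm) {
    if (realm != null) {
      // A realm is configured: install a manager backed by it.
      SecurityUtils.setSecurityManager(new DefaultSecurityManager(realm));
    } else {
      // No realm configured: clear any manager left behind earlier so later
      // checks see an unsecured system.
      SecurityUtils.setSecurityManager(null);
    }
  }
}

Because SecurityUtils holds the manager in static state, a manager installed by an earlier component can otherwise linger when no realm is configured.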


[18/63] [abbrv] incubator-geode git commit: Merge branch 'develop' into feature/GEODE-17-2

Posted by kl...@apache.org.
Merge branch 'develop' into feature/GEODE-17-2

# Conflicts:
#	geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/c235ef84
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/c235ef84
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/c235ef84

Branch: refs/heads/feature/GEODE-1276
Commit: c235ef84adae4346933c14e58e42369d13a04abd
Parents: 7e8294d
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Wed Apr 27 14:16:06 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Wed Apr 27 14:16:06 2016 -0700

----------------------------------------------------------------------
 .../gemfire/codeAnalysis/sanctionedSerializables.txt    | 12 ------------
 1 file changed, 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c235ef84/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt
----------------------------------------------------------------------
diff --git a/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt b/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt
index f7ce62b..03746c1 100644
--- a/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt
+++ b/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt
@@ -128,18 +128,6 @@ com/gemstone/gemfire/cache/execute/EmtpyRegionFunctionException,true,1
 com/gemstone/gemfire/cache/execute/FunctionAdapter,false
 com/gemstone/gemfire/cache/execute/FunctionException,true,4893171227542647452
 com/gemstone/gemfire/cache/execute/FunctionInvocationTargetException,true,1,id:com/gemstone/gemfire/distributed/DistributedMember
-com/gemstone/gemfire/cache/hdfs/HDFSIOException,false
-com/gemstone/gemfire/cache/hdfs/StoreExistsException,true,1
-com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreConfigHolder,false,autoMajorCompact:boolean,batchIntervalMillis:int,batchSize:int,blockCacheSize:float,clientConfigFile:java/lang/String,diskStoreName:java/lang/String,diskSynchronous:boolean,dispatcherThreads:int,fileRolloverInterval:int,homeDir:java/lang/String,isAutoCompact:boolean,isPersistenceEnabled:boolean,logPrefix:java/lang/String,majorCompactionConcurrency:int,majorCompactionIntervalMins:int,maxConcurrency:int,maxFileSize:int,maxInputFileCount:int,maxInputFileSizeMB:int,maximumQueueMemory:int,minInputFileCount:int,name:java/lang/String,namenodeURL:java/lang/String,oldFileCleanupIntervalMins:int
-com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManager$CompactionIsDisabled,true,1
-com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSFlushQueueFunction,false
-com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionFunction,false
-com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSLastCompactionTimeFunction,false
-com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer$HoplogReadersController$1,true,1,this$1:com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer$HoplogReadersController,val$this$0:com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer
-com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer$HoplogReadersController$2,true,1,this$1:com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer$HoplogReadersController,val$this$0:com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer
-com/gemstone/gemfire/cache/hdfs/internal/hoplog/Hoplog$HoplogVersion,false
-com/gemstone/gemfire/cache/hdfs/internal/hoplog/Hoplog$Meta,false
-com/gemstone/gemfire/cache/hdfs/internal/org/apache/hadoop/io/SequenceFile$CompressionType,false
 com/gemstone/gemfire/cache/operations/OperationContext$OperationCode,false
 com/gemstone/gemfire/cache/operations/OperationContext$Resource,false
 com/gemstone/gemfire/cache/partition/PartitionNotAvailableException,true,1


[14/63] [abbrv] incubator-geode git commit: GEODE-1072: Removing HDFS related code

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSParallelGatewaySenderQueue.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSParallelGatewaySenderQueue.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSParallelGatewaySenderQueue.java
deleted file mode 100644
index 1e6a034..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSParallelGatewaySenderQueue.java
+++ /dev/null
@@ -1,471 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
-
-
-import com.gemstone.gemfire.cache.CacheException;
-import com.gemstone.gemfire.cache.EntryNotFoundException;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.i18n.LogWriterI18n;
-import com.gemstone.gemfire.internal.SystemTimer;
-import com.gemstone.gemfire.internal.SystemTimer.SystemTimerTask;
-import com.gemstone.gemfire.internal.cache.ColocationHelper;
-import com.gemstone.gemfire.internal.cache.ForceReattemptException;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-import com.gemstone.gemfire.internal.cache.wan.AbstractGatewaySender;
-import com.gemstone.gemfire.internal.cache.wan.GatewaySenderEventImpl;
-import com.gemstone.gemfire.internal.cache.wan.parallel.ParallelGatewaySenderQueue;
-
-/**
- * Parallel Gateway Sender Queue extended for HDFS functionality 
- *
- */
-public class HDFSParallelGatewaySenderQueue extends ParallelGatewaySenderQueue {
-
-  private int currentBucketIndex = 0;
-  private int elementsPeekedAcrossBuckets = 0;
-  private SystemTimer rollListTimer = null;
-  public static final String ROLL_SORTED_LIST_TIME_INTERVAL_MS__PROP = "gemfire.ROLL_SORTED_LIST_TIME_INTERVAL_MS";
-  private final int ROLL_SORTED_LIST_TIME_INTERVAL_MS = Integer.getInteger(ROLL_SORTED_LIST_TIME_INTERVAL_MS__PROP, 3000);
-  
-  public HDFSParallelGatewaySenderQueue(AbstractGatewaySender sender,
-      Set<Region> userPRs, int idx, int nDispatcher) {
-     
-    super(sender, userPRs, idx, nDispatcher);
-    //only first dispatcher Hemant?
-    if (sender.getBucketSorted() && this.index == 0) {
-      rollListTimer = new SystemTimer(sender.getCache().getDistributedSystem(),
-          true);
-      // schedule the task to roll the skip lists
-      rollListTimer.scheduleAtFixedRate(new RollSortedListsTimerTask(), 
-          ROLL_SORTED_LIST_TIME_INTERVAL_MS, ROLL_SORTED_LIST_TIME_INTERVAL_MS);
-    }
-  }
-  
-  @Override
-  public Object peek() throws InterruptedException, CacheException {
-    /* If you call peek and use super.peek it leads to the following exception.
-     * So I'm adding an explicit UnsupportedOperationException.
-     Caused by: java.lang.ClassCastException: com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue cannot be cast to com.gemstone.gemfire.internal.cache.BucketRegionQueue
-        at com.gemstone.gemfire.internal.cache.wan.parallel.ParallelGatewaySenderQueue.getRandomPrimaryBucket(ParallelGatewaySenderQueue.java:964)
-        at com.gemstone.gemfire.internal.cache.wan.parallel.ParallelGatewaySenderQueue.peek(ParallelGatewaySenderQueue.java:1078)
-     */
-    throw new UnsupportedOperationException();
-  }
-  
-  
-  @Override
-  public void cleanUp() {
-    super.cleanUp();
-    cancelRollListTimer();
-  }
-  
-  private void cancelRollListTimer() {
-    if (rollListTimer != null) {
-      rollListTimer.cancel();
-      rollListTimer = null;
-    }
-  }
-  /**
-   * A call to this function peeks elements from the first local primary bucket. 
-   * Next call to this function peeks elements from the next local primary 
-   * bucket and so on.  
-   */
-  @Override
-  public List peek(int batchSize, int timeToWait) throws InterruptedException,
-  CacheException {
-    
-    List batch = new ArrayList();
-    
-    int batchSizeInBytes = batchSize*1024*1024;
-    PartitionedRegion prQ = getRandomShadowPR();
-    if (prQ == null || prQ.getLocalMaxMemory() == 0) {
-      try {
-        Thread.sleep(50);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-      }
-      blockProcesorThreadIfRequired();
-      return batch;
-    }
-    
-    ArrayList list = null;
-    ArrayList<Integer> pbuckets = new ArrayList<Integer>(prQ
-        .getDataStore().getAllLocalPrimaryBucketIds());
-    ArrayList<Integer> buckets = new ArrayList<Integer>();
-    for(Integer i : pbuckets) {
-    	if(i % this.nDispatcher == this.index)
-    		buckets.add(i);
-    }
-    // In case of failures, peekedEvents would possibly have some elements 
-    // add them. 
-    if (this.resetLastPeeked) {
-      int previousBucketId = -1;
-      boolean stillPrimary = true; 
-      Iterator<GatewaySenderEventImpl>  iter = peekedEvents.iterator();
-      // we need to remove the events of the bucket that are no more primary on 
-      // this node as they cannot be persisted from this node. 
-      while(iter.hasNext()) {
-        HDFSGatewayEventImpl hdfsEvent = (HDFSGatewayEventImpl)iter.next();
-        if (previousBucketId != hdfsEvent.getBucketId()){
-          stillPrimary = buckets.contains(hdfsEvent.getBucketId());
-          previousBucketId = hdfsEvent.getBucketId();
-        }
-        if (stillPrimary)
-          batch.add(hdfsEvent);
-        else {
-          iter.remove();
-        }
-      }
-      this.resetLastPeeked = false;
-    }
-    
-    if (buckets.size() == 0) {
-      // Sleep a bit before trying again. provided by Dan
-      try {
-        Thread.sleep(50);
-      }
-      catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-      }
-      return batch;
-    }
-    
-    if (this.sender.getBucketSorted()) {
-      
-    }
-    
-    // Each call to this function returns index of next bucket 
-    // that is to be processed. This function takes care 
-    // of the bucket sequence that is peeked by a sequence of 
-    // peek calls. 
-    // If there are bucket movements between two consecutive 
-    // calls to this function then there is chance that a bucket 
-    // is processed twice while another one is skipped. But, that is 
-    // ok because in the next round, it will be processed. 
-    Integer bIdIndex = getCurrentBucketIndex(buckets.size());
-    
-    // If we have gone through all the buckets once and no  
-    // elements were peeked from any of the buckets, take a nap.  
-    // This always sleep in the first call but that should be ok  
-    // because the timeToWait in practical use cases would be greater 
-    // than this sleep of 100 ms.  
-    if (bIdIndex == 0 && getAndresetElementsPeekedAcrossBuckets() == 0) { 
-      try { 
-        Thread.sleep(100); 
-      } catch (InterruptedException e) { 
-        Thread.currentThread().interrupt(); 
-      } 
-    } 
-    
-    HDFSBucketRegionQueue hrq = ((HDFSBucketRegionQueue)prQ
-        .getDataStore().getLocalBucketById(buckets.get(bIdIndex)));
-    
-    if (hrq == null) {
-      // bucket moved to another node after getAllLocalPrimaryBucketIds
-      // was called. Peeking not possible. return. 
-      return batch;
-    }
-    long entriesWaitingTobePeeked = hrq.totalEntries();
-    
-    if (entriesWaitingTobePeeked == 0) {
-      blockProcesorThreadIfRequired();
-      return batch;
-    }
-    
-    long currentTimeInMillis = System.currentTimeMillis();
-    long bucketSizeInBytes = hrq.getQueueSizeInBytes();
-    if (((currentTimeInMillis - hrq.getLastPeekTimeInMillis()) >  timeToWait)  
-        || ( bucketSizeInBytes > batchSizeInBytes)
-        || hrq.shouldDrainImmediately()) {
-      // peek now
-      if (logger.isDebugEnabled()) { 
-        logger.debug("Peeking queue " + hrq.getId()   + ": bucketSizeInBytes " + bucketSizeInBytes
-            + ":  batchSizeInBytes" + batchSizeInBytes
-            + ":  timeToWait" + timeToWait
-            + ":  (currentTimeInMillis - hrq.getLastPeekTimeInMillis())" + (currentTimeInMillis - hrq.getLastPeekTimeInMillis()));
-      }
-
-      list = peekAhead(buckets.get(bIdIndex), hrq);
-      
-      if (list != null && list.size() != 0 ) {
-        for (Object object : list) {
-          batch.add(object);
-          peekedEvents.add((HDFSGatewayEventImpl)object);
-        }
-      }
-    }
-    else {
-      blockProcesorThreadIfRequired();
-    }
-    if (logger.isDebugEnabled()  &&  batch.size() > 0) {
-      logger.debug(this + ":  Peeked a batch of " + batch.size() + " entries");
-    }
-    
-    setElementsPeekedAcrossBuckets(batch.size()); 
-    
-    return batch;
-  }
-  
-  /**
-   * This function maintains an index of the last processed bucket.
-   * When it is called, it returns index of the next bucket. 
-   * @param totalBuckets
-   * @return current bucket index
-   */
-  private int getCurrentBucketIndex(int totalBuckets) {
-    int retBucket = currentBucketIndex;
-    if (retBucket >=  totalBuckets) {
-      currentBucketIndex = 0;
-      retBucket = 0;
-    }
-    
-    currentBucketIndex++;
-    
-    return retBucket;
-  }
-  
-  @Override
-  public void remove(int batchSize) throws CacheException {
-    int destroyed = 0;
-    HDFSGatewayEventImpl event = null;
-    
-    if (this.peekedEvents.size() > 0)
-      event = (HDFSGatewayEventImpl)this.peekedEvents.remove();
-    
-    while (event != null && destroyed < batchSize) {
-      Region currentRegion = event.getRegion();
-      int currentBucketId = event.getBucketId();
-      int bucketId = event.getBucketId();
-        
-      ArrayList<HDFSGatewayEventImpl> listToDestroy = new ArrayList<HDFSGatewayEventImpl>();
-      ArrayList<Object> destroyedSeqNum = new ArrayList<Object>();
-      
-      // create a batch of all the entries of a bucket 
-      while (bucketId == currentBucketId) {
-        listToDestroy.add(event);
-        destroyedSeqNum.add(event.getShadowKey());
-        destroyed++;
-
-        if (this.peekedEvents.size() == 0 || (destroyed) >= batchSize) {
-          event = null; 
-          break;
-        }
-
-        event = (HDFSGatewayEventImpl)this.peekedEvents.remove();
-
-        bucketId = event.getBucketId();
-
-        if (!this.sender.isRunning()){
-          if (logger.isDebugEnabled()) {
-            logger.debug("ParallelGatewaySenderQueue#remove: Cache is closing down. Ignoring remove request.");
-          }
-          return;
-        }
-      }
-      try {
-        HDFSBucketRegionQueue brq = getBucketRegionQueue((PartitionedRegion) currentRegion, currentBucketId);
-        
-        if (brq != null) {
-          // destroy the entries from the bucket 
-          brq.destroyKeys(listToDestroy);
-          // Adding the removed event to the map for BatchRemovalMessage
-          // We need to provide the prQ as there could be multiple
-          // queue in a PGS now.
-          PartitionedRegion prQ = brq.getPartitionedRegion();
-          addRemovedEvents(prQ, currentBucketId, destroyedSeqNum);
-        }
-        
-      } catch (ForceReattemptException e) {
-        if (logger.isDebugEnabled()) {
-          logger.debug("ParallelGatewaySenderQueue#remove: " + "Got ForceReattemptException for " + this
-          + " for bucket = " + bucketId);
-        }
-      }
-      catch(EntryNotFoundException e) {
-        if (logger.isDebugEnabled()) {
-          logger.debug("ParallelGatewaySenderQueue#remove: " + "Got EntryNotFoundException for " + this
-            + " for bucket = " + bucketId );
-        }
-      }
-    }
-  }
-  
-  /** 
-  * Keeps a track of number of elements peeked across all buckets.  
-  */ 
-  private void setElementsPeekedAcrossBuckets(int peekedElements) { 
-    this.elementsPeekedAcrossBuckets +=peekedElements; 
-  } 
-  
-  /** 
-  * Returns the number of elements peeked across all buckets. Also, 
-  * resets this counter. 
-  */ 
-  private int getAndresetElementsPeekedAcrossBuckets() { 
-    int peekedElements = this.elementsPeekedAcrossBuckets; 
-    this.elementsPeekedAcrossBuckets = 0; 
-    return peekedElements; 
-  } 
-
-  @Override
-  public void remove() throws CacheException {
-    throw new UnsupportedOperationException("Method HDFSParallelGatewaySenderQueue#remove is not supported");
-  }
- 
-  @Override
-  public void put(Object object) throws InterruptedException, CacheException {
-    super.put(object);
-  }
-  
-  protected ArrayList peekAhead(int bucketId, HDFSBucketRegionQueue hrq) throws CacheException {
-    
-    if (logger.isDebugEnabled()) {
-      logger.debug(this + ": Peekahead for the bucket " + bucketId);
-    }
-    ArrayList  list = hrq.peekABatch();
-    if (logger.isDebugEnabled() && list != null ) {
-      logger.debug(this + ": Peeked" + list.size() + "objects from bucket " + bucketId);
-    }
-
-    return list;
-  }
-  
-  @Override
-  public Object take() {
-    throw new UnsupportedOperationException("take() is not supported for " + HDFSParallelGatewaySenderQueue.class.toString());
-  }
-  
-  protected boolean isUsedForHDFS()
-  {
-    return true;
-  }
-  
-  @Override
-  protected void afterRegionAdd (PartitionedRegion userPR) {
-  }
-  
-  /**
-   * gets the value for region key from the HDFSBucketRegionQueue 
- * @param region 
-   * @throws ForceReattemptException 
-   */
-  public HDFSGatewayEventImpl get(PartitionedRegion region, byte[] regionKey, int bucketId) throws ForceReattemptException  {
-    try {
-      HDFSBucketRegionQueue brq = getBucketRegionQueue(region, bucketId);
-      
-      if (brq ==null)
-        return null;
-      
-      return brq.getObjectForRegionKey(region, regionKey);
-    } catch(EntryNotFoundException e) {
-      if (logger.isDebugEnabled()) {
-        logger.debug("HDFSParallelGatewaySenderQueue#get: " + "Got EntryNotFoundException for " + this
-            + " for bucket = " + bucketId);
-      }
-    }
-    return null;
-  }
-
-  @Override
-  public void clear(PartitionedRegion pr, int bucketId) {
-    HDFSBucketRegionQueue brq;
-    try {
-      brq = getBucketRegionQueue(pr, bucketId);
-      if (brq == null)
-        return;
-      brq.clear();
-    } catch (ForceReattemptException e) {
-      //do nothing, bucket was destroyed.
-    }
-  }
-  
-  @Override
-  public int size(PartitionedRegion pr, int bucketId) throws ForceReattemptException {
-   HDFSBucketRegionQueue hq = getBucketRegionQueue(pr, bucketId);
-   return hq.size();
-  }
-
-  public HDFSBucketRegionQueue getBucketRegionQueue(PartitionedRegion region,
-      int bucketId) throws ForceReattemptException {
-    PartitionedRegion leader = ColocationHelper.getLeaderRegion(region);
-    if (leader == null)
-      return null;
-    String leaderregionPath = leader.getFullPath();
-    PartitionedRegion prQ = this.userRegionNameToshadowPRMap.get(leaderregionPath);
-    if (prQ == null)
-      return null;
-    HDFSBucketRegionQueue brq;
-
-    brq = ((HDFSBucketRegionQueue)prQ.getDataStore()
-        .getLocalBucketById(bucketId));
-    if(brq == null) {
-      prQ.getRegionAdvisor().waitForLocalBucketStorage(bucketId);
-    }
-    brq = ((HDFSBucketRegionQueue)prQ.getDataStore()
-        .getInitializedBucketForId(null, bucketId));
-    return brq;
-  }
-  
-  /**
-   * This class has the responsibility of rolling the lists of Sorted event 
-   * Queue. The rolling of lists by a separate thread is required because 
-   * neither put thread nor the peek/remove thread can do that. Put thread
-   * cannot do it because that would mean doing some synchronization with 
-   * other put threads and peek thread that would hamper the put latency. 
-   * Peek thread cannot do it because if the event insert rate is too high
-   * the list size can go way beyond what its size. 
-   *
-   */
-  class RollSortedListsTimerTask extends SystemTimerTask {
-    
-    
-    /**
-     * This function ensures that if any of the buckets has lists that are beyond 
-     * its size, they gets rolled over into new skip lists. 
-     */
-    @Override
-    public void run2() {
-      Set<PartitionedRegion> prQs = getRegions();
-      for (PartitionedRegion prQ : prQs) {
-        ArrayList<Integer> buckets = new ArrayList<Integer>(prQ
-            .getDataStore().getAllLocalPrimaryBucketIds());
-        for (Integer bId : buckets) {
-          HDFSBucketRegionQueue hrq =  ((HDFSBucketRegionQueue)prQ
-              .getDataStore().getLocalBucketById(bId));
-          if (hrq == null) {
-            // bucket moved to another node after getAllLocalPrimaryBucketIds
-            // was called. continue fixing the next bucket. 
-            continue;
-          }
-          if (logger.isDebugEnabled()) {
-            logger.debug("Rolling over the list for bucket id: " + bId);
-          }
-          hrq.rolloverSkipList();
-         }
-      }
-    }
-  }
-   
-}
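
The RollSortedListsTimerTask removed above ran skip-list rollovers on a dedicated timer so that neither the put path nor the peek/remove path had to do that work. A generic sketch of the same periodic-maintenance pattern, using java.util.Timer instead of Geode's SystemTimer and a hypothetical interval property:

import java.util.Timer;
import java.util.TimerTask;

public class RolloverTimerSketch {
  public static void main(String[] args) throws InterruptedException {
    // Hypothetical property; the removed code read
    // gemfire.ROLL_SORTED_LIST_TIME_INTERVAL_MS with a 3000 ms default.
    long intervalMs = Long.getLong("roll.interval.ms", 3000L);

    Timer timer = new Timer("rollover-timer", true); // daemon, cannot block shutdown
    timer.scheduleAtFixedRate(new TimerTask() {
      @Override
      public void run() {
        // The removed task walked every local primary bucket and rolled its
        // skip list here; this placeholder just records that the tick fired.
        System.out.println("rolling over in-memory sorted lists");
      }
    }, intervalMs, intervalMs);

    Thread.sleep(10_000L); // let a few ticks run, then stop
    timer.cancel();
  }
}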

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreConfigHolder.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreConfigHolder.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreConfigHolder.java
deleted file mode 100644
index 16d3d87..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreConfigHolder.java
+++ /dev/null
@@ -1,559 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.Serializable;
-
-import org.apache.logging.log4j.Logger;
-
-import com.gemstone.gemfire.GemFireConfigException;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
-import com.gemstone.gemfire.cache.hdfs.StoreExistsException;
-import com.gemstone.gemfire.internal.cache.xmlcache.CacheXml;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.internal.logging.LogService;
-
-
-/**
- * Class to hold all hdfs store related configuration. Instead of copying the
- * same members in two different classes, factory and store, this class will be
- * used. The idea is let HdfsStoreImpl and HdfsStoreCreation delegate get calls,
- * set calls and copy constructor calls this class. Moreover this config holder
- * can be entirely replaced to support alter config
- * 
- */
-public class HDFSStoreConfigHolder implements HDFSStore, HDFSStoreFactory ,Serializable {  
-  private String name = null;
-  private String namenodeURL = null;
-  private String homeDir = DEFAULT_HOME_DIR;
-  private String clientConfigFile = null;
-  private float blockCacheSize = DEFAULT_BLOCK_CACHE_SIZE;
-  private int maxFileSize = DEFAULT_WRITE_ONLY_FILE_SIZE_LIMIT;
-  private int fileRolloverInterval = DEFAULT_WRITE_ONLY_FILE_ROLLOVER_INTERVAL;
-  protected boolean isAutoCompact = DEFAULT_MINOR_COMPACTION;
-  protected boolean autoMajorCompact = DEFAULT_MAJOR_COMPACTION;
-  protected int maxConcurrency = DEFAULT_MINOR_COMPACTION_THREADS;
-  protected int majorCompactionConcurrency = DEFAULT_MAJOR_COMPACTION_THREADS;
-  protected int majorCompactionIntervalMins = DEFAULT_MAJOR_COMPACTION_INTERVAL_MINS;
-  protected int maxInputFileSizeMB = DEFAULT_INPUT_FILE_SIZE_MAX_MB;
-  protected int maxInputFileCount = DEFAULT_INPUT_FILE_COUNT_MAX;
-  protected int minInputFileCount = DEFAULT_INPUT_FILE_COUNT_MIN;
-  protected int oldFileCleanupIntervalMins = DEFAULT_OLD_FILE_CLEANUP_INTERVAL_MINS;
-  
-  protected int batchSize = DEFAULT_BATCH_SIZE_MB;
-  protected int batchIntervalMillis = DEFAULT_BATCH_INTERVAL_MILLIS;
-  protected int maximumQueueMemory = DEFAULT_MAX_BUFFER_MEMORY;
-  protected boolean isPersistenceEnabled = DEFAULT_BUFFER_PERSISTANCE;
-  protected String diskStoreName = null;
-  protected boolean diskSynchronous = DEFAULT_DISK_SYNCHRONOUS; 
-  protected int dispatcherThreads = DEFAULT_DISPATCHER_THREADS;
-  
-  private static final Logger logger = LogService.getLogger();
-  protected final String logPrefix;
-
-  public HDFSStoreConfigHolder() {
-    this(null);
-  }
-
-  /**
-   * @param config configuration source for creating this instance 
-   */
-  public HDFSStoreConfigHolder(HDFSStore config) {
-    this.logPrefix = "<" + getName() + "> ";
-    if (config == null) {
-      return;
-    }
-    
-    this.name = config.getName();
-    this.namenodeURL = config.getNameNodeURL();
-    this.homeDir = config.getHomeDir();
-    this.clientConfigFile = config.getHDFSClientConfigFile();
-    this.blockCacheSize = config.getBlockCacheSize();
-    this.maxFileSize = config.getWriteOnlyFileRolloverSize();
-    this.fileRolloverInterval = config.getWriteOnlyFileRolloverInterval();
-    isAutoCompact = config.getMinorCompaction();
-    maxConcurrency = config.getMinorCompactionThreads();
-    autoMajorCompact = config.getMajorCompaction();
-    majorCompactionConcurrency = config.getMajorCompactionThreads();
-    majorCompactionIntervalMins = config.getMajorCompactionInterval();
-    maxInputFileSizeMB = config.getInputFileSizeMax();
-    maxInputFileCount = config.getInputFileCountMax();
-    minInputFileCount = config.getInputFileCountMin();
-    oldFileCleanupIntervalMins = config.getPurgeInterval();
-    
-    batchSize = config.getBatchSize();
-    batchIntervalMillis = config.getBatchInterval();
-    maximumQueueMemory = config.getMaxMemory();
-    isPersistenceEnabled = config.getBufferPersistent();
-    diskStoreName = config.getDiskStoreName();
-    diskSynchronous = config.getSynchronousDiskWrite();
-    dispatcherThreads = config.getDispatcherThreads();
-  }
-  
-  public void resetDefaultValues() {
-    name = null;
-    namenodeURL = null;
-    homeDir = null;
-    clientConfigFile = null;
-    blockCacheSize = -1f;
-    maxFileSize = -1;
-    fileRolloverInterval = -1;
-    
-    isAutoCompact = false;
-    maxConcurrency = -1;
-    maxInputFileSizeMB = -1;
-    maxInputFileCount = -1;
-    minInputFileCount = -1;
-    oldFileCleanupIntervalMins = -1;
-
-    autoMajorCompact = false;
-    majorCompactionConcurrency = -1;
-    majorCompactionIntervalMins = -1;
-    
-    batchSize = -1;
-    batchIntervalMillis = -1;
-    maximumQueueMemory = -1;
-    isPersistenceEnabled = false;
-    diskStoreName = null;
-    diskSynchronous = false; 
-    dispatcherThreads = -1;
-  }
-  
-  public void copyFrom(HDFSStoreMutator mutator) {
-    if (mutator.getWriteOnlyFileRolloverInterval() >= 0) {
-      logAttrMutation("fileRolloverInterval", mutator.getWriteOnlyFileRolloverInterval());
-      setWriteOnlyFileRolloverInterval(mutator.getWriteOnlyFileRolloverInterval());
-    }
-    if (mutator.getWriteOnlyFileRolloverSize() >= 0) {
-      logAttrMutation("MaxFileSize", mutator.getWriteOnlyFileRolloverInterval());
-      setWriteOnlyFileRolloverSize(mutator.getWriteOnlyFileRolloverSize());
-    }
-    
-    if (mutator.getMinorCompaction() != null) {
-      logAttrMutation("MinorCompaction", mutator.getMinorCompaction());
-      setMinorCompaction(mutator.getMinorCompaction());
-    }
-    
-    if (mutator.getMinorCompactionThreads() >= 0) {
-      logAttrMutation("MaxThreads", mutator.getMinorCompactionThreads());
-      setMinorCompactionThreads(mutator.getMinorCompactionThreads());
-    }
-    
-    if (mutator.getMajorCompactionInterval() > -1) {
-      logAttrMutation("MajorCompactionIntervalMins", mutator.getMajorCompactionInterval());
-      setMajorCompactionInterval(mutator.getMajorCompactionInterval());
-    }
-    if (mutator.getMajorCompactionThreads() >= 0) {
-      logAttrMutation("MajorCompactionMaxThreads", mutator.getMajorCompactionThreads());
-      setMajorCompactionThreads(mutator.getMajorCompactionThreads());
-    }
-    if (mutator.getMajorCompaction() != null) {
-      logAttrMutation("AutoMajorCompaction", mutator.getMajorCompaction());
-      setMajorCompaction(mutator.getMajorCompaction());
-    }
-    if (mutator.getInputFileCountMax() >= 0) {
-      logAttrMutation("maxInputFileCount", mutator.getInputFileCountMax());
-      setInputFileCountMax(mutator.getInputFileCountMax());
-    }
-    if (mutator.getInputFileSizeMax() >= 0) {
-      logAttrMutation("MaxInputFileSizeMB", mutator.getInputFileSizeMax());
-      setInputFileSizeMax(mutator.getInputFileSizeMax());
-    }
-    if (mutator.getInputFileCountMin() >= 0) {
-      logAttrMutation("MinInputFileCount", mutator.getInputFileCountMin());
-      setInputFileCountMin(mutator.getInputFileCountMin());
-    }    
-    if (mutator.getPurgeInterval() >= 0) {
-      logAttrMutation("OldFilesCleanupIntervalMins", mutator.getPurgeInterval());
-      setPurgeInterval(mutator.getPurgeInterval());
-    }
-    
-    if (mutator.getBatchSize() >= 0) {
-      logAttrMutation("batchSizeMB", mutator.getWriteOnlyFileRolloverInterval());
-      setBatchSize(mutator.getBatchSize());
-    }
-    if (mutator.getBatchInterval() >= 0) {
-      logAttrMutation("batchTimeInterval", mutator.getWriteOnlyFileRolloverInterval());
-      setBatchInterval(mutator.getBatchInterval());
-    }
-  }
-
-  void logAttrMutation(String name, Object value) {
-    if (logger.isDebugEnabled()) {
-      logger.debug("{}Alter " + name + ":" + value, logPrefix);
-    }
-  }
-
-  @Override
-  public String getName() {
-    return name;
-  }
-  @Override
-  public HDFSStoreFactory setName(String name) {
-    this.name = name;
-    return this;
-  }
-
-  @Override
-  public String getNameNodeURL() {
-    return namenodeURL;
-  }
-  @Override
-  public HDFSStoreFactory setNameNodeURL(String namenodeURL) {
-    this.namenodeURL = namenodeURL;
-    return this;
-  }
-
-  @Override
-  public String getHomeDir() {
-    return homeDir;
-  }
-  @Override
-  public HDFSStoreFactory setHomeDir(String homeDir) {
-    this.homeDir = homeDir;
-    return this;
-  }
-
-  @Override
-  public String getHDFSClientConfigFile() {
-    return clientConfigFile;
-  }
-  @Override
-  public HDFSStoreFactory setHDFSClientConfigFile(String clientConfigFile) {
-    this.clientConfigFile = clientConfigFile;
-    return this;
-  }
-  
-  @Override
-  public HDFSStoreFactory setBlockCacheSize(float percentage) {
-    if(percentage < 0 || percentage > 100) {
-      throw new IllegalArgumentException("Block cache size must be between 0 and 100, inclusive");
-    }
-    this.blockCacheSize  = percentage;
-    return this;
-  }
-  
-  @Override
-  public float getBlockCacheSize() {
-    return blockCacheSize;
-  }
-  
-  @Override
-  public HDFSStoreFactory setWriteOnlyFileRolloverSize(int maxFileSize) {
-    assertIsPositive(CacheXml.HDFS_WRITE_ONLY_FILE_ROLLOVER_INTERVAL, maxFileSize);
-    this.maxFileSize = maxFileSize;
-    return this;
-  }
-  @Override
-  public int getWriteOnlyFileRolloverSize() {
-    return maxFileSize;
-  }
-
-  @Override
-  public HDFSStoreFactory setWriteOnlyFileRolloverInterval(int count) {
-    assertIsPositive(CacheXml.HDFS_TIME_FOR_FILE_ROLLOVER, count);
-    this.fileRolloverInterval = count;
-    return this;
-  }
-  @Override
-  public int getWriteOnlyFileRolloverInterval() {
-    return fileRolloverInterval;
-  }
-  
-  @Override
-  public boolean getMinorCompaction() {
-    return isAutoCompact;
-  }
-  @Override
-  public HDFSStoreFactory setMinorCompaction(boolean auto) {
-    this.isAutoCompact = auto;
-    return this;
-  }
-
-  @Override
-  public HDFSStoreFactory setMinorCompactionThreads(int count) {
-    assertIsPositive(CacheXml.HDFS_MINOR_COMPACTION_THREADS, count);
-    this.maxConcurrency = count;
-    return this;
-  }
-  @Override
-  public int getMinorCompactionThreads() {
-    return maxConcurrency;
-  }
-
-  @Override
-  public HDFSStoreFactory setMajorCompaction(boolean auto) {
-    this.autoMajorCompact = auto;
-    return this;
-  }
-  @Override
-  public boolean getMajorCompaction() {
-    return autoMajorCompact;
-  }
-
-  @Override
-  public HDFSStoreFactory setMajorCompactionInterval(int count) {
-    HDFSStoreCreation.assertIsPositive(CacheXml.HDFS_MAJOR_COMPACTION_INTERVAL, count);
-    this.majorCompactionIntervalMins = count;
-    return this;
-  }
-  @Override
-  public int getMajorCompactionInterval() {
-    return majorCompactionIntervalMins;
-  }
-
-  @Override
-  public HDFSStoreFactory setMajorCompactionThreads(int count) {
-    HDFSStoreCreation.assertIsPositive(CacheXml.HDFS_MAJOR_COMPACTION_THREADS, count);
-    this.majorCompactionConcurrency = count;
-    return this;
-  }
-  @Override
-  public int getMajorCompactionThreads() {
-    return majorCompactionConcurrency;
-  }
-  
-  @Override
-  public HDFSStoreFactory setInputFileSizeMax(int size) {
-    HDFSStoreCreation.assertIsPositive("HDFS_COMPACTION_MAX_INPUT_FILE_SIZE_MB", size);
-    this.maxInputFileSizeMB = size;
-    return this;
-  }
-  @Override
-  public int getInputFileSizeMax() {
-    return maxInputFileSizeMB;
-  }
-
-  @Override
-  public HDFSStoreFactory setInputFileCountMin(int count) {
-    HDFSStoreCreation.assertIsPositive("HDFS_COMPACTION_MIN_INPUT_FILE_COUNT", count);
-    this.minInputFileCount = count;
-    return this;
-  }
-  @Override
-  public int getInputFileCountMin() {
-    return minInputFileCount;
-  }
-
-  @Override
-  public HDFSStoreFactory setInputFileCountMax(int count) {
-    HDFSStoreCreation.assertIsPositive("HDFS_COMPACTION_MAX_INPUT_FILE_COUNT", count);
-    this.maxInputFileCount = count;
-    return this;
-  }
-  @Override
-  public int getInputFileCountMax() {
-    return maxInputFileCount;
-  }
-
-  @Override
-  public int getPurgeInterval() {
-    return oldFileCleanupIntervalMins ;
-  }    
-  @Override
-  public HDFSStoreFactory setPurgeInterval(int interval) {
-    assertIsPositive(CacheXml.HDFS_PURGE_INTERVAL, interval);
-    this.oldFileCleanupIntervalMins = interval;
-    return this;
-  }
-  
-  protected void validate() {
-    if (minInputFileCount > maxInputFileCount) {
-      throw new IllegalArgumentException(
-          LocalizedStrings.HOPLOG_MIN_IS_MORE_THAN_MAX
-          .toLocalizedString(new Object[] {
-              "HDFS_COMPACTION_MIN_INPUT_FILE_COUNT",
-              minInputFileCount,
-              "HDFS_COMPACTION_MAX_INPUT_FILE_COUNT",
-              maxInputFileCount }));
-    }
-  }
-
-  /**
-   * This method should not be called on this class.
-   * @see HDFSStoreFactory#create(String)
-   */
-  @Override
-  public HDFSStore create(String name) throws GemFireConfigException,
-      StoreExistsException {
-    throw new UnsupportedOperationException();
-  }
-
-  /**
-   * This method should not be called on this class.
-   * @see HDFSStoreImpl#destroy()
-   */
-  @Override
-  public void destroy() {
-    throw new UnsupportedOperationException();
-  }
-  
-  public static void assertIsPositive(String name, int count) {
-    if (count < 1) {
-      throw new IllegalArgumentException(
-          LocalizedStrings.DiskWriteAttributesImpl_0_HAS_TO_BE_POSITIVE_NUMBER_AND_THE_VALUE_GIVEN_1_IS_NOT_ACCEPTABLE
-              .toLocalizedString(new Object[] { name, count }));
-    }
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder builder = new StringBuilder();
-    builder.append("HDFSStoreConfigHolder@");
-    builder.append(System.identityHashCode(this));
-    builder.append(" [");
-    appendStrProp(builder, name, "name");
-    appendStrProp(builder, namenodeURL, "namenodeURL");
-    appendStrProp(builder, homeDir, "homeDir");
-    appendStrProp(builder, clientConfigFile, "clientConfigFile");
-    if (blockCacheSize > -1) {
-      builder.append("blockCacheSize=");
-      builder.append(blockCacheSize);
-      builder.append(", ");
-    }
-    appendIntProp(builder, maxFileSize, "maxFileSize");
-    appendIntProp(builder, fileRolloverInterval, "fileRolloverInterval");
-    appendBoolProp(builder, isAutoCompact, "isAutoCompact");
-    appendBoolProp(builder, autoMajorCompact, "autoMajorCompact");
-    appendIntProp(builder, maxConcurrency, "maxConcurrency");
-    appendIntProp(builder, majorCompactionConcurrency, "majorCompactionConcurrency");
-    appendIntProp(builder, majorCompactionIntervalMins, "majorCompactionIntervalMins");
-    appendIntProp(builder, maxInputFileSizeMB, "maxInputFileSizeMB");
-    appendIntProp(builder, maxInputFileCount, "maxInputFileCount");
-    appendIntProp(builder, minInputFileCount, "minInputFileCount");
-    appendIntProp(builder, oldFileCleanupIntervalMins, "oldFileCleanupIntervalMins");
-    appendIntProp(builder, batchSize, "batchSize");
-    appendIntProp(builder, batchIntervalMillis, "batchInterval");
-    appendIntProp(builder, maximumQueueMemory, "maximumQueueMemory");
-    appendIntProp(builder, dispatcherThreads, "dispatcherThreads");
-    appendBoolProp(builder, isPersistenceEnabled, "isPersistenceEnabled");
-    appendStrProp(builder, diskStoreName, "diskStoreName");
-    appendBoolProp(builder, diskSynchronous, "diskSynchronous");
-
-    builder.append("]");
-    return builder.toString();
-  }
-
-  private void appendStrProp(StringBuilder builder, String value, String name) {
-    if (value != null) {
-      builder.append(name + "=");
-      builder.append(value);
-      builder.append(", ");
-    }
-  }
-
-  private void appendIntProp(StringBuilder builder, int value, String name) {
-    if (value > -1) {
-      builder.append(name + "=");
-      builder.append(value);
-      builder.append(", ");
-    }
-  }
-  
-  private void appendBoolProp(StringBuilder builder, boolean value, String name) {
-    builder.append(name + "=");
-    builder.append(value);
-    builder.append(", ");
-  }
-
-  @Override
-  public HDFSStoreMutator createHdfsStoreMutator() {
-    // as part of alter execution, the hdfs store will replace the config holder
-    // completely. Hence a mutator at the config holder is not needed.
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public HDFSStore alter(HDFSStoreMutator mutator) {
-    // as part of alter execution, the hdfs store will replace the config holder
-    // completely. Hence a mutator at the config holder is not needed.
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public String getDiskStoreName() {
-    return this.diskStoreName;
-  }
-  @Override
-  public HDFSStoreFactory setDiskStoreName(String name) {
-    this.diskStoreName = name;
-    return this;
-  }
-
-  @Override
-  public int getBatchInterval() {
-    return this.batchIntervalMillis;
-  }
-  @Override
-  public HDFSStoreFactory setBatchInterval(int intervalMillis){
-    this.batchIntervalMillis = intervalMillis;
-    return this;
-  }
-  
-  @Override
-  public boolean getBufferPersistent() {
-    return isPersistenceEnabled;
-  }
-  @Override
-  public HDFSStoreFactory setBufferPersistent(boolean isPersistent) {
-    this.isPersistenceEnabled = isPersistent;
-    return this;
-  }
-
-  @Override
-  public int getDispatcherThreads() {
-    return dispatcherThreads;
-  }
-  @Override
-  public HDFSStoreFactory setDispatcherThreads(int dispatcherThreads) {
-    this.dispatcherThreads = dispatcherThreads;
-    return this;
-  }
-  
-  @Override
-  public int getMaxMemory() {
-    return this.maximumQueueMemory;
-  }
-  @Override
-  public HDFSStoreFactory setMaxMemory(int memory) {
-    this.maximumQueueMemory = memory;
-    return this;
-  }
-  
-  @Override
-  public int getBatchSize() {
-    return this.batchSize;
-  }
-  @Override
-  public HDFSStoreFactory setBatchSize(int size){
-    this.batchSize = size;
-    return this;
-  }
-  
-  @Override
-  public boolean getSynchronousDiskWrite() {
-    return this.diskSynchronous;
-  }
-  @Override
-  public HDFSStoreFactory setSynchronousDiskWrite(boolean isSynchronous) {
-    this.diskSynchronous = isSynchronous;
-    return this;
-  }
-}
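For orientation, the config holder above backed the fluent HDFSStoreFactory API that this commit removes. The following is a minimal sketch of how a store was typically configured through that API; the direct construction of HDFSStoreFactoryImpl, the helper class name, and all attribute values are illustrative assumptions, not taken from this commit.

import com.gemstone.gemfire.cache.Cache;
import com.gemstone.gemfire.cache.hdfs.HDFSStore;
import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl;

public class HdfsStoreConfigSketch {
  // 'cache' is assumed to be a live GemFire cache; the values below are arbitrary examples.
  static HDFSStore createExampleStore(Cache cache) {
    HDFSStoreFactory factory = new HDFSStoreFactoryImpl(cache);
    factory.setNameNodeURL("hdfs://namenode:8020")  // HDFS cluster that persists region data
           .setHomeDir("gemfire-data")              // base HDFS directory for this store
           .setBatchSize(32)                        // events per flush batch
           .setBatchInterval(60000)                 // flush interval, in milliseconds
           .setMinorCompaction(true)                // enable automatic minor compaction
           .setMajorCompactionInterval(720);        // major compaction interval, in minutes
    return factory.create("exampleStore");          // registers the store with the cache
  }
}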

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreCreation.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreCreation.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreCreation.java
deleted file mode 100644
index 9ecc5e3..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreCreation.java
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import com.gemstone.gemfire.GemFireConfigException;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.cache.hdfs.StoreExistsException;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-
-/**
- */
-public class HDFSStoreCreation implements HDFSStoreFactory {
-  protected HDFSStoreConfigHolder configHolder;
-  
-  public HDFSStoreCreation() {
-    this(null);
-  }
-
-  /**
-   * Copy constructor for HDFSStoreCreation
-   * @param config configuration source for creating this instance 
-   */
-  public HDFSStoreCreation(HDFSStoreCreation config) {
-    this.configHolder = new HDFSStoreConfigHolder(config == null ? null : config.configHolder);
-  }
-
-  @Override
-  public HDFSStoreFactory setName(String name) {
-    configHolder.setName(name);
-    return this;
-  }
-
-  @Override
-  public HDFSStoreFactory setNameNodeURL(String namenodeURL) {
-    configHolder.setNameNodeURL(namenodeURL);
-    return this;
-  }
-
-  @Override
-  public HDFSStoreFactory setHomeDir(String homeDir) {
-    configHolder.setHomeDir(homeDir);
-    return this;
-  }
-
-  @Override
-  public HDFSStoreFactory setHDFSClientConfigFile(String clientConfigFile) {
-    configHolder.setHDFSClientConfigFile(clientConfigFile);
-    return this;
-  }
-  
-  @Override
-  public HDFSStoreFactory setBlockCacheSize(float percentage) {
-    configHolder.setBlockCacheSize(percentage);
-    return this;
-  }
-  
-  @Override
-  public HDFSStoreFactory setWriteOnlyFileRolloverSize(int maxFileSize) {
-    configHolder.setWriteOnlyFileRolloverSize(maxFileSize);
-    return this;
-  }
-
-  @Override
-  public HDFSStoreFactory setWriteOnlyFileRolloverInterval(int count) {
-    configHolder.setWriteOnlyFileRolloverInterval(count);
-    return this;
-  }
-
-  @Override
-  public HDFSStoreFactory setMinorCompaction(boolean auto) {
-    configHolder.setMinorCompaction(auto);
-    return this;
-  }
-  
-  @Override
-  public HDFSStoreFactory setMinorCompactionThreads(int count) {
-    configHolder.setMinorCompactionThreads(count);
-    return this;
-  }
-
-  @Override
-  public HDFSStoreFactory setMajorCompaction(boolean auto) {
-    configHolder.setMajorCompaction(auto);
-    return this;
-  }
-
-  @Override
-  public HDFSStoreFactory setMajorCompactionInterval(int count) {
-    configHolder.setMajorCompactionInterval(count);
-    return this;
-  }
-
-  @Override
-  public HDFSStoreFactory setMajorCompactionThreads(int count) {
-    configHolder.setMajorCompactionThreads(count);
-    return this;
-  }
-
-  @Override
-  public HDFSStoreFactory setInputFileSizeMax(int size) {
-    configHolder.setInputFileSizeMax(size);
-    return this;
-  }
-
-  @Override
-  public HDFSStoreFactory setInputFileCountMin(int count) {
-    configHolder.setInputFileCountMin(count);
-    return this;
-  }
-
-  @Override
-  public HDFSStoreFactory setInputFileCountMax(int count) {
-    configHolder.setInputFileCountMax(count);
-    return this;
-  }
-
-  @Override
-  public HDFSStoreFactory setPurgeInterval(int interval) {
-    configHolder.setPurgeInterval(interval);
-    return this;
-  }
-
-  @Override
-  public HDFSStoreFactory setDiskStoreName(String name) {
-    configHolder.setDiskStoreName(name);
-    return this;
-  }
-
-  @Override
-  public HDFSStoreFactory setMaxMemory(int memory) {
-    configHolder.setMaxMemory(memory);
-    return this;
-  }
-
-  @Override
-  public HDFSStoreFactory setBatchInterval(int intervalMillis) {
-    configHolder.setBatchInterval(intervalMillis);
-    return this;
-  }
-
-  @Override
-  public HDFSStoreFactory setBatchSize(int size) {
-    configHolder.setBatchSize(size);
-    return this;
-  }
-
-  @Override
-  public HDFSStoreFactory setBufferPersistent(boolean isPersistent) {
-    configHolder.setBufferPersistent(isPersistent);
-    return this;
-  }
-
-  @Override
-  public HDFSStoreFactory setSynchronousDiskWrite(boolean isSynchronous) {
-    configHolder.setSynchronousDiskWrite(isSynchronous);
-    return this;
-  }
-
-  @Override
-  public HDFSStoreFactory setDispatcherThreads(int dispatcherThreads) {
-    configHolder.setDispatcherThreads(dispatcherThreads);
-    return this;
-  }
-  
-  /**
-   * This method should not be called on this class.
-   * @see HDFSStoreFactory#create(String)
-   */
-  @Override
-  public HDFSStore create(String name) throws GemFireConfigException,
-      StoreExistsException {
-    throw new UnsupportedOperationException();
-  }
-
-  public static void assertIsPositive(String name, int count) {
-    if (count < 1) {
-      throw new IllegalArgumentException(
-          LocalizedStrings.DiskWriteAttributesImpl_0_HAS_TO_BE_POSITIVE_NUMBER_AND_THE_VALUE_GIVEN_1_IS_NOT_ACCEPTABLE
-              .toLocalizedString(new Object[] { name, count }));
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreFactoryImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreFactoryImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreFactoryImpl.java
deleted file mode 100644
index 749f01c..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreFactoryImpl.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import com.gemstone.gemfire.GemFireConfigException;
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.StoreExistsException;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-
-
-/**
- * Implementation of HDFSStoreFactory 
- * 
- */
-public class HDFSStoreFactoryImpl extends HDFSStoreCreation {
-  public static final String DEFAULT_ASYNC_QUEUE_ID_FOR_HDFS= "HDFS_QUEUE";
-  
-  private Cache cache;
-  
-  public HDFSStoreFactoryImpl(Cache cache) {
-    this(cache, null);
-  }
-  
-  public HDFSStoreFactoryImpl(Cache cache, HDFSStoreCreation config) {
-    super(config);
-    this.cache = cache;
-  }
-
-  @Override
-  public HDFSStore create(String name) {
-    if (name == null) {
-      throw new GemFireConfigException("HDFS store name not provided");
-    }
-    
-    this.configHolder.validate();
-    
-    HDFSStore result = null;
-    synchronized (this) {
-      if (this.cache instanceof GemFireCacheImpl) {
-        GemFireCacheImpl gfc = (GemFireCacheImpl) this.cache;
-        if (gfc.findHDFSStore(name) != null) {
-          throw new StoreExistsException(name);
-        }
-        
-        HDFSStoreImpl hsi = new HDFSStoreImpl(name, this.configHolder);
-        gfc.addHDFSStore(hsi);
-        result = hsi;
-      }
-    }
-    return result;
-  }
-
-  public static final String getEventQueueName(String regionPath) {
-    return HDFSStoreFactoryImpl.DEFAULT_ASYNC_QUEUE_ID_FOR_HDFS + "_"
-        + regionPath.replace('/', '_');
-  }
-
-  public HDFSStore getConfigView() {
-    return (HDFSStore) configHolder;
-  }
-}
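As a small worked example of the naming convention removed above: getEventQueueName prepends DEFAULT_ASYNC_QUEUE_ID_FOR_HDFS plus an underscore and replaces every '/' in the region path with '_'. The class name and region path below are illustrative.

import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl;

public class HdfsQueueNameSketch {
  public static void main(String[] args) {
    // "/orders" becomes "_orders", so the queue id is "HDFS_QUEUE" + "_" + "_orders"
    String queueId = HDFSStoreFactoryImpl.getEventQueueName("/orders");
    System.out.println(queueId); // prints HDFS_QUEUE__orders
  }
}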

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreImpl.java
deleted file mode 100644
index b5d56b6..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreImpl.java
+++ /dev/null
@@ -1,638 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URI;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalFileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.net.ConnectTimeoutException;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.logging.log4j.Logger;
-
-import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSCompactionManager;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSStoreDirector;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog.HoplogWriter;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.HoplogUtil;
-import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
-import com.gemstone.gemfire.internal.cache.control.HeapMemoryMonitor;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.HFileStoreStatistics;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
-import com.gemstone.gemfire.internal.util.SingletonCallable;
-import com.gemstone.gemfire.internal.util.SingletonValue;
-import com.gemstone.gemfire.internal.util.SingletonValue.SingletonBuilder;
-
-/**
- * Represents an HDFS based persistent store for region data.
- * 
- */
-public class HDFSStoreImpl implements HDFSStore {
-  
-  private volatile HDFSStoreConfigHolder configHolder; 
-  
-  private final SingletonValue<FileSystem> fs;
-
-  /**
-   * Used to make sure that only one thread creates the writer at a time. This prevents the dispatcher
-   * threads from cascading the Connection lock in the DFS client; see bug 51195.
-   */
-  private final SingletonCallable<HoplogWriter> singletonWriter = new SingletonCallable<HoplogWriter>();
-
-  private final HFileStoreStatistics stats;
-  private final BlockCache blockCache;
-
-  private static HashSet<String> secureNameNodes = new HashSet<String>();
-  
-  private final boolean PERFORM_SECURE_HDFS_CHECK = Boolean.getBoolean(HoplogConfig.PERFORM_SECURE_HDFS_CHECK_PROP);
-  private static final Logger logger = LogService.getLogger();
-  protected final String logPrefix;
-  
-  static {
-    HdfsConfiguration.init();
-  }
-  
-  public HDFSStoreImpl(String name, final HDFSStore config) {
-    this.configHolder = new HDFSStoreConfigHolder(config);
-    configHolder.setName(name);
-
-    this.logPrefix = "<" + "HdfsStore:" + name + "> ";
-
-    stats = new HFileStoreStatistics(InternalDistributedSystem.getAnyInstance(), "HDFSStoreStatistics", name);
-
-    final Configuration hconf = new Configuration();
-        
-    // Set the block cache size.
-    // Disable the static block cache. We keep our own cache on the HDFS Store
-    // hconf.setFloat("hfile.block.cache.size", 0f);
-    if (this.getBlockCacheSize() != 0) {
-      long cacheSize = (long) (HeapMemoryMonitor.getTenuredPoolMaxMemory() * this.getBlockCacheSize() / 100);
-
-      // TODO use an off heap block cache if we're using off heap memory?
-      // See CacheConfig.instantiateBlockCache.
-      // According to Anthony, the off heap block cache is still
-      // experimental. Our own off heap cache might be a better bet.
-//      this.blockCache = new LruBlockCache(cacheSize,
-//          StoreFile.DEFAULT_BLOCKSIZE_SMALL, hconf, HFileSortedOplogFactory.convertStatistics(stats));
-      this.blockCache = new LruBlockCache(cacheSize, StoreFile.DEFAULT_BLOCKSIZE_SMALL, hconf);
-    } else {
-      this.blockCache = null;
-    }
-    
-    final String clientFile = config.getHDFSClientConfigFile();
-    fs = new SingletonValue<FileSystem>(new SingletonBuilder<FileSystem>() {
-      @Override
-      public FileSystem create() throws IOException {
-        return createFileSystem(hconf, clientFile, false);
-      }
-
-      @Override
-      public void postCreate() {
-      }
-      
-      @Override
-      public void createInProgress() {
-      }
-    });
-    
-    FileSystem fileSystem = null;
-    try {
-      fileSystem = fs.get();
-    } catch (Throwable ex) {
-      throw new HDFSIOException(ex.getMessage(),ex);
-    }    
-    //HDFSCompactionConfig has already been initialized
-    long cleanUpIntervalMillis = getPurgeInterval() * 60 * 1000;
-    Path cleanUpIntervalPath = new Path(getHomeDir(), HoplogConfig.CLEAN_UP_INTERVAL_FILE_NAME);
-    HoplogUtil.exposeCleanupIntervalMillis(fileSystem, cleanUpIntervalPath, cleanUpIntervalMillis);
-  }
-  
-  /**
-   * Creates a new file system every time.  
-   */
-  public FileSystem createFileSystem() {
-    Configuration hconf = new Configuration();
-    try {
-      return createFileSystem(hconf, this.getHDFSClientConfigFile(), true);
-    } catch (Throwable ex) {
-      throw new HDFSIOException(ex.getMessage(),ex);
-    }
-  }
-  
-  private FileSystem createFileSystem(Configuration hconf, String configFile, boolean forceNew) throws IOException {
-    FileSystem filesystem = null; 
-    
-      // load hdfs client config file if specified. The path is on local file
-      // system
-      if (configFile != null) {
-        if (logger.isDebugEnabled()) {
-          logger.debug("{}Adding resource config file to hdfs configuration:" + configFile, logPrefix);
-        }
-        hconf.addResource(new Path(configFile));
-        
-        if (! new File(configFile).exists()) {
-          logger.warn(LocalizedMessage.create(LocalizedStrings.HOPLOG_HDFS_CLIENT_CONFIG_FILE_ABSENT, configFile));
-        }
-      }
-      
-      // This setting disables shutdown hook for file system object. Shutdown
-      // hook may cause the FS object to close before the cache or store, causing
-      // unpredictable behavior. This setting is provided for GFXD-like server
-      // use cases where FS close is managed by a server. This setting is not
-      // supported by old versions of hadoop, HADOOP-4829
-      hconf.setBoolean("fs.automatic.close", false);
-      
-      // Hadoop has a configuration parameter io.serializations that is a list of serialization 
-      // classes which can be used for obtaining serializers and deserializers. This parameter 
-      // by default contains avro classes. When a sequence file is created, it calls 
-      // SerializationFactory.getSerializer(keyclass). This internally creates objects using 
-      // reflection of all the classes that were part of io.serializations. But since there is
-      // no avro class available, it throws an exception.
-      // Before creating a sequenceFile, override the io.serializations parameter and pass only the classes 
-      // that are important to us. 
-      hconf.setStrings("io.serializations",
-          new String[]{"org.apache.hadoop.io.serializer.WritableSerialization"});
-      // create writer
-
-      SchemaMetrics.configureGlobally(hconf);
-      
-      String nameNodeURL = null;
-      if ((nameNodeURL = getNameNodeURL()) == null) {
-          nameNodeURL = hconf.get("fs.default.name");
-      }
-      
-      URI namenodeURI = URI.create(nameNodeURL);
-    
-    //if (! GemFireCacheImpl.getExisting().isHadoopGfxdLonerMode()) {
-      String authType = hconf.get("hadoop.security.authentication");
-      
-      //The following code handles Gemfire XD with secure HDFS
-      //A static set is used to cache all known secure HDFS NameNode urls.
-      UserGroupInformation.setConfiguration(hconf);
-
-      //Compare authentication method ignoring case to keep GFXD compliant with future versions.
-      //At least version 2.0.2 starts complaining if the string "kerberos" is not all lower case.
-      //However, the current version of hadoop seems to accept the authType in any case.
-      if (authType.equalsIgnoreCase("kerberos")) {
-        
-        String principal = hconf.get(HoplogConfig.KERBEROS_PRINCIPAL);
-        String keyTab = hconf.get(HoplogConfig.KERBEROS_KEYTAB_FILE);
-       
-        if (!PERFORM_SECURE_HDFS_CHECK) {
-          if (logger.isDebugEnabled())
-            logger.debug("{}Ignore secure hdfs check", logPrefix);
-        } else {
-          if (!secureNameNodes.contains(nameNodeURL)) {
-            if (logger.isDebugEnabled())
-              logger.debug("{}Executing secure hdfs check", logPrefix);
-             try{
-              filesystem = FileSystem.newInstance(namenodeURI, hconf);
-              //Make sure no IOExceptions are generated when accessing insecure HDFS. 
-              filesystem.listFiles(new Path("/"),false);
-              throw new HDFSIOException("Gemfire XD HDFS client and HDFS cluster security levels do not match. The configured HDFS Namenode is not secured.");
-             } catch (IOException ex) {
-               secureNameNodes.add(nameNodeURL);
-             } finally {
-             //Close filesystem to avoid resource leak
-               if(filesystem != null) {
-                 closeFileSystemIgnoreError(filesystem);
-               }
-             }
-          }
-        }
-
-        // check to ensure the namenode principal is defined
-        String nameNodePrincipal = hconf.get("dfs.namenode.kerberos.principal");
-        if (nameNodePrincipal == null) {
-          throw new IOException(LocalizedStrings.GF_KERBEROS_NAMENODE_PRINCIPAL_UNDEF.toLocalizedString());
-        }
-        
-        // ok, the user specified a gfxd principal so we will try to login
-        if (principal != null) {
-          //If NameNode principal is the same as Gemfire XD principal, there is a 
-          //potential security hole
-          String regex = "[/@]";
-          if (nameNodePrincipal != null) {
-            String HDFSUser = nameNodePrincipal.split(regex)[0];
-            String GFXDUser = principal.split(regex)[0];
-            if (HDFSUser.equals(GFXDUser)) {
-              logger.warn(LocalizedMessage.create(LocalizedStrings.HDFS_USER_IS_SAME_AS_GF_USER, GFXDUser));
-            }
-          }
-          
-          // a keytab must exist if the user specifies a principal
-          if (keyTab == null) {
-            throw new IOException(LocalizedStrings.GF_KERBEROS_KEYTAB_UNDEF.toLocalizedString());
-          }
-          
-          // the keytab must exist as well
-          File f = new File(keyTab);
-          if (!f.exists()) {
-            throw new FileNotFoundException(LocalizedStrings.GF_KERBEROS_KEYTAB_FILE_ABSENT.toLocalizedString(f.getAbsolutePath()));
-          }
-
-          //Authenticate Gemfire XD principal to Kerberos KDC using Gemfire XD keytab file
-          String principalWithValidHost = SecurityUtil.getServerPrincipal(principal, "");
-          UserGroupInformation.loginUserFromKeytab(principalWithValidHost, keyTab);
-        } else {
-          logger.warn(LocalizedMessage.create(LocalizedStrings.GF_KERBEROS_PRINCIPAL_UNDEF));
-        }
-      }
-    //}
-
-    filesystem = getFileSystemFactory().create(namenodeURI, hconf, forceNew);
-    
-    if (logger.isDebugEnabled()) {
-      logger.debug("{}Initialized FileSystem linked to " + filesystem.getUri()
-          + " " + filesystem.hashCode(), logPrefix);
-    }
-    return filesystem;
-  }
-
-  public FileSystem getFileSystem() throws IOException {
-    return fs.get();
-  }
-  
-  public FileSystem getCachedFileSystem() {
-    return fs.getCachedValue();
-  }
-
-  public SingletonCallable<HoplogWriter> getSingletonWriter() {
-    return this.singletonWriter;
-  }
-
-  private final SingletonCallable<Boolean> fsExists = new SingletonCallable<Boolean>();
-
-  public boolean checkFileSystemExists() throws IOException {
-    try {
-      return fsExists.runSerially(new Callable<Boolean>() {
-        @Override
-        public Boolean call() throws Exception {
-          FileSystem fileSystem = getCachedFileSystem();
-          if (fileSystem == null) {
-            return false;
-          }
-          return fileSystem.exists(new Path("/"));
-        }
-      });
-    } catch (Exception e) {
-      if (e instanceof IOException) {
-        throw (IOException)e;
-      }
-      throw new IOException(e);
-    }
-  }
-
-  /**
-   * This method executes a query on the namenode. If the query succeeds, the FS
-   * instance is healthy. If it fails, the old instance is closed and a new
-   * instance is created.
-   */
-  public void checkAndClearFileSystem() {
-    FileSystem fileSystem = getCachedFileSystem();
-    
-    if (fileSystem != null) {
-      if (logger.isDebugEnabled()) {
-        logger.debug("{}Checking file system at " + fileSystem.getUri(), logPrefix);
-      }
-      try {
-        checkFileSystemExists();
-        if (logger.isDebugEnabled()) {
-          logger.debug("{}FS client is ok: " + fileSystem.getUri() + " "
-              + fileSystem.hashCode(), logPrefix);
-        }
-        return;
-      } catch (ConnectTimeoutException e) {
-        if (logger.isDebugEnabled()) {
-          logger.debug("{}Hdfs unreachable, FS client is ok: "
-              + fileSystem.getUri() + " " + fileSystem.hashCode(), logPrefix);
-        }
-        return;
-      } catch (IOException e) {
-        logger.debug("IOError in filesystem checkAndClear ", e);
-        
-        // The file system is closed or NN is not reachable. It is safest to
-        // create a new FS instance. If the NN continues to remain unavailable,
-        // all subsequent read/write request will cause HDFSIOException. This is
-        // similar to the way hbase manages failures. This has a drawback
-        // though. A network blip will result in all connections being
-        // recreated. However, trying to preserve the connections and waiting for
-        // FS to auto-recover is not deterministic.
-        if (e instanceof RemoteException) {
-          e = ((RemoteException) e).unwrapRemoteException();
-        }
-
-        logger.warn(LocalizedMessage.create(LocalizedStrings.HOPLOG_HDFS_UNREACHABLE,
-            fileSystem.getUri()), e);
-      }
-
-      // compare and clear FS container. The fs container needs to be reusable
-      boolean result = fs.clear(fileSystem, true);
-      if (!result) {
-        // the FS instance changed after this call was initiated. Check again
-        logger.debug("{}Failed to clear FS ! I am inconsistent so retrying ..", logPrefix);
-        checkAndClearFileSystem();
-      } else {
-        closeFileSystemIgnoreError(fileSystem);
-      }      
-    }
-  }
-
-  private void closeFileSystemIgnoreError(FileSystem fileSystem) {
-    if (fileSystem == null) {
-      logger.debug("{}Trying to close null file system", logPrefix);
-      return;
-    }
-
-    try {
-      if (logger.isDebugEnabled()) {
-        logger.debug("{}Closing file system at " + fileSystem.getUri() + " "
-            + fileSystem.hashCode(), logPrefix);
-      }
-      fileSystem.close();
-    } catch (Exception e) {
-      if (logger.isDebugEnabled()) {
-        logger.debug("Failed to close file system at " + fileSystem.getUri()
-            + " " + fileSystem.hashCode(), e);
-      }
-    }
-  }
-
-  public HFileStoreStatistics getStats() {
-    return stats;
-  }
-  
-  public BlockCache getBlockCache() {
-    return blockCache;
-  }
-
-  public void close() {
-    logger.debug("{}Closing file system: " + getName(), logPrefix);
-    stats.close();
-    blockCache.shutdown();
-    //Might want to clear the block cache, but it should be dereferenced.
-    
-    // release DDL hoplog organizer for this store. Also shutdown compaction
-    // threads. These two resources hold references to GemfireCacheImpl
-    // instance. Any error in releasing these resources is not critical and needs
-    // to be ignored.
-    try {
-      HDFSCompactionManager manager = HDFSCompactionManager.getInstance(this);
-      if (manager != null) {
-        manager.reset();
-      }
-    } catch (Exception e) {
-      logger.info(e);
-    }
-    
-    // once this store is closed, this store should not be used again
-    FileSystem fileSystem = fs.clear(false);
-    if (fileSystem != null) {
-      closeFileSystemIgnoreError(fileSystem);
-    }    
-  }
-  
-  /**
-   * Test hook to remove all of the contents of the folder
-   * for this HDFS store from HDFS.
-   * @throws IOException 
-   */
-  public void clearFolder() throws IOException {
-    getFileSystem().delete(new Path(getHomeDir()), true);
-  }
-  
-  @Override
-  public void destroy() {
-    Collection<String> regions = HDFSRegionDirector.getInstance().getRegionsInStore(this);
-    if(!regions.isEmpty()) {
-      throw new IllegalStateException("Cannot destroy a HDFS store that still contains regions: " + regions); 
-    }
-    close();
-    HDFSStoreDirector.getInstance().removeHDFSStore(this.getName());
-  }
-
-  @Override
-  public synchronized HDFSStore alter(HDFSStoreMutator mutator) {
-    if (logger.isDebugEnabled()) {
-      logger.debug("{}Altering hdfsStore " + this, logPrefix);
-      logger.debug("{}Mutator " + mutator, logPrefix);
-    }
-    HDFSStoreConfigHolder newHolder = new HDFSStoreConfigHolder(configHolder);
-    newHolder.copyFrom(mutator);
-    newHolder.validate();
-    HDFSStore oldStore = configHolder;
-    configHolder = newHolder;
-    if (logger.isDebugEnabled()) {
-      logger.debug("{}Resuult of Alter " + this, logPrefix);
-    }
-    return (HDFSStore) oldStore;
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder builder = new StringBuilder();
-    builder.append("HDFSStoreImpl [");
-    if (configHolder != null) {
-      builder.append("configHolder=");
-      builder.append(configHolder);
-    }
-    builder.append("]");
-    return builder.toString();
-  }
-
-  @Override
-  public String getName() {
-    return configHolder.getName();
-  }
-
-  @Override
-  public String getNameNodeURL() {
-    return configHolder.getNameNodeURL();
-  }
-
-  @Override
-  public String getHomeDir() {
-    return configHolder.getHomeDir();
-  }
-
-  @Override
-  public String getHDFSClientConfigFile() {
-    return configHolder.getHDFSClientConfigFile();
-  }
-
-  @Override
-  public float getBlockCacheSize() {
-    return configHolder.getBlockCacheSize();
-  }
-
-  @Override
-  public int getWriteOnlyFileRolloverSize() {
-    return configHolder.getWriteOnlyFileRolloverSize();
-  }
-
-  @Override
-  public int getWriteOnlyFileRolloverInterval() {
-    return configHolder.getWriteOnlyFileRolloverInterval();
-  }
-
-  @Override
-  public boolean getMinorCompaction() {
-    return configHolder.getMinorCompaction();
-  }
-
-  @Override
-  public int getMinorCompactionThreads() {
-    return configHolder.getMinorCompactionThreads();
-  }
-
-  @Override
-  public boolean getMajorCompaction() {
-    return configHolder.getMajorCompaction();
-  }
-
-  @Override
-  public int getMajorCompactionInterval() {
-    return configHolder.getMajorCompactionInterval();
-  }
-
-  @Override
-  public int getMajorCompactionThreads() {
-    return configHolder.getMajorCompactionThreads();
-  }
-
-
-  @Override
-  public int getInputFileSizeMax() {
-    return configHolder.getInputFileSizeMax();
-  }
-
-  @Override
-  public int getInputFileCountMin() {
-    return configHolder.getInputFileCountMin();
-  }
-
-  @Override
-  public int getInputFileCountMax() {
-    return configHolder.getInputFileCountMax();
-  }
-
-  @Override
-  public int getPurgeInterval() {
-    return configHolder.getPurgeInterval();
-  }
-
-  @Override
-  public String getDiskStoreName() {
-    return configHolder.getDiskStoreName();
-  }
-
-  @Override
-  public int getMaxMemory() {
-    return configHolder.getMaxMemory();
-  }
-
-  @Override
-  public int getBatchSize() {
-    return configHolder.getBatchSize();
-  }
-
-  @Override
-  public int getBatchInterval() {
-    return configHolder.getBatchInterval();
-  }
-
-  @Override
-  public boolean getBufferPersistent() {
-    return configHolder.getBufferPersistent();
-  }
-
-  @Override
-  public boolean getSynchronousDiskWrite() {
-    return configHolder.getSynchronousDiskWrite();
-  }
-
-  @Override
-  public int getDispatcherThreads() {
-    return configHolder.getDispatcherThreads();
-  }
-  
-  @Override
-  public HDFSStoreMutator createHdfsStoreMutator() {
-    return new HDFSStoreMutatorImpl();
-  }
-
-  public FileSystemFactory getFileSystemFactory() {
-    return new DistributedFileSystemFactory();
-  }
-
-  /*
-   * Factory to create HDFS file system instances
-   */
-  static public interface FileSystemFactory {
-    public FileSystem create(URI namenode, Configuration conf, boolean forceNew) throws IOException;
-  }
-
-  /*
-   * File system factory implementation for creating file system instances
-   * connected to a distributed HDFS cluster
-   */
-  public class DistributedFileSystemFactory implements FileSystemFactory {
-    private final boolean ALLOW_TEST_FILE_SYSTEM = Boolean.getBoolean(HoplogConfig.ALLOW_LOCAL_HDFS_PROP);
-    private final boolean USE_FS_CACHE = Boolean.getBoolean(HoplogConfig.USE_FS_CACHE);
-
-    @Override
-    public FileSystem create(URI nn, Configuration conf, boolean create) throws IOException {
-      FileSystem filesystem;
-
-      if (USE_FS_CACHE && !create) {
-        filesystem = FileSystem.get(nn, conf);
-      } else {
-        filesystem = FileSystem.newInstance(nn, conf);
-      }
-
-      if (filesystem instanceof LocalFileSystem && !ALLOW_TEST_FILE_SYSTEM) {
-        closeFileSystemIgnoreError(filesystem);
-        throw new IllegalStateException(
-            LocalizedStrings.HOPLOG_TRYING_TO_CREATE_STANDALONE_SYSTEM.toLocalizedString(getNameNodeURL()));
-      }
-
-      return filesystem;
-    }
-  }
-}
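The FileSystemFactory interface at the end of the removed HDFSStoreImpl was the seam for choosing how the store connected to HDFS (the default DistributedFileSystemFactory rejects a LocalFileSystem unless ALLOW_LOCAL_HDFS_PROP is set). A minimal sketch of an alternative factory follows; the class name and the choice to always bypass Hadoop's FileSystem cache are assumptions for illustration, and the removed getFileSystemFactory() would have had to be overridden to actually use it.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;

// Hypothetical factory that always returns a fresh, uncached FileSystem instance.
public class UncachedFileSystemFactory implements HDFSStoreImpl.FileSystemFactory {
  @Override
  public FileSystem create(URI namenode, Configuration conf, boolean forceNew) throws IOException {
    // FileSystem.newInstance bypasses Hadoop's internal FileSystem cache, so every
    // caller gets its own connection to the namenode regardless of forceNew.
    return FileSystem.newInstance(namenode, conf);
  }
}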

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreMutatorImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreMutatorImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreMutatorImpl.java
deleted file mode 100644
index 203e623..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreMutatorImpl.java
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-
-public class HDFSStoreMutatorImpl implements HDFSStoreMutator {
-  private HDFSStoreConfigHolder configHolder;
-  private Boolean autoCompact;
-  private Boolean autoMajorCompact;
-
-  public HDFSStoreMutatorImpl() {
-    configHolder = new HDFSStoreConfigHolder();
-    configHolder.resetDefaultValues();
-  }
-
-  public HDFSStoreMutatorImpl(HDFSStore store) {
-    configHolder = new HDFSStoreConfigHolder(store);
-  }
-  
-  public HDFSStoreMutator setWriteOnlyFileRolloverSize(int maxFileSize) {
-    configHolder.setWriteOnlyFileRolloverSize(maxFileSize);
-    return this;
-  }
-  @Override
-  public int getWriteOnlyFileRolloverSize() {
-    return configHolder.getWriteOnlyFileRolloverSize();
-  }
-
-  @Override
-  public HDFSStoreMutator setWriteOnlyFileRolloverInterval(int count) {
-    configHolder.setWriteOnlyFileRolloverInterval(count);
-    return this;
-  }
-  @Override
-  public int getWriteOnlyFileRolloverInterval() {
-    return configHolder.getWriteOnlyFileRolloverInterval();
-  }
-
-  @Override
-  public HDFSStoreMutator setMinorCompaction(boolean auto) {
-    autoCompact = Boolean.valueOf(auto);
-    configHolder.setMinorCompaction(auto);
-    return null;
-  }
-  @Override
-  public Boolean getMinorCompaction() {
-    return autoCompact;
-  }
-  
-  @Override
-  public HDFSStoreMutator setMinorCompactionThreads(int count) {
-    configHolder.setMinorCompactionThreads(count);
-    return this;
-  }
-  @Override
-  public int getMinorCompactionThreads() {
-    return configHolder.getMinorCompactionThreads();
-  }
-  
-  @Override
-  public HDFSStoreMutator setMajorCompaction(boolean auto) {
-    autoMajorCompact = Boolean.valueOf(auto);
-    configHolder.setMajorCompaction(auto);
-    return this;
-  }
-  @Override
-  public Boolean getMajorCompaction() {
-    return autoMajorCompact;
-  }
-
-  @Override
-  public HDFSStoreMutator setMajorCompactionInterval(int count) {
-    configHolder.setMajorCompactionInterval(count);
-    return this;
-  }
-  @Override
-  public int getMajorCompactionInterval() {
-    return configHolder.getMajorCompactionInterval();
-  }
-
-  @Override
-  public HDFSStoreMutator setMajorCompactionThreads(int count) {
-    configHolder.setMajorCompactionThreads(count);
-    return this;
-  }
-  @Override
-  public int getMajorCompactionThreads() {
-    return configHolder.getMajorCompactionThreads();
-  }
-
-  @Override
-  public HDFSStoreMutator setInputFileSizeMax(int size) {
-    configHolder.setInputFileSizeMax(size);
-    return this;
-  }
-  @Override
-  public int getInputFileSizeMax() {
-    return configHolder.getInputFileSizeMax();
-  }
-  
-  @Override
-  public HDFSStoreMutator setInputFileCountMin(int count) {
-    configHolder.setInputFileCountMin(count);
-    return this;
-  }
-  @Override
-  public int getInputFileCountMin() {
-    return configHolder.getInputFileCountMin();
-  }
-  
-  @Override
-  public HDFSStoreMutator setInputFileCountMax(int count) {
-    configHolder.setInputFileCountMax(count);
-    return this;
-  }
-  @Override
-  public int getInputFileCountMax() {
-    return configHolder.getInputFileCountMax();
-  }
-  
-  @Override
-  public HDFSStoreMutator setPurgeInterval(int interval) {
-    configHolder.setPurgeInterval(interval);
-    return this;
-  }
-  @Override
-  public int getPurgeInterval() {
-    return configHolder.getPurgeInterval();
-  }
-
-  @Override
-  public int getBatchSize() {
-    return configHolder.batchSize;
-  }
-  @Override
-  public HDFSStoreMutator setBatchSize(int size) {
-    configHolder.setBatchSize(size);
-    return this;
-  }
-
-  
-  @Override
-  public int getBatchInterval() {
-    return configHolder.batchIntervalMillis;
-  }
-  @Override
-  public HDFSStoreMutator setBatchInterval(int interval) {
-    configHolder.setBatchInterval(interval);
-    return this;
-  }
-    
-  public static void assertIsPositive(String name, int count) {
-    if (count < 1) {
-      throw new IllegalArgumentException(
-          LocalizedStrings.DiskWriteAttributesImpl_0_HAS_TO_BE_POSITIVE_NUMBER_AND_THE_VALUE_GIVEN_1_IS_NOT_ACCEPTABLE
-              .toLocalizedString(new Object[] { name, count }));
-    }
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder builder = new StringBuilder();
-    builder.append("HDFSStoreMutatorImpl [");
-    if (configHolder != null) {
-      builder.append("configHolder=");
-      builder.append(configHolder);
-      builder.append(", ");
-    }
-    if (autoCompact != null) {
-      builder.append("MinorCompaction=");
-      builder.append(autoCompact);
-      builder.append(", ");
-    }
-    if (getMajorCompaction() != null) {
-      builder.append("autoMajorCompaction=");
-      builder.append(getMajorCompaction());
-      builder.append(", ");
-    }
-    builder.append("]");
-    return builder.toString();
-  }
-}
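For context, this mutator was the argument type for alter(...) on an HDFS store: callers obtained a mutator from the store, changed only the attributes they cared about, and handed it back. A minimal sketch follows, assuming an existing HDFSStore; the method name and attribute values are illustrative.

import com.gemstone.gemfire.cache.hdfs.HDFSStore;
import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;

public class HdfsStoreAlterSketch {
  // 'store' is an existing HDFS store; only the attributes set on the mutator change.
  static HDFSStore shortenFlushInterval(HDFSStore store) {
    HDFSStoreMutator mutator = store.createHdfsStoreMutator();
    mutator.setBatchInterval(30000);       // new flush interval, in milliseconds
    mutator.setMinorCompactionThreads(4);  // raise minor compaction concurrency
    return store.alter(mutator);           // returns the previous configuration
  }
}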

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSWriteOnlyStoreEventListener.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSWriteOnlyStoreEventListener.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSWriteOnlyStoreEventListener.java
deleted file mode 100644
index 0298523..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSWriteOnlyStoreEventListener.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-
-import com.gemstone.gemfire.cache.CacheClosedException;
-import com.gemstone.gemfire.cache.asyncqueue.AsyncEvent;
-import com.gemstone.gemfire.cache.asyncqueue.AsyncEventListener;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer;
-import com.gemstone.gemfire.i18n.LogWriterI18n;
-import com.gemstone.gemfire.internal.cache.BucketRegion;
-import com.gemstone.gemfire.internal.cache.ForceReattemptException;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-import com.gemstone.gemfire.internal.cache.PrimaryBucketException;
-import com.gemstone.gemfire.internal.cache.execute.BucketMovedException;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-
-/**
- * Listener that persists events to a write only HDFS store
- *
- */
-public class HDFSWriteOnlyStoreEventListener implements
-    AsyncEventListener {
-
-  private final LogWriterI18n logger;
-  private volatile boolean senderStopped = false; 
-  private final FailureTracker failureTracker = new FailureTracker(10L, 60 * 1000L, 1.5f);
-  
-  
-  public HDFSWriteOnlyStoreEventListener(LogWriterI18n logger) {
-    this.logger = logger;
-  }
-  
-  @Override
-  public void close() {
-    senderStopped = true;
-  }
-
-  @Override
-  public boolean processEvents(List<AsyncEvent> events) {
-    if (Hoplog.NOP_WRITE) {
-      return true;
-    }
-
-    if (logger.fineEnabled())
-      logger.fine("HDFSWriteOnlyStoreEventListener: A total of " + events.size() + " events are sent from GemFire to persist on HDFS");
-    boolean success = false;
-    try {
-      failureTracker.sleepIfRetry();
-      HDFSGatewayEventImpl hdfsEvent = null;
-      int previousBucketId = -1;
-      BatchManager bm = null;
-      for (AsyncEvent asyncEvent : events) {
-        if (senderStopped){
-          if (logger.fineEnabled()) {
-            logger.fine("HDFSWriteOnlyStoreEventListener.processEvents: Cache is closing down. Ignoring the batch of data.");
-          }
-          return false;
-        }
-        hdfsEvent = (HDFSGatewayEventImpl)asyncEvent;
-        if (previousBucketId != hdfsEvent.getBucketId()){
-          if (previousBucketId != -1) 
-            persistBatch(bm, previousBucketId);
-          
-          previousBucketId = hdfsEvent.getBucketId();
-          bm = new BatchManager();
-        }
-        bm.addEvent(hdfsEvent);
-      }
-      try {
-        persistBatch(bm, hdfsEvent.getBucketId());
-      } catch (BucketMovedException e) {
-        logger.fine("Batch could not be written to HDFS as the bucket moved. bucket id: " + 
-            hdfsEvent.getBucketId() + " Exception: " + e);
-        return false;
-      }
-      success = true;
-    } catch (IOException e) {
-      logger.warning(LocalizedStrings.HOPLOG_FLUSH_FOR_BATCH_FAILED, e);
-      return false;
-    }
-    catch (ClassNotFoundException e) {
-      logger.warning(LocalizedStrings.HOPLOG_FLUSH_FOR_BATCH_FAILED, e);
-      return false;
-    }
-    catch (CacheClosedException e) {
-      // exit silently
-      if (logger.fineEnabled())
-        logger.fine(e);
-      return false;
-    } catch (ForceReattemptException e) {
-      if (logger.fineEnabled())
-        logger.fine(e);
-      return false;
-    } catch (InterruptedException e1) {
-      // TODO Auto-generated catch block
-      e1.printStackTrace();
-    } finally {
-      failureTracker.record(success);
-    }
-    return true;
-  }
-  
-  /**
-   * Persists batches of multiple regions specified by the batch manager
-   * 
-   */
-  private void persistBatch(BatchManager bm, int bucketId) throws IOException, ForceReattemptException {
-    Iterator<Map.Entry<LocalRegion,ArrayList<QueuedPersistentEvent>>> eventsPerRegion = 
-        bm.iterator();
-    HoplogOrganizer bucketOrganizer = null; 
-    while (eventsPerRegion.hasNext()) {
-      Map.Entry<LocalRegion, ArrayList<QueuedPersistentEvent>> eventsForARegion = eventsPerRegion.next();
-      bucketOrganizer = getOrganizer((PartitionedRegion) eventsForARegion.getKey(), bucketId);
-      // bucket organizer cannot be null. 
-      if (bucketOrganizer == null)
-        throw new BucketMovedException("Bucket moved. BucketID: " + bucketId + "  HdfsRegion: " +  eventsForARegion.getKey().getName());
-      bucketOrganizer.flush(eventsForARegion.getValue().iterator(), eventsForARegion.getValue().size());
-      if (logger.fineEnabled()) {
-        logger.fine("Batch written to HDFS of size " +  eventsForARegion.getValue().size() + 
-            " for region " + eventsForARegion.getKey());
-      }
-    }
-  }
-
-  private HoplogOrganizer getOrganizer(PartitionedRegion region, int bucketId) {
-    BucketRegion br = region.getDataStore().getLocalBucketById(bucketId);
-    if (br == null) {
-      // got rebalanced or something
-      throw new BucketMovedException("Bucket region is no longer available. BucketId: "+
-          bucketId + " HdfsRegion: " +  region.getName());
-    }
-
-    return br.getHoplogOrganizer();
-  }
-  
-  /**
-   * Sorts the events of multiple regions into per-region lists
-   *
-   */
-  private class BatchManager implements Iterable<Map.Entry<LocalRegion,ArrayList<QueuedPersistentEvent>>> {
-    private HashMap<LocalRegion, ArrayList<QueuedPersistentEvent>> regionBatches = 
-        new HashMap<LocalRegion, ArrayList<QueuedPersistentEvent>>();
-    
-    public void addEvent (HDFSGatewayEventImpl hdfsEvent) throws IOException, ClassNotFoundException {
-      LocalRegion region = (LocalRegion) hdfsEvent.getRegion();
-      ArrayList<QueuedPersistentEvent> regionList = regionBatches.get(region);
-      if (regionList == null) {
-        regionList = new ArrayList<QueuedPersistentEvent>();
-        regionBatches.put(region, regionList);
-      }
-      regionList.add(new UnsortedHDFSQueuePersistedEvent(hdfsEvent));
-    }
-
-    @Override
-    public Iterator<Map.Entry<LocalRegion,ArrayList<QueuedPersistentEvent>>> iterator() {
-      return regionBatches.entrySet().iterator();
-    }
-    
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HoplogListenerForRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HoplogListenerForRegion.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HoplogListenerForRegion.java
deleted file mode 100644
index c7ba23f..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HoplogListenerForRegion.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.CopyOnWriteArrayList;
-
-import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogListener;
-
-/**
- * Objects of this class need to be created for every region. These objects
- * listen to the oplog events and take appropriate action.
- *
- */
-public class HoplogListenerForRegion implements HoplogListener {
-
-  private List<HoplogListener> otherListeners = new CopyOnWriteArrayList<HoplogListener>();
-
-  public HoplogListenerForRegion() {
-    
-  }
-
-  @Override
-  public void hoplogCreated(String regionFolder, int bucketId,
-      Hoplog... oplogs) throws IOException {
-    for (HoplogListener listener : this.otherListeners) {
-      listener.hoplogCreated(regionFolder, bucketId, oplogs);
-    }
-  }
-
-  @Override
-  public void hoplogDeleted(String regionFolder, int bucketId,
-      Hoplog... oplogs) {
-    for (HoplogListener listener : this.otherListeners) {
-      try {
-        listener.hoplogDeleted(regionFolder, bucketId, oplogs);
-      } catch (IOException e) {
-        // TODO handle
-        throw new HDFSIOException(e.getLocalizedMessage(), e);
-      }
-    }
-  }
-
-  public void addListener(HoplogListener listener) {
-    this.otherListeners.add(listener);
-  }
-
-  @Override
-  public void compactionCompleted(String region, int bucket, boolean isMajor) {
-    for (HoplogListener listener : this.otherListeners) {
-      listener.compactionCompleted(region, bucket, isMajor);
-    }
-  }
-}
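The removed wrapper above simply fanned oplog events out to registered HoplogListeners. For illustration only, a listener that merely counts file events might have looked like the sketch below (a hypothetical class, registered through addListener on the wrapper); the method signatures follow the overrides shown above.

import java.util.concurrent.atomic.AtomicLong;

import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog;
import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogListener;

// Hypothetical listener that keeps simple counters for hoplog file events.
public class CountingHoplogListener implements HoplogListener {
  private final AtomicLong created = new AtomicLong();
  private final AtomicLong deleted = new AtomicLong();

  @Override
  public void hoplogCreated(String regionFolder, int bucketId, Hoplog... oplogs) {
    created.addAndGet(oplogs.length);
  }

  @Override
  public void hoplogDeleted(String regionFolder, int bucketId, Hoplog... oplogs) {
    deleted.addAndGet(oplogs.length);
  }

  @Override
  public void compactionCompleted(String region, int bucket, boolean isMajor) {
    // Compactions do not change the created/deleted counts tracked here.
  }
}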



[26/63] [abbrv] incubator-geode git commit: GEODE-1262: Removed VM5-VM7 in AsyncEventQueueTestBase

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ecbbf766/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventQueueStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventQueueStatsDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventQueueStatsDUnitTest.java
index cd11ae9..8972595 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventQueueStatsDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventQueueStatsDUnitTest.java
@@ -37,37 +37,37 @@ public class AsyncEventQueueStatsDUnitTest extends AsyncEventQueueTestBase {
   public void testReplicatedSerialPropagation() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, false, false, null, false ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, false, false, null, false ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm3.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, false, false, null, false ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, false, false, null, false ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
         1000 ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1000 ));// primary sender
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1000 ));// primary sender
     Wait.pause(2000);//give some time for system to become stable
     
-    vm4.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueStats(
+    vm1.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueStats(
         "ln", 0, 1000, 1000, 1000 ));
-    vm4.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueBatchStats( "ln", 10 ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueStats(
+    vm1.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueBatchStats( "ln", 10 ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueStats(
         "ln", 0, 1000, 0, 0 ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueBatchStats( "ln", 0 ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueBatchStats( "ln", 0 ));
   }
   
   /**
@@ -76,53 +76,53 @@ public class AsyncEventQueueStatsDUnitTest extends AsyncEventQueueTestBase {
   public void testAsyncStatsTwoListeners() throws Exception {
     Integer lnPort = createFirstLocatorWithDSId(1);
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createCache(lnPort ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createCache(lnPort ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createCache(lnPort ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createCache(lnPort ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createCache(lnPort ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createCache(lnPort ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createCache(lnPort ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln1",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln1",
       false, 100, 100, false, false, null, false ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln1",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln1",
       false, 100, 100, false, false, null, false ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln1",
+    vm3.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln1",
       false, 100, 100, false, false, null, false ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln1",
+    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln1",
       false, 100, 100, false, false, null, false ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln2",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln2",
       false, 100, 100, false, false, null, false ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln2",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln2",
       false, 100, 100, false, false, null, false ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln2",
+    vm3.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln2",
       false, 100, 100, false, false, null, false ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln2",
+    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln2",
       false, 100, 100, false, false, null, false ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln1,ln2", isOffHeap() ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln1,ln2", isOffHeap() ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln1,ln2", isOffHeap() ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln1,ln2", isOffHeap() ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln1,ln2", isOffHeap() ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln1,ln2", isOffHeap() ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln1,ln2", isOffHeap() ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
         1000 ));
     
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln1", 1000 ));
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln2", 1000 ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln1", 1000 ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln2", 1000 ));
     Wait.pause(2000);//give some time for system to become stable
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueStats(
+    vm1.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueStats(
         "ln1", 0, 1000, 1000, 1000 ));
-    vm4.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueBatchStats( "ln1", 10 ));
-    vm4.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueStats(
+    vm1.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueBatchStats( "ln1", 10 ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueStats(
         "ln2", 0, 1000, 1000, 1000 ));
-    vm4.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueBatchStats( "ln2", 10 ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueStats(
+    vm1.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueBatchStats( "ln2", 10 ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueStats(
         "ln1", 0, 1000, 0, 0 ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueBatchStats( "ln1", 0 ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueStats(
+    vm2.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueBatchStats( "ln1", 0 ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueStats(
         "ln2", 0, 1000, 0, 0 ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueBatchStats( "ln2", 0 ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueBatchStats( "ln2", 0 ));
   }
   
   /**
@@ -131,28 +131,28 @@ public class AsyncEventQueueStatsDUnitTest extends AsyncEventQueueTestBase {
   public void testReplicatedSerialPropagationHA() throws Exception {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createCache(lnPort ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createCache(lnPort ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createCache(lnPort ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createCache(lnPort ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createCache(lnPort ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createCache(lnPort ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createCache(lnPort ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
       false, 100, 100, false, false, null, false ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
       false, 100, 100, false, false, null, false ));
     
-    vm4.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
+    vm1.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
         getTestMethodName() + "_RR", "ln", isOffHeap() ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
+    vm2.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
         getTestMethodName() + "_RR", "ln", isOffHeap() ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
+    vm3.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
         getTestMethodName() + "_RR", "ln", isOffHeap() ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
+    vm4.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
         getTestMethodName() + "_RR", "ln", isOffHeap() ));
     
-    AsyncInvocation inv1 = vm5.invokeAsync(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR", 10000 ));
+    AsyncInvocation inv1 = vm2.invokeAsync(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR", 10000 ));
     Wait.pause(2000);
-    AsyncInvocation inv2 = vm4.invokeAsync(() -> AsyncEventQueueTestBase.killAsyncEventQueue( "ln" ));
+    AsyncInvocation inv2 = vm1.invokeAsync(() -> AsyncEventQueueTestBase.killAsyncEventQueue( "ln" ));
     Boolean isKilled = Boolean.FALSE;
     try {
       isKilled = (Boolean)inv2.getResult();
@@ -162,13 +162,13 @@ public class AsyncEventQueueStatsDUnitTest extends AsyncEventQueueTestBase {
     }
     AsyncInvocation inv3 = null; 
     if(!isKilled){
-      inv3 = vm5.invokeAsync(() -> AsyncEventQueueTestBase.killSender( "ln" ));
+      inv3 = vm2.invokeAsync(() -> AsyncEventQueueTestBase.killSender( "ln" ));
       inv3.join();
     }
     inv1.join();
     inv2.join();
     Wait.pause(2000);//give some time for system to become stable
-    vm5.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueStats_Failover("ln", 10000));
+    vm2.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueStats_Failover("ln", 10000));
   }
 
   /**
@@ -177,52 +177,52 @@ public class AsyncEventQueueStatsDUnitTest extends AsyncEventQueueTestBase {
   public void testReplicatedSerialPropagationUNPorcessedEvents() throws Exception {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
       false, 100, 100, false, false, null, false ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
       false, 100, 100, false, false, null, false ));
 
     //create one RR (RR_1) on local site
-    vm4.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
+    vm1.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
         getTestMethodName() + "_RR_1", "ln", isOffHeap() ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
+    vm2.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
         getTestMethodName() + "_RR_1", "ln", isOffHeap() ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
+    vm3.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
         getTestMethodName() + "_RR_1", "ln", isOffHeap() ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
+    vm4.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
         getTestMethodName() + "_RR_1", "ln", isOffHeap() ));
 
     //create another RR (RR_2) on local site
-    vm4.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
+    vm1.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
         getTestMethodName() + "_RR_2", "ln", isOffHeap() ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
+    vm2.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
         getTestMethodName() + "_RR_2", "ln", isOffHeap() ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
+    vm3.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
         getTestMethodName() + "_RR_2", "ln", isOffHeap() ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
+    vm4.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
         getTestMethodName() + "_RR_2", "ln", isOffHeap() ));
     
     //start puts in RR_1 in another thread
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR_1", 1000 ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR_1", 1000 ));
     //do puts in RR_2 in main thread
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPutsFrom( getTestMethodName() + "_RR_2", 1000, 1500 ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPutsFrom( getTestMethodName() + "_RR_2", 1000, 1500 ));
     
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1500 ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1500 ));
         
     Wait.pause(2000);//give some time for system to become stable
-    vm4.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueStats("ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueStats("ln",
       0, 1500, 1500, 1500));
-    vm4.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueUnprocessedStats("ln", 0));
+    vm1.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueUnprocessedStats("ln", 0));
     
     
-    vm5.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueStats("ln",
+    vm2.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueStats("ln",
       0, 1500, 0, 0));
-    vm5.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueUnprocessedStats("ln", 1500));
+    vm2.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueUnprocessedStats("ln", 1500));
   }
   
   /**
@@ -231,20 +231,20 @@ public class AsyncEventQueueStatsDUnitTest extends AsyncEventQueueTestBase {
   public void testSerialPropagationConflation() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
     
-    vm4
+    vm1
         .invoke(() -> AsyncEventQueueTestBase.pauseAsyncEventQueue( "ln" ));
     //pause at least for the batchTimeInterval to make sure that the AsyncEventQueue is actually paused
     Wait.pause(2000);
@@ -255,8 +255,8 @@ public class AsyncEventQueueStatsDUnitTest extends AsyncEventQueueTestBase {
       keyValues.put(i, i);
     }
     
-    vm4.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue( getTestMethodName() + "_RR", keyValues ));
-    vm4.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize( "ln", keyValues.size() ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue( getTestMethodName() + "_RR", keyValues ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize( "ln", keyValues.size() ));
     
     for(int i=0;i<500;i++) {
       updateKeyValues.put(i, i+"_updated");
@@ -264,22 +264,22 @@ public class AsyncEventQueueStatsDUnitTest extends AsyncEventQueueTestBase {
     
     // Put the update events and check the queue size.
     // There should be no conflation with the previous create events.
-    vm4.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue( getTestMethodName() + "_RR", updateKeyValues ));    
-    vm4.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize( "ln", keyValues.size() + updateKeyValues.size() ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue( getTestMethodName() + "_RR", updateKeyValues ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize( "ln", keyValues.size() + updateKeyValues.size() ));
     
     // Put the update events again and check the queue size.
     // There should be conflation with the previous update events.
-    vm4.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue( getTestMethodName() + "_RR", updateKeyValues ));    
-    vm4.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize( "ln", keyValues.size() + updateKeyValues.size() ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue( getTestMethodName() + "_RR", updateKeyValues ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize( "ln", keyValues.size() + updateKeyValues.size() ));
     
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));
   
-    vm4.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1000 ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1000 ));
     
     Wait.pause(2000);// give some time for system to become stable
-    vm4.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueStats(
+    vm1.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueStats(
         "ln", 0, 2000, 2000, 1000 ));
-    vm4.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueConflatedStats( "ln", 500 ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueConflatedStats( "ln", 500 ));
   }
 }
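
After the renumbering, every test in this file builds the same topology: a locator on vm0 and four cache members on vm1-vm4, each hosting the "ln" async event queue and the replicated region before puts and stats checks run. A compact way to read that setup is the loop below; it is only a sketch of the per-VM invocations the tests spell out individually, and it assumes it runs inside a test method of AsyncEventQueueTestBase so vm0-vm4, getTestMethodName() and isOffHeap() are inherited members.

// Sketch only: the shared topology of these tests, written as a loop
// over the four cache members instead of four copies of each call.
Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));

for (VM member : new VM[] { vm1, vm2, vm3, vm4 }) {
  member.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
  member.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
      false, 100, 100, false, false, null, false ));
  member.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue(
      getTestMethodName() + "_RR", "ln", isOffHeap() ));
}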

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ecbbf766/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueDUnitTest.java
index ff11577..3e52393 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueDUnitTest.java
@@ -36,35 +36,35 @@ public class ConcurrentAsyncEventQueueDUnitTest extends AsyncEventQueueTestBase
   public void testConcurrentSerialAsyncEventQueueAttributes() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 150, true, true, "testDS", true, 5, OrderPolicy.THREAD ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateConcurrentAsyncEventQueueAttributes( "ln", 100, 150, AsyncEventQueueFactoryImpl.DEFAULT_BATCH_TIME_INTERVAL, true, "testDS", true, true, 5, OrderPolicy.THREAD ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateConcurrentAsyncEventQueueAttributes( "ln", 100, 150, AsyncEventQueueFactoryImpl.DEFAULT_BATCH_TIME_INTERVAL, true, "testDS", true, true, 5, OrderPolicy.THREAD ));
   }
   
  
   public void testConcurrentParallelAsyncEventQueueAttributesOrderPolicyKey() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         true, 100, 150, true, true, "testDS", true, 5, OrderPolicy.KEY ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateConcurrentAsyncEventQueueAttributes( "ln", 100, 150, AsyncEventQueueFactoryImpl.DEFAULT_BATCH_TIME_INTERVAL, true, "testDS", true, true, 5, OrderPolicy.KEY ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateConcurrentAsyncEventQueueAttributes( "ln", 100, 150, AsyncEventQueueFactoryImpl.DEFAULT_BATCH_TIME_INTERVAL, true, "testDS", true, true, 5, OrderPolicy.KEY ));
   }
 
   public void testConcurrentParallelAsyncEventQueueAttributesOrderPolicyPartition() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         true, 100, 150, true, true, "testDS", true, 5, OrderPolicy.PARTITION ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateConcurrentAsyncEventQueueAttributes( "ln", 100, 150, AsyncEventQueueFactoryImpl.DEFAULT_BATCH_TIME_INTERVAL, true, "testDS", true, true, 5, OrderPolicy.PARTITION ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateConcurrentAsyncEventQueueAttributes( "ln", 100, 150, AsyncEventQueueFactoryImpl.DEFAULT_BATCH_TIME_INTERVAL, true, "testDS", true, true, 5, OrderPolicy.PARTITION ));
   }
   
   /**
@@ -79,37 +79,37 @@ public class ConcurrentAsyncEventQueueDUnitTest extends AsyncEventQueueTestBase
   public void testReplicatedSerialAsyncEventQueueWithMultipleDispatcherThreadsOrderPolicyKey() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 10, true, false, null, false, 3, OrderPolicy.KEY ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 10, true, false, null, false, 3, OrderPolicy.KEY ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm3.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 10, true, false, null, false, 3, OrderPolicy.KEY ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm4.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 10, true, false, null, false, 3, OrderPolicy.KEY ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
         100 ));
     
+    vm1.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
     vm4.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
     
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 100 ));// primary sender
-    vm5.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
-    vm6.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
-    vm7.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 100 ));// primary sender
+    vm2.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm3.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
   }
   
   /**
@@ -124,30 +124,30 @@ public class ConcurrentAsyncEventQueueDUnitTest extends AsyncEventQueueTestBase
   public void testReplicatedSerialAsyncEventQueueWithMultipleDispatcherThreadsOrderPolicyThread() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 10, true, false, null, false, 3, OrderPolicy.THREAD ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 10, true, false, null, false, 3, OrderPolicy.THREAD ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm3.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 10, true, false, null, false, 3, OrderPolicy.THREAD ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm4.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 10, true, false, null, false, 3, OrderPolicy.THREAD ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithAsyncEventQueue( getTestMethodName() + "_RR", "ln", isOffHeap() ));
 
-    AsyncInvocation inv1 = vm4.invokeAsync(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
+    AsyncInvocation inv1 = vm1.invokeAsync(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
         50 ));
-    AsyncInvocation inv2 = vm4.invokeAsync(() -> AsyncEventQueueTestBase.doNextPuts( getTestMethodName() + "_RR",
+    AsyncInvocation inv2 = vm1.invokeAsync(() -> AsyncEventQueueTestBase.doNextPuts( getTestMethodName() + "_RR",
       50, 100 ));
-    AsyncInvocation inv3 = vm4.invokeAsync(() -> AsyncEventQueueTestBase.doNextPuts( getTestMethodName() + "_RR",
+    AsyncInvocation inv3 = vm1.invokeAsync(() -> AsyncEventQueueTestBase.doNextPuts( getTestMethodName() + "_RR",
       100, 150 ));
     
     try {
@@ -160,15 +160,15 @@ public class ConcurrentAsyncEventQueueDUnitTest extends AsyncEventQueueTestBase
           ie);
     }
     
+    vm1.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
     vm4.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
-
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 150 ));// primary sender
-    vm5.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
-    vm6.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
-    vm7.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 150 ));// primary sender
+    vm2.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm3.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
   }
   
   /**
@@ -183,39 +183,39 @@ public class ConcurrentAsyncEventQueueDUnitTest extends AsyncEventQueueTestBase
   public void testPartitionedParallelAsyncEventQueueWithMultipleDispatcherThreadsOrderPolicyKey() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         true, 100, 10, true, false, null, false, 3, OrderPolicy.KEY ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         true, 100, 10, true, false, null, false, 3, OrderPolicy.KEY ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm3.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         true, 100, 10, true, false, null, false, 3, OrderPolicy.KEY ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm4.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         true, 100, 10, true, false, null, false, 3, OrderPolicy.KEY ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_PR",
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_PR",
         100 ));
     
+    vm1.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
     vm4.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
   
+    int vm1size = (Integer)vm1.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
+    int vm2size = (Integer)vm2.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
+    int vm3size = (Integer)vm3.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
     int vm4size = (Integer)vm4.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
-    int vm5size = (Integer)vm5.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
-    int vm6size = (Integer)vm6.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
-    int vm7size = (Integer)vm7.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
   
-    assertEquals(vm4size + vm5size + vm6size + vm7size, 100);
+    assertEquals(vm1size + vm2size + vm3size + vm4size, 100);
   
   }
   
@@ -232,38 +232,38 @@ public class ConcurrentAsyncEventQueueDUnitTest extends AsyncEventQueueTestBase
   public void testPartitionedParallelAsyncEventQueueWithMultipleDispatcherThreadsOrderPolicyPartition() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln", true, 100, 10, true, false, null, false, 3,
+    vm1.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln", true, 100, 10, true, false, null, false, 3,
             OrderPolicy.PARTITION ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln", true, 100, 10, true, false, null, false, 3,
+    vm2.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln", true, 100, 10, true, false, null, false, 3,
             OrderPolicy.PARTITION ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln", true, 100, 10, true, false, null, false, 3,
+    vm3.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln", true, 100, 10, true, false, null, false, 3,
             OrderPolicy.PARTITION ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln", true, 100, 10, true, false, null, false, 3,
+    vm4.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln", true, 100, 10, true, false, null, false, 3,
             OrderPolicy.PARTITION ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_PR",
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_PR",
         100 ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
     vm4.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
 
+    int vm1size = (Integer)vm1.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln" ));
+    int vm2size = (Integer)vm2.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln" ));
+    int vm3size = (Integer)vm3.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln" ));
     int vm4size = (Integer)vm4.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln" ));
-    int vm5size = (Integer)vm5.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln" ));
-    int vm6size = (Integer)vm6.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln" ));
-    int vm7size = (Integer)vm7.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln" ));
 
-    assertEquals(100, vm4size + vm5size + vm6size + vm7size);
+    assertEquals(100, vm1size + vm2size + vm3size + vm4size);
   }
 }
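
These tests drive the dispatcher-thread and order-policy combinations through the createConcurrentAsyncEventQueue helper; on the public API the same knobs live on AsyncEventQueueFactory. The sketch below is a rough equivalent, not a statement of how the helper maps its positional arguments: "ln" is the queue id used by the tests, while cache and listener are assumed to be an existing Cache and AsyncEventListener.

// Hedged sketch of the public-API configuration exercised by these tests.
AsyncEventQueueFactory factory = cache.createAsyncEventQueueFactory();
factory.setParallel(false);                 // serial queue; true for the parallel variants
factory.setMaximumQueueMemory(100);
factory.setBatchSize(10);
factory.setBatchConflationEnabled(true);
factory.setDispatcherThreads(3);            // multiple dispatcher threads per queue
factory.setOrderPolicy(OrderPolicy.KEY);    // or THREAD / PARTITION, as in the tests
AsyncEventQueue queue = factory.create("ln", listener);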

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ecbbf766/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueDUnitTest.java
index 2741760..8a88d43 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueDUnitTest.java
@@ -34,12 +34,12 @@ public class CommonParallelAsyncEventQueueDUnitTest extends AsyncEventQueueTestB
   public void testSameSenderWithNonColocatedRegions() throws Exception {
     IgnoredException.addIgnoredException("cannot have the same parallel async");
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
-    vm4.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createCache( lnPort ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
       true, 100, 100, false, false, null, false ));
-    vm4.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR1", "ln", isOffHeap()  ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR1", "ln", isOffHeap()  ));
     try {
-      vm4.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR2", "ln", isOffHeap()  ));
+      vm1.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR2", "ln", isOffHeap()  ));
       fail("Expected IllegateStateException : cannot have the same parallel gateway sender");
     }
     catch (Exception e) {
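
The try/fail/catch above encodes a colocation constraint: a parallel async event queue is partitioned along with the region feeding it, so two partitioned regions that are not colocated cannot share the same parallel queue id, and the second region creation is expected to fail. A rough public-API sketch of the same failure follows; the region names and listener are placeholders, and the exact exception wording is taken from the IgnoredException registered by the test.

// Hedged sketch: reusing one parallel AEQ id across non-colocated partitioned regions.
cache.createAsyncEventQueueFactory()
    .setParallel(true)
    .create("ln", listener);                           // listener: placeholder AsyncEventListener

cache.createRegionFactory(RegionShortcut.PARTITION)
    .addAsyncEventQueueId("ln")
    .create("PR1");

try {
  cache.createRegionFactory(RegionShortcut.PARTITION)  // not colocated with PR1
      .addAsyncEventQueueId("ln")
      .create("PR2");
} catch (IllegalStateException expected) {
  // expected: "cannot have the same parallel async event queue id ..."
}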



[04/63] [abbrv] incubator-geode git commit: GEODE-1072: Removing HDFS related code

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegion.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegion.java
index c75286e..328c196 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegion.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegion.java
@@ -41,7 +41,6 @@ import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
 import java.util.concurrent.FutureTask;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ThreadFactory;
@@ -93,22 +92,12 @@ import com.gemstone.gemfire.cache.TransactionDataNotColocatedException;
 import com.gemstone.gemfire.cache.TransactionDataRebalancedException;
 import com.gemstone.gemfire.cache.TransactionException;
 import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueImpl;
-import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueStats;
 import com.gemstone.gemfire.cache.execute.EmtpyRegionFunctionException;
 import com.gemstone.gemfire.cache.execute.Function;
 import com.gemstone.gemfire.cache.execute.FunctionContext;
 import com.gemstone.gemfire.cache.execute.FunctionException;
 import com.gemstone.gemfire.cache.execute.FunctionService;
 import com.gemstone.gemfire.cache.execute.ResultCollector;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.CompactionStatus;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSFlushQueueFunction;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSForceCompactionArgs;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSForceCompactionFunction;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSForceCompactionResultCollector;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSLastCompactionTimeFunction;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer;
 import com.gemstone.gemfire.cache.partition.PartitionListener;
 import com.gemstone.gemfire.cache.partition.PartitionNotAvailableException;
 import com.gemstone.gemfire.cache.query.FunctionDomainException;
@@ -224,7 +213,6 @@ import com.gemstone.gemfire.internal.cache.partitioned.PutAllPRMessage;
 import com.gemstone.gemfire.internal.cache.partitioned.PutMessage;
 import com.gemstone.gemfire.internal.cache.partitioned.PutMessage.PutResult;
 import com.gemstone.gemfire.internal.cache.partitioned.RegionAdvisor;
-import com.gemstone.gemfire.internal.cache.partitioned.RegionAdvisor.BucketVisitor;
 import com.gemstone.gemfire.internal.cache.partitioned.RegionAdvisor.PartitionProfile;
 import com.gemstone.gemfire.internal.cache.partitioned.RemoveAllPRMessage;
 import com.gemstone.gemfire.internal.cache.partitioned.RemoveIndexesMessage;
@@ -256,7 +244,6 @@ import com.gemstone.gemfire.internal.offheap.annotations.Released;
 import com.gemstone.gemfire.internal.offheap.annotations.Unretained;
 import com.gemstone.gemfire.internal.sequencelog.RegionLogger;
 import com.gemstone.gemfire.internal.util.TransformUtils;
-import com.gemstone.gemfire.internal.util.concurrent.FutureResult;
 import com.gemstone.gemfire.internal.util.concurrent.StoppableCountDownLatch;
 import com.gemstone.gemfire.i18n.StringId;
 
@@ -708,17 +695,9 @@ public class PartitionedRegion extends LocalRegion implements
   private final PartitionListener[] partitionListeners;
 
   private boolean isShadowPR = false;
-  private boolean isShadowPRForHDFS = false;
-  
+
   private AbstractGatewaySender parallelGatewaySender = null;
   
-  private final ThreadLocal<Boolean> queryHDFS = new ThreadLocal<Boolean>() {
-    @Override
-    protected Boolean initialValue() {
-      return false;
-    }
-  };
-  
   public PartitionedRegion(String regionname, RegionAttributes ra,
       LocalRegion parentRegion, GemFireCacheImpl cache,
       InternalRegionArguments internalRegionArgs) {
@@ -738,12 +717,6 @@ public class PartitionedRegion extends LocalRegion implements
     // (which prevents pridmap cleanup).
     cache.getDistributedSystem().addDisconnectListener(dsPRIdCleanUpListener);
     
-    // add an async queue for the region if the store name is not null. 
-    if (this.getHDFSStoreName() != null) {
-      String eventQueueName = getHDFSEventQueueName();
-      super.addAsyncEventQueueId(eventQueueName);
-    }
-
     // this.userScope = ra.getScope();
     this.partitionAttributes = ra.getPartitionAttributes();
     this.localMaxMemory = this.partitionAttributes.getLocalMaxMemory();
@@ -822,8 +795,6 @@ public class PartitionedRegion extends LocalRegion implements
     if (internalRegionArgs.isUsedForParallelGatewaySenderQueue()) {
       this.isShadowPR = true;
       this.parallelGatewaySender = internalRegionArgs.getParallelGatewaySender();
-      if (internalRegionArgs.isUsedForHDFSParallelGatewaySenderQueue())
-        this.isShadowPRForHDFS = true;
     }
     
     
@@ -867,38 +838,10 @@ public class PartitionedRegion extends LocalRegion implements
     });
   }
 
-  @Override
-  public final boolean isHDFSRegion() {
-    return this.getHDFSStoreName() != null;
-  }
-
-  @Override
-  public final boolean isHDFSReadWriteRegion() {
-    return isHDFSRegion() && !getHDFSWriteOnly();
-  }
-
-  @Override
-  protected final boolean isHDFSWriteOnly() {
-    return isHDFSRegion() && getHDFSWriteOnly();
-  }
-
-  public final void setQueryHDFS(boolean includeHDFS) {
-    queryHDFS.set(includeHDFS);
-  }
-
-  @Override
-  public final boolean includeHDFSResults() {
-    return queryHDFS.get();
-  }
-
   public final boolean isShadowPR() {
     return isShadowPR;
   }
 
-  public final boolean isShadowPRForHDFS() {
-    return isShadowPRForHDFS;
-  }
-  
   public AbstractGatewaySender getParallelGatewaySender() {
     return parallelGatewaySender;
   }
@@ -1664,7 +1607,7 @@ public class PartitionedRegion extends LocalRegion implements
       try {
         final boolean loc = (this.localMaxMemory > 0) && retryNode.equals(getMyId());
         if (loc) {
-          ret = this.dataStore.getEntryLocally(bucketId, key, access, allowTombstones, true);
+          ret = this.dataStore.getEntryLocally(bucketId, key, access, allowTombstones);
         } else {
           ret = getEntryRemotely(retryNode, bucketIdInt, key, access, allowTombstones);
           // TODO:Suranjan&Yogesh : there should be better way than this one
@@ -2123,8 +2066,7 @@ public class PartitionedRegion extends LocalRegion implements
           bucketStorageAssigned=false;
           // if this is a Delta update, then throw exception since the key doesn't
           // exist if there is no bucket for it yet
-          // For HDFS region, we will recover key, so allow bucket creation
-          if (!this.dataPolicy.withHDFS() && event.hasDelta()) {
+          if (event.hasDelta()) {
             throw new EntryNotFoundException(LocalizedStrings.
               PartitionedRegion_CANNOT_APPLY_A_DELTA_WITHOUT_EXISTING_ENTRY
                 .toLocalizedString());
@@ -3319,9 +3261,9 @@ public class PartitionedRegion extends LocalRegion implements
    */
    @Override
   public Object get(Object key, Object aCallbackArgument,
-      boolean generateCallbacks, boolean disableCopyOnRead, boolean preferCD,
-      ClientProxyMembershipID requestingClient,
-      EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) throws TimeoutException, CacheLoaderException
+                    boolean generateCallbacks, boolean disableCopyOnRead, boolean preferCD,
+                    ClientProxyMembershipID requestingClient,
+                    EntryEventImpl clientEvent, boolean returnTombstones) throws TimeoutException, CacheLoaderException
   {
     validateKey(key);
     validateCallbackArg(aCallbackArgument);
@@ -3335,7 +3277,7 @@ public class PartitionedRegion extends LocalRegion implements
       // if scope is local and there is no loader, then
       // don't go further to try and get value
       Object value = getDataView().findObject(getKeyInfo(key, aCallbackArgument), this, true/*isCreate*/, generateCallbacks,
-                                      null /*no local value*/, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, allowReadFromHDFS);
+                                      null /*no local value*/, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones);
       if (value != null && !Token.isInvalid(value)) {
         miss = false;
       }
@@ -3381,7 +3323,7 @@ public class PartitionedRegion extends LocalRegion implements
     if (primary == null) {
       return null;
     }
-    if (isTX() || this.hdfsStoreName != null) {
+    if (isTX()) {
       return getNodeForBucketWrite(bucketId, null);
     }
     InternalDistributedMember result =  getRegionAdvisor().getPreferredNode(bucketId);
@@ -3395,7 +3337,7 @@ public class PartitionedRegion extends LocalRegion implements
    */
   private InternalDistributedMember getNodeForBucketReadOrLoad(int bucketId) {
     InternalDistributedMember targetNode;
-    if (!this.haveCacheLoader && (this.hdfsStoreName == null)) {
+    if (!this.haveCacheLoader) {
       targetNode = getNodeForBucketRead(bucketId);
     }
     else {
@@ -3528,9 +3470,16 @@ public class PartitionedRegion extends LocalRegion implements
   }
 
   @Override
-  protected Object findObjectInSystem(KeyInfo keyInfo, boolean isCreate,
-      TXStateInterface tx, boolean generateCallbacks, Object localValue, boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient,
-      EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS)
+  protected Object findObjectInSystem(KeyInfo keyInfo,
+                                      boolean isCreate,
+                                      TXStateInterface tx,
+                                      boolean generateCallbacks,
+                                      Object localValue,
+                                      boolean disableCopyOnRead,
+                                      boolean preferCD,
+                                      ClientProxyMembershipID requestingClient,
+                                      EntryEventImpl clientEvent,
+                                      boolean returnTombstones)
       throws CacheLoaderException, TimeoutException
   {
     Object obj = null;
@@ -3566,7 +3515,7 @@ public class PartitionedRegion extends LocalRegion implements
         return null;
       }
       
-      obj = getFromBucket(targetNode, bucketId, key, aCallbackArgument, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, allowRetry, allowReadFromHDFS);
+      obj = getFromBucket(targetNode, bucketId, key, aCallbackArgument, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, allowRetry);
     }
     finally {
       this.prStats.endGet(startTime);
@@ -4149,15 +4098,22 @@ public class PartitionedRegion extends LocalRegion implements
 
   /**
    * no docs
-   * @param preferCD 
+   * @param preferCD
    * @param requestingClient the client requesting the object, or null if not from a client
    * @param clientEvent TODO
    * @param returnTombstones TODO
    * @param allowRetry if false then do not retry
    */
   private Object getFromBucket(final InternalDistributedMember targetNode,
-      int bucketId, final Object key, final Object aCallbackArgument,
-      boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowRetry, boolean allowReadFromHDFS) {
+                               int bucketId,
+                               final Object key,
+                               final Object aCallbackArgument,
+                               boolean disableCopyOnRead,
+                               boolean preferCD,
+                               ClientProxyMembershipID requestingClient,
+                               EntryEventImpl clientEvent,
+                               boolean returnTombstones,
+                               boolean allowRetry) {
     final boolean isDebugEnabled = logger.isDebugEnabled();
     
     final int retryAttempts = calcRetry();
@@ -4187,7 +4143,7 @@ public class PartitionedRegion extends LocalRegion implements
       try {
         if (isLocal) {
           obj = this.dataStore.getLocally(bucketId, key, aCallbackArgument,
-              disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, false, allowReadFromHDFS);
+              disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, false);
         }
         else {
             if (localCacheEnabled && null != (obj = localCacheGet(key))) { // OFFHEAP: copy into heap cd; TODO optimize for preferCD case
@@ -4196,14 +4152,14 @@ public class PartitionedRegion extends LocalRegion implements
               }
               return obj;
             }
-            else if (this.haveCacheLoader || this.hdfsStoreName != null) {
+            else if (this.haveCacheLoader) {
               // If the region has a cache loader, 
               // the target node is the primary server of the bucket. But, if the 
               // value can be found in a local bucket, we should first try there. 
 
               /* MergeGemXDHDFSToGFE -readoing from local bucket was disabled in GemXD*/
 			  if (null != ( obj = getFromLocalBucket(bucketId, key, aCallbackArgument,
-                  disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, allowReadFromHDFS))) {
+                  disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones))) {
                 return obj;
               } 
             }
@@ -4211,7 +4167,7 @@ public class PartitionedRegion extends LocalRegion implements
           //  Test hook
           if (((LocalRegion)this).isTest())
             ((LocalRegion)this).incCountNotFoundInLocal();
-          obj = getRemotely(retryNode, bucketId, key, aCallbackArgument, preferCD, requestingClient, clientEvent, returnTombstones, allowReadFromHDFS);
+          obj = getRemotely(retryNode, bucketId, key, aCallbackArgument, preferCD, requestingClient, clientEvent, returnTombstones);
  
           // TODO:Suranjan&Yogesh : there should be better way than this one
           String name = Thread.currentThread().getName();
@@ -4309,9 +4265,9 @@ public class PartitionedRegion extends LocalRegion implements
    *   
    */
   public Object getFromLocalBucket(int bucketId, final Object key,
-		final Object aCallbackArgument, boolean disableCopyOnRead,
-		boolean preferCD, ClientProxyMembershipID requestingClient,
-		EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS)
+                                   final Object aCallbackArgument, boolean disableCopyOnRead,
+                                   boolean preferCD, ClientProxyMembershipID requestingClient,
+                                   EntryEventImpl clientEvent, boolean returnTombstones)
 		throws ForceReattemptException, PRLocallyDestroyedException {
     Object obj;
     // try reading locally. 
@@ -4320,7 +4276,7 @@ public class PartitionedRegion extends LocalRegion implements
       return null; // fixes 51657
     }
     if (readNode.equals(getMyId()) && null != ( obj = this.dataStore.getLocally(bucketId, key, aCallbackArgument,
-      disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, true, allowReadFromHDFS))) {
+      disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, true))) {
 	  if (logger.isTraceEnabled()) {
             logger.trace("getFromBucket: Getting key {} ({}) locally - success", key, key.hashCode());
 	  }
@@ -5116,7 +5072,13 @@ public class PartitionedRegion extends LocalRegion implements
    *                 if the peer is no longer available
    */
   public Object getRemotely(InternalDistributedMember targetNode,
-      int bucketId, final Object key, final Object aCallbackArgument, boolean preferCD, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) throws PrimaryBucketException,
+                            int bucketId,
+                            final Object key,
+                            final Object aCallbackArgument,
+                            boolean preferCD,
+                            ClientProxyMembershipID requestingClient,
+                            EntryEventImpl clientEvent,
+                            boolean returnTombstones) throws PrimaryBucketException,
       ForceReattemptException {
     Object value;
     if (logger.isDebugEnabled()) {
@@ -5124,7 +5086,7 @@ public class PartitionedRegion extends LocalRegion implements
           getPRId(), BUCKET_ID_SEPARATOR, bucketId, key);
     }
     GetResponse response = GetMessage.send(targetNode, this, key,
-        aCallbackArgument, requestingClient, returnTombstones, allowReadFromHDFS);
+        aCallbackArgument, requestingClient, returnTombstones);
     this.prStats.incPartitionMessagesSent();
     value = response.waitForResponse(preferCD);
     if (clientEvent != null) {
@@ -7078,9 +7040,6 @@ public class PartitionedRegion extends LocalRegion implements
   public int entryCount(Set<Integer> buckets,
       boolean estimate) {
     Map<Integer, SizeEntry> bucketSizes = null;
- 	if (isHDFSReadWriteRegion() && (includeHDFSResults() || estimate)) {
-      bucketSizes = getSizeForHDFS( buckets, estimate);
-	} else {
     if (buckets != null) {
       if (this.dataStore != null) {
         List<Integer> list = new ArrayList<Integer>();	
@@ -7112,7 +7071,6 @@ public class PartitionedRegion extends LocalRegion implements
         }
       }
     }
- 	}
 
     int size = 0;
     if (bucketSizes != null) {
@@ -7135,81 +7093,7 @@ public class PartitionedRegion extends LocalRegion implements
       return 0;
     }
   }
-  private Map<Integer, SizeEntry> getSizeForHDFS(final Set<Integer> buckets, boolean estimate) {
-    // figure out which buckets to include
-    Map<Integer, SizeEntry> bucketSizes = new HashMap<Integer, SizeEntry>();
-    getRegionAdvisor().accept(new BucketVisitor<Map<Integer, SizeEntry>>() {
-      @Override
-      public boolean visit(RegionAdvisor advisor, ProxyBucketRegion pbr,
-          Map<Integer, SizeEntry> map) {
-        if (buckets == null || buckets.contains(pbr.getBucketId())) {
-          map.put(pbr.getBucketId(), null);
-          // ensure that the bucket has been created
-          pbr.getPartitionedRegion().getOrCreateNodeForBucketWrite(pbr.getBucketId(), null);
-        }
-        return true;
-      }
-    }, bucketSizes);
 
-    RetryTimeKeeper retry = new RetryTimeKeeper(retryTimeout);
-
-    while (true) {
-      // get the size from local buckets
-      if (dataStore != null) {
-        Map<Integer, SizeEntry> localSizes;
-        if (estimate) {
-          localSizes = dataStore.getSizeEstimateForLocalPrimaryBuckets();
-        } else {
-          localSizes = dataStore.getSizeForLocalPrimaryBuckets();
-        }
-        for (Map.Entry<Integer, SizeEntry> me : localSizes.entrySet()) {
-          if (bucketSizes.containsKey(me.getKey())) {
-            bucketSizes.put(me.getKey(), me.getValue());
-          }
-        }
-      }
-      // all done
-      int count = 0;
-      Iterator it = bucketSizes.values().iterator();
-      while (it.hasNext()) {
-        if (it.next() != null) count++;
-      }
-      if (bucketSizes.size() == count) {
-        return bucketSizes;
-      }
-      
-      Set<InternalDistributedMember> remotes = getRegionAdvisor().adviseDataStore(true);
-      remotes.remove(getMyId());
-      
-      // collect remote sizes
-      if (!remotes.isEmpty()) {
-        Map<Integer, SizeEntry> remoteSizes = new HashMap<Integer, PartitionedRegion.SizeEntry>();
-        try {
-          remoteSizes = getSizeRemotely(remotes, estimate);
-        } catch (ReplyException e) {
-          // Remote member will never throw ForceReattemptException or
-          // PrimaryBucketException, so any exception on the remote member
-          // should be re-thrown
-          e.handleAsUnexpected();
-        }
-        for (Map.Entry<Integer, SizeEntry> me : remoteSizes.entrySet()) {
-          Integer k = me.getKey();
-          if (bucketSizes.containsKey(k) && me.getValue().isPrimary()) {
-            bucketSizes.put(k, me.getValue());
-          }
-        }
-      }
-      
-      if (retry.overMaximum()) {
-        checkReadiness();
-        PRHARedundancyProvider.timedOut(this, null, null, "calculate size", retry.getRetryTime());
-      }
-      
-      // throttle subsequent attempts
-      retry.waitForBucketsRecovery();
-    }
-  }
-  
   /**
    * This method gets a PartitionServerSocketConnection to targetNode and sends
    * size request to the node. It returns size of all the buckets "primarily"
@@ -7607,9 +7491,7 @@ public class PartitionedRegion extends LocalRegion implements
       .append("; isClosed=").append(this.isClosed)
       .append("; retryTimeout=").append(this.retryTimeout)
       .append("; serialNumber=").append(getSerialNumber())
-	  .append("; hdfsStoreName=").append(getHDFSStoreName())
-      .append("; hdfsWriteOnly=").append(getHDFSWriteOnly())
-      
+
       .append("; partition attributes=").append(getPartitionAttributes().toString())
       .append("; on VM ").append(getMyId())
       .append("]")
@@ -7752,18 +7634,6 @@ public class PartitionedRegion extends LocalRegion implements
   @Override
   public void destroyRegion(Object aCallbackArgument)
       throws CacheWriterException, TimeoutException {
-    //For HDFS regions, we need a data store
-    //to do the global destroy so that it can delete
-    //the data from HDFS as well.
-    if(!isDataStore() && this.dataPolicy.withHDFS()) {
-      if(destroyOnDataStore(aCallbackArgument)) {
-        //If we were able to find a data store to do the destroy,
-        //stop here.
-        //otherwise go ahead and destroy the region from this member
-        return;
-      }
-    }
-
     checkForColocatedChildren();
     getDataView().checkSupportsRegionDestroy();
     checkForLimitedOrNoAccess();
@@ -7811,7 +7681,6 @@ public class PartitionedRegion extends LocalRegion implements
 
     boolean keepWaiting = true;
 
-    AsyncEventQueueImpl hdfsQueue = getHDFSEventQueue();
     while(true) {
       List<String> pausedSenders = new ArrayList<String>();
       List<ConcurrentParallelGatewaySenderQueue> parallelQueues = new ArrayList<ConcurrentParallelGatewaySenderQueue>();
@@ -7929,11 +7798,6 @@ public class PartitionedRegion extends LocalRegion implements
         }
       }
     }
-    
-    if(hdfsQueue != null) {
-      hdfsQueue.destroy();
-      cache.removeAsyncEventQueue(hdfsQueue);
-    }
   }
         
   @Override
@@ -8114,9 +7978,6 @@ public class PartitionedRegion extends LocalRegion implements
     final boolean isClose = event.getOperation().isClose();
     destroyPartitionedRegionLocally(!isClose);
     destroyCleanUp(event, serials);
-	if(!isClose) {
-      destroyHDFSData();
-    }
     return true;
   }
 
@@ -8409,8 +8270,6 @@ public class PartitionedRegion extends LocalRegion implements
       }
     }
     
-    HDFSRegionDirector.getInstance().clear(getFullPath());
-    
     RegionLogger.logDestroy(getName(), cache.getMyId(), null, op.isClose());
   }
 
@@ -11055,11 +10914,6 @@ public class PartitionedRegion extends LocalRegion implements
         }
       }
       
-      //hoplogs - pause HDFS dispatcher while we 
-      //clear the buckets to avoid missing some files
-      //during the clear
-      pauseHDFSDispatcher();
-
       try {
         // now clear the bucket regions; we go through the primary bucket
         // regions so there is distribution for every bucket but that
@@ -11075,7 +10929,6 @@ public class PartitionedRegion extends LocalRegion implements
           }
         }
       } finally {
-        resumeHDFSDispatcher();
         // release the bucket locks
         for (BucketRegion br : lockedRegions) {
           try {
@@ -11091,247 +10944,6 @@ public class PartitionedRegion extends LocalRegion implements
     }
     
   }
-  
-  /**Destroy all data in HDFS, if this region is using HDFS persistence.*/
-  private void destroyHDFSData() {
-    if(getHDFSStoreName() == null) {
-      return;
-    }
-    
-    try {
-      hdfsManager.destroyData();
-    } catch (IOException e) {
-      logger.warn(LocalizedStrings.HOPLOG_UNABLE_TO_DELETE_HDFS_DATA, e);
-    }
-  }
-
-  private void pauseHDFSDispatcher() {
-    if(!isHDFSRegion()) {
-      return;
-    }
-    AbstractGatewaySenderEventProcessor eventProcessor = getHDFSEventProcessor();
-    if (eventProcessor == null) return;
-    eventProcessor.pauseDispatching();
-    eventProcessor.waitForDispatcherToPause();
-  }
-  
-  /**
-   * Get the statistics for the HDFS event queue associated with this region,
-   * if any
-   */
-  public AsyncEventQueueStats getHDFSEventQueueStats() {
-    AsyncEventQueueImpl asyncQ = getHDFSEventQueue();
-    if(asyncQ == null) {
-      return null;
-    }
-    return asyncQ.getStatistics();
-  }
-  
-  protected AbstractGatewaySenderEventProcessor getHDFSEventProcessor() {
-    final AsyncEventQueueImpl asyncQ = getHDFSEventQueue();
-    final AbstractGatewaySender gatewaySender = (AbstractGatewaySender)asyncQ.getSender();
-    AbstractGatewaySenderEventProcessor eventProcessor = gatewaySender.getEventProcessor();
-    return eventProcessor;
-  }
-
-  public AsyncEventQueueImpl getHDFSEventQueue() {
-    String asyncQId = getHDFSEventQueueName();
-    if(asyncQId == null) {
-      return null;
-    }
-    final AsyncEventQueueImpl asyncQ =  (AsyncEventQueueImpl)this.getCache().getAsyncEventQueue(asyncQId);
-    return asyncQ;
-  }
-  
-  private void resumeHDFSDispatcher() {
-    if(!isHDFSRegion()) {
-      return;
-    }
-    AbstractGatewaySenderEventProcessor eventProcessor = getHDFSEventProcessor();
-    if (eventProcessor == null) return;
-    eventProcessor.resumeDispatching();
-  }
-
-  protected String getHDFSEventQueueName() {
-    if (!this.getDataPolicy().withHDFS()) return null;
-    String colocatedWith = this.getPartitionAttributes().getColocatedWith();
-    String eventQueueName;
-    if (colocatedWith != null) {
-      PartitionedRegion leader = ColocationHelper.getLeaderRegionName(this);
-      eventQueueName = HDFSStoreFactoryImpl.getEventQueueName(leader
-          .getFullPath());
-    }
-    else {
-      eventQueueName = HDFSStoreFactoryImpl.getEventQueueName(getFullPath());
-    }
-    return eventQueueName;
-  }
-
-  /**
-   * schedules compaction on all members where this region is hosted.
-   * 
-   * @param isMajor
-   *          true for major compaction
-   * @param maxWaitTime
-   *          time to wait for the operation to complete, 0 will wait forever
-   */
-  @Override
-  public void forceHDFSCompaction(boolean isMajor, Integer maxWaitTime) {
-    if (!this.isHDFSReadWriteRegion()) {
-      if (this.isHDFSRegion()) {
-        throw new UnsupportedOperationException(
-            LocalizedStrings.HOPLOG_CONFIGURED_AS_WRITEONLY
-                .toLocalizedString(getName()));
-      }
-      throw new UnsupportedOperationException(
-          LocalizedStrings.HOPLOG_DOES_NOT_USE_HDFSSTORE
-              .toLocalizedString(getName()));
-    }
-    // send request to remote data stores
-    long start = System.currentTimeMillis();
-    int waitTime = maxWaitTime * 1000;
-    HDFSForceCompactionArgs args = new HDFSForceCompactionArgs(getRegionAdvisor().getBucketSet(), isMajor, waitTime);
-    HDFSForceCompactionResultCollector rc = new HDFSForceCompactionResultCollector();
-    AbstractExecution execution = (AbstractExecution) FunctionService.onRegion(this).withArgs(args).withCollector(rc);
-    execution.setWaitOnExceptionFlag(true); // wait for all exceptions
-    if (logger.isDebugEnabled()) {
-      logger.debug("HDFS: ForceCompat invoking function with arguments "+args);
-    }
-    execution.execute(HDFSForceCompactionFunction.ID);
-    List<CompactionStatus> result = rc.getResult();
-    Set<Integer> successfulBuckets = rc.getSuccessfulBucketIds();
-    if (rc.shouldRetry()) {
-      int retries = 0;
-      while (retries < HDFSForceCompactionFunction.FORCE_COMPACTION_MAX_RETRIES) {
-        waitTime -= System.currentTimeMillis() - start;
-        if (maxWaitTime > 0 && waitTime < 0) {
-          break;
-        }
-        start = System.currentTimeMillis();
-        retries++;
-        Set<Integer> retryBuckets = new HashSet<Integer>(getRegionAdvisor().getBucketSet());
-        retryBuckets.removeAll(successfulBuckets);
-        
-        for (int bucketId : retryBuckets) {
-          getNodeForBucketWrite(bucketId, new PartitionedRegion.RetryTimeKeeper(waitTime));
-          long now = System.currentTimeMillis();
-          waitTime -= now - start;
-          start = now;
-        }
-        
-        args = new HDFSForceCompactionArgs(retryBuckets, isMajor, waitTime);
-        rc = new HDFSForceCompactionResultCollector();
-        execution = (AbstractExecution) FunctionService.onRegion(this).withArgs(args).withCollector(rc);
-        execution.setWaitOnExceptionFlag(true); // wait for all exceptions
-        if (logger.isDebugEnabled()) {
-          logger.debug("HDFS: ForceCompat re-invoking function with arguments "+args+" filter:"+retryBuckets);
-        }
-        execution.execute(HDFSForceCompactionFunction.ID);
-        result = rc.getResult();
-        successfulBuckets.addAll(rc.getSuccessfulBucketIds());
-      }
-    }
-    if (successfulBuckets.size() != getRegionAdvisor().getBucketSet().size()) {
-      checkReadiness();
-      Set<Integer> uncessfulBuckets = new HashSet<Integer>(getRegionAdvisor().getBucketSet());
-      uncessfulBuckets.removeAll(successfulBuckets);
-      throw new FunctionException("Could not run compaction on following buckets:"+uncessfulBuckets);
-    }
-  }
-
-  /**
-   * Schedules compaction on local buckets
-   * @param buckets the set of buckets to compact
-   * @param isMajor true for major compaction
-   * @param time TODO use this
-   * @return a list of futures for the scheduled compaction tasks
-   */
-  public List<Future<CompactionStatus>> forceLocalHDFSCompaction(Set<Integer> buckets, boolean isMajor, long time) {
-    List<Future<CompactionStatus>> futures = new ArrayList<Future<CompactionStatus>>();
-    if (!isDataStore() || hdfsManager == null || buckets == null || buckets.isEmpty()) {
-      if (logger.isDebugEnabled()) {
-        logger.debug(
-            "HDFS: did not schedule local " + (isMajor ? "Major" : "Minor") + " compaction");
-      }
-      // nothing to do
-      return futures;
-    }
-    if (logger.isDebugEnabled()) {
-      logger.debug(
-          "HDFS: scheduling local " + (isMajor ? "Major" : "Minor") + " compaction for buckets:"+buckets);
-    }
-    Collection<HoplogOrganizer> organizers = hdfsManager.getBucketOrganizers(buckets);
-    
-    for (HoplogOrganizer hoplogOrganizer : organizers) {
-      Future<CompactionStatus> f = hoplogOrganizer.forceCompaction(isMajor);
-      futures.add(f);
-    }
-    return futures;
-  }
-  
-  @Override
-  public void flushHDFSQueue(int maxWaitTime) {
-    if (!this.isHDFSRegion()) {
-      throw new UnsupportedOperationException(
-          LocalizedStrings.HOPLOG_DOES_NOT_USE_HDFSSTORE
-              .toLocalizedString(getName()));
-    }
-    HDFSFlushQueueFunction.flushQueue(this, maxWaitTime);
-  }
-  
-  @Override
-  public long lastMajorHDFSCompaction() {
-    if (!this.isHDFSReadWriteRegion()) {
-      if (this.isHDFSRegion()) {
-        throw new UnsupportedOperationException(
-            LocalizedStrings.HOPLOG_CONFIGURED_AS_WRITEONLY
-                .toLocalizedString(getName()));
-      }
-      throw new UnsupportedOperationException(
-          LocalizedStrings.HOPLOG_DOES_NOT_USE_HDFSSTORE
-              .toLocalizedString(getName()));
-    }
-    List<Long> result = (List<Long>) FunctionService.onRegion(this)
-        .execute(HDFSLastCompactionTimeFunction.ID)
-        .getResult();
-    if (logger.isDebugEnabled()) {
-      logger.debug("HDFS: Result of LastCompactionTimeFunction "+result);
-    }
-    long min = Long.MAX_VALUE;
-    for (long ts : result) {
-      if (ts !=0 && ts < min) {
-        min = ts;
-      }
-    }
-    min = min == Long.MAX_VALUE ? 0 : min;
-    return min;
-  }
-
-  public long lastLocalMajorHDFSCompaction() {
-    if (!isDataStore() || hdfsManager == null) {
-      // nothing to do
-      return 0;
-    }
-    if (logger.isDebugEnabled()) {
-      logger.debug(
-          "HDFS: getting local Major compaction time");
-    }
-    Collection<HoplogOrganizer> organizers = hdfsManager.getBucketOrganizers();
-    long minTS = Long.MAX_VALUE;
-    for (HoplogOrganizer hoplogOrganizer : organizers) {
-      long ts = hoplogOrganizer.getLastMajorCompactionTimestamp();
-      if (ts !=0 && ts < minTS) {
-        minTS = ts;
-      }
-    }
-    minTS = minTS == Long.MAX_VALUE ? 0 : minTS;
-    if (logger.isDebugEnabled()) {
-      logger.debug(
-          "HDFS: local Major compaction time: "+minTS);
-    }
-    return minTS;
-  }
-
 
   public void shadowPRWaitForBucketRecovery() {
     assert this.isShadowPR();

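With the HDFS store gone, the bucket read path above simplifies: getNodeForBucketReadOrLoad routes to a read replica whenever no cache loader is configured, and only falls back to the write primary when a loader might have to run. The following is a minimal, self-contained sketch of that selection rule; NodePicker and the string node names are illustrative stand-ins, not Geode classes.

    // Illustrative analogue of the simplified getNodeForBucketReadOrLoad decision.
    final class NodePicker {
      private final boolean haveCacheLoader;

      NodePicker(boolean haveCacheLoader) {
        this.haveCacheLoader = haveCacheLoader;
      }

      String nodeForBucketReadOrLoad(int bucketId) {
        if (!haveCacheLoader) {
          return nodeForBucketRead(bucketId);   // any replica can serve the read
        }
        return nodeForBucketWrite(bucketId);    // a loader may run, so use the primary
      }

      private String nodeForBucketRead(int bucketId)  { return "replica-of-" + bucketId; }
      private String nodeForBucketWrite(int bucketId) { return "primary-of-" + bucketId; }

      public static void main(String[] args) {
        System.out.println(new NodePicker(false).nodeForBucketReadOrLoad(7)); // replica-of-7
        System.out.println(new NodePicker(true).nodeForBucketReadOrLoad(7));  // primary-of-7
      }
    }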
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDataStore.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDataStore.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDataStore.java
index 57b1e71..bda68e3 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDataStore.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDataStore.java
@@ -64,7 +64,6 @@ import com.gemstone.gemfire.cache.execute.Function;
 import com.gemstone.gemfire.cache.execute.FunctionException;
 import com.gemstone.gemfire.cache.execute.ResultSender;
 import com.gemstone.gemfire.cache.query.QueryInvalidException;
-import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
 import com.gemstone.gemfire.cache.query.internal.IndexUpdater;
 import com.gemstone.gemfire.cache.query.internal.QCompiler;
 import com.gemstone.gemfire.cache.query.internal.index.IndexCreationData;
@@ -2059,13 +2058,13 @@ public class PartitionedRegionDataStore implements HasCachePerfStats
       ForceReattemptException, PRLocallyDestroyedException
   {
 	  return getLocally(bucketId, key,aCallbackArgument, disableCopyOnRead, preferCD, requestingClient, 
-			  clientEvent, returnTombstones, false, false);
+			  clientEvent, returnTombstones, false);
   }
   /**
    * Returns value corresponding to this key.
    * @param key
    *          the key to look for
-   * @param preferCD 
+   * @param preferCD
    * @param requestingClient the client making the request, or null
    * @param clientEvent client's event (for returning version tag)
    * @param returnTombstones whether tombstones should be returned
@@ -2076,21 +2075,28 @@ public class PartitionedRegionDataStore implements HasCachePerfStats
    * @throws PrimaryBucketException if the locally managed bucket is not primary
    * @throws PRLocallyDestroyedException if the PartitionRegion is locally destroyed
    */
-  public Object getLocally(int bucketId, final Object key,
-      final Object aCallbackArgument, boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, 
-      boolean returnTombstones, boolean opScopeIsLocal, boolean allowReadFromHDFS) throws PrimaryBucketException,
+  public Object getLocally(int bucketId,
+                           final Object key,
+                           final Object aCallbackArgument,
+                           boolean disableCopyOnRead,
+                           boolean preferCD,
+                           ClientProxyMembershipID requestingClient,
+                           EntryEventImpl clientEvent,
+                           boolean returnTombstones,
+                           boolean opScopeIsLocal) throws PrimaryBucketException,
       ForceReattemptException, PRLocallyDestroyedException
   {
     final BucketRegion bucketRegion = getInitializedBucketForId(key, Integer.valueOf(bucketId));
     //  check for primary (when a loader is present) done deeper in the BucketRegion
     Object ret=null;
     if (logger.isDebugEnabled()) {
-      logger.debug("getLocally:  key {}) bucketId={}{}{} region {} returnTombstones {} allowReadFromHDFS {}", key,
-          this.partitionedRegion.getPRId(), PartitionedRegion.BUCKET_ID_SEPARATOR, bucketId, bucketRegion.getName(), returnTombstones, allowReadFromHDFS);
+      logger.debug("getLocally:  key {}) bucketId={}{}{} region {} returnTombstones {} ", key,
+          this.partitionedRegion.getPRId(), PartitionedRegion.BUCKET_ID_SEPARATOR, bucketId, bucketRegion.getName(), returnTombstones);
     }
     invokeBucketReadHook();
     try {
-      ret = bucketRegion.get(key, aCallbackArgument, true, disableCopyOnRead , preferCD, requestingClient, clientEvent, returnTombstones, opScopeIsLocal, allowReadFromHDFS, false);
+      ret = bucketRegion.get(key, aCallbackArgument, true, disableCopyOnRead , preferCD, requestingClient, clientEvent, returnTombstones, opScopeIsLocal,
+        false);
       checkIfBucketMoved(bucketRegion);
     }
     catch (RegionDestroyedException rde) {
@@ -2122,7 +2128,11 @@ public class PartitionedRegionDataStore implements HasCachePerfStats
    * @throws PrimaryBucketException if the locally managed bucket is not primary
    * @see #getLocally(int, Object, Object, boolean, boolean, ClientProxyMembershipID, EntryEventImpl, boolean)
    */
-  public RawValue getSerializedLocally(KeyInfo keyInfo, boolean doNotLockEntry, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) throws PrimaryBucketException,
+  public RawValue getSerializedLocally(KeyInfo keyInfo,
+                                       boolean doNotLockEntry,
+                                       ClientProxyMembershipID requestingClient,
+                                       EntryEventImpl clientEvent,
+                                       boolean returnTombstones) throws PrimaryBucketException,
       ForceReattemptException {
     final BucketRegion bucketRegion = getInitializedBucketForId(keyInfo.getKey(), keyInfo.getBucketId());
     //  check for primary (when loader is present) done deeper in the BucketRegion
@@ -2133,7 +2143,7 @@ public class PartitionedRegionDataStore implements HasCachePerfStats
     invokeBucketReadHook();
 
     try {
-      RawValue result = bucketRegion.getSerialized(keyInfo, true, doNotLockEntry, requestingClient, clientEvent, returnTombstones, allowReadFromHDFS);
+      RawValue result = bucketRegion.getSerialized(keyInfo, true, doNotLockEntry, requestingClient, clientEvent, returnTombstones);
       checkIfBucketMoved(bucketRegion);
       return result;
     } catch (RegionDestroyedException rde) {
@@ -2157,7 +2167,7 @@ public class PartitionedRegionDataStore implements HasCachePerfStats
    * @param access
    *          true if caller wants last accessed time updated
    * @param allowTombstones whether a tombstoned entry can be returned
-   * 
+   *
    * @throws ForceReattemptException
    *           if bucket region is not present in this process
    * @return a RegionEntry for the given key, which will be null if the key is
@@ -2168,7 +2178,7 @@ public class PartitionedRegionDataStore implements HasCachePerfStats
    *           if the PartitionRegion is locally destroyed
    */
   public EntrySnapshot getEntryLocally(int bucketId, final Object key,
-      boolean access, boolean allowTombstones, boolean allowReadFromHDFS)
+                                       boolean access, boolean allowTombstones)
       throws EntryNotFoundException, PrimaryBucketException,
       ForceReattemptException, PRLocallyDestroyedException
   {
@@ -2181,12 +2191,7 @@ public class PartitionedRegionDataStore implements HasCachePerfStats
     EntrySnapshot res = null;
     RegionEntry ent = null;
     try {
-      if (allowReadFromHDFS) {
-        ent = bucketRegion.entries.getEntry(key);
-      }
-      else {
-        ent = bucketRegion.entries.getOperationalEntryInVM(key);
-      }
+      ent = bucketRegion.entries.getEntry(key);
 
       if (ent == null) {
         this.getPartitionedRegion().checkReadiness();
@@ -2296,14 +2301,8 @@ public class PartitionedRegionDataStore implements HasCachePerfStats
     try{
       if (r != null) {
         Set keys = r.keySet(allowTombstones);
-        if (getPartitionedRegion().isHDFSReadWriteRegion()) {
-          // hdfs regions can't copy all keys into memory
-          ret = keys;
-
-        } else  { 
         // A copy is made so that the bucket is free to move
         ret = new HashSet(r.keySet(allowTombstones));
-		}
         checkIfBucketMoved(r);
       }
     }

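The data-store hunk keeps the two-overload shape of getLocally: the short form still forwards to the long form, now with one fewer boolean since allowReadFromHDFS is gone. A self-contained sketch of that forwarding pattern follows; LocalReader and its placeholder return value are illustrative only.

    // Illustrative analogue of the getLocally overload pair after the flag removal.
    final class LocalReader {
      // Convenience overload: callers that do not care about scope get the default.
      Object getLocally(int bucketId, Object key) {
        return getLocally(bucketId, key, /* opScopeIsLocal */ false);
      }

      // Full overload: one flag fewer than before, everything else unchanged.
      Object getLocally(int bucketId, Object key, boolean opScopeIsLocal) {
        // Bucket lookup elided; return a placeholder so the sketch runs.
        return "bucket " + bucketId + " -> " + key + " (localScope=" + opScopeIsLocal + ")";
      }

      public static void main(String[] args) {
        System.out.println(new LocalReader().getLocally(3, "k1"));
      }
    }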
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDataView.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDataView.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDataView.java
index f083268..de1f7d8 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDataView.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDataView.java
@@ -65,12 +65,19 @@ public class PartitionedRegionDataView extends LocalRegionDataView {
   }
 
   @Override
-  public Object findObject(KeyInfo key, LocalRegion r, boolean isCreate,
-      boolean generateCallbacks, Object value, boolean disableCopyOnRead,
-      boolean preferCD, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) {
+  public Object findObject(KeyInfo key,
+                           LocalRegion r,
+                           boolean isCreate,
+                           boolean generateCallbacks,
+                           Object value,
+                           boolean disableCopyOnRead,
+                           boolean preferCD,
+                           ClientProxyMembershipID requestingClient,
+                           EntryEventImpl clientEvent,
+                           boolean returnTombstones) {
     TXStateProxy tx = r.cache.getTXMgr().internalSuspend();
     try {
-      return r.findObjectInSystem(key, isCreate, tx, generateCallbacks, value, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, allowReadFromHDFS);
+      return r.findObjectInSystem(key, isCreate, tx, generateCallbacks, value, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones);
     } finally {
       r.cache.getTXMgr().resume(tx);
     }
@@ -82,10 +89,14 @@ public class PartitionedRegionDataView extends LocalRegionDataView {
     return pr.nonTXContainsKey(keyInfo);
   }
   @Override
-  public Object getSerializedValue(LocalRegion localRegion, KeyInfo keyInfo, boolean doNotLockEntry, ClientProxyMembershipID requestingClient,
-  EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) throws DataLocationException {
+  public Object getSerializedValue(LocalRegion localRegion,
+                                   KeyInfo keyInfo,
+                                   boolean doNotLockEntry,
+                                   ClientProxyMembershipID requestingClient,
+                                   EntryEventImpl clientEvent,
+                                   boolean returnTombstones) throws DataLocationException {
     PartitionedRegion pr = (PartitionedRegion)localRegion;
-    return pr.getDataStore().getSerializedLocally(keyInfo, doNotLockEntry, requestingClient, clientEvent, returnTombstones, allowReadFromHDFS);
+    return pr.getDataStore().getSerializedLocally(keyInfo, doNotLockEntry, requestingClient, clientEvent, returnTombstones);
   }
   @Override
   public boolean putEntryOnRemote(EntryEventImpl event, boolean ifNew,
@@ -118,7 +129,7 @@ public class PartitionedRegionDataView extends LocalRegionDataView {
       boolean allowTombstones) throws DataLocationException {
     PartitionedRegion pr = (PartitionedRegion)localRegion;
     return pr.getDataStore().getEntryLocally(keyInfo.getBucketId(),
-        keyInfo.getKey(), false, allowTombstones, true);
+        keyInfo.getKey(), false, allowTombstones);
   }
 
   @Override

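PartitionedRegionDataView.findObject keeps its suspend/resume discipline around the non-transactional lookup; only the HDFS flag drops out of the forwarded call. Below is a self-contained sketch of that try/finally pattern; TxManager and TxSuspendingReader are stand-ins for Geode's transaction manager, not its API.

    // Illustrative analogue of suspending the current transaction for the duration
    // of a non-transactional read and always resuming it, as findObject does above.
    final class TxSuspendingReader {
      interface TxManager {
        Object suspend();          // returns the suspended tx handle (may be null)
        void resume(Object tx);    // restores the handle; no-op if null
      }

      Object findOutsideTx(TxManager txMgr, java.util.concurrent.Callable<Object> read) throws Exception {
        Object suspended = txMgr.suspend();
        try {
          return read.call();      // runs with no transaction bound to this thread
        } finally {
          txMgr.resume(suspended); // restore the caller's transaction even on failure
        }
      }
    }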
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ProxyRegionMap.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ProxyRegionMap.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ProxyRegionMap.java
index f0a6543..74c134b 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ProxyRegionMap.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ProxyRegionMap.java
@@ -626,27 +626,6 @@ final class ProxyRegionMap implements RegionMap {
     }
 
     @Override
-    public boolean isMarkedForEviction() {
-      throw new UnsupportedOperationException(LocalizedStrings
-          .ProxyRegionMap_NO_ENTRY_SUPPORT_ON_REGIONS_WITH_DATAPOLICY_0
-              .toLocalizedString(DataPolicy.EMPTY));
-    }
-
-    @Override
-    public void setMarkedForEviction() {
-      throw new UnsupportedOperationException(LocalizedStrings
-          .ProxyRegionMap_NO_ENTRY_SUPPORT_ON_REGIONS_WITH_DATAPOLICY_0
-              .toLocalizedString(DataPolicy.EMPTY));
-    }
-
-    @Override
-    public void clearMarkedForEviction() {
-      throw new UnsupportedOperationException(LocalizedStrings
-          .ProxyRegionMap_NO_ENTRY_SUPPORT_ON_REGIONS_WITH_DATAPOLICY_0
-              .toLocalizedString(DataPolicy.EMPTY));
-    }
-
-    @Override
     public boolean isValueNull() {
       throw new UnsupportedOperationException(LocalizedStrings.ProxyRegionMap_NO_ENTRY_SUPPORT_ON_REGIONS_WITH_DATAPOLICY_0.toLocalizedString(DataPolicy.EMPTY));
     }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionEntry.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionEntry.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionEntry.java
index 5838ead..bedbf81 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionEntry.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionEntry.java
@@ -35,7 +35,6 @@ import com.gemstone.gemfire.internal.offheap.StoredObject;
 import com.gemstone.gemfire.internal.offheap.annotations.Released;
 import com.gemstone.gemfire.internal.offheap.annotations.Retained;
 import com.gemstone.gemfire.internal.offheap.annotations.Unretained;
-import com.gemstone.gemfire.cache.EvictionCriteria;
 
 /**
  * Internal interface for a region entry.
@@ -415,25 +414,6 @@ public interface RegionEntry {
   public void setUpdateInProgress(final boolean underUpdate);
 
   /**
-   * Returns true if this entry has been marked for eviction for custom eviction
-   * via {@link EvictionCriteria}.
-   */
-  public boolean isMarkedForEviction();
-
-  /**
-   * Marks this entry for eviction by custom eviction via
-   * {@link EvictionCriteria}.
-   */
-  public void setMarkedForEviction();
-
-  /**
-   * Clears this entry as for eviction by custom eviction via
-   * {@link EvictionCriteria} or when an update is done after it was marked for
-   * eviction.
-   */
-  public void clearMarkedForEviction();
-
-  /**
    * Event containing this RegionEntry is being passed through
    * dispatchListenerEvent for CacheListeners under RegionEntry lock. This is
    * used during deserialization for a VMCacheSerializable value contained by

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionMapFactory.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionMapFactory.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionMapFactory.java
index 2a7f0c4..7a97408 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionMapFactory.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionMapFactory.java
@@ -39,12 +39,6 @@ class RegionMapFactory {
     //.getDataPolicy().withPartitioning());
     if (owner.isProxy() /*|| owner instanceof PartitionedRegion*/) { // TODO enabling this causes eviction tests to fail
       return new ProxyRegionMap(owner, attrs, internalRegionArgs);
-    } else if (internalRegionArgs.isReadWriteHDFSRegion()) {
-      if (owner.getEvictionController() == null) {
-        return new HDFSRegionMapImpl(owner, attrs, internalRegionArgs);
-      }
-      return new HDFSLRURegionMap(owner, attrs, internalRegionArgs);
-    //else if (owner.getEvictionController() != null && isNotPartitionedRegion) {
     } else if (owner.getEvictionController() != null ) {
       return new VMLRURegionMap(owner, attrs,internalRegionArgs);
     } else {

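With the HDFS branches removed, RegionMapFactory reduces to a three-way choice: proxy regions get a proxy map, regions with an eviction controller get the LRU map, and everything else gets the plain map. A self-contained sketch of that decision is below; the class names are illustrative, not the real map implementations.

    // Illustrative analogue of the slimmed-down RegionMapFactory decision.
    final class MapFactorySketch {
      interface RegionMapLike {}
      static final class ProxyMap implements RegionMapLike {}
      static final class LruMap   implements RegionMapLike {}
      static final class PlainMap implements RegionMapLike {}

      static RegionMapLike createRegionMap(boolean isProxy, boolean hasEvictionController) {
        if (isProxy) {
          return new ProxyMap();   // empty data policy: nothing is stored locally
        } else if (hasEvictionController) {
          return new LruMap();     // eviction-aware map
        } else {
          return new PlainMap();   // default map
        }
      }

      public static void main(String[] args) {
        System.out.println(createRegionMap(false, true).getClass().getSimpleName()); // LruMap
      }
    }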
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RemoteGetMessage.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RemoteGetMessage.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RemoteGetMessage.java
index c754339..b565a2c 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RemoteGetMessage.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RemoteGetMessage.java
@@ -122,7 +122,7 @@ public final class RemoteGetMessage extends RemoteOperationMessageWithDirectRepl
           ((KeyWithRegionContext)this.key).setRegionContext(r);
         }
         KeyInfo keyInfo = r.getKeyInfo(key, cbArg);
-        val = r.getDataView().getSerializedValue(r, keyInfo, false, this.context, null, false, false/*for replicate regions*/);
+        val = r.getDataView().getSerializedValue(r, keyInfo, false, this.context, null, false /*for replicate regions*/);
         valueBytes = val instanceof RawValue ? (RawValue)val : new RawValue(val);
 
         if (logger.isTraceEnabled(LogMarker.DM)) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXEntry.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXEntry.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXEntry.java
index 983f928..2906ff6 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXEntry.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXEntry.java
@@ -113,7 +113,8 @@ public class TXEntry implements Region.Entry
   {
     checkTX();
 //    Object value = this.localRegion.getDeserialized(this.key, false, this.myTX, this.rememberReads);
-    @Unretained Object value = this.myTX.getDeserializedValue(keyInfo, this.localRegion, false, false, false, null, false, false, false);
+    @Unretained Object value = this.myTX.getDeserializedValue(keyInfo, this.localRegion, false, false, false, null, false,
+      false);
     if (value == null) {
       throw new EntryDestroyedException(this.keyInfo.getKey().toString());
     }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXState.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXState.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXState.java
index a67d3cc..617873c 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXState.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXState.java
@@ -1407,7 +1407,14 @@ public class TXState implements TXStateInterface {
   /* (non-Javadoc)
    * @see com.gemstone.gemfire.internal.cache.TXStateInterface#getDeserializedValue(java.lang.Object, com.gemstone.gemfire.internal.cache.LocalRegion, boolean)
    */
-  public Object getDeserializedValue(KeyInfo keyInfo, LocalRegion localRegion, boolean updateStats, boolean disableCopyOnRead, boolean preferCD, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS, boolean retainResult) {
+  public Object getDeserializedValue(KeyInfo keyInfo,
+                                     LocalRegion localRegion,
+                                     boolean updateStats,
+                                     boolean disableCopyOnRead,
+                                     boolean preferCD,
+                                     EntryEventImpl clientEvent,
+                                     boolean returnTombstones,
+                                     boolean retainResult) {
     TXEntryState tx = txReadEntry(keyInfo, localRegion, true, true/*create txEntry is absent*/);
     if (tx != null) {
       Object v = tx.getValue(keyInfo, localRegion, preferCD);
@@ -1416,7 +1423,8 @@ public class TXState implements TXStateInterface {
       }
       return v;
     } else {
-      return localRegion.getDeserializedValue(null, keyInfo, updateStats, disableCopyOnRead, preferCD, clientEvent, returnTombstones, allowReadFromHDFS, retainResult);
+      return localRegion.getDeserializedValue(null, keyInfo, updateStats, disableCopyOnRead, preferCD, clientEvent, returnTombstones,
+        retainResult);
     }
   }
 
@@ -1425,15 +1433,19 @@ public class TXState implements TXStateInterface {
    * @see com.gemstone.gemfire.internal.cache.InternalDataView#getSerializedValue(com.gemstone.gemfire.internal.cache.LocalRegion, java.lang.Object, java.lang.Object)
    */
   @Retained
-  public Object getSerializedValue(LocalRegion localRegion, KeyInfo keyInfo, boolean doNotLockEntry, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, 
-      boolean returnTombstones, boolean allowReadFromHDFS) throws DataLocationException {
+  public Object getSerializedValue(LocalRegion localRegion,
+                                   KeyInfo keyInfo,
+                                   boolean doNotLockEntry,
+                                   ClientProxyMembershipID requestingClient,
+                                   EntryEventImpl clientEvent,
+                                   boolean returnTombstones) throws DataLocationException {
     final Object key = keyInfo.getKey();
     TXEntryState tx = txReadEntry(keyInfo, localRegion, true,true/*create txEntry is absent*/);
     if (tx != null) {
       Object val = tx.getPendingValue();
       if(val==null || Token.isInvalidOrRemoved(val)) {
         val = findObject(keyInfo,localRegion, val!=Token.INVALID,
-            true, val, false, false, requestingClient, clientEvent, false, allowReadFromHDFS);
+            true, val, false, false, requestingClient, clientEvent, false);
       }
       return val;
     } else {
@@ -1441,7 +1453,7 @@ public class TXState implements TXStateInterface {
       // so we should never come here
       assert localRegion instanceof PartitionedRegion;
       PartitionedRegion pr = (PartitionedRegion)localRegion;
-      return pr.getDataStore().getSerializedLocally(keyInfo, doNotLockEntry, null, null, returnTombstones, allowReadFromHDFS);
+      return pr.getDataStore().getSerializedLocally(keyInfo, doNotLockEntry, null, null, returnTombstones);
     }
   }
 
@@ -1519,9 +1531,17 @@ public class TXState implements TXStateInterface {
   /* (non-Javadoc)
    * @see com.gemstone.gemfire.internal.cache.TXStateInterface#findObject(com.gemstone.gemfire.internal.cache.LocalRegion, java.lang.Object, java.lang.Object, boolean, boolean, java.lang.Object)
    */
-  public Object findObject(KeyInfo key, LocalRegion r, boolean isCreate,
-      boolean generateCallbacks, Object value, boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) {
-    return r.findObjectInSystem(key, isCreate, this, generateCallbacks, value, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, allowReadFromHDFS);
+  public Object findObject(KeyInfo key,
+                           LocalRegion r,
+                           boolean isCreate,
+                           boolean generateCallbacks,
+                           Object value,
+                           boolean disableCopyOnRead,
+                           boolean preferCD,
+                           ClientProxyMembershipID requestingClient,
+                           EntryEventImpl clientEvent,
+                           boolean returnTombstones) {
+    return r.findObjectInSystem(key, isCreate, this, generateCallbacks, value, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones);
   }
 
   private boolean readEntryAndCheckIfDestroyed(KeyInfo keyInfo, LocalRegion localRegion,

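TXState's read methods keep their ordering: consult the transaction's own entry first and fall back to the underlying region only when the transaction holds nothing usable. The sketch below is a self-contained, map-backed analogue of that tx-first read; TxFirstReader and its two maps stand in for the TX entry table and the region.

    import java.util.HashMap;
    import java.util.Map;

    // Illustrative analogue of the tx-first read in TXState.getDeserializedValue:
    // values written inside the transaction win; otherwise the committed value is used.
    final class TxFirstReader {
      private final Map<String, Object> txView = new HashMap<>(); // uncommitted writes
      private final Map<String, Object> region = new HashMap<>(); // committed data

      void txPut(String key, Object value)        { txView.put(key, value); }
      void committedPut(String key, Object value) { region.put(key, value); }

      Object getDeserializedValue(String key) {
        Object v = txView.get(key);
        return (v != null) ? v : region.get(key); // fall back to the region
      }

      public static void main(String[] args) {
        TxFirstReader r = new TxFirstReader();
        r.committedPut("k", "committed");
        System.out.println(r.getDeserializedValue("k")); // committed
        r.txPut("k", "in-tx");
        System.out.println(r.getDeserializedValue("k")); // in-tx
      }
    }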
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateInterface.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateInterface.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateInterface.java
index 5da20d8..3fa9351 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateInterface.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateInterface.java
@@ -123,8 +123,14 @@ public interface TXStateInterface extends Synchronization, InternalDataView {
    * @param localRegion
    * @param updateStats TODO
    */
-  public Object getDeserializedValue(KeyInfo keyInfo, LocalRegion localRegion,
-      boolean updateStats, boolean disableCopyOnRead, boolean preferCD, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadsFromHDFS, boolean retainResult);
+  public Object getDeserializedValue(KeyInfo keyInfo,
+                                     LocalRegion localRegion,
+                                     boolean updateStats,
+                                     boolean disableCopyOnRead,
+                                     boolean preferCD,
+                                     EntryEventImpl clientEvent,
+                                     boolean returnTombstones,
+                                     boolean retainResult);
 
   public TXEvent getEvent();
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateProxyImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateProxyImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateProxyImpl.java
index e66302e..0939ab0 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateProxyImpl.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateProxyImpl.java
@@ -341,9 +341,16 @@ public class TXStateProxyImpl implements TXStateProxy {
   /* (non-Javadoc)
    * @see com.gemstone.gemfire.internal.cache.TXStateInterface#getDeserializedValue(java.lang.Object, com.gemstone.gemfire.internal.cache.LocalRegion, boolean)
    */
-  public Object getDeserializedValue(KeyInfo keyInfo, LocalRegion localRegion,
-      boolean updateStats, boolean disableCopyOnRead, boolean preferCD, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS, boolean retainResult) {
-    Object val = getRealDeal(keyInfo, localRegion).getDeserializedValue(keyInfo, localRegion, updateStats, disableCopyOnRead, preferCD, null, false, allowReadFromHDFS, retainResult);
+  public Object getDeserializedValue(KeyInfo keyInfo,
+                                     LocalRegion localRegion,
+                                     boolean updateStats,
+                                     boolean disableCopyOnRead,
+                                     boolean preferCD,
+                                     EntryEventImpl clientEvent,
+                                     boolean returnTombstones,
+                                     boolean retainResult) {
+    Object val = getRealDeal(keyInfo, localRegion).getDeserializedValue(keyInfo, localRegion, updateStats, disableCopyOnRead, preferCD, null, false,
+      retainResult);
     if (val != null) {
       // fixes bug 51057: TXStateStub  on client always returns null, so do not increment
       // the operation count it will be incremented in findObject()
@@ -599,13 +606,13 @@ public class TXStateProxyImpl implements TXStateProxy {
    * @see com.gemstone.gemfire.internal.cache.InternalDataView#findObject(com.gemstone.gemfire.internal.cache.LocalRegion, java.lang.Object, java.lang.Object, boolean, boolean, java.lang.Object)
    */
   public Object findObject(KeyInfo key, LocalRegion r, boolean isCreate,
-      boolean generateCallbacks, Object value, boolean disableCopyOnRead,
-      boolean preferCD, ClientProxyMembershipID requestingClient,
-      EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) {
+                           boolean generateCallbacks, Object value, boolean disableCopyOnRead,
+                           boolean preferCD, ClientProxyMembershipID requestingClient,
+                           EntryEventImpl clientEvent, boolean returnTombstones) {
     try {
       this.operationCount++;
       Object retVal = getRealDeal(key, r).findObject(key, r, isCreate, generateCallbacks,
-          value, disableCopyOnRead, preferCD, requestingClient, clientEvent, false, allowReadFromHDFS);
+          value, disableCopyOnRead, preferCD, requestingClient, clientEvent, false);
       trackBucketForTx(key);
       return retVal;
     } catch (TransactionDataRebalancedException | PrimaryBucketException re) {
@@ -720,9 +727,14 @@ public class TXStateProxyImpl implements TXStateProxy {
    * (non-Javadoc)
    * @see com.gemstone.gemfire.internal.cache.InternalDataView#getSerializedValue(com.gemstone.gemfire.internal.cache.LocalRegion, java.lang.Object, java.lang.Object)
    */
-  public Object getSerializedValue(LocalRegion localRegion, KeyInfo key, boolean doNotLockEntry, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) throws DataLocationException {
+  public Object getSerializedValue(LocalRegion localRegion,
+                                   KeyInfo key,
+                                   boolean doNotLockEntry,
+                                   ClientProxyMembershipID requestingClient,
+                                   EntryEventImpl clientEvent,
+                                   boolean returnTombstones) throws DataLocationException {
     this.operationCount++;
-    return getRealDeal(key, localRegion).getSerializedValue(localRegion, key, doNotLockEntry, requestingClient, clientEvent, returnTombstones, allowReadFromHDFS);
+    return getRealDeal(key, localRegion).getSerializedValue(localRegion, key, doNotLockEntry, requestingClient, clientEvent, returnTombstones);
   }
 
   /* (non-Javadoc)

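TXStateProxyImpl stays a thin counting proxy: each read bumps operationCount and then delegates to the real TXState obtained via getRealDeal. A self-contained sketch of that shape follows; CountingTxProxy and TxView are illustrative types, not the Geode interfaces.

    // Illustrative analogue of the counting-proxy pattern in TXStateProxyImpl.
    final class CountingTxProxy {
      interface TxView { Object find(Object key); }

      private final TxView realDeal;   // stands in for getRealDeal(key, region)
      private int operationCount;

      CountingTxProxy(TxView realDeal) { this.realDeal = realDeal; }

      Object findObject(Object key) {
        operationCount++;              // count the operation, as the proxy does
        return realDeal.find(key);     // then delegate to the real transaction state
      }

      int operationCount() { return operationCount; }

      public static void main(String[] args) {
        CountingTxProxy proxy = new CountingTxProxy(key -> "value-for-" + key);
        System.out.println(proxy.findObject("k"));  // value-for-k
        System.out.println(proxy.operationCount()); // 1
      }
    }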
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateStub.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateStub.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateStub.java
index ac35425..0b226e0 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateStub.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateStub.java
@@ -184,8 +184,14 @@ public abstract class TXStateStub implements TXStateInterface {
   /* (non-Javadoc)
    * @see com.gemstone.gemfire.internal.cache.TXStateInterface#getDeserializedValue(java.lang.Object, com.gemstone.gemfire.internal.cache.LocalRegion, boolean)
    */
-  public Object getDeserializedValue(KeyInfo keyInfo, LocalRegion localRegion,
-      boolean updateStats, boolean disableCopyOnRead, boolean preferCD, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS,  boolean retainResult) {
+  public Object getDeserializedValue(KeyInfo keyInfo,
+                                     LocalRegion localRegion,
+                                     boolean updateStats,
+                                     boolean disableCopyOnRead,
+                                     boolean preferCD,
+                                     EntryEventImpl clientEvent,
+                                     boolean returnTombstones,
+                                     boolean retainResult) {
     // We never have a local value if we are a stub...
     return null;
   }
@@ -373,10 +379,17 @@ public abstract class TXStateStub implements TXStateInterface {
   /* (non-Javadoc)
    * @see com.gemstone.gemfire.internal.cache.InternalDataView#findObject(com.gemstone.gemfire.internal.cache.LocalRegion, java.lang.Object, java.lang.Object, boolean, boolean, java.lang.Object)
    */
-  public Object findObject(KeyInfo keyInfo, LocalRegion r, boolean isCreate,
-      boolean generateCallbacks, Object value, boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient,
-      EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) {
-    return getTXRegionStub(r).findObject(keyInfo,isCreate,generateCallbacks,value, preferCD, requestingClient, clientEvent, allowReadFromHDFS);
+  public Object findObject(KeyInfo keyInfo,
+                           LocalRegion r,
+                           boolean isCreate,
+                           boolean generateCallbacks,
+                           Object value,
+                           boolean disableCopyOnRead,
+                           boolean preferCD,
+                           ClientProxyMembershipID requestingClient,
+                           EntryEventImpl clientEvent,
+                           boolean returnTombstones) {
+    return getTXRegionStub(r).findObject(keyInfo,isCreate,generateCallbacks,value, preferCD, requestingClient, clientEvent);
   }
 
   /* (non-Javadoc)
@@ -432,7 +445,12 @@ public abstract class TXStateStub implements TXStateInterface {
    * (non-Javadoc)
    * @see com.gemstone.gemfire.internal.cache.InternalDataView#getSerializedValue(com.gemstone.gemfire.internal.cache.LocalRegion, java.lang.Object, java.lang.Object)
    */
-  public Object getSerializedValue(LocalRegion localRegion, KeyInfo key, boolean doNotLockEntry, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) {
+  public Object getSerializedValue(LocalRegion localRegion,
+                                   KeyInfo key,
+                                   boolean doNotLockEntry,
+                                   ClientProxyMembershipID requestingClient,
+                                   EntryEventImpl clientEvent,
+                                   boolean returnTombstones) {
     throw new UnsupportedOperationException();
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/UserSpecifiedRegionAttributes.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/UserSpecifiedRegionAttributes.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/UserSpecifiedRegionAttributes.java
index a17650c..269f891 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/UserSpecifiedRegionAttributes.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/UserSpecifiedRegionAttributes.java
@@ -114,10 +114,6 @@ public abstract class UserSpecifiedRegionAttributes<K,V> implements RegionAttrib
    */
   private boolean hasCloningEnabled = false;
   
-  private boolean hasHDFSStoreName = false;
-  
-  private boolean hasHDFSWriteOnly = false;
-  
 /**
    * Whether this region has entry value compression.
    * 
@@ -526,7 +522,7 @@ public abstract class UserSpecifiedRegionAttributes<K,V> implements RegionAttrib
   {
     this.hasDiskSynchronous = val;
   }
-  private static final int HAS_COUNT = 43;
+  private static final int HAS_COUNT = 41;
   
   public void initHasFields(UserSpecifiedRegionAttributes<K,V> other)
   {
@@ -602,22 +598,4 @@ public abstract class UserSpecifiedRegionAttributes<K,V> implements RegionAttrib
   public List getIndexes() {
     return this.indexes;
   }
-
-  public boolean hasHDFSStoreName()
-  {
-    return this.hasHDFSStoreName;
-  }
-  public void setHasHDFSStoreName(boolean val)
-  {
-    this.hasHDFSStoreName = val;
-  }
-  
-  public void setHasHDFSWriteOnly(boolean val)
-  {
-    this.hasHDFSWriteOnly = val;
-  }
-  public boolean hasHDFSWriteOnly()
-  {
-    return this.hasHDFSWriteOnly;
-  }
 }
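
The HAS_COUNT change above (43 to 41) matches the two deleted hasHDFS* flags: the constant appears to track the number of boolean "has" fields that the class copies between attribute instances, so removing two flags decrements it by two. A minimal, hypothetical sketch of that bookkeeping (the class and field names below are illustrative, not the Geode source):

import java.lang.reflect.Field;
import java.lang.reflect.Modifier;

// Toy analogue of the "has" bookkeeping in UserSpecifiedRegionAttributes:
// one boolean per user-specified attribute, plus a count that must stay in sync.
class HasFlags {
  private boolean hasDataPolicy;
  private boolean hasDiskSynchronous;
  private boolean hasCloningEnabled;
  private static final int HAS_COUNT = 3;   // drops when a has* field is removed

  // Count the instance boolean fields named has*; HAS_COUNT must equal this.
  static int countHasFields() {
    int n = 0;
    for (Field f : HasFlags.class.getDeclaredFields()) {
      if (!Modifier.isStatic(f.getModifiers())
          && f.getType() == boolean.class
          && f.getName().startsWith("has")) {
        n++;
      }
    }
    return n;
  }

  public static void main(String[] args) {
    if (countHasFields() != HAS_COUNT) {
      throw new IllegalStateException("HAS_COUNT out of date: " + countHasFields());
    }
    System.out.println("HAS_COUNT matches: " + HAS_COUNT);
  }
}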

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ValidatingDiskRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ValidatingDiskRegion.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ValidatingDiskRegion.java
index f587e39..54133cc 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ValidatingDiskRegion.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ValidatingDiskRegion.java
@@ -408,19 +408,6 @@ public class ValidatingDiskRegion extends DiskRegion implements DiskRecoveryStor
       // TODO Auto-generated method stub
     }
     @Override
-    public boolean isMarkedForEviction() {
-      // TODO Auto-generated method stub
-      return false;
-    }
-    @Override
-    public void setMarkedForEviction() {
-      // TODO Auto-generated method stub
-    }
-    @Override
-    public void clearMarkedForEviction() {
-      // TODO Auto-generated method stub
-    }
-    @Override
     public boolean isInvalid() {
       // TODO Auto-generated method stub
       return false;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/FetchBulkEntriesMessage.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/FetchBulkEntriesMessage.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/FetchBulkEntriesMessage.java
index d3078a9..ea47e91 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/FetchBulkEntriesMessage.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/FetchBulkEntriesMessage.java
@@ -299,7 +299,7 @@ public final class FetchBulkEntriesMessage extends PartitionMessage
             Object key = it.next();
             VersionTagHolder clientEvent = new VersionTagHolder();
             Object value = map.get(key, null, true, true, true, null,
-                clientEvent, allowTombstones, false);
+                clientEvent, allowTombstones);
 
             if (needToWriteBucketInfo) {
               DataSerializer.writePrimitiveInt(map.getId(), mos);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/GetMessage.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/GetMessage.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/GetMessage.java
index d7e50f1..3fef790 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/GetMessage.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/GetMessage.java
@@ -93,11 +93,9 @@ public final class GetMessage extends PartitionMessageWithDirectReply
   
   private boolean returnTombstones;
 
-  private boolean allowReadFromHDFS;
   // reuse some flags
   protected static final int HAS_LOADER = NOTIFICATION_ONLY;
   protected static final int CAN_START_TX = IF_NEW;
-  protected static final int READ_FROM_HDFS = IF_OLD;
 
   /**
    * Empty constructor to satisfy {@link DataSerializer} requirements
@@ -106,15 +104,14 @@ public final class GetMessage extends PartitionMessageWithDirectReply
   }
   
   private GetMessage(InternalDistributedMember recipient, int regionId,
-      DirectReplyProcessor processor,
-      final Object key, final Object aCallbackArgument, ClientProxyMembershipID context,
-      boolean returnTombstones, boolean allowReadFromHDFS) {
+                     DirectReplyProcessor processor,
+                     final Object key, final Object aCallbackArgument, ClientProxyMembershipID context,
+                     boolean returnTombstones) {
     super(recipient, regionId, processor);
     this.key = key;
     this.cbArg = aCallbackArgument;
     this.context = context;
     this.returnTombstones = returnTombstones;
-	this.allowReadFromHDFS = allowReadFromHDFS;
   }
 
   private static final boolean ORDER_PR_GETS = Boolean.getBoolean("gemfire.order-pr-gets");
@@ -191,7 +188,7 @@ public final class GetMessage extends PartitionMessageWithDirectReply
         KeyInfo keyInfo = r.getKeyInfo(key, cbArg);
         boolean lockEntry = forceUseOfPRExecutor || isDirectAck();
         
-        val = r.getDataView().getSerializedValue(r, keyInfo, !lockEntry, this.context, event, returnTombstones, allowReadFromHDFS);
+        val = r.getDataView().getSerializedValue(r, keyInfo, !lockEntry, this.context, event, returnTombstones);
         
         if(val == BucketRegion.REQUIRES_ENTRY_LOCK) {
           Assert.assertTrue(!lockEntry);
@@ -272,14 +269,12 @@ public final class GetMessage extends PartitionMessageWithDirectReply
   @Override
   protected short computeCompressedShort(short s) {
     s = super.computeCompressedShort(s);
-    if (this.allowReadFromHDFS) s |= READ_FROM_HDFS;
     return s;
   }
 
   @Override
   protected void setBooleans(short s, DataInput in) throws ClassNotFoundException, IOException {
     super.setBooleans(s, in);
-    if ((s & READ_FROM_HDFS) != 0) this.allowReadFromHDFS = true;
   }
 
   public void setKey(Object key)
@@ -303,15 +298,18 @@ public final class GetMessage extends PartitionMessageWithDirectReply
    * @throws ForceReattemptException if the peer is no longer available
    */
   public static GetResponse send(InternalDistributedMember recipient,
-      PartitionedRegion r, final Object key, final Object aCallbackArgument,
-      ClientProxyMembershipID requestingClient, boolean returnTombstones, boolean allowReadFromHDFS)
+                                 PartitionedRegion r,
+                                 final Object key,
+                                 final Object aCallbackArgument,
+                                 ClientProxyMembershipID requestingClient,
+                                 boolean returnTombstones)
       throws ForceReattemptException
   {
     Assert.assertTrue(recipient != null,
         "PRDistribuedGetReplyMessage NULL reply message");
     GetResponse p = new GetResponse(r.getSystem(), Collections.singleton(recipient), key);
     GetMessage m = new GetMessage(recipient, r.getPRId(), p,
-        key, aCallbackArgument, requestingClient, returnTombstones, allowReadFromHDFS);
+        key, aCallbackArgument, requestingClient, returnTombstones);
     Set failures = r.getDistributionManager().putOutgoing(m);
     if (failures != null && failures.size() > 0) {
       throw new ForceReattemptException(LocalizedStrings.GetMessage_FAILED_SENDING_0.toLocalizedString(m));

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PutAllPRMessage.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PutAllPRMessage.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PutAllPRMessage.java
index a88f96f..8aaf587 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PutAllPRMessage.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PutAllPRMessage.java
@@ -101,9 +101,8 @@ public final class PutAllPRMessage extends PartitionMessageWithDirectReply
 
   protected static final short HAS_BRIDGE_CONTEXT = UNRESERVED_FLAGS_START;
   protected static final short SKIP_CALLBACKS = (HAS_BRIDGE_CONTEXT << 1);
-  protected static final short FETCH_FROM_HDFS = (SKIP_CALLBACKS << 1);
   //using the left most bit for IS_PUT_DML, the last available bit
-  protected static final short IS_PUT_DML = (short) (FETCH_FROM_HDFS << 1);
+  protected static final short IS_PUT_DML = (short) (SKIP_CALLBACKS << 1);
 
   private transient InternalDistributedSystem internalDs;
 
@@ -118,9 +117,6 @@ public final class PutAllPRMessage extends PartitionMessageWithDirectReply
   
   transient VersionedObjectList versions = null;
 
-  /** whether this operation should fetch oldValue from HDFS */
-  private boolean fetchFromHDFS;
-  
   private boolean isPutDML;
   /**
    * Empty constructor to satisfy {@link DataSerializer}requirements
@@ -129,7 +125,7 @@ public final class PutAllPRMessage extends PartitionMessageWithDirectReply
   }
 
   public PutAllPRMessage(int bucketId, int size, boolean notificationOnly,
-      boolean posDup, boolean skipCallbacks, Object callbackArg, boolean fetchFromHDFS, boolean isPutDML) {
+      boolean posDup, boolean skipCallbacks, Object callbackArg, boolean isPutDML) {
     this.bucketId = Integer.valueOf(bucketId);
     putAllPRData = new PutAllEntryData[size];
     this.notificationOnly = notificationOnly;
@@ -137,8 +133,7 @@ public final class PutAllPRMessage extends PartitionMessageWithDirectReply
     this.skipCallbacks = skipCallbacks;
     this.callbackArg = callbackArg;
     initTxMemberId();
-    this.fetchFromHDFS = fetchFromHDFS;
-    this.isPutDML = isPutDML; 
+    this.isPutDML = isPutDML;
   }
 
   public void addEntry(PutAllEntryData entry) {
@@ -307,7 +302,6 @@ public final class PutAllPRMessage extends PartitionMessageWithDirectReply
     s = super.computeCompressedShort(s);
     if (this.bridgeContext != null) s |= HAS_BRIDGE_CONTEXT;
     if (this.skipCallbacks) s |= SKIP_CALLBACKS;
-    if (this.fetchFromHDFS) s |= FETCH_FROM_HDFS;
     if (this.isPutDML) s |= IS_PUT_DML;
     return s;
   }
@@ -317,7 +311,6 @@ public final class PutAllPRMessage extends PartitionMessageWithDirectReply
       ClassNotFoundException {
     super.setBooleans(s, in);
     this.skipCallbacks = ((s & SKIP_CALLBACKS) != 0);
-    this.fetchFromHDFS = ((s & FETCH_FROM_HDFS) != 0);
     this.isPutDML = ((s & IS_PUT_DML) != 0);
   }
 
@@ -495,9 +488,6 @@ public final class PutAllPRMessage extends PartitionMessageWithDirectReply
 
             ev.setPutAllOperation(dpao);
 
-            // set the fetchFromHDFS flag
-            ev.setFetchFromHDFS(this.fetchFromHDFS);
-            
             // make sure a local update inserts a cache de-serializable
             ev.makeSerializedNewValue();
             

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PutMessage.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PutMessage.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PutMessage.java
index d5abaa1..a6a39dc 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PutMessage.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PutMessage.java
@@ -182,9 +182,6 @@ public final class PutMessage extends PartitionMessageWithDirectReply implements
 
   private VersionTag versionTag;
 
-  /** whether this operation should fetch oldValue from HDFS*/
-  private transient boolean fetchFromHDFS;
-
   private transient boolean isPutDML;
   
   // additional bitmask flags used for serialization/deserialization
@@ -208,7 +205,6 @@ public final class PutMessage extends PartitionMessageWithDirectReply implements
   // masks there are taken
   // also switching the masks will impact backwards compatibility. Need to
   // verify if it is ok to break backwards compatibility
-  protected static final int FETCH_FROM_HDFS = getNextByteMask(HAS_CALLBACKARG);  
 
   /*
   private byte[] oldValBytes;
@@ -608,9 +604,6 @@ public final class PutMessage extends PartitionMessageWithDirectReply implements
       this.originalSender = (InternalDistributedMember)DataSerializer
         .readObject(in);
     }
-    if ((extraFlags & FETCH_FROM_HDFS) != 0) {
-      this.fetchFromHDFS = true;
-    }
     this.eventId = new EventID();
     InternalDataSerializer.invokeFromData(this.eventId, in);
     
@@ -697,7 +690,6 @@ public final class PutMessage extends PartitionMessageWithDirectReply implements
       extraFlags |= HAS_DELTA_WITH_FULL_VALUE;
     }
     if (this.originalSender != null) extraFlags |= HAS_ORIGINAL_SENDER;
-    if (this.event.isFetchFromHDFS()) extraFlags |= FETCH_FROM_HDFS;
     out.writeByte(extraFlags);
 
     DataSerializer.writeObject(getKey(), out);
@@ -822,7 +814,6 @@ public final class PutMessage extends PartitionMessageWithDirectReply implements
     ev.setCausedByMessage(this);
     ev.setInvokePRCallbacks(!notificationOnly);
     ev.setPossibleDuplicate(this.posDup);
-	ev.setFetchFromHDFS(this.fetchFromHDFS);
     ev.setPutDML(this.isPutDML);
     /*if (this.hasOldValue) {
       if (this.oldValueIsSerialized) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/ByteComparator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/ByteComparator.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/ByteComparator.java
deleted file mode 100644
index 5c199ae..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/ByteComparator.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.internal.cache.persistence.soplog;
-
-import org.apache.hadoop.hbase.util.Bytes;
-
-import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedReader.SerializedComparator;
-
-/**
- * Compares objects byte-by-byte.  This is fast and sufficient for cases when
- * lexicographic ordering is not important or the serialization is order-
- * preserving. 
- * 
- */
-public class ByteComparator implements SerializedComparator {
-  @Override
-  public int compare(byte[] rhs, byte[] lhs) {
-    return compare(rhs, 0, rhs.length, lhs, 0, lhs.length);
-  }
-
-  @Override
-  public int compare(byte[] r, int rOff, int rLen, byte[] l, int lOff, int lLen) {
-    return compareBytes(r, rOff, rLen, l, lOff, lLen);
-  }
-  
-  /**
-   * Compares two byte arrays element-by-element.
-   * 
-   * @param r the right array
-   * @param rOff the offset of r
-   * @param rLen the length of r to compare
-   * @param l the left array
-   * @param lOff the offset of l
-   * @param lLen the length of l to compare
-   * @return -1 if r < l; 0 if r == l; 1 if r > l
-   */
-  
-  public static int compareBytes(byte[] r, int rOff, int rLen, byte[] l, int lOff, int lLen) {
-    return Bytes.compareTo(r, rOff, rLen, l, lOff, lLen);
-  }
-}
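
The deleted ByteComparator delegated to HBase's Bytes.compareTo for unsigned, lexicographic byte comparison, which is why it could go once the Hadoop/HBase dependency was dropped. For reference, the same ordering can be written without that dependency; this is a sketch of the behavior the removed class provided, not a replacement being added to the tree:

// Unsigned, element-by-element comparison of two byte ranges; negative, zero, or
// positive result, matching the lexicographic ordering Bytes.compareTo produces.
final class PlainByteComparator {
  static int compareBytes(byte[] r, int rOff, int rLen, byte[] l, int lOff, int lLen) {
    int n = Math.min(rLen, lLen);
    for (int i = 0; i < n; i++) {
      int a = r[rOff + i] & 0xFF;   // mask to compare as unsigned values
      int b = l[lOff + i] & 0xFF;
      if (a != b) {
        return a - b;
      }
    }
    return rLen - lLen;             // a prefix sorts before the longer range
  }
}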

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/CursorIterator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/CursorIterator.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/CursorIterator.java
deleted file mode 100644
index dacc208..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/CursorIterator.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.internal.cache.persistence.soplog;
-
-import java.util.Iterator;
-
-/**
- * Provides an {@link Iterator} that allows access to the current iteration
- * element.  The implementor must provide access to the current element
- * as well as a means to move to the next element.
- * 
- *
- * @param <E> the element type
- */
-public interface CursorIterator<E> extends Iterator<E> {
-  /**
-   * Returns the element at the current position.
-   * @return the current element
-   */
-  E current();
-  
-  /**
-   * Provides an iteration cursor by wrapping an {@link Iterator}.
-   *
-   * @param <E> the element type
-   */
-  public static class WrappedIterator<E> implements CursorIterator<E> {
-    /** the underlying iterator */
-    private final Iterator<E> src;
-    
-    /** the current iteration element */
-    private E current;
-    
-    public WrappedIterator(Iterator<E> src) {
-      this.src = src;
-    }
-
-    @Override
-    public boolean hasNext() {
-      return src.hasNext();
-    }
-
-    @Override
-    public E next() {
-      current = src.next();
-      return current;
-    }
-
-    @Override
-    public E current() {
-      return current;
-    }
-    
-    @Override
-    public void remove() {
-      throw new UnsupportedOperationException();
-    }
-    
-    /**
-     * Returns the unwrapped iterator.
-     * @return the iterator
-     */
-    public Iterator<E> unwrap() {
-      return src;
-    }
-  }
-}
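
The deleted CursorIterator added a current() accessor on top of java.util.Iterator, with WrappedIterator adapting any plain iterator so callers could re-read the element the cursor is positioned on. A short, hypothetical usage sketch of that idea (the interface no longer exists in the tree after this commit):

import java.util.Arrays;
import java.util.Iterator;

// Re-creation of the removed pattern: an Iterator that also exposes the current element.
interface Cursor<E> extends Iterator<E> {
  E current();
}

final class WrappedCursor<E> implements Cursor<E> {
  private final Iterator<E> src;
  private E current;

  WrappedCursor(Iterator<E> src) { this.src = src; }

  @Override public boolean hasNext() { return src.hasNext(); }

  @Override public E next() {
    current = src.next();   // remember the element so current() can return it
    return current;
  }

  @Override public E current() { return current; }

  @Override public void remove() { throw new UnsupportedOperationException(); }

  public static void main(String[] args) {
    Cursor<String> c = new WrappedCursor<>(Arrays.asList("a", "b").iterator());
    while (c.hasNext()) {
      c.next();
      System.out.println("at: " + c.current());
    }
  }
}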



[21/63] [abbrv] incubator-geode git commit: GEODE-1059: PRQueryDUnitHelper no longer inherits PartitionedRegionDUnitTestCase class

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitHelper.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitHelper.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitHelper.java
index 680aa0b..84ef866 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitHelper.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitHelper.java
@@ -16,11 +16,12 @@
  */
 package com.gemstone.gemfire.cache.query.partitioned;
 
+import static org.junit.Assert.*;
+
 import java.io.BufferedWriter;
 import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
-import java.io.PrintWriter;
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -66,29 +67,24 @@ import com.gemstone.gemfire.cache.query.QueryService;
 import com.gemstone.gemfire.cache.query.RegionNotFoundException;
 import com.gemstone.gemfire.cache.query.SelectResults;
 import com.gemstone.gemfire.cache.query.data.Portfolio;
-import com.gemstone.gemfire.cache.query.data.PortfolioData;
-import com.gemstone.gemfire.cache.query.data.Position;
 import com.gemstone.gemfire.cache.query.functional.StructSetOrResultsSet;
 import com.gemstone.gemfire.cache.query.internal.index.PartitionedIndex;
-import com.gemstone.gemfire.cache.query.partitioned.PRQueryPerfDUnitTest.ResultsObject;
 import com.gemstone.gemfire.cache.query.types.ObjectType;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
-import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.distributed.internal.ReplyException;
 import com.gemstone.gemfire.internal.cache.ForceReattemptException;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceObserverAdapter;
-import com.gemstone.gemfire.internal.cache.xmlcache.CacheXmlGenerator;
 import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnableIF;
+import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
+import com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase;
 import com.gemstone.gemfire.util.test.TestUtil;
 
-import parReg.query.unittest.NewPortfolio;
 import util.TestException;
 
 /**
@@ -96,30 +92,26 @@ import util.TestException;
  * 
  */
 
-public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
+public class PRQueryDUnitHelper implements Serializable
 {
   /**
    * constructor *
    * 
    * @param name
    */
+  static Cache cache = null;
+  public static void setCache(Cache cache) {
+    PRQueryDUnitHelper.cache = cache;
+  }
 
-  public PRQueryDUnitHelper(String name) {
+  public PRQueryDUnitHelper() {
 
-    super(name);
   }
 
-  final Class valueConstraint = PortfolioData.class;
-
-  /**
-   * This function creates a appropriate region (Local or PR ) given the scope &
-   * the isPR parameters *
-   */
-  public CacheSerializableRunnable getCacheSerializableRunnableForLocalRegionCreation(
-      final String regionName) {
-    return getCacheSerializableRunnableForLocalRegionCreation(regionName, this.valueConstraint);
+  public static Cache getCache() {
+    return cache;
   }
-    
+
   public CacheSerializableRunnable getCacheSerializableRunnableForLocalRegionCreation(
       final String regionName, final Class constraint) {
     SerializableRunnable createPrRegion;
@@ -192,20 +184,9 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
 
     return (CacheSerializableRunnable)createPrRegion;
   }
-  /**
-   * This function creates a Replicated Region using {@link RegionShortcut#REPLICATE}.
-   * 
-   * @param regionName
-   * 
-   * @return cacheSerializable object
-   */
-  public CacheSerializableRunnable getCacheSerializableRunnableForReplicatedRegionCreation(
-      final String regionName) {
-    return getCacheSerializableRunnableForLocalRegionCreation(regionName, this.valueConstraint);
-  }
-    
+
   public CacheSerializableRunnable getCacheSerializableRunnableForReplicatedRegionCreation(
-      final String regionName, final Class constraint) {
+    final String regionName) {
     SerializableRunnable createPrRegion;
     createPrRegion = new CacheSerializableRunnable(regionName) {
       @Override
@@ -246,11 +227,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
    * @return cacheSerializable object
    */
   public CacheSerializableRunnable getCacheSerializableRunnableForPRCreate(
-         final String regionName, final int redundancy) {
-    return getCacheSerializableRunnableForPRCreate(regionName, redundancy, this.valueConstraint);
-  }
-
-  public CacheSerializableRunnable getCacheSerializableRunnableForPRCreate(
     final String regionName, final int redundancy, final Class constraint) {
       
     SerializableRunnable createPrRegion;
@@ -259,8 +235,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
       public void run2() throws CacheException
       {
 
-        //closeCache();
-        disconnectFromDS();
         Cache cache = getCache();
         Region partitionedregion = null;
         AttributesFactory attr = new AttributesFactory();
@@ -297,8 +271,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
         public void run2() throws CacheException
         {
 
-          //closeCache();
-          disconnectFromDS();
           Cache cache = getCache();
           Region partitionedregion = null;
           try {
@@ -339,12 +311,10 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
         public void run2() throws CacheException
         {
 
-          //closeCache();
-          disconnectFromDS();
           Cache cache = getCache();
           Region partitionedregion = null;
           try {
-            cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("diskstore");
+            cache.createDiskStoreFactory().setDiskDirs(JUnit4CacheTestCase.getDiskDirs()).create("diskstore");
             AttributesFactory attr = new AttributesFactory();
             attr.setValueConstraint(constraint);
             attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
@@ -397,8 +367,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
         public void run2() throws CacheException
         {
   
-          //closeCache();
-          //disconnectFromDS();
           Cache cache = getCache();
           Region partitionedregion = null;
           try {
@@ -456,10 +424,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
         Region region = cache.getRegion(regionName);
         for (int j = from; j < to; j++)
           region.put(new Integer(j), portfolio[j]);
-//        getLogWriter()
-//            .info(
-//                "PRQueryDUnitHelper#getCacheSerializableRunnableForPRPuts: Inserted Portfolio data on Region "
-//                    + regionName);
       }
     };
     return (CacheSerializableRunnable)prPuts;
@@ -492,38 +456,22 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
               case 0:
                 // Put operation
                 region.put(new Integer(j), new Portfolio(j));
-//                getLogWriter()
-//                    .info(
-//                        "PRQueryDUnitHelper#getCacheSerializableRunnableForPRPuts: INSERTED Portfolio data for key "
-//                            + j);
                 break;
               case 1:
                 // invalidate
                 if (region.containsKey(new Integer(j))) {
                   region.invalidate(new Integer(j));
-//                  getLogWriter()
-//                      .info(
-//                          "PRQueryDUnitHelper#getCacheSerializableRunnableForPRPuts: INVALIDATED data for key "
-//                              + j);
                 }
                 break;
               case 2:
                 if (region.containsKey(new Integer(j))) {
                   region.destroy(new Integer(j));
-//                  getLogWriter()
-//                      .info(
-//                          "PRQueryDUnitHelper#getCacheSerializableRunnableForPRPuts: DESTROYED Portfolio data for key "
-//                              + j);
                 }
                 break;
               case 3:
 
                 if (!region.containsKey(new Integer(j))) {
                   region.create(new Integer(j), null);
-//                  getLogWriter()
-//                      .info(
-//                          "PRQueryDUnitHelper#getCacheSerializableRunnableForPRPuts: INSERTED Null data for key "
-//                              + j);
                 }
 
                 break;
@@ -544,25 +492,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
     return (CacheSerializableRunnable) prPuts;
   }
 
-  public CacheSerializableRunnable getCacheSerializableRunnableForPRPutsAndDestroy(
-      final String regionName, final int from, final int to) {
-    SerializableRunnable prPuts = new CacheSerializableRunnable("PRPuts") {
-      @Override
-      public void run2() throws CacheException {
-        Cache cache = getCache();
-        Region region = cache.getRegion(regionName);
-        
-        for (int j = from; j < to; j++) {
-          region.put(new Integer(j), new Portfolio(j));
-        }
-        
-        for (int j = from; j < to; j++) {
-          region.destroy(new Integer(j));
-        }
-      }
-    };
-    return (CacheSerializableRunnable) prPuts;
-  }
   /**
    * This function puts portfolio objects into the created Region (PR or Local) *
    * 
@@ -584,10 +513,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
         Region region = cache.getRegion(regionName);
         for (int j = from, i = to ; j < to; j++, i++)
           region.put(new Integer(i), portfolio[j]);
-//        getLogWriter()
-//            .info(
-//                "PRQueryDUnitHelper#getCacheSerializableRunnableForPRPuts: Inserted Portfolio data on Region "
-//                    + regionName);
       }
     };
     return (CacheSerializableRunnable)prPuts;
@@ -612,10 +537,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
         Region region = cache.getRegion(regionName);
         for (int j = from; j < to; j++)
           region.put(portfolio[j], portfolio[j]);
-//        getLogWriter()
-//            .info(
-//                "PRQueryDUnitHelper#getCacheSerializableRunnableForPRPuts: Inserted Portfolio data on Region "
-//                    + regionName);
       }
     };
     return (CacheSerializableRunnable)prPuts;
@@ -703,7 +624,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
         try {
           for (int j = 0; j < queries.length; j++) {
             synchronized (region) {
-              // getCache().getLogger().info("About to execute local query: " + queries[j]);
               if (fullQueryOnPortfolioPositions) {
                 params = new Object[] { local, new Double((j % 25) * 1.0 + 1) };
                 r[j][0] = qs.newQuery(queries[j]).execute(params);
@@ -711,23 +631,15 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
               else {
                 r[j][0] = local.query(queries[j]);
               }
-              // getCache().getLogger().info("Executed local query " + j + ": " + queries[j] + "; on region: " + local.getFullPath() +
-              // "; region size=" + local.size() + "; region values=" + local.values() + ";results=" + r[j][0]);
               if (fullQueryOnPortfolioPositions) {
-//                getCache().getLogger().info("About to execute PR query: " + queries[j]);
                 params = new Object[] { region, new Double((j % 25) * 1.0 + 1) };
                 r[j][1] = qs.newQuery(queries[j]).execute(params);
-//                getCache().getLogger().info("Finished executing PR query: " + queries[j]);
               }
               else {
                 r[j][1] = region.query(queries[j]);
               }
              }
            }
-//          getLogWriter()
-//              .info(
-//                  "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
-
           compareTwoQueryResults(r, queries.length);
         }
         catch (QueryInvocationTargetException e) {
@@ -764,10 +676,10 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
 
         }
         finally {
-          for (int i=0; i<expectedExceptions.length; i++) {
+          for (final String expectedException : expectedExceptions) {
             getCache().getLogger().info(
-              "<ExpectedException action=remove>" + expectedExceptions[i]
-                  + "</ExpectedException>");
+              "<ExpectedException action=remove>" + expectedException
+                + "</ExpectedException>");
           }
         }
 
@@ -841,10 +753,10 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
             QueryInvocationTargetException.class.getName()
         };
 
-        for (int i=0; i<expectedExceptions.length; i++) {
+        for (final String expectedException : expectedExceptions) {
           getCache().getLogger().info(
-              "<ExpectedException action=add>" + expectedExceptions[i]
-                                                                    + "</ExpectedException>");
+            "<ExpectedException action=add>" + expectedException
+              + "</ExpectedException>");
         }
 
         String distinct = "SELECT DISTINCT ";
@@ -863,8 +775,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
               // Execute on remote region.
               qStr = (distinct + queries[j].replace("REGION_NAME", regionName)); 
               r[j][1] = qs.newQuery(qStr).execute();
-
-//              getCache().getLogger().info("Finished executing PR query: " + qStr);
             }
           }
 
@@ -872,7 +782,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
           .info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
 
-          // compareTwoQueryResults(r, queries.length);
           StructSetOrResultsSet ssORrs = new  StructSetOrResultsSet();
           ssORrs.CompareQueryResultsWithoutAndWithIndexes(r, queries.length,queries);
           
@@ -907,10 +816,10 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
 
         }
         finally {
-          for (int i=0; i<expectedExceptions.length; i++) {
+          for (final String expectedException : expectedExceptions) {
             getCache().getLogger().info(
-                "<ExpectedException action=remove>" + expectedExceptions[i]
-                                                                         + "</ExpectedException>");
+              "<ExpectedException action=remove>" + expectedException
+                + "</ExpectedException>");
           }
         }
 
@@ -973,10 +882,10 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
             QueryInvocationTargetException.class.getName()
         };
 
-        for (int i=0; i<expectedExceptions.length; i++) {
+        for (final String expectedException : expectedExceptions) {
           getCache().getLogger().info(
-              "<ExpectedException action=add>" + expectedExceptions[i]
-                                                                    + "</ExpectedException>");
+            "<ExpectedException action=add>" + expectedException
+              + "</ExpectedException>");
         }
 
         String distinct = "SELECT DISTINCT ";
@@ -985,19 +894,17 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
         StructSetOrResultsSet ssORrs = new  StructSetOrResultsSet();
         
         try {
-          for (int j = 0; j < queries.length; j++) {
+          for (final String query : queries) {
             String qStr = null;
             synchronized (region) {
               // Execute on local region.
-              qStr = (distinct + queries[j].replace("REGION_NAME", localRegion)); 
+              qStr = (distinct + query.replace("REGION_NAME", localRegion));
               r[0][0] = qs.newQuery(qStr).execute();
 
               // Execute on remote region.
-              qStr = (distinct + queries[j].replace("REGION_NAME", regionName)); 
+              qStr = (distinct + query.replace("REGION_NAME", regionName));
               r[0][1] = qs.newQuery(qStr).execute();
-
-//              getCache().getLogger().info("Finished executing PR query: " + qStr);
-              ssORrs.CompareQueryResultsWithoutAndWithIndexes(r, 1, true,queries);
+              ssORrs.CompareQueryResultsWithoutAndWithIndexes(r, 1, true, queries);
             }
           }
 
@@ -1035,10 +942,10 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
 
         }
         finally {
-          for (int i=0; i<expectedExceptions.length; i++) {
+          for (final String expectedException : expectedExceptions) {
             getCache().getLogger().info(
-                "<ExpectedException action=remove>" + expectedExceptions[i]
-                                                                         + "</ExpectedException>");
+              "<ExpectedException action=remove>" + expectedException
+                + "</ExpectedException>");
           }
         }
 
@@ -1098,10 +1005,10 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
             QueryInvocationTargetException.class.getName()
         };
 
-        for (int i=0; i<expectedExceptions.length; i++) {
+        for (final String expectedException : expectedExceptions) {
           getCache().getLogger().info(
-              "<ExpectedException action=add>" + expectedExceptions[i]
-                                                                    + "</ExpectedException>");
+            "<ExpectedException action=add>" + expectedException
+              + "</ExpectedException>");
         }
 
         String distinct = "<TRACE>SELECT DISTINCT ";
@@ -1123,6 +1030,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
                 if(sr.asList().size() > l*l) {
                   fail("The resultset size exceeds limit size. Limit size="+ l*l+", result size ="+ sr.asList().size());
                 }
+
                 // Execute on remote region.
                 qStr = (distinct + queries[j].replace("REGION_NAME", regionName));
                 qStr += (" LIMIT " + (l*l));
@@ -1132,9 +1040,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
                 if(srr.size() > l*l) {
                   fail("The resultset size exceeds limit size. Limit size="+ l*l+", result size ="+ srr.asList().size());
                 }
-                //assertEquals("The resultset size is not same as limit size.", l*l, srr.asList().size());
-
-//                getCache().getLogger().info("Finished executing PR query: " + qStr);
               }
             }
             StructSetOrResultsSet ssORrs = new  StructSetOrResultsSet();
@@ -1144,9 +1049,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
           com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
           .info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
-
-          // compareTwoQueryResults(r, queries.length);
-          
         }
         catch (QueryInvocationTargetException e) {
           // throw an unchecked exception so the controller can examine the cause and see whether or not it's okay
@@ -1178,10 +1080,10 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
 
         }
         finally {
-          for (int i=0; i<expectedExceptions.length; i++) {
+          for (final String expectedException : expectedExceptions) {
             getCache().getLogger().info(
-                "<ExpectedException action=remove>" + expectedExceptions[i]
-                                                                         + "</ExpectedException>");
+              "<ExpectedException action=remove>" + expectedException
+                + "</ExpectedException>");
           }
         }
 
@@ -1228,7 +1130,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
        };
 
        Object r[][] = new Object[queries.length][2];
-       Region local = cache.getRegion(localRegion);
        Region region = cache.getRegion(regionName);
        assertNotNull(region);
 
@@ -1240,10 +1141,10 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
            QueryInvocationTargetException.class.getName()
        };
 
-       for (int i=0; i<expectedExceptions.length; i++) {
+       for (final String expectedException : expectedExceptions) {
          getCache().getLogger().info(
-             "<ExpectedException action=add>" + expectedExceptions[i]
-                                                                   + "</ExpectedException>");
+           "<ExpectedException action=add>" + expectedException
+             + "</ExpectedException>");
        }
 
        QueryService qs = getCache().getQueryService();
@@ -1262,16 +1163,12 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
               qStr = queries[j];
               SelectResults srr = (SelectResults) qs.newQuery(qStr.replace(regionName, localRegion)).execute();
               r[j][1] = srr;
-
-//              getCache().getLogger().info(
-//                  "Finished executing PR query: " + qStr);
             }
           }
          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
          .info(
              "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
 
-         // compareTwoQueryResults(r, queries.length);
          StructSetOrResultsSet ssORrs = new  StructSetOrResultsSet();
          ssORrs.CompareCountStarQueryResultsWithoutAndWithIndexes(r, queries.length,true,queries);
          
@@ -1306,10 +1203,10 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
 
        }
        finally {
-         for (int i=0; i<expectedExceptions.length; i++) {
+         for (final String expectedException : expectedExceptions) {
            getCache().getLogger().info(
-               "<ExpectedException action=remove>" + expectedExceptions[i]
-                                                                        + "</ExpectedException>");
+             "<ExpectedException action=remove>" + expectedException
+               + "</ExpectedException>");
          }
        }
 
@@ -1322,56 +1219,19 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
   /**
   * Ensure queries on a PR use an index; fail if not.
    */
-  public CacheSerializableRunnable getCacheSerializableRunnableForIndexUsageCheck(final String name) {
+  public CacheSerializableRunnable getCacheSerializableRunnableForIndexUsageCheck() {
     SerializableRunnable PrIndexCheck = new CacheSerializableRunnable("PrIndexCheck") {
       @Override
       public void run2() {
         Cache cache = getCache();
         
-//        Region parRegion = cache.getRegion(name);
         QueryService qs = cache.getQueryService();
         LogWriter logger = cache.getLogger();
       
           Collection indexes = qs.getIndexes();
           Iterator it = indexes.iterator();
-          while(it.hasNext()) {         
-            //logger.info("Following indexes found : " + it.next());
+          while(it.hasNext()) {
             PartitionedIndex ind = (PartitionedIndex)it.next();
-            /*List bucketIndex = ind.getBucketIndexes();
-            int k = 0;
-            logger.info("Total number of bucket index : "+bucketIndex.size());
-            while ( k < bucketIndex.size() ){
-              Index bukInd = (Index)bucketIndex.get(k);
-              logger.info("Buket Index "+bukInd+"  usage : "+bukInd.getStatistics().getTotalUses());
-              // if number of quries on pr change in getCacheSerializableRunnableForPRQueryAndCompareResults
-              // literal 6  should change.
-              //Asif :  With the optmization of Range Queries a where clause
-              // containing something like ID > 4 AND ID < 9 will be evaluated 
-              //using a single index lookup, so accordingly modifying the 
-              //assert value from 7 to 6
-              // Anil : With aquiringReadLock during Index.getSizeEstimate(), the
-              // Index usage in case of "ID = 0 OR ID = 1" is increased by 3.
-              int indexUsageWithSizeEstimation = 3;
-              int expectedUse = 6;
-              long indexUse = bukInd.getStatistics().getTotalUses();
-              // Anil : With chnages to use single index for PR query evaluation, once the index
-              // is identified the same index is used on other PR buckets, the sieEstimation is
-              // done only once, which adds additional index use for only one bucket index.
-              if (!(indexUse == expectedUse || indexUse == (expectedUse + indexUsageWithSizeEstimation))){
-                fail ("Index usage is not as expected, expected it to be either " + 
-                    expectedUse + " or " + (expectedUse + indexUsageWithSizeEstimation) + 
-                    " it is: " + indexUse);
-                //assertEquals(6 + indexUsageWithSizeEstimation, bukInd.getStatistics().getTotalUses());
-              }
-              k++;
-            }*/
-            //Shobhit: Now we dont need to check stats per bucket index,
-            //stats are accumulated in single pr index stats.
-            
-            // Anil : With aquiringReadLock during Index.getSizeEstimate(), the
-            // Index usage in case of "ID = 0 OR ID = 1" is increased by 3.
-            int indexUsageWithSizeEstimation = 3;
-            
             logger.info("index uses for "+ind.getNumberOfIndexedBuckets()+" index "+ind.getName()+": "+ind.getStatistics().getTotalUses());
             assertEquals(6, ind.getStatistics().getTotalUses());
           }
@@ -1421,8 +1281,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
 
         }
         catch (QueryException e) {
-          // assertTrue("caught Exception"+ e.getMessage(),false);
-
           com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .error(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryWithConstantsAndComparingResults: Caught an Exception while querying Constants"
@@ -1475,46 +1333,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
 
     return (CacheSerializableRunnable)createPrRegion;
   }
-  /**
-   * This function creates a Accessor node region on the given PR given the
-   * scope parameter.
-   * 
-   * @param regionName
-   * @return cacheSerializable object
-   */
-
-  public CacheSerializableRunnable getCacheSerializableRunnableForPRAccessorCreate(
-      final String regionName, final int redundancy)
-  {
-    SerializableRunnable createPrRegion;
-    createPrRegion = new CacheSerializableRunnable(regionName) {
-      @Override
-      public void run2() throws CacheException
-      {
-        Cache cache = getCache();
-        Region partitionedregion = null;
-        int maxMem = 0;
-        AttributesFactory attr = new AttributesFactory();
-        attr.setValueConstraint(valueConstraint);
-        PartitionAttributesFactory paf = new PartitionAttributesFactory();
-        PartitionAttributes prAttr = paf.setLocalMaxMemory(maxMem)
-        .setRedundantCopies(redundancy).create();
-        attr.setPartitionAttributes(prAttr);
-        partitionedregion = cache.createRegion(regionName, attr.create());
-        assertNotNull(
-            "PRQueryDUnitHelper#getCacheSerializableRunnableForPRAccessorCreate: Partitioned Region "
-                + regionName + " not in cache", cache.getRegion(regionName));
-        assertNotNull(
-            "PRQueryDUnitHelper#getCacheSerializableRunnableForPRAccessorCreate: Partitioned Region ref null",
-            partitionedregion);
-        assertTrue(
-            "PRQueryDUnitHelper#getCacheSerializableRunnableForPRAccessorCreate: Partitioned Region ref claims to be destroyed",
-            !partitionedregion.isDestroyed());
-      }
-    };
-
-    return (CacheSerializableRunnable)createPrRegion;
-  }
 
   /*
    * This function compares the two result sets passed based on <br> 1. Type
@@ -1528,8 +1346,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
 
     Set set1 = null;
     Set set2 = null;
-//    Iterator itert1 = null;
-//    Iterator itert2 = null;
     ObjectType type1, type2;
 
     for (int j = 0; j < len; j++) {
@@ -1579,68 +1395,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
 
         assertEquals("PRQueryDUnitHelper#compareTwoQueryResults: FAILED: "
                      + "result contents are not equal, ", set1, set2);
-//        if (r[j][0] instanceof StructSet) {
-//          boolean pass = true;
-//          itert1 = set1.iterator();
-//          while (itert1.hasNext()) {
-//            StructImpl p1 = (StructImpl)itert1.next();
-//            itert2 = set2.iterator();
-//            boolean found = false;
-//            while (itert2.hasNext()) {
-//              StructImpl p2 = (StructImpl)itert2.next();
-//              Object[] values1 = p1.getFieldValues();
-//              Object[] values2 = p2.getFieldValues();
-//              if (values1.length != values2.length) {
-//                fail("PRQueryDUnitHelper#compareTwoQueryResults: The length of the values in struct fields does not match");
-//              }
-//              boolean exactMatch = true;
-//              for (int k = 0; k < values1.length; k++) {
-//                if (!values1[k].equals(values2[k]))
-//                  exactMatch = false;
-//              }
-//              if (exactMatch)
-//                found = true;
-//            }
-//
-//            if (!found)
-//              pass = false;
-//          }
-//
-//          if (pass) {
-//            getLogWriter()
-//            .info(
-//                  "PRQueryDUnitHelper#compareTwoQueryResults: Results found are StructSet and both of them are Equal.");
-//          }
-//          else {
-//            fail("PRQueryDUnitHelper#compareTwoQueryResults: Test failed the contents of the two resultSets are not same");
-//          }
-//        }
-//        else {
-//          boolean pass = true;
-//          itert1 = set1.iterator();
-//          while (itert1.hasNext()) {
-//            Object p1 = itert1.next();
-//            itert2 = set2.iterator();
-//            boolean found = false;
-//            while (itert2.hasNext()) {
-//              Object p2 = itert2.next();
-//              if (p2.equals(p1)) {
-//                found = true;
-//              }
-//            }
-//            if (!found)
-//              pass = false;
-//          }
-//
-//          if (pass) {
-//            getLogWriter()
-//              .info(
-//                  "PRQueryDUnitHelper#compareTwoQueryResults: Results found are ResultsSet and both of them are Equal.");
-//          }
-//          else {
-//            fail("PRQueryDUnitHelper#compareTwoQueryResults: Test failed the contents of the two resultSets are not same");
-//          }
-//        }
       }
     }
   }
@@ -1652,13 +1406,13 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
    * 3. Compares the appropriate resultSet <br>
    * 
    * @param regionName
-   * 
-   * 
+   *
+   *
    * @return cacheSerializable object
    */
 
   public CacheSerializableRunnable getCacheSerializableRunnableForPRInvalidQuery(
-      final String regionName, final String invalidQuery)
+    final String regionName)
   {
     SerializableRunnable PrRegion = new CacheSerializableRunnable("PRQuery") {
       @Override
@@ -1704,7 +1458,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
    */
 
   public CacheSerializableRunnable getCacheSerializableRunnableForRegionClose(
-      final String regionName, final int redundancy)
+      final String regionName, final int redundancy, final Class constraint)
   {
     SerializableRunnable PrRegion = new CacheSerializableRunnable("regionClose") {
       @Override
@@ -1729,12 +1483,8 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
         com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
             .info(
                 "PROperationWithQueryDUnitTest#getCacheSerializableRunnableForRegionClose: Region Closed on VM ");
-//        Region partitionedregion = null;
-//        Properties localProps = new Properties();
-//        String maxMem = "0";
-
         AttributesFactory attr = new AttributesFactory();
-        attr.setValueConstraint(PortfolioData.class);
+        attr.setValueConstraint(constraint);
         PartitionAttributesFactory paf = new PartitionAttributesFactory();
         PartitionAttributes prAttr = paf.setRedundantCopies(redundancy)
             .create();
@@ -1761,16 +1511,18 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
    * 2. creates the cache again & also the PR <br>
    * 
    * @return cacheSerializable object
+   *
+   * NOTE: Closing the cache must be done from the test case rather than in PRQueryDUnitHelper
+   *
    */
 
   public CacheSerializableRunnable getCacheSerializableRunnableForCacheClose(
-      final String regionName, final int redundancy)
+      final String regionName, final int redundancy, final Class constraint)
   {
     SerializableRunnable PrRegion = new CacheSerializableRunnable("cacheClose") {
       @Override
       public void run2() throws CacheException
       {
-        Cache cache = getCache();
         final String expectedCacheClosedException = CacheClosedException.class
             .getName();
         final String expectedReplyException = ReplyException.class.getName();
@@ -1780,26 +1532,12 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
         getCache().getLogger().info(
             "<ExpectedException action=add>" + expectedReplyException
                 + "</ExpectedException>");
-
-        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
-            .info(
-                "PROperationWithQueryDUnitTest#getCacheSerializableRunnableForCacheClose: Closing cache");
-        closeCache();
-
-        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
-            .info(
-                "PROperationWithQueryDUnitTest#getCacheSerializableRunnableForCacheClose: cache Closed on VM ");
-        cache = getCache();
-
+        Cache cache = getCache();
         com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
             .info(
                 "PROperationWithQueryDUnitTest#getCacheSerializableRunnableForCacheClose: Recreating the cache ");
-//        Region partitionedregion = null;
-//        Properties localProps = new Properties();
-//        String maxMem = "0";
-
         AttributesFactory attr = new AttributesFactory();
-        attr.setValueConstraint(PortfolioData.class);
+        attr.setValueConstraint(constraint);
         PartitionAttributesFactory paf = new PartitionAttributesFactory();
         PartitionAttributes prAttr = paf.setRedundantCopies(redundancy)
             .create();
@@ -1836,208 +1574,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
     return (CacheSerializableRunnable)PrRegion;
   }
 
-  /**
-   * This function <br>
-   * 1. The Creates an array of PortfolioData objects
-   * 
-   * @return PortFolioData Objects
-   */
-
-  public PortfolioData[] createPortfolioData(final int cnt, final int cntDest) {
-    PortfolioData[] portfolio = new PortfolioData[cntDest];
-    for (int k = cnt; k < cntDest; k++) {
-      portfolio[k] = new PortfolioData(k);
-    }
-    return portfolio;
-  }
-  
-  public Portfolio[] createPortfoliosAndPositions(int count) {
-    Position.cnt = 0; // reset Portfolio counter
-    Portfolio[] portfolios = new Portfolio[count];
-    for (int i = 0; i < count; i++) {
-      portfolios[i] = new Portfolio(i);
-    }
-    return portfolios;
-  }
-    
-
-  /**
-   * This function <br>
-   * 1. calls the region.destroyRegion() on the VM <br>
-   * 
-   * 
-   * @return cacheSerializable object
-   */
-
-  public CacheSerializableRunnable getCacheSerializableRunnableForRegionDestroy(
-      final String regionName, final int redundancy)
-  {
-    SerializableRunnable PrRegion = new CacheSerializableRunnable(
-        "regionDestroy") {
-      @Override
-      public void run2() throws CacheException
-      {
-        Cache cache = getCache();
-        final String expectedRegionDestroyedException = RegionDestroyedException.class
-            .getName();
-        final String expectedReplyException = ReplyException.class.getName();
-        getCache().getLogger().info(
-            "<ExpectedException action=add>" + expectedRegionDestroyedException
-                + "</ExpectedException>");
-        getCache().getLogger().info(
-            "<ExpectedException action=add>" + expectedReplyException
-                + "</ExpectedException>");
-
-        Region region = cache.getRegion(regionName);
-
-        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
-            .info(
-                "PRQueryRegionDestroyedDUnitTest#getCacheSerializableRunnableForRegionClose: Destroying region "
-                    + region);
-        region.destroyRegion();
-
-        assertTrue("Region destroy failed", region.isDestroyed());
-
-        getCache().getLogger().info(
-            "<ExpectedException action=remove>" + expectedReplyException
-                + "</ExpectedException>");
-        getCache().getLogger().info(
-            "<ExpectedException action=remove>"
-                + expectedRegionDestroyedException + "</ExpectedException>");
-      }
-
-    };
-    return (CacheSerializableRunnable)PrRegion;
-  }
-
-  /**
-   * This function <br>
-   * 1. Creates & executes a query with Logical Operators on the given PR Region
-   * 2. Executes the same query on the local region <br>
-   * 3. Compares the appropriate resultSet <br>
-   * 4. Compares and Print's the time taken for each <br>
-   */
-
-  public CacheSerializableRunnable PRQueryingVsLocalQuerying(
-      final String regionName, final String localRegion, final ResultsObject perfR)
-  {
-    SerializableRunnable PrRegion = new CacheSerializableRunnable("PRvsLocal") {
-      @Override
-      public void run2() throws CacheException
-      {
-        PerfResultsObject prfRObject=new PerfResultsObject(perfR);
-        Cache cache = getCache();
-        // Querying the localRegion and the PR region
-
-        String[] query = { "ID = 0 OR ID = 1", "ID > 4 AND ID < 9", "ID = 5",
-            "ID < 5 ", "ID <= 5" , "ID > 7 AND status ='active'" };
-        Object r[][] = new Object[query.length][2];
-
-        Region local = cache.getRegion(localRegion);
-        Region region = cache.getRegion(regionName);
-        assertEquals(local.values(), region.values());
-        
-        long startTimeLocal = System.currentTimeMillis();
-        try {
-          for (int j = 0; j < query.length; j++) {
-            r[j][0] = local.query(query[j]);
-
-          }
-          long endTimeLocal=System.currentTimeMillis();
-          long queryTimeLocal = endTimeLocal-startTimeLocal;
-          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#PRQueryingVsLocalQuerying: Time to Query Local cache "+queryTimeLocal + " ms");
-          
-          long startTimePR = System.currentTimeMillis();
-          for (int k = 0; k < query.length; k++) {
-            r[k][1] = region.query(query[k]);
-
-          }
-          long endTimePR = System.currentTimeMillis();
-          long queryTimePR = endTimePR-startTimePR;
-          
-          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#PRQueryingVsLocalQuerying: Time to Query PR "+queryTimePR+" ms");
-          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
-              .info(
-                  "PRQueryDUnitHelper#PRQueryingVsLocalQuerying: Queries Executed successfully on Local region & PR Region");
-
-          prfRObject.QueryingTimeLocal=queryTimeLocal;
-          prfRObject.QueryingTimePR=queryTimePR;
-          
-          prfRObject.displayResults();
-          compareTwoQueryResults(r, query.length);
-          
-        }
-        catch (QueryException e) {
-          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
-              .error(
-                  "PRQueryDUnitHelper#PRQueryingVsLocalQuerying: Caught QueryException while querying"
-                      + e, e);
-          fail("PRQueryDUnitHelper#PRQueryingVsLocalQuerying: Caught unexpected query exception. Exception is "
-              + e);
-        }
-
-      }
-
-    };
-    return (CacheSerializableRunnable)PrRegion;
-  }
-  
-  class PerfResultsObject implements Serializable {
-    String OperationDescription;
-    String Scope= null;
-    long QueryingTimeLocal;
-    long QueryingTimePR;
-    int NumberOfDataStores = 0;
-    int NumberOfAccessors = 0;
-    int redundancy=0;
-    
-    public PerfResultsObject(ResultsObject prfR){
-      this.OperationDescription=prfR.OperationDescription;
-      this.redundancy=prfR.redundancy;
-      this.NumberOfAccessors=prfR.NumberOfAccessors;
-      this.NumberOfDataStores=prfR.NumberOfDataStores;
-    }
-    
-    
-    
-    public void displayResults(){
-      
-      try {
-        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper:PerfResultsObject#displayResults");
-        BufferedWriter out = new BufferedWriter(new FileWriter("PRQueryPerfDUnitTest.txt", true));
-        
-        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("~~~~~~~~~~~~~~~~~~~~~~~PR Querying Performance Results~~~~~~~~~~~~~~~~~~~~~~~");
-        out.write("~~~~~~~~~~~~~~~~~~~~~~~PR Querying Performance Results~~~~~~~~~~~~~~~~~~~~~~~\n\n");
-        
-        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(this.OperationDescription);
-        out.write("\t"+this.OperationDescription+"\n\n");
-        
-        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Scope                    : "+this.Scope);
-        out.write("Scope                    : "+this.Scope+"\n\n");
-        
-        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Redundancy Level         : "+this.redundancy);
-        out.write("Redundancy Level         : "+this.redundancy+"\n\n");
-        
-        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Number of Accessor       : "+this.NumberOfAccessors);
-        out.write("Number of Accessor       : "+this.NumberOfAccessors+"\n\n");
-        
-        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Number of Datastore/s    : "+this.NumberOfDataStores);
-        out.write("Number of Datastore/s    : "+this.NumberOfDataStores+"\n\n");
-        
-        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("QueryingTime Local       : "+this.QueryingTimeLocal+" ms");
-        out.write("QueryingTime Local       : "+this.QueryingTimeLocal+" ms\n\n");
-        
-        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("QueryingTime PR          : "+this.QueryingTimePR+" ms");
-        out.write("QueryingTime PR          : "+this.QueryingTimePR+" ms\n");
-        
-        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
-        out.write("\n\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n");
-        out.close();
-    } catch (IOException e) {
-    }
-    }
-  }
-    
     /**
    * This function creates an appropriate index on a PR given the name and 
    * other parameters.
@@ -2064,15 +1600,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
                 indexedExpression, fromClause);
             logger.info(
                 "Index creted on partitioned region : " + parIndex);
-        /*    logger.info(
-                "Number of buckets indexed in the partitioned region locally : "
-                    + "" + ((PartitionedIndex)parIndex).getNumberOfIndexedBucket()
-                    + " and remote buckets indexed : "
-                    + ((PartitionedIndex)parIndex).getNumRemoteBucketsIndexed());
-                    */
-  
-            
-          } 
+          }
           else {
           logger.info("Test Creating index with Name : [ "+indexName+" ] " +
                         "IndexedExpression : [ "+indexedExpression+" ] Alias : [ "+alias+" ] FromClause : [ "+region.getFullPath() + " " + alias+" ] " );
@@ -2086,13 +1614,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
                   + " and remote buckets indexed : "
                   + ((PartitionedIndex)parIndex).getNumRemoteBucketsIndexed());
           }
-          /*
-           * assertEquals("Max num of buckets in the partiotion regions and
-           * the " + "buckets indexed should be equal",
-           * ((PartitionedRegion)region).getTotalNumberOfBuckets(),
-           * (((PartionedIndex)parIndex).getNumberOfIndexedBucket()+((PartionedIndex)parIndex).getNumRemtoeBucketsIndexed()));
-           * should put all the assetion in a seperate function.
-           */
         }
         catch (Exception ex) {
           Assert.fail("Creating Index in this vm failed : ", ex);
@@ -2189,129 +1710,37 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
   }
 
   /**
-   * Creates an xml file used in subsequent tasks.
-   * 
-   */
-
-  public CacheSerializableRunnable getCacheSerializableForPrXmlFileGenerator(
-      final String regionName, final int redundancy, final String fileName)
-  {
-    SerializableRunnable prXMLFileCreator = new CacheSerializableRunnable(
-        "prXmlFileCreator") {
-      @Override
-      public void run2()
-      {
-        Cache cache = getCache();
-        Region partitionedregion = cache.getRegion(regionName);
-        cache.getLogger().info(
-            "the index created : "
-                + ((PartitionedRegion)partitionedregion).getIndex());
-        /*
-         try {
-         //AttributesFactory attr = new AttributesFactory();
-         //attr.setValueConstraint(valueConstraint);
-         // attr.setScope(scope);
-
-         PartitionAttributesFactory paf = new PartitionAttributesFactory();
-         PartitionAttributes prAttr = paf.setRedundantCopies(redundancy).create();
-
-         attr.setPartitionAttributes(prAttr);
-
-         // partitionedregion = cache.createRegion(regionName, attr.create());
-         }
-         catch (IllegalStateException ex) {
-         getLogWriter()
-         .warning(
-         "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Creation caught IllegalStateException",
-         ex);
-         }
-         assertNotNull(
-         "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region "
-         + regionName + " not in cache", cache.getRegion(regionName));
-         assertNotNull(
-         "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region ref null",
-         partitionedregion);
-         assertTrue(
-         "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region ref claims to be destroyed",
-         !partitionedregion.isDestroyed());
-         */
-        // genrate the xml file.
-        writeCacheXml(fileName, cache);
-      }
-
-    };
-    return (CacheSerializableRunnable)prXMLFileCreator;
-  }
-  
-  /**
-   * Finish what beginCacheXml started. It does this be generating a cache.xml
-   * file and then creating a real cache using that cache.xml.
-   */
-  public void writeCacheXml(String name, Cache cache)
-  {
-    File file = new File(name + "-cache.xml");
-    try {
-      PrintWriter pw = new PrintWriter(new FileWriter(file), true);
-      CacheXmlGenerator.generate(cache, pw);
-      pw.close();
-    }
-    catch (IOException ex) {
-      Assert.fail("IOException during cache.xml generation to " + file, ex);
-    }
-
-  }
-  
-  /**
    * Creates a partitioned region using an xml file description.
    * 
-   * @param xmlFileName
-   * 
+   *
    * @return CacheSerializable
    *
    */
-  public CacheSerializableRunnable getCacheSerializableRunnableForPRCreateThrougXML(
-      final String regionName, final String xmlFileName)
+  public CacheSerializableRunnable getCacheSerializableRunnableForPRCreate(final String regionName)
   {
     SerializableRunnable prIndexCreator = new CacheSerializableRunnable(
         "PrRegionCreator") {
       @Override
       public void run2()
       {
-        InternalDistributedSystem sys = null;
         try {
-        closeCache();
-       // Properties props = new Properties();
-        File file = findFile(xmlFileName);
-       // props.setProperty(DistributionConfig.CACHE_XML_FILE_NAME, file
-       //     .toString());
-        GemFireCacheImpl.testCacheXml = file;
-        sys = getSystem();
-        // add expected exception for possible index conflict
-        sys.getLogWriter().info("<ExpectedException action=add>"
-            + IndexNameConflictException.class.getName()
-            + "</ExpectedException>");
-        Cache cache = getCache();
-        LogWriter logger = cache.getLogger();
-        PartitionedRegion region = (PartitionedRegion)cache
+          Cache cache = getCache();
+          LogWriter logger = cache.getLogger();
+          PartitionedRegion region = (PartitionedRegion)cache
             .getRegion(regionName);
-        Map indexMap = region.getIndex();
-        Set indexSet = indexMap.entrySet();
-        Iterator it = indexSet.iterator();
-        while (it.hasNext()) {
-          Map.Entry entry = (Map.Entry)it.next();
-          Index index = (Index)entry.getValue();
-          logger.info("The partitioned index created on this region "
+          Map indexMap = region.getIndex();
+          Set indexSet = indexMap.entrySet();
+          Iterator it = indexSet.iterator();
+          while (it.hasNext()) {
+            Map.Entry entry = (Map.Entry)it.next();
+            Index index = (Index)entry.getValue();
+            logger.info("The partitioned index created on this region "
               + " " + index);
-          logger.info("Current number of buckets indexed : " + ""
+            logger.info("Current number of buckets indexed : " + ""
               + ((PartitionedIndex)index).getNumberOfIndexedBuckets());
         }
         }
         finally {
-          if (sys != null) {
-            sys.getLogWriter().info("<ExpectedException action=remove>"
-                + IndexNameConflictException.class.getName()
-                + "</ExpectedException>");
-          }
           GemFireCacheImpl.testCacheXml = null;
         }
                
@@ -2321,7 +1750,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
   }
   
   
-  protected File findFile(String fileName)
+  public File findFile(String fileName)
   {
     return new File(TestUtil.getResourcePath(PRQueryDUnitHelper.class, fileName));
   }
@@ -2329,15 +1758,14 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
   public CacheSerializableRunnable getCacheSerializableRunnableForIndexCreationCheck(
       final String name)
   {
-    CacheSerializableRunnable prIndexCheck = new CacheSerializableRunnable(
+    return new CacheSerializableRunnable(
         "PrIndexCreationCheck") {
       @Override
       public void run2()
       {
-        //closeCache();
-        Cache cache = getCache();
-        LogWriter logger = cache.getLogger();
-        PartitionedRegion region = (PartitionedRegion)cache.getRegion(name);
+        Cache cache1 = getCache();
+        LogWriter logger = cache1.getLogger();
+        PartitionedRegion region = (PartitionedRegion) cache1.getRegion(name);
         Map indexMap = region.getIndex();
         Set indexSet = indexMap.entrySet();
         Iterator it = indexSet.iterator();
@@ -2349,13 +1777,12 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
           logger.info("Current number of buckets indexed : " + ""
               + ((PartitionedIndex)index).getNumberOfIndexedBuckets());
         }
-        
-        closeCache();
-        disconnectFromDS();
+
+        JUnit4CacheTestCase.closeCache();
+        JUnit4DistributedTestCase.disconnectFromDS();
 
       }
     };
-    return prIndexCheck;
   }
   
   /**
@@ -2417,17 +1844,17 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
   public CacheSerializableRunnable getCacheSerializableRunnableForRemoveIndex(
       final String name, final boolean random)
   {
-    CacheSerializableRunnable prRemoveIndex = new CacheSerializableRunnable(
+    return new CacheSerializableRunnable(
         "PrRemoveIndex") {
       @Override
       public void run2()
       {
-        
-        Cache cache = getCache();
-        LogWriter logger = cache.getLogger();
-        logger.info("Got the following cache : "+cache);
-        Region parRegion = cache.getRegion(name);
-        QueryService qs = cache.getQueryService();
+
+        Cache cache1 = getCache();
+        LogWriter logger = cache1.getLogger();
+        logger.info("Got the following cache : "+ cache1);
+        Region parRegion = cache1.getRegion(name);
+        QueryService qs = cache1.getQueryService();
         if (!random) {
           Collection indexes = qs.getIndexes();
           assertEquals(3, indexes.size());
@@ -2462,7 +1889,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
 
       } // ends run
     };
-    return prRemoveIndex;
   }
 
   public SerializableRunnableIF getCacheSerializableRunnableForPRColocatedDataSetQueryAndCompareResults(
@@ -2511,10 +1937,10 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
             ForceReattemptException.class.getName(),
             QueryInvocationTargetException.class.getName() };
 
-        for (int i = 0; i < expectedExceptions.length; i++) {
+        for (final String expectedException : expectedExceptions) {
           getCache().getLogger().info(
-              "<ExpectedException action=add>" + expectedExceptions[i]
-                  + "</ExpectedException>");
+            "<ExpectedException action=add>" + expectedException
+              + "</ExpectedException>");
         }
 
         QueryService qs = getCache().getQueryService();
@@ -2581,10 +2007,10 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
                   cce);
 
         } finally {
-          for (int i = 0; i < expectedExceptions.length; i++) {
+          for (final String expectedException : expectedExceptions) {
             getCache().getLogger().info(
-                "<ExpectedException action=remove>" + expectedExceptions[i]
-                    + "</ExpectedException>");
+              "<ExpectedException action=remove>" + expectedException
+                + "</ExpectedException>");
           }
         }
 
@@ -2640,10 +2066,10 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
             ForceReattemptException.class.getName(),
             QueryInvocationTargetException.class.getName() };
 
-        for (int i = 0; i < expectedExceptions.length; i++) {
+        for (final String expectedException : expectedExceptions) {
           getCache().getLogger().info(
-              "<ExpectedException action=add>" + expectedExceptions[i]
-                  + "</ExpectedException>");
+            "<ExpectedException action=add>" + expectedException
+              + "</ExpectedException>");
         }
 
         QueryService qs = getCache().getQueryService();
@@ -2710,10 +2136,10 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
                   cce);
 
         } finally {
-          for (int i = 0; i < expectedExceptions.length; i++) {
+          for (final String expectedException : expectedExceptions) {
             getCache().getLogger().info(
-                "<ExpectedException action=remove>" + expectedExceptions[i]
-                    + "</ExpectedException>");
+              "<ExpectedException action=remove>" + expectedException
+                + "</ExpectedException>");
           }
         }
 
@@ -2739,7 +2165,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
             "r1.ID = pos2.id",
             "r1.ID = pos2.id AND r1.ID > 5",
             "r1.ID = pos2.id AND r1.status = 'active'",
-            // "r1.ID = r2.id LIMIT 10",
             "r1.ID = pos2.id ORDER BY r1.ID",
             "r1.ID = pos2.id ORDER BY pos2.id",
             "r1.ID = pos2.id ORDER BY r2.status",
@@ -2770,14 +2195,13 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
             ForceReattemptException.class.getName(),
             QueryInvocationTargetException.class.getName() };
 
-        for (int i = 0; i < expectedExceptions.length; i++) {
+        for (final String expectedException : expectedExceptions) {
           getCache().getLogger().info(
-              "<ExpectedException action=add>" + expectedExceptions[i]
-                  + "</ExpectedException>");
+            "<ExpectedException action=add>" + expectedException
+              + "</ExpectedException>");
         }
 
         QueryService qs = getCache().getQueryService();
-        Object[] params;
         try {
           for (int j = 0; j < queries.length; j++) {
             getCache().getLogger().info(
@@ -2808,7 +2232,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
               .info(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
 
-          // compareTwoQueryResults(r, queries.length);
           StructSetOrResultsSet ssORrs = new StructSetOrResultsSet();
           ssORrs.CompareQueryResultsAsListWithoutAndWithIndexes(r,
               queries.length, false, false, queries);
@@ -2840,10 +2263,10 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
                   cce);
 
         } finally {
-          for (int i = 0; i < expectedExceptions.length; i++) {
+          for (final String expectedException : expectedExceptions) {
             getCache().getLogger().info(
-                "<ExpectedException action=remove>" + expectedExceptions[i]
-                    + "</ExpectedException>");
+              "<ExpectedException action=remove>" + expectedException
+                + "</ExpectedException>");
           }
         }
 
@@ -2900,10 +2323,10 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
             ForceReattemptException.class.getName(),
             QueryInvocationTargetException.class.getName() };
 
-        for (int i = 0; i < expectedExceptions.length; i++) {
+        for (final String expectedException : expectedExceptions) {
           getCache().getLogger().info(
-              "<ExpectedException action=add>" + expectedExceptions[i]
-                  + "</ExpectedException>");
+            "<ExpectedException action=add>" + expectedException
+              + "</ExpectedException>");
         }
 
         QueryService qs = getCache().getQueryService();
@@ -2938,7 +2361,6 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
               .info(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
 
-          // compareTwoQueryResults(r, queries.length);
           StructSetOrResultsSet ssORrs = new StructSetOrResultsSet();
           ssORrs.CompareQueryResultsAsListWithoutAndWithIndexes(r,
               queries.length, false, false, queries);
@@ -2970,10 +2392,10 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
                   cce);
 
         } finally {
-          for (int i = 0; i < expectedExceptions.length; i++) {
+          for (final String expectedException : expectedExceptions) {
             getCache().getLogger().info(
-                "<ExpectedException action=remove>" + expectedExceptions[i]
-                    + "</ExpectedException>");
+              "<ExpectedException action=remove>" + expectedException
+                + "</ExpectedException>");
           }
         }
 
@@ -3029,19 +2451,9 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
   public SerializableRunnable getCacheSerializableRunnableForCloseCache() {
     return new SerializableRunnable() {
       public void run() {
-        closeCache();
+        JUnit4CacheTestCase.closeCache();
       }
     };
   }
-
-
-  public NewPortfolio[] createNewPortfoliosAndPositions(int count) {
-    Position.cnt = 0; // reset Portfolio counter
-    NewPortfolio[] portfolios = new NewPortfolio[count];
-    for (int i = 0; i < count; i++) {
-      portfolios[i] = new NewPortfolio("" + i, i);
-    }
-    return portfolios;
-  }
 }
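
For reference, a minimal sketch of how a test now drives the refactored helper, assuming the
usual dUnit scaffolding of this test tree (Host/VM lookup and a getCache() available in the
test); the region name "portfolios" and the redundancy of 0 are illustrative, and only the
helper methods visible in this diff are taken as given:

  // Sketch only: everything except the helper methods is a made-up example value.
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  PRQueryDUnitHelper prqHelp = new PRQueryDUnitHelper();

  // The tests now push the cache into the helper in each participating VM first.
  vm0.invoke(() -> PRQueryDUnitHelper.setCache(getCache()));

  // The value constraint is now an explicit argument instead of a hard-coded PortfolioData.
  vm0.invoke(prqHelp.getCacheSerializableRunnableForPRCreate("portfolios", 0, PortfolioData.class));

  // Per the NOTE above, closing the cache is driven from the test case ...
  vm0.invoke(prqHelp.getCacheSerializableRunnableForCloseCache());

  // ... and the cache-close runnable only recreates the cache and the PR afterwards.
  vm0.invoke(prqHelp.getCacheSerializableRunnableForCacheClose("portfolios", 0, PortfolioData.class));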
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitTest.java
index c305531..548d998 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitTest.java
@@ -24,6 +24,8 @@ package com.gemstone.gemfire.cache.query.partitioned;
  *
  */
 
+import static com.gemstone.gemfire.cache.query.Utils.*;
+
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
@@ -61,13 +63,16 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
    */
 
   public PRQueryDUnitTest(String name) {
-
     super(name);
   }
 
-  static Properties props = new Properties();
+  public void setCacheInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> PRQueryDUnitHelper.setCache(getCache()));
+    }
+  }
 
-  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
+  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper();
 
   int totalNumBuckets = 100;
 
@@ -106,19 +111,19 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
+    setCacheInVMs(vm0, vm1, vm2, vm3);
     // Creating PR's on the participating VM's
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating PR's on VM0, VM1 , VM2 , VM3");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created PR's on VM0, VM1 , VM2 , VM3");
@@ -128,7 +133,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating Local region on VM0 to compare result Sets");
     // creating a local region on one of the JVM's
     vm0.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created Local Region on VM0");
@@ -136,7 +141,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, totalDataSize);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, totalDataSize);
 
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
@@ -191,7 +196,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
+    setCacheInVMs(vm0,vm1,vm2,vm3);
     // Creating PR's on the participating VM's
     LogWriterUtils.getLogWriter()
       .info(
@@ -221,7 +226,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final Portfolio[] portfoliosAndPositions = PRQHelp.createPortfoliosAndPositions(totalDataSize);
+    final Portfolio[] portfoliosAndPositions = createPortfoliosAndPositions(totalDataSize);
 
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfoliosAndPositions,
@@ -280,19 +285,19 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     VM vm2 = host.getVM(2);
 
     VM vm3 = host.getVM(3);
-
+    setCacheInVMs(vm0, vm1, vm2, vm3);
     // Creating PR's on the participating VM's
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQueryingWithConstants : Creating PR's on VM0, VM1 , VM2 , VM3");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQueryingWithConstants : Successfully Created PR's on VM0, VM1 , VM2 , VM3");
@@ -302,7 +307,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
             "PRQueryDUnitTest#testPRDAckCreationAndQueryingWithConstants : Creating Local region on VM0 to compare result Sets");
     // creating a local region on one of the JVM's
     vm0.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQueryingWithConstants : Successfully Created Local Region on VM0");
@@ -310,7 +315,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, totalDataSize);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, totalDataSize);
 
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
@@ -355,7 +360,6 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     final VM datastore2 = host.getVM(3);
     final int totalBuckets = 11;
     final int redCop = 0;
-
     CacheSerializableRunnable createPR = new CacheSerializableRunnable("Create PR") {
       @Override
       public void run2() throws CacheException {
@@ -434,7 +438,6 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     final VM datastore2 = host.getVM(3);
     final int totalBuckets = 10;
     final int redCop = 0;
-
     CacheSerializableRunnable createPR = new CacheSerializableRunnable("Create PR") {
       public void run2() throws CacheException {
         AttributesFactory attr = new AttributesFactory();
@@ -744,7 +747,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     VM vm2 = host.getVM(2);
 
     VM vm3 = host.getVM(3);
-
+    setCacheInVMs(vm0, vm1, vm2, vm3);
     // Creating PR's on the participating VM's
 
     // Creating Accessor node on the VM
@@ -752,7 +755,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Creating the Accessor node in the PR");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-        0));
+        0, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Successfully created the Accessor node in the PR");
@@ -762,11 +765,11 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Creating the Datastore node in the PR");
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Successfully Created the Datastore node in the PR");
@@ -776,14 +779,14 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Successfully Created PR's across all VM's");
     // creating a local region on one of the JVM's
     vm0.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Successfully Created Local Region on VM0");
 
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, totalDataSize);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, totalDataSize);
 
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
@@ -836,7 +839,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    VM vm3 = host.getVM(3);
-
+   setCacheInVMs(vm0, vm1, vm2, vm3);
    // Creating PR's on the participating VM's
    LogWriterUtils.getLogWriter()
      .info(
@@ -866,7 +869,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
    // Generating portfolio object array to be populated across the PR's & Local
    // Regions
 
-   final Portfolio[] portfoliosAndPositions = PRQHelp.createPortfoliosAndPositions(totalDataSize);
+   final Portfolio[] portfoliosAndPositions = createPortfoliosAndPositions(totalDataSize);
 
    // Putting the data into the PR's created
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfoliosAndPositions,
@@ -923,7 +926,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    VM vm3 = host.getVM(3);
-
+   setCacheInVMs(vm0, vm1, vm2, vm3);
    // Creating PR's on the participating VM's
    LogWriterUtils.getLogWriter()
    .info(
@@ -953,7 +956,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
    // Generating portfolio object array to be populated across the PR's & Local
    // Regions
 
-   final Portfolio[] portfoliosAndPositions = PRQHelp.createPortfoliosAndPositions(totalDataSize);
+   final Portfolio[] portfoliosAndPositions = createPortfoliosAndPositions(totalDataSize);
 
    // Putting the data into the PR's created
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfoliosAndPositions,
@@ -1012,7 +1015,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     VM vm2 = host.getVM(2);
 
     VM vm3 = host.getVM(3);
-
+    setCacheInVMs(vm0, vm1, vm2, vm3);
     // Creating PR's on the participating VM's
 
     // Creating Accessor node on the VM
@@ -1051,7 +1054,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
 
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
-    final Portfolio[] portfolio = PRQHelp.createPortfoliosAndPositions(totalDataSize);
+    final Portfolio[] portfolio = createPortfoliosAndPositions(totalDataSize);
 
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfolio,
@@ -1093,7 +1096,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
+    setCacheInVMs(vm0, vm1, vm2, vm3);
     // Creating PR's on the participating VM's
     LogWriterUtils.getLogWriter()
       .info(
@@ -1123,7 +1126,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final Portfolio[] portfoliosAndPositions = PRQHelp.createPortfoliosAndPositions(totalDataSize);
+    final Portfolio[] portfoliosAndPositions = createPortfoliosAndPositions(totalDataSize);
 
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfoliosAndPositions,
@@ -1180,7 +1183,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     VM vm2 = host.getVM(2);
 
     VM vm3 = host.getVM(3);
-
+    setCacheInVMs(vm0, vm1, vm2, vm3);
     // Creating PR's on the participating VM's
 
     // Creating Accessor node on the VM
@@ -1188,7 +1191,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQueryingWithNoData : Creating the Accessor node in the PR");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-        0));
+        0, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQueryingWithNoData : Successfully created the Accessor node in the PR");
@@ -1198,11 +1201,11 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQueryingWithNoData : Creating the Datastore node in the PR");
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQueryingWithNoData : Successfully Created the Datastore node in the PR");
@@ -1212,7 +1215,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
             "PRQueryDUnitTest#testPRAccessorCreationAndQueryingWithNoData : Successfully Created PR's across all VM's");
     // creating a local region on one of the JVM's
     vm0.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQueryingWithNoData : Successfully Created Local Region on VM0");


[17/63] [abbrv] incubator-geode git commit: Merge branch 'develop' into feature/GEODE-17-2

Posted by kl...@apache.org.
Merge branch 'develop' into feature/GEODE-17-2

# Conflicts:
#	geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/7e8294d7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/7e8294d7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/7e8294d7

Branch: refs/heads/feature/GEODE-1276
Commit: 7e8294d736dbdc3103dacbb42e9be2519d4e3e81
Parents: 7927758 46535f2
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Wed Apr 27 14:13:38 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Wed Apr 27 14:13:38 2016 -0700

----------------------------------------------------------------------
 geode-core/build.gradle                         |   22 -
 .../gemfire/cache/AttributesFactory.java        |   58 -
 .../gemfire/cache/AttributesMutator.java        |   14 -
 .../gemfire/cache/CustomEvictionAttributes.java |   78 -
 .../com/gemstone/gemfire/cache/DataPolicy.java  |   11 -
 .../gemfire/cache/EvictionCriteria.java         |   57 -
 .../com/gemstone/gemfire/cache/Operation.java   |   13 -
 .../gemfire/cache/RegionAttributes.java         |   23 -
 .../gemstone/gemfire/cache/RegionFactory.java   |   24 -
 .../internal/AsyncEventQueueFactoryImpl.java    |    5 -
 .../gemfire/cache/hdfs/HDFSIOException.java     |   52 -
 .../gemstone/gemfire/cache/hdfs/HDFSStore.java  |  341 --
 .../gemfire/cache/hdfs/HDFSStoreFactory.java    |  203 -
 .../gemfire/cache/hdfs/HDFSStoreMutator.java    |  196 -
 .../cache/hdfs/StoreExistsException.java        |   32 -
 .../cache/hdfs/internal/FailureTracker.java     |   96 -
 .../cache/hdfs/internal/FlushObserver.java      |   53 -
 .../hdfs/internal/HDFSBucketRegionQueue.java    | 1232 ------
 .../cache/hdfs/internal/HDFSEntriesSet.java     |  329 --
 .../cache/hdfs/internal/HDFSEventListener.java  |  179 -
 .../hdfs/internal/HDFSEventQueueFilter.java     |   73 -
 .../hdfs/internal/HDFSGatewayEventImpl.java     |  180 -
 .../hdfs/internal/HDFSIntegrationUtil.java      |  117 -
 .../HDFSParallelGatewaySenderQueue.java         |  471 ---
 .../hdfs/internal/HDFSStoreConfigHolder.java    |  559 ---
 .../cache/hdfs/internal/HDFSStoreCreation.java  |  198 -
 .../hdfs/internal/HDFSStoreFactoryImpl.java     |   77 -
 .../cache/hdfs/internal/HDFSStoreImpl.java      |  638 ---
 .../hdfs/internal/HDFSStoreMutatorImpl.java     |  200 -
 .../HDFSWriteOnlyStoreEventListener.java        |  184 -
 .../hdfs/internal/HoplogListenerForRegion.java  |   72 -
 .../cache/hdfs/internal/PersistedEventImpl.java |  202 -
 .../hdfs/internal/QueuedPersistentEvent.java    |   27 -
 .../hdfs/internal/SignalledFlushObserver.java   |  122 -
 .../internal/SortedHDFSQueuePersistedEvent.java |   86 -
 .../internal/SortedHoplogPersistedEvent.java    |  114 -
 .../UnsortedHDFSQueuePersistedEvent.java        |   76 -
 .../internal/UnsortedHoplogPersistedEvent.java  |   92 -
 .../hdfs/internal/hoplog/AbstractHoplog.java    |  357 --
 .../hoplog/AbstractHoplogOrganizer.java         |  430 --
 .../cache/hdfs/internal/hoplog/BloomFilter.java |   36 -
 .../hoplog/CloseTmpHoplogsTimerTask.java        |  108 -
 .../hdfs/internal/hoplog/CompactionStatus.java  |   72 -
 .../cache/hdfs/internal/hoplog/FlushStatus.java |   72 -
 .../internal/hoplog/HDFSCompactionManager.java  |  330 --
 .../internal/hoplog/HDFSFlushQueueArgs.java     |   93 -
 .../internal/hoplog/HDFSFlushQueueFunction.java |  287 --
 .../hoplog/HDFSForceCompactionArgs.java         |  107 -
 .../hoplog/HDFSForceCompactionFunction.java     |  129 -
 .../HDFSForceCompactionResultCollector.java     |  131 -
 .../hoplog/HDFSLastCompactionTimeFunction.java  |   56 -
 .../internal/hoplog/HDFSRegionDirector.java     |  480 ---
 .../hdfs/internal/hoplog/HDFSStoreDirector.java |   78 -
 .../hoplog/HDFSUnsortedHoplogOrganizer.java     |  447 ---
 .../hdfs/internal/hoplog/HFileSortedOplog.java  |  853 ----
 .../hoplog/HdfsSortedOplogOrganizer.java        | 2004 ----------
 .../cache/hdfs/internal/hoplog/Hoplog.java      |  263 --
 .../hdfs/internal/hoplog/HoplogConfig.java      |   74 -
 .../hdfs/internal/hoplog/HoplogListener.java    |   47 -
 .../hdfs/internal/hoplog/HoplogOrganizer.java   |  123 -
 .../hdfs/internal/hoplog/HoplogSetIterator.java |  166 -
 .../hdfs/internal/hoplog/HoplogSetReader.java   |  114 -
 .../internal/hoplog/SequenceFileHoplog.java     |  395 --
 .../hoplog/mapred/AbstractGFRecordReader.java   |  106 -
 .../internal/hoplog/mapred/GFInputFormat.java   |   95 -
 .../internal/hoplog/mapred/GFOutputFormat.java  |   75 -
 .../mapreduce/AbstractGFRecordReader.java       |  140 -
 .../hoplog/mapreduce/GFInputFormat.java         |  124 -
 .../hdfs/internal/hoplog/mapreduce/GFKey.java   |   72 -
 .../hoplog/mapreduce/GFOutputFormat.java        |  198 -
 .../hoplog/mapreduce/HDFSSplitIterator.java     |  197 -
 .../internal/hoplog/mapreduce/HoplogUtil.java   |  463 ---
 .../hoplog/mapreduce/RWSplitIterator.java       |   48 -
 .../hoplog/mapreduce/StreamSplitIterator.java   |   46 -
 .../org/apache/hadoop/io/SequenceFile.java      | 3726 ------------------
 .../query/internal/index/DummyQRegion.java      |    3 -
 .../cache/query/internal/index/HashIndex.java   |    1 -
 .../query/internal/index/IndexManager.java      |    8 -
 .../gemfire/cache/wan/GatewaySender.java        |    2 -
 .../gemstone/gemfire/internal/DSFIDFactory.java |    3 -
 .../internal/DataSerializableFixedID.java       |    1 -
 .../gemfire/internal/HeapDataOutputStream.java  |    3 -
 .../admin/remote/RemoteRegionAttributes.java    |   25 -
 .../cache/AbstractBucketRegionQueue.java        |   18 +-
 .../gemfire/internal/cache/AbstractRegion.java  |  147 -
 .../internal/cache/AbstractRegionEntry.java     |   36 +-
 .../internal/cache/AbstractRegionMap.java       |   86 +-
 .../gemfire/internal/cache/BucketAdvisor.java   |    1 -
 .../gemfire/internal/cache/BucketRegion.java    |  212 +-
 .../internal/cache/BucketRegionQueue.java       |    6 +-
 .../cache/CacheDistributionAdvisor.java         |   22 +-
 .../gemfire/internal/cache/CachePerfStats.java  |   75 -
 .../internal/cache/ColocationHelper.java        |    3 -
 .../cache/CustomEvictionAttributesImpl.java     |   35 -
 .../gemfire/internal/cache/DiskEntry.java       |    1 -
 .../gemfire/internal/cache/DistTXState.java     |    2 +-
 .../cache/DistributedCacheOperation.java        |    7 +-
 .../cache/DistributedPutAllOperation.java       |   33 +-
 .../internal/cache/DistributedRegion.java       |   31 +-
 .../cache/DistributedRemoveAllOperation.java    |   19 +-
 .../gemfire/internal/cache/EntryEventImpl.java  |   55 +-
 .../gemfire/internal/cache/EvictorService.java  |  284 --
 .../gemfire/internal/cache/FilterProfile.java   |   13 +-
 .../internal/cache/GemFireCacheImpl.java        |   99 -
 .../gemfire/internal/cache/HARegion.java        |   15 +-
 .../internal/cache/HDFSLRURegionMap.java        |  111 -
 .../gemfire/internal/cache/HDFSRegionMap.java   |   32 -
 .../internal/cache/HDFSRegionMapDelegate.java   |  540 ---
 .../internal/cache/HDFSRegionMapImpl.java       |   74 -
 .../gemfire/internal/cache/InternalCache.java   |    4 -
 .../internal/cache/InternalDataView.java        |   28 +-
 .../internal/cache/InternalRegionArguments.java |   16 -
 .../gemfire/internal/cache/LocalRegion.java     |  288 +-
 .../internal/cache/LocalRegionDataView.java     |   35 +-
 .../internal/cache/NonLocalRegionEntry.java     |   20 -
 .../gemstone/gemfire/internal/cache/Oplog.java  |   14 -
 .../gemfire/internal/cache/OverflowOplog.java   |    1 -
 .../internal/cache/PartitionedRegion.java       |  482 +--
 .../cache/PartitionedRegionDataStore.java       |   49 +-
 .../cache/PartitionedRegionDataView.java        |   27 +-
 .../gemfire/internal/cache/ProxyRegionMap.java  |   21 -
 .../gemfire/internal/cache/RegionEntry.java     |   20 -
 .../internal/cache/RegionMapFactory.java        |    6 -
 .../internal/cache/RemoteDestroyMessage.java    |    2 +-
 .../internal/cache/RemoteGetMessage.java        |    2 +-
 .../internal/cache/RemotePutMessage.java        |    2 +-
 .../gemfire/internal/cache/TXEntry.java         |    3 +-
 .../gemfire/internal/cache/TXEntryState.java    |   14 -
 .../gemfire/internal/cache/TXState.java         |   38 +-
 .../internal/cache/TXStateInterface.java        |   10 +-
 .../internal/cache/TXStateProxyImpl.java        |   30 +-
 .../gemfire/internal/cache/TXStateStub.java     |   32 +-
 .../gemfire/internal/cache/UpdateOperation.java |    3 -
 .../cache/UserSpecifiedRegionAttributes.java    |   24 +-
 .../cache/VMStatsDiskLRURegionEntryHeap.java    |    2 +-
 .../cache/VMStatsDiskLRURegionEntryOffHeap.java |    2 +-
 .../cache/VMStatsDiskRegionEntryHeap.java       |    2 +-
 .../cache/VMStatsDiskRegionEntryOffHeap.java    |    2 +-
 .../cache/VMStatsLRURegionEntryHeap.java        |    2 +-
 .../cache/VMStatsLRURegionEntryOffHeap.java     |    2 +-
 .../internal/cache/VMStatsRegionEntryHeap.java  |    2 +-
 .../cache/VMStatsRegionEntryOffHeap.java        |    2 +-
 .../cache/VMThinDiskLRURegionEntryHeap.java     |    2 +-
 .../cache/VMThinDiskLRURegionEntryOffHeap.java  |    2 +-
 .../cache/VMThinDiskRegionEntryHeap.java        |    2 +-
 .../cache/VMThinDiskRegionEntryOffHeap.java     |    2 +-
 .../cache/VMThinLRURegionEntryHeap.java         |    2 +-
 .../cache/VMThinLRURegionEntryOffHeap.java      |    2 +-
 .../internal/cache/VMThinRegionEntryHeap.java   |    2 +-
 .../cache/VMThinRegionEntryOffHeap.java         |    2 +-
 .../internal/cache/ValidatingDiskRegion.java    |   13 -
 .../VersionedStatsDiskLRURegionEntryHeap.java   |    2 +-
 ...VersionedStatsDiskLRURegionEntryOffHeap.java |    2 +-
 .../VersionedStatsDiskRegionEntryHeap.java      |    2 +-
 .../VersionedStatsDiskRegionEntryOffHeap.java   |    2 +-
 .../cache/VersionedStatsLRURegionEntryHeap.java |    2 +-
 .../VersionedStatsLRURegionEntryOffHeap.java    |    2 +-
 .../cache/VersionedStatsRegionEntryHeap.java    |    2 +-
 .../cache/VersionedStatsRegionEntryOffHeap.java |    2 +-
 .../VersionedThinDiskLRURegionEntryHeap.java    |    2 +-
 .../VersionedThinDiskLRURegionEntryOffHeap.java |    2 +-
 .../cache/VersionedThinDiskRegionEntryHeap.java |    2 +-
 .../VersionedThinDiskRegionEntryOffHeap.java    |    2 +-
 .../cache/VersionedThinLRURegionEntryHeap.java  |    2 +-
 .../VersionedThinLRURegionEntryOffHeap.java     |    2 +-
 .../cache/VersionedThinRegionEntryHeap.java     |    2 +-
 .../cache/VersionedThinRegionEntryOffHeap.java  |    2 +-
 .../cache/control/InternalResourceManager.java  |   10 -
 .../cache/partitioned/DestroyMessage.java       |    2 +-
 .../partitioned/FetchBulkEntriesMessage.java    |    2 +-
 .../internal/cache/partitioned/GetMessage.java  |   22 +-
 .../cache/partitioned/PutAllPRMessage.java      |   16 +-
 .../internal/cache/partitioned/PutMessage.java  |   12 +-
 .../persistence/soplog/ByteComparator.java      |   55 -
 .../persistence/soplog/CursorIterator.java      |   81 -
 .../soplog/DelegatingSerializedComparator.java  |   37 -
 .../soplog/HFileStoreStatistics.java            |  205 -
 .../persistence/soplog/KeyValueIterator.java    |   42 -
 .../soplog/SortedOplogStatistics.java           |  505 ---
 .../cache/persistence/soplog/SortedReader.java  |  255 --
 .../persistence/soplog/TrackedReference.java    |  153 -
 .../cache/tier/sockets/BaseCommand.java         |   11 +-
 .../internal/cache/tier/sockets/Message.java    |   16 -
 .../cache/tier/sockets/command/Destroy65.java   |    2 +-
 .../cache/tier/sockets/command/Get70.java       |    4 +-
 .../cache/tier/sockets/command/Request.java     |    4 +-
 .../internal/cache/tx/ClientTXRegionStub.java   |    4 +-
 .../cache/tx/DistributedTXRegionStub.java       |   14 +-
 .../cache/tx/PartitionedTXRegionStub.java       |    8 +-
 .../gemfire/internal/cache/tx/TXRegionStub.java |    4 +-
 .../cache/wan/AbstractGatewaySender.java        |   22 +-
 .../cache/wan/GatewaySenderAttributes.java      |    5 -
 .../cache/wan/GatewaySenderEventImpl.java       |    5 -
 ...rentParallelGatewaySenderEventProcessor.java |    3 -
 .../ConcurrentParallelGatewaySenderQueue.java   |   12 -
 .../ParallelGatewaySenderEventProcessor.java    |   22 +-
 .../parallel/ParallelGatewaySenderQueue.java    |   21 +-
 .../cache/xmlcache/AsyncEventQueueCreation.java |    9 -
 .../internal/cache/xmlcache/CacheCreation.java  |   39 +-
 .../internal/cache/xmlcache/CacheXml.java       |   31 -
 .../internal/cache/xmlcache/CacheXmlParser.java |  170 -
 .../xmlcache/RegionAttributesCreation.java      |   55 +-
 .../gemfire/internal/i18n/LocalizedStrings.java |   30 -
 .../internal/offheap/AbstractStoredObject.java  |    1 -
 .../internal/offheap/FreeListManager.java       |    7 -
 .../internal/offheap/OffHeapStoredObject.java   |    6 -
 .../internal/offheap/TinyStoredObject.java      |    4 -
 .../gemfire/internal/tcp/MsgStreamer.java       |   11 -
 .../management/DistributedRegionMXBean.java     |   11 -
 .../management/DistributedSystemMXBean.java     |    8 -
 .../gemfire/management/MemberMXBean.java        |    7 -
 .../gemfire/management/RegionMXBean.java        |   10 -
 .../internal/beans/DistributedRegionBridge.java |    5 -
 .../internal/beans/DistributedRegionMBean.java  |    5 -
 .../internal/beans/DistributedSystemBridge.java |   19 -
 .../internal/beans/DistributedSystemMBean.java  |    7 -
 .../internal/beans/HDFSRegionBridge.java        |  173 -
 .../management/internal/beans/MemberMBean.java  |    5 -
 .../internal/beans/MemberMBeanBridge.java       |   27 -
 .../internal/beans/PartitionedRegionBridge.java |   13 +-
 .../management/internal/beans/RegionMBean.java  |    5 -
 .../internal/beans/RegionMBeanBridge.java       |    5 -
 .../beans/stats/RegionClusterStatsMonitor.java  |    7 -
 .../cli/domain/RegionAttributesInfo.java        |   21 +-
 .../functions/DescribeHDFSStoreFunction.java    |   86 -
 .../cli/util/HDFSStoreNotFoundException.java    |   47 -
 .../cli/util/RegionAttributesNames.java         |    4 +-
 .../support/MemberMXBeanAdapter.java            |    5 -
 .../gemfire/pdx/internal/PdxReaderImpl.java     |    3 -
 .../geode.apache.org/schema/cache/cache-1.0.xsd |   31 -
 .../SignalledFlushObserverJUnitTest.java        |   97 -
 .../SortedListForAsyncQueueJUnitTest.java       |  564 ---
 .../GetOperationContextImplJUnitTest.java       |    1 -
 .../gemfire/cache30/Bug38741DUnitTest.java      |    2 +-
 .../gemfire/distributed/LocatorDUnitTest.java   |   31 +
 .../ParallelGatewaySenderQueueJUnitTest.java    |    2 +-
 .../domain/CacheElementJUnitTest.java           |    1 -
 .../internal/JUnit4DistributedTestCase.java     |    3 -
 .../sanctionedDataSerializables.txt             |   92 +-
 .../codeAnalysis/sanctionedSerializables.txt    |   15 +-
 .../cache/query/internal/cq/CqServiceImpl.java  |    2 +-
 geode-lucene/build.gradle                       |    4 -
 .../tools/pulse/internal/data/Cluster.java      |    9 -
 .../pulse/internal/data/PulseConstants.java     |    1 -
 .../internal/service/ClusterRegionService.java  |   11 -
 .../internal/service/ClusterRegionsService.java |   11 -
 .../service/ClusterSelectedRegionService.java   |    6 -
 .../scripts/pulsescript/PulseCallbacks.js       |    2 -
 .../webapp/scripts/pulsescript/clusterDetail.js |    7 +-
 .../controllers/PulseControllerJUnitTest.java   |    3 -
 .../gemfire/tools/pulse/tests/Region.java       |    9 +-
 geode-pulse/src/test/resources/test.properties  |    6 +-
 geode-rebalancer/build.gradle                   |    7 -
 .../cache/wan/GatewaySenderFactoryImpl.java     |    4 -
 .../internal/cache/UpdateVersionDUnitTest.java  |    6 +-
 255 files changed, 542 insertions(+), 25074 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7e8294d7/geode-core/build.gradle
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7e8294d7/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedRegionMXBean.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7e8294d7/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedSystemMXBean.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7e8294d7/geode-core/src/main/java/com/gemstone/gemfire/management/MemberMXBean.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7e8294d7/geode-core/src/main/java/com/gemstone/gemfire/management/RegionMXBean.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7e8294d7/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MemberMBeanBridge.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7e8294d7/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7e8294d7/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/data/Cluster.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7e8294d7/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/data/PulseConstants.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7e8294d7/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/controllers/PulseControllerJUnitTest.java
----------------------------------------------------------------------


[08/63] [abbrv] incubator-geode git commit: GEODE-1072: Removing HDFS related code

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/RWSplitIterator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/RWSplitIterator.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/RWSplitIterator.java
deleted file mode 100644
index 23dd840..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/RWSplitIterator.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
-
-import java.io.IOException;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.SortedHDFSQueuePersistedEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplog;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HFileSortedOplog;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
-
-/**
- * An iterator that iterates over a split in a read/write hoplog
- */
-public class RWSplitIterator extends HDFSSplitIterator {
-
-  public RWSplitIterator(FileSystem fs, Path[] path, long[] start, long[] len, long startTime, long endTime) throws IOException {
-    super(fs, path, start, len, startTime, endTime);
-  }
-
-  @Override
-  protected AbstractHoplog getHoplog(FileSystem fs, Path path) throws IOException {
-    SchemaMetrics.configureGlobally(fs.getConf());
-    return HFileSortedOplog.getHoplogForLoner(fs, path); 
-  }
-
-  public PersistedEventImpl getDeserializedValue() throws ClassNotFoundException, IOException {
-    return SortedHDFSQueuePersistedEvent.fromBytes(iterator.getValue());
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/StreamSplitIterator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/StreamSplitIterator.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/StreamSplitIterator.java
deleted file mode 100644
index bfb2deb..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/StreamSplitIterator.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
-
-import java.io.IOException;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.UnsortedHDFSQueuePersistedEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplog;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.SequenceFileHoplog;
-
-/**
- * An iterator that iterates over a split in a sequential hoplog.
- */
-public class StreamSplitIterator extends HDFSSplitIterator {
-
-  public StreamSplitIterator(FileSystem fs, Path[] path, long[] start, long[] len, long startTime, long endTime) throws IOException {
-    super(fs, path, start, len, startTime, endTime);
-  }
-
-  public PersistedEventImpl getDeserializedValue() throws ClassNotFoundException, IOException {
-    return UnsortedHDFSQueuePersistedEvent.fromBytes(iterator.getValue());
-  }
-
-  @Override
-  protected AbstractHoplog getHoplog(FileSystem fs, Path path) throws IOException {
-    return new SequenceFileHoplog(fs, path, null);
-  }
-}


[19/63] [abbrv] incubator-geode git commit: GEODE-1258: Added tests for WAN authentication.

Posted by kl...@apache.org.
GEODE-1258: Added tests for WAN authentication.

* Created tests for checking WAN authentication using valid and invalid credentials.

This closes #131
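
For readers skimming the diff below: the new tests wire authentication into both WAN sites purely through distributed-system properties. What follows is a minimal, hypothetical sketch of that property wiring using plain java.util.Properties; the property names mirror the DistributionConfig constants used in the diff, while the callback class names and credential values are placeholders rather than the classes the test actually uses.

    import java.util.Properties;

    // Hypothetical, self-contained sketch of the credential wiring used by these tests.
    public class WanAuthPropertiesSketch {

      static Properties buildAuthProperties(String authenticator, String authInit,
                                            Properties credentials) {
        Properties props = new Properties();
        // Receiver side: callback that validates credentials presented by the remote site.
        props.setProperty("security-client-authenticator", authenticator);
        // Sender side: callback that supplies credentials when the connection is opened.
        props.setProperty("security-client-auth-init", authInit);
        // The generated username/password pairs for this member.
        props.putAll(credentials);
        return props;
      }

      public static void main(String[] args) {
        Properties credentials = new Properties();
        credentials.setProperty("security-username", "user1");   // placeholder values
        credentials.setProperty("security-password", "user1");
        Properties siteProps = buildAuthProperties(
            "templates.security.DummyAuthenticator.create",      // placeholder callback
            "templates.security.UserPasswordAuthInit.create",    // placeholder callback
            credentials);
        System.out.println(siteProps);
      }
    }

With invalid credentials the same wiring is expected to surface an AuthenticationFailedException when the sender starts, which is what testWanAuthInvalidCredentials in the diff asserts.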


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/6b4cdb1c
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/6b4cdb1c
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/6b4cdb1c

Branch: refs/heads/feature/GEODE-1276
Commit: 6b4cdb1c46cd5c72a074fb908c9ed791e7219677
Parents: 46535f2
Author: nabarun <nn...@pivotal.io>
Authored: Tue Apr 19 14:15:10 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Wed Apr 27 15:57:50 2016 -0700

----------------------------------------------------------------------
 .../gemfire/internal/cache/wan/WANTestBase.java |  17 +
 .../wan/misc/NewWanAuthenticationDUnitTest.java | 309 +++++++++++++++++++
 2 files changed, 326 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6b4cdb1c/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/WANTestBase.java
----------------------------------------------------------------------
diff --git a/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/WANTestBase.java b/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/WANTestBase.java
index 39154b2..09ec3e1 100644
--- a/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/WANTestBase.java
+++ b/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/WANTestBase.java
@@ -356,6 +356,23 @@ public class WANTestBase extends DistributedTestCase{
     return port;
   }
 
+  public static int createReceiverInSecuredCache() {
+    GatewayReceiverFactory fact = WANTestBase.cache.createGatewayReceiverFactory();
+    int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
+    fact.setStartPort(port);
+    fact.setEndPort(port);
+    fact.setManualStart(true);
+    GatewayReceiver receiver = fact.create();
+    try {
+      receiver.start();
+    }
+    catch (IOException e) {
+      e.printStackTrace();
+      com.gemstone.gemfire.test.dunit.Assert.fail("Failed to start GatewayReceiver on port " + port, e);
+    }
+    return port;
+  }
+
   public static void createReplicatedRegion(String regionName, String senderIds, Boolean offHeap){
     IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6b4cdb1c/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/NewWanAuthenticationDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/NewWanAuthenticationDUnitTest.java b/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/NewWanAuthenticationDUnitTest.java
new file mode 100644
index 0000000..f0303b9
--- /dev/null
+++ b/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/NewWanAuthenticationDUnitTest.java
@@ -0,0 +1,309 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.internal.cache.wan.misc;
+
+import java.util.Properties;
+
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.security.AuthInitialize;
+import com.gemstone.gemfire.security.AuthenticationFailedException;
+import com.gemstone.gemfire.security.SecurityTestUtils;
+import com.gemstone.gemfire.security.generator.CredentialGenerator;
+
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.distributed.DistributedSystem;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.internal.Assert;
+import com.gemstone.gemfire.internal.cache.wan.WANTestBase;
+
+import com.gemstone.gemfire.security.generator.DummyCredentialGenerator;
+import com.gemstone.gemfire.security.templates.UserPasswordAuthInit;
+
+import org.apache.logging.log4j.Logger;
+
+public class NewWanAuthenticationDUnitTest extends WANTestBase {
+
+  private static final long serialVersionUID = 1L;
+
+  public static final Logger logger = LogService.getLogger();
+
+  public NewWanAuthenticationDUnitTest(String name) {
+    super(name);
+  }
+
+  /**
+   * Authentication test for the new WAN with valid credentials. Although nothing
+   * related to authentication has changed in the new WAN, this test case was
+   * added at QA's request for defect 44650.
+   */
+  public void testWanAuthValidCredentials() {
+    Integer lnPort = (Integer)vm0.invoke(() -> WANTestBase.createFirstLocatorWithDSId( 1 ));
+    logger.info("Created locator on local site");
+
+    Integer nyPort = (Integer)vm1.invoke(() -> WANTestBase.createFirstRemoteLocator( 2, lnPort ));
+    logger.info("Created locator on remote site");
+
+
+    CredentialGenerator gen = new DummyCredentialGenerator();
+    Properties extraProps = gen.getSystemProperties();
+
+    String clientauthenticator = gen.getAuthenticator();
+    String clientauthInit = gen.getAuthInit();
+
+    Properties credentials1 = gen.getValidCredentials(1);
+    if (extraProps != null) {
+      credentials1.putAll(extraProps);
+    }
+    Properties javaProps1 = gen.getJavaProperties();
+
+    Properties credentials2 = gen.getValidCredentials(2);
+    if (extraProps != null) {
+      credentials2.putAll(extraProps);
+    }
+    Properties javaProps2 = gen.getJavaProperties();
+
+    Properties props1 = buildProperties(clientauthenticator, clientauthInit,
+      null, credentials1, null);
+    Properties props2 = buildProperties(clientauthenticator, clientauthInit,
+      null, credentials2, null);
+
+    vm2.invoke(() -> NewWanAuthenticationDUnitTest.createSecuredCache(
+      props1, javaProps1, lnPort ));
+    logger.info("Created secured cache in vm2");
+
+    vm3.invoke(() -> NewWanAuthenticationDUnitTest.createSecuredCache(
+      props2, javaProps2, nyPort ));
+    logger.info("Created secured cache in vm3");
+
+    vm2.invoke(() -> WANTestBase.createSender( "ln", 2,
+      false, 100, 10, false, false, null, true ));
+    logger.info("Created sender in vm2");
+
+    vm3.invoke(() -> createReceiverInSecuredCache());
+    logger.info("Created receiver in vm3");
+
+    vm2.invoke(() -> WANTestBase.createReplicatedRegion(
+      getTestMethodName() + "_RR", "ln", isOffHeap()  ));
+    logger.info("Created RR in vm2");
+    vm3.invoke(() -> WANTestBase.createReplicatedRegion(
+      getTestMethodName() + "_RR", null, isOffHeap()  ));
+    logger.info("Created RR in vm3");
+
+    vm2.invoke(() -> WANTestBase.startSender( "ln" ));
+    vm2.invoke(() -> WANTestBase.waitForSenderRunningState( "ln" ));
+    logger.info("Done successfully.");
+
+  }
+
+  /**
+   * Authentication test for the new WAN with invalid credentials. Although
+   * nothing related to authentication has changed in the new WAN, this test
+   * case was added at QA's request for defect 44650.
+   */
+  public void testWanAuthInvalidCredentials() {
+    Integer lnPort = (Integer)vm0.invoke(() -> WANTestBase.createFirstLocatorWithDSId( 1 ));
+    logger.info("Created locator on local site");
+
+    Integer nyPort = (Integer)vm1.invoke(() -> WANTestBase.createFirstRemoteLocator( 2, lnPort ));
+    logger.info("Created locator on remote site");
+
+
+    CredentialGenerator gen = new DummyCredentialGenerator();
+    logger.info("Picked up credential: " + gen);
+
+    Properties extraProps = gen.getSystemProperties();
+
+    String clientauthenticator = gen.getAuthenticator();
+    String clientauthInit = gen.getAuthInit();
+
+    Properties credentials1 = gen.getInvalidCredentials(1);
+    if (extraProps != null) {
+      credentials1.putAll(extraProps);
+    }
+    Properties javaProps1 = gen.getJavaProperties();
+    Properties credentials2 = gen.getInvalidCredentials(2);
+    if (extraProps != null) {
+      credentials2.putAll(extraProps);
+    }
+    Properties javaProps2 = gen.getJavaProperties();
+
+    Properties props1 = buildProperties(clientauthenticator, clientauthInit,
+      null, credentials1, null);
+    Properties props2 = buildProperties(clientauthenticator, clientauthInit,
+      null, credentials2, null);
+
+    logger.info("Done building auth properties");
+
+    vm2.invoke(() -> NewWanAuthenticationDUnitTest.createSecuredCache(
+      props1, javaProps1, lnPort ));
+    logger.info("Created secured cache in vm2");
+
+    vm3.invoke(() -> NewWanAuthenticationDUnitTest.createSecuredCache(
+      props2, javaProps2, nyPort ));
+    logger.info("Created secured cache in vm3");
+
+    vm2.invoke(() -> WANTestBase.createSender( "ln", 2,
+      false, 100, 10, false, false, null, true ));
+    logger.info("Created sender in vm2");
+
+    vm3.invoke(() -> createReceiverInSecuredCache());
+    logger.info("Created receiver in vm3");
+
+    vm2.invoke(() -> WANTestBase.createReplicatedRegion(
+      getTestMethodName() + "_RR", "ln", isOffHeap()  ));
+    logger.info("Created RR in vm2");
+    vm3.invoke(() -> WANTestBase.createReplicatedRegion(
+      getTestMethodName() + "_RR", null, isOffHeap()  ));
+    logger.info("Created RR in vm3");
+
+    try {
+      vm2.invoke(() -> WANTestBase.startSender( "ln" ));
+      fail("Authentication Failed: While starting the sender, an exception should have been thrown");
+    } catch (Exception e) {
+      if (!(e.getCause().getCause() instanceof AuthenticationFailedException)) {
+        fail("Authentication is not working as expected");
+      }
+    }
+  }
+
+  private static Properties buildProperties(String clientauthenticator,
+                                            String clientAuthInit, String accessor, Properties extraAuthProps,
+                                            Properties extraAuthzProps) {
+
+    Properties authProps = new Properties();
+    if (clientauthenticator != null) {
+      authProps.setProperty(
+        DistributionConfig.SECURITY_CLIENT_AUTHENTICATOR_NAME,
+        clientauthenticator);
+    }
+    if (accessor != null) {
+      authProps.setProperty(DistributionConfig.SECURITY_CLIENT_ACCESSOR_NAME,
+        accessor);
+    }
+    if (clientAuthInit != null) {
+      authProps.setProperty(DistributionConfig.SECURITY_CLIENT_AUTH_INIT_NAME,
+        clientAuthInit);
+    }
+    if (extraAuthProps != null) {
+      authProps.putAll(extraAuthProps);
+    }
+    if (extraAuthzProps != null) {
+      authProps.putAll(extraAuthzProps);
+    }
+    return authProps;
+  }
+
+  public static void createSecuredCache(Properties authProps, Object javaProps, Integer locPort) {
+    authProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+    authProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost[" + locPort + "]");
+
+    logger.info("Set the server properties to: " + authProps);
+    logger.info("Set the java properties to: " + javaProps);
+
+    SecurityTestUtils tmpInstance = new SecurityTestUtils("temp");
+    DistributedSystem ds = tmpInstance.createSystem(authProps, (Properties)javaProps);
+    assertNotNull(ds);
+    assertTrue(ds.isConnected());
+    cache = CacheFactory.create(ds);
+    assertNotNull(cache);
+  }
+
+  public static boolean isDifferentServerInGetCredentialCall = false;
+  public static class UserPasswdAI extends UserPasswordAuthInit {
+    public static AuthInitialize createAI() {
+      return new UserPasswdAI();
+    }
+    @Override
+    public Properties getCredentials(Properties props,
+                                     DistributedMember server, boolean isPeer)
+      throws AuthenticationFailedException {
+      boolean val = ( CacheFactory.getAnyInstance().getDistributedSystem().getDistributedMember().getProcessId() != server.getProcessId());
+      Assert.assertTrue(val, "getCredentials: Server should be different");
+      Properties p = super.getCredentials(props, server, isPeer);
+      if(val) {
+        isDifferentServerInGetCredentialCall = true;
+        CacheFactory.getAnyInstance().getLoggerI18n().convertToLogWriter().config("setting  isDifferentServerInGetCredentialCall " + isDifferentServerInGetCredentialCall);
+      } else {
+        CacheFactory.getAnyInstance().getLoggerI18n().convertToLogWriter().config("setting22  isDifferentServerInGetCredentialCall " + isDifferentServerInGetCredentialCall);
+      }
+      return p;
+    }
+  }
+
+  public static void verifyDifferentServerInGetCredentialCall(){
+    Assert.assertTrue(isDifferentServerInGetCredentialCall, "verifyDifferentServerInGetCredentialCall: Server should be different");
+    isDifferentServerInGetCredentialCall = false;
+  }
+
+  public void testWanAuthValidCredentialsWithServer() {
+    disconnectAllFromDS();
+    {
+      Integer lnPort = (Integer)vm0.invoke(() -> WANTestBase.createFirstLocatorWithDSId( 1 ));
+      logger.info("Created locator on local site");
+
+      Integer nyPort = (Integer)vm1.invoke(() -> WANTestBase.createFirstRemoteLocator( 2, lnPort ));
+      logger.info("Created locator on remote site");
+
+      DummyCredentialGenerator gen = new DummyCredentialGenerator();
+      gen.init();
+      Properties extraProps = gen.getSystemProperties();
+
+      String clientauthenticator = gen.getAuthenticator();
+      String clientauthInit = UserPasswdAI.class.getName() + ".createAI";
+
+      Properties credentials1 = gen.getValidCredentials(1);
+      if (extraProps != null) {
+        credentials1.putAll(extraProps);
+      }
+      Properties javaProps1 = gen.getJavaProperties();
+
+      Properties credentials2 = gen.getValidCredentials(2);
+      if (extraProps != null) {
+        credentials2.putAll(extraProps);
+      }
+      Properties javaProps2 = gen.getJavaProperties();
+
+      Properties props1 = buildProperties(clientauthenticator, clientauthInit,
+        null, credentials1, null);
+      Properties props2 = buildProperties(clientauthenticator, clientauthInit,
+        null, credentials2, null);
+
+      vm2.invoke(() -> NewWanAuthenticationDUnitTest.createSecuredCache(
+        props1, javaProps1, lnPort ));
+      logger.info("Created secured cache in vm2");
+
+      vm3.invoke(() -> NewWanAuthenticationDUnitTest.createSecuredCache(
+        props2, javaProps2, nyPort ));
+      logger.info("Created secured cache in vm3");
+
+      vm2.invoke(() -> WANTestBase.createSender( "ln", 2,
+        false, 100, 10, false, false, null, true ));
+      logger.info("Created sender in vm2");
+
+      vm3.invoke(() -> createReceiverInSecuredCache());
+      logger.info("Created receiver in vm3");
+
+      vm2.invoke(() -> WANTestBase.startSender( "ln" ));
+      vm2.invoke(() -> WANTestBase.waitForSenderRunningState( "ln" ));
+
+      vm2.invoke(() -> verifyDifferentServerInGetCredentialCall());
+      vm3.invoke(() -> verifyDifferentServerInGetCredentialCall());
+
+    }
+  }
+}


[22/63] [abbrv] incubator-geode git commit: GEODE-1059: PRQueryDUnitHelper no longer inherits PartitionedRegionDUnitTestCase class

Posted by kl...@apache.org.
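
There is no commit message body in this archive segment, but the diffs below show the shape of the refactor: PRQueryDUnitHelper no longer obtains its cache by inheriting from PartitionedRegionDUnitTestCase; instead each test injects the cache into the helper through a static setter before the helper is used in a VM. The following is a minimal, hypothetical sketch of that injection pattern (the real classes take a GemFireCacheImpl and DUnit VM objects; plain types are used here to keep the sketch self-contained).

    // Hypothetical sketch of the cache-injection pattern introduced by this refactor.
    final class QueryHelperSketch {
      private static Object cache;                 // stands in for GemFireCacheImpl

      static void setCache(Object c) {             // tests call this in every VM before using the helper
        cache = c;
      }

      static Object getCache() {                   // helper methods read the injected cache
        if (cache == null) {
          throw new IllegalStateException("setCache(...) must be called in this VM first");
        }
        return cache;
      }
    }

    // Test-side usage, mirroring the setCacheInVMs(VM...) methods added in the diffs below:
    // for (VM vm : vms) { vm.invoke(() -> PRQueryDUnitHelper.setCache(getCache())); }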
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDeadlockDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDeadlockDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDeadlockDUnitTest.java
index 6782dee..ecfbfae 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDeadlockDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDeadlockDUnitTest.java
@@ -16,14 +16,12 @@
  */
 package com.gemstone.gemfire.cache.query.partitioned;
 
+import static com.gemstone.gemfire.cache.query.Utils.createPortfoliosAndPositions;
+
 import java.io.File;
-import java.util.Collection;
 
-import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.CacheException;
-import com.gemstone.gemfire.cache.query.Index;
 import com.gemstone.gemfire.cache.query.data.Portfolio;
-import com.gemstone.gemfire.cache.query.data.PortfolioData;
 import com.gemstone.gemfire.cache.query.internal.index.IndexManager.TestHook;
 import com.gemstone.gemfire.cache.query.internal.index.IndexUtils;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
@@ -31,7 +29,6 @@ import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.Host;
-import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.test.dunit.Wait;
@@ -52,12 +49,19 @@ public class PRBasicIndexCreationDeadlockDUnitTest extends
   public PRBasicIndexCreationDeadlockDUnitTest(String name) {
     super(name);
   }
+  public void setCacheInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> PRQueryDUnitHelper.setCache(getCache()));
+    }
+  }
+  public void setCacheInVMsUsingXML(String xmlFile, VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> GemFireCacheImpl.testCacheXml = PRQHelp.findFile(xmlFile));
+      vm.invoke(() -> PRQueryDUnitHelper.setCache(getCache()));
+    }
+  }
 
-  // int totalNumBuckets = 131;
-
-  int queryTestCycle = 10;
-
-  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
+  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper();
 
   final String name = "PartionedPortfolios";
 
@@ -77,11 +81,11 @@ public class PRBasicIndexCreationDeadlockDUnitTest extends
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-    
-    Class valueConstraint = Portfolio.class;
+    setCacheInVMs(vm0,vm1);
     final String fileName1 = "PRPersistentIndexCreation_1.xml";
     final String fileName2 = "PRPersistentIndexCreation_2.xml";
-    
+    setCacheInVMsUsingXML(fileName1, vm0);
+    setCacheInVMsUsingXML(fileName1, vm1);
     final File dir1 = new File("overflowData1");
     final File dir2 = new File("overflowData2");
 
@@ -97,11 +101,11 @@ public class PRBasicIndexCreationDeadlockDUnitTest extends
           success = (dir2).mkdir();
         }
       });
-   
-      vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name, fileName1));
-      vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name, fileName2));
+
+      vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name));
+      vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name));
   
-      final Portfolio[] portfoliosAndPositions = PRQHelp.createPortfoliosAndPositions(100);
+      final Portfolio[] portfoliosAndPositions = createPortfoliosAndPositions(100);
   
       // Putting the data into the PR's created
       vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfoliosAndPositions,
@@ -130,7 +134,7 @@ public class PRBasicIndexCreationDeadlockDUnitTest extends
         public void run2() throws CacheException {
           GemFireCacheImpl.testCacheXml = PRQHelp.findFile(fileName1);
           IndexUtils.testHook = new IndexUtilTestHook();
-          PRQHelp.getCache();
+          getCache();
         }
       });
   
@@ -161,7 +165,7 @@ public class PRBasicIndexCreationDeadlockDUnitTest extends
         @Override
         public void run2() throws CacheException {
           GemFireCacheImpl.testCacheXml = PRQHelp.findFile(fileName2);
-          PRQHelp.getCache();
+          getCache();
         }
       });
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicMultiIndexCreationDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicMultiIndexCreationDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicMultiIndexCreationDUnitTest.java
index d020ef6..7b93734 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicMultiIndexCreationDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicMultiIndexCreationDUnitTest.java
@@ -16,6 +16,8 @@
  */
 package com.gemstone.gemfire.cache.query.partitioned;
 
+import static com.gemstone.gemfire.cache.query.Utils.*;
+
 import java.util.ArrayList;
 import java.util.Collection;
 
@@ -46,12 +48,12 @@ public class PRBasicMultiIndexCreationDUnitTest extends
   public PRBasicMultiIndexCreationDUnitTest(String name) {
     super(name);
   }
-
-  // int totalNumBuckets = 131;
-
-  int queryTestCycle = 10;
-
-  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
+  public void setCacheInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> PRQueryDUnitHelper.setCache(getCache()));
+    }
+  }
+  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper();
 
   final String name = "PartionedPortfolios";
 
@@ -73,12 +75,12 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
+    setCacheInVMs(vm0,vm1,vm2,vm3);
     LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreationDUnitTest.testPRBasicIndexCreate started ....");
 
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     // Creating local region on vm0 to compare the results of query.
     // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(localName,
     // Scope.DISTRIBUTED_ACK, redundancy));
@@ -87,13 +89,13 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     LogWriterUtils.getLogWriter()
         .info("PRBasicIndexCreationDUnitTest : creating all the prs ");
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
@@ -134,21 +136,21 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
+    setCacheInVMs(vm0,vm1,vm2,vm3);
     LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreation.testPRMultiIndexCreation Test Started");
 
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
 
     vm1.invoke(PRQHelp
-        .getCacheSerializableRunnableForPRCreate(name, redundancy));
+        .getCacheSerializableRunnableForPRCreate(name, redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp
-        .getCacheSerializableRunnableForPRCreate(name, redundancy));
+        .getCacheSerializableRunnableForPRCreate(name, redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp
-        .getCacheSerializableRunnableForPRCreate(name, redundancy));
+        .getCacheSerializableRunnableForPRCreate(name, redundancy, PortfolioData.class));
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
@@ -221,20 +223,20 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
+    setCacheInVMs(vm0,vm1,vm2,vm3);
     // final String fileName = "PRIndexCreation.xml";
-    // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
+    // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
     // fileName));
     LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testCreatePartitionedRegionThroughXMLAndAPI started ");
     // creating all the prs
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
 
     ArrayList<String> names = new ArrayList<String>();
     names.add("PrIndexOnStatus");
@@ -250,9 +252,9 @@ public class PRBasicMultiIndexCreationDUnitTest extends
 
 //  adding a new node to an already existing system.
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     // putting some data in.
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
@@ -273,22 +275,21 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-//    VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
+    setCacheInVMs(vm0,vm1,vm3);
     // final String fileName = "PRIndexCreation.xml";
-    // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
+    // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
     // fileName));
     LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testCreatePartitionedIndexWithNoAliasAfterPuts started ");
     // creating all the prs
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
 
     ArrayList<String> names = new ArrayList<String>();
     names.add("PrIndexOnStatus");
@@ -302,16 +303,8 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForDefineIndex(name, names, exps));
 
-    //vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
-    //    "PrIndexOnId", "p.ID", "p"));
-
-    //vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
-    //    "PrIndexOnPKID", "p.pkid", "p"));
-//  adding a new node to an already existing system.
-    //vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-    //    Scope.DISTRIBUTED_ACK, redundancy));
     // putting some data in.
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
@@ -330,33 +323,21 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-//    VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
-    // final String fileName = "PRIndexCreation.xml";
-    // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
-    // fileName));
+    setCacheInVMs(vm0,vm1,vm3);
     LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testCreatePartitionedIndexWithNoAliasBeforePuts started ");
     // creating all the prs
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
-
-    // vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
-    // "PrIndexOnId", "p.ID", "p"));
+        redundancy, PortfolioData.class));
 
-    // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
-    // "PrIndexOnPKID", "p.pkid", "p"));
-    // adding a new node to an already existing system.
-    // vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-    // Scope.DISTRIBUTED_ACK, redundancy));
     // putting some data in.
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
@@ -373,42 +354,8 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForDefineIndex(name, names, exps));
 
-    /*
-    vm1.invoke(new CacheSerializableRunnable("IndexCreationOnPosition") {
-      public void run2(){
-        try {
-          Cache cache = getCache();
-          QueryService qs = cache.getQueryService();
-          Region region = cache.getRegion(name);
-          LogWriter logger = cache.getLogger();
-         // logger.info("Test Creating index with Name : [ "+indexName+" ] " +
-         //               "IndexedExpression : [ "+indexedExpression+" ] Alias : [ "+alias+" ] FromClause : [ "+region.getFullPath() + " " + alias+" ] " );
-          Index parIndex = qs.createIndex("IndexOnPotionMktValue", IndexType.FUNCTIONAL, "pVal.mktValue"
-              ,region.getFullPath()+" pf, pf.positions pVal TYPE Position", "import parReg.\"query\".Position;");
-          logger.info(
-              "Index creted on partitioned region : " + parIndex);
-          logger.info(
-              "Number of buckets indexed in the partitioned region locally : "
-                  + "" + ((PartitionedIndex)parIndex).getNumberOfIndexedBucket()
-                  + " and remote buckets indexed : "
-                  + ((PartitionedIndex)parIndex).getNumRemoteBucketsIndexed());
-          /*
-           * assertEquals("Max num of buckets in the partiotion regions and
-           * the " + "buckets indexed should be equal",
-           * ((PartitionedRegion)region).getTotalNumberOfBuckets(),
-           * (((PartionedIndex)parIndex).getNumberOfIndexedBucket()+((PartionedIndex)parIndex).getNumRemtoeBucketsIndexed()));
-           * should put all the assetion in a seperate function.
-           */ 
-       /* } 
-        catch (Exception ex) {
-          fail("Creating Index in this vm failed : ", ex);
-        }
-      
-      }
-    });*/
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
-    // vm2.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
   } 
   
@@ -422,21 +369,18 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
-    // final String fileName = "PRIndexCreation.xml";
-    // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
-    // fileName));
+    setCacheInVMs(vm0,vm1,vm2,vm3);
     LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testPartitionedIndexUsageWithPRQuery started ");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     
     ArrayList<String> names = new ArrayList<String>();
     names.add("PrIndexOnID");
@@ -446,20 +390,20 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForDefineIndex(name, names, exps));
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
     vm0.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
         portfolio, cnt, cntDest));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
         name, localName));
     // validation on index usage with queries over a pr
-    vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
-    vm1.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
-    vm2.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
-    vm3.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck());
+    vm1.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck());
+    vm2.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck());
+    vm3.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck());
     LogWriterUtils.getLogWriter()
     .info(
         "PRBasicIndexCreationDUnitTest.testPartitionedIndexUsageWithPRQuery done ");
@@ -474,32 +418,27 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
+    setCacheInVMs(vm0,vm1);
 
-    // final String fileName = "PRIndexCreation.xml";
-    // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
-    // fileName));
-    
     int redundancy = 1;
     LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testPartitionedIndexCreationDuringPersistentRecovery started ");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPersistentPRCreate(name,
-        redundancy, PRQHelp.valueConstraint));
+        redundancy, PortfolioData.class));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPersistentPRCreate(name,
-        redundancy, PRQHelp.valueConstraint));
-//    vm2.invoke(PRQHelp.getCacheSerializableRunnableForPersistentPRCreate(name,
-//        redundancy, PRQHelp.valueConstraint));
-    
-    
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+        redundancy, PortfolioData.class));
+
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
     
     
     //Restart a single member
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForCloseCache());
+    setCacheInVMs(vm0);
     AsyncInvocation regionCreateFuture = vm0.invokeAsync(PRQHelp.getCacheSerializableRunnableForPersistentPRCreate(name,
-        redundancy, PRQHelp.valueConstraint));
+        redundancy, PortfolioData.class));
     
     //Ok, I want to do this in parallel
     ArrayList<String> names = new ArrayList<String>();
@@ -515,7 +454,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     indexCreateFuture.getResult(20 * 1000);
     
     vm0.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
         portfolio, cnt, cntDest));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
@@ -523,7 +462,6 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     // validation on index usage with queries over a pr
     //The indexes may not have been completely created yet, because the buckets
     //may still be recovering from disk.
-//    vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
     LogWriterUtils.getLogWriter()
     .info(
         "PRBasicIndexCreationDUnitTest.testPartitionedIndexCreationDuringPersistentRecovery done ");
@@ -541,18 +479,18 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
+    setCacheInVMs(vm0,vm1,vm2,vm3);
     LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testPartitionedQueryWithIndexOnIdBug37089 started ");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     
     ArrayList<String> names = new ArrayList<String>();
     names.add("PrIndexOnID");
@@ -562,11 +500,11 @@ public class PRBasicMultiIndexCreationDUnitTest extends
 
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForDefineIndex(name, names, exps));
     
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
     vm0.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
         portfolio, cnt, cntDest));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
@@ -586,9 +524,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-//    VM vm2 = host.getVM(2);
-//    VM vm3 = host.getVM(3);
-    // closeAllCache();
+    setCacheInVMs(vm0,vm1);
     final String fileName = "PRIndexCreation.xml";
     LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreation.testCreatePartitionedIndexThroughXML started");
@@ -596,35 +532,12 @@ public class PRBasicMultiIndexCreationDUnitTest extends
         "Starting and initializing partitioned regions and indexes using xml");
     LogWriterUtils.getLogWriter().info(
         "Starting a pr asynchronously using an xml file name : " + fileName);
-   // AsyncInvocation asyInvk0 = vm0.invokeAsync(PRQHelp
-   //     .getCacheSerializableRunnableForPRCreateThrougXML(name, fileName));
-   // AsyncInvocation asyInvk1 = vm1.invokeAsync(PRQHelp
-   //     .getCacheSerializableRunnableForPRCreateThrougXML(name, fileName));
-   // asyInvk1.join();
-   // if (asyInvk1.exceptionOccurred()) {
-   //   fail("asyInvk1 failed", asyInvk1.getException());
-   // }
-   // asyInvk0.join();
-   // if (asyInvk0.exceptionOccurred()) {
-    //  fail("asyInvk0 failed", asyInvk0.getException());
-   // }
-    // printing all the indexes are created.
-    //vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
-    //vm1.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
-    /*
-    <index name="index8">
-    <functional from-clause="/PartionedPortfolios.keys k" expression="k" />
-  </index> */
-  //  vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name, fileName));
-    
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
-//    vm0.invoke(PRQHelp
-//        .getCacheSerializableRunnableForLocalRegionCreation(localName));
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+        redundancy, PortfolioData.class));
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
@@ -643,12 +556,9 @@ public class PRBasicMultiIndexCreationDUnitTest extends
 
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForDefineIndex(name, names, exps, fromClause));
     
-    //vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(localName,
-    //    "index8","k", "/LocalPortfolios.keys k" , ""));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
-    
-    
+
     LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreation.testCreatePartitionedIndexThroughXML is done  " );
     
@@ -664,18 +574,19 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
+    setCacheInVMs(vm0,vm1,vm2,vm3);
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     
     // create more vms to host data.
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     //  Putting the data into the PR's created
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
      cnt, cntDest));
@@ -706,14 +617,15 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
+    setCacheInVMs(vm0,vm1,vm2,vm3);
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     //  Putting the data into the PR's created
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
      cnt, cntDest));
@@ -728,9 +640,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     
     // create an accessor vm.
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-        redundancy));
-    
-    
+        redundancy, PortfolioData.class));
   }
 
   /**
@@ -760,7 +670,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    VM vm3 = host.getVM(3);
-
+   setCacheInVMs(vm0,vm1,vm2,vm3);
    // Creating PR's on the participating VM's
    LogWriterUtils.getLogWriter()
      .info(
@@ -790,7 +700,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
    // Generating portfolio object array to be populated across the PR's & Local
    // Regions
 
-   final Portfolio[] portfoliosAndPositions = PRQHelp.createPortfoliosAndPositions(totalDataSize);
+   final Portfolio[] portfoliosAndPositions = createPortfoliosAndPositions(totalDataSize);
 
    // Putting the data into the PR's created
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfoliosAndPositions,
@@ -861,7 +771,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    VM vm3 = host.getVM(3);
-
+   setCacheInVMs(vm0,vm1,vm2,vm3);
    // Creating PR's on the participating VM's
    LogWriterUtils.getLogWriter()
    .info(
@@ -891,7 +801,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
    // Generating portfolio object array to be populated across the PR's & Local
    // Regions
 
-   final Portfolio[] portfoliosAndPositions = PRQHelp.createPortfoliosAndPositions(totalDataSize);
+   final Portfolio[] portfoliosAndPositions = createPortfoliosAndPositions(totalDataSize);
 
    // Putting the data into the PR's created
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfoliosAndPositions,
@@ -937,7 +847,6 @@ public class PRBasicMultiIndexCreationDUnitTest extends
 
 public void testIndexQueryingWithOrderByLimit() throws Exception
  {
-  int dataSize = 10;
   int step = 2;
   int totalDataSize = 90;
   final int i = 0;
@@ -952,7 +861,7 @@ public void testIndexQueryingWithOrderByLimit() throws Exception
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    VM vm3 = host.getVM(3);
-
+   setCacheInVMs(vm0,vm1,vm2,vm3);
    // Creating PR's on the participating VM's
    LogWriterUtils.getLogWriter()
      .info(
@@ -982,7 +891,7 @@ public void testIndexQueryingWithOrderByLimit() throws Exception
    // Generating portfolio object array to be populated across the PR's & Local
    // Regions
 
-   final Portfolio[] portfoliosAndPositions = PRQHelp.createPortfoliosAndPositions(totalDataSize);
+   final Portfolio[] portfoliosAndPositions = createPortfoliosAndPositions(totalDataSize);
 
    // Putting the data into the PR's created
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfoliosAndPositions,
@@ -1060,6 +969,4 @@ public void testIndexQueryingWithOrderByLimit() throws Exception
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : *Querying PR's with DACK Test ENDED*****");
  }
-
-  
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicQueryDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicQueryDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicQueryDUnitTest.java
index 44cec38..bbe3c90 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicQueryDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicQueryDUnitTest.java
@@ -16,6 +16,8 @@
  */
 package com.gemstone.gemfire.cache.query.partitioned;
 
+import static com.gemstone.gemfire.cache.query.Utils.*;
+
 import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.query.Index;
@@ -49,11 +51,13 @@ public class PRBasicQueryDUnitTest extends PartitionedRegionDUnitTestCase
     super(name);
   }
 
-  int totalNumBuckets = 100;
-
-  int queryTestCycle = 10;
+  public void setCacheInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> PRQueryDUnitHelper.setCache(getCache()));
+    }
+  }
 
-  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
+  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper();
 
   final String name = "Portfolios";
 
@@ -75,7 +79,7 @@ public class PRBasicQueryDUnitTest extends PartitionedRegionDUnitTestCase
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0); 
     VM vm1 = host.getVM(1);
-
+    setCacheInVMs(vm0, vm1);
     LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
@@ -87,11 +91,9 @@ public class PRBasicQueryDUnitTest extends PartitionedRegionDUnitTestCase
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the Accessor node in the PR");
 
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     // Creating local region on vm0 to compare the results of query.
-//    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(localName,
-//        Scope.DISTRIBUTED_ACK, redundancy));
-    vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName));
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the Accessor node in the PR");
@@ -101,7 +103,7 @@ public class PRBasicQueryDUnitTest extends PartitionedRegionDUnitTestCase
         .info(
             "PRQBasicQueryDUnitTest:testPRBasicQuerying ----- Creating the Datastore node in the PR");
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
 
     LogWriterUtils.getLogWriter()
         .info(
@@ -114,7 +116,7 @@ public class PRBasicQueryDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
@@ -146,7 +148,7 @@ public class PRBasicQueryDUnitTest extends PartitionedRegionDUnitTestCase
     VM vm0 = host.getVM(0); 
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
-
+    setCacheInVMs(vm0, vm1, vm2);
     LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRCountStarQuery: Querying PR Test with DACK Started");
@@ -160,8 +162,6 @@ public class PRBasicQueryDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
         redundancy, Portfolio.class));
     // Creating local region on vm0 to compare the results of query.
-//    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(localName,
-//        Scope.DISTRIBUTED_ACK, redundancy));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName, Portfolio.class));
     LogWriterUtils.getLogWriter()
         .info(
@@ -189,7 +189,7 @@ public class PRBasicQueryDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final Portfolio[] portfolio = PRQHelp.createPortfoliosAndPositions(cntDest+100);
+    final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest+100);
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest+100));
@@ -225,12 +225,12 @@ public class PRBasicQueryDUnitTest extends PartitionedRegionDUnitTestCase
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-
+    setCacheInVMs(vm0, vm1);
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
 
     vm0.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
     vm1.invoke(PRQHelp
         .getCacheSerializableRunnableForPRCreate(name, redundancy, Portfolio.class));
 

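The common thread in the hunks above is that each test now installs its own cache into the helper up front (setCacheInVMs -> PRQueryDUnitHelper.setCache) instead of constructing PRQueryDUnitHelper("") and letting the helper build one per instance. A minimal sketch of the receiving side, assuming the helper simply parks the cache in a static field; only setCache(Cache) is visible in these hunks, so the field and accessor names below are assumptions, not the committed code:

    // Hypothetical sketch, not the committed helper. It only illustrates the
    // hand-off behind setCacheInVMs(): each VM stores the cache handed to it
    // by the test, and the serializable runnables later read that same field
    // instead of creating a cache of their own.
    public class PRQueryDUnitHelper implements java.io.Serializable {

      private static transient com.gemstone.gemfire.cache.Cache cache;

      public static void setCache(com.gemstone.gemfire.cache.Cache c) {
        cache = c;            // invoked via vm.invoke(() -> PRQueryDUnitHelper.setCache(getCache()))
      }

      static com.gemstone.gemfire.cache.Cache basicGetCache() {   // accessor name is an assumption
        return cache;
      }
    }
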
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicRemoveIndexDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicRemoveIndexDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicRemoveIndexDUnitTest.java
index 33ca1ac..bdbaeb3 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicRemoveIndexDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicRemoveIndexDUnitTest.java
@@ -16,6 +16,8 @@
  */
 package com.gemstone.gemfire.cache.query.partitioned;
 
+import static com.gemstone.gemfire.cache.query.Utils.createPortfolioData;
+
 import com.gemstone.gemfire.cache.query.data.PortfolioData;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
@@ -35,8 +37,14 @@ public class PRBasicRemoveIndexDUnitTest extends PartitionedRegionDUnitTestCase
   public PRBasicRemoveIndexDUnitTest (String name) {
     super(name);
   }
+
+  public void setCacheInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> PRQueryDUnitHelper.setCache(getCache()));
+    }
+  }
   
-  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
+  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper();
   
   /**
    * Name of the partitioned region for the test.
@@ -67,19 +75,19 @@ public class PRBasicRemoveIndexDUnitTest extends PartitionedRegionDUnitTestCase
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-    
+    setCacheInVMs(vm0, vm1, vm2, vm3);
     LogWriterUtils.getLogWriter().info(
         "PRBasicRemoveIndexDUnitTest.testPRBasicIndexCreate test now starts ....");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(start, end);
+    final PortfolioData[] portfolio = createPortfolioData(start, end);
     // Putting the data into the PR's created
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         start, end));
@@ -110,18 +118,18 @@ public class PRBasicRemoveIndexDUnitTest extends PartitionedRegionDUnitTestCase
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-    
+    setCacheInVMs(vm0, vm1, vm2, vm3);
     LogWriterUtils.getLogWriter().info(
         "PRBasicRemoveIndexDUnitTest.testPRBasicIndexCreate test now starts ....");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(start, end);
+        redundancy, PortfolioData.class));
+    final PortfolioData[] portfolio = createPortfolioData(start, end);
     // Putting the data into the PR's created
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         start, end));
@@ -134,8 +142,5 @@ public class PRBasicRemoveIndexDUnitTest extends PartitionedRegionDUnitTestCase
     
 //  remove indexes
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForRemoveIndex(name, true));
-    
-    
   }
-  
 }
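
A second pattern repeated through these files: the portfolio factories are now static imports from com.gemstone.gemfire.cache.query.Utils, and every region-creation runnable takes the value class explicitly (for example PortfolioData.class). A rough sketch of what such a three-argument overload can look like, assuming the extra class ends up as the region's value constraint; the committed body lives in PRQueryDUnitHelper.java and may differ:

    // Sketch under stated assumptions; basicGetCache() refers to the
    // hypothetical accessor from the earlier sketch, everything else uses
    // standard GemFire cache and dunit test classes.
    public CacheSerializableRunnable getCacheSerializableRunnableForPRCreate(
        final String regionName, final int redundancy, final Class valueConstraint) {
      return new CacheSerializableRunnable("createPR_" + regionName) {
        @Override
        public void run2() throws CacheException {
          Cache cache = PRQueryDUnitHelper.basicGetCache();
          PartitionAttributesFactory paf = new PartitionAttributesFactory();
          paf.setRedundantCopies(redundancy);            // redundantCopies chosen by the test
          AttributesFactory attr = new AttributesFactory();
          attr.setValueConstraint(valueConstraint);      // e.g. PortfolioData.class
          attr.setPartitionAttributes(paf.create());
          cache.createRegion(regionName, attr.create());
        }
      };
    }
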

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRColocatedEquiJoinDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRColocatedEquiJoinDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRColocatedEquiJoinDUnitTest.java
index 25c2393..f86b94e 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRColocatedEquiJoinDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRColocatedEquiJoinDUnitTest.java
@@ -19,6 +19,8 @@
  */
 package com.gemstone.gemfire.cache.query.partitioned;
 
+import static com.gemstone.gemfire.cache.query.Utils.*;
+
 import java.util.ArrayList;
 
 import parReg.query.unittest.NewPortfolio;
@@ -68,7 +70,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
 
   int queryTestCycle = 10;
 
-  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
+  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper();
 
   final String name = "Portfolios1";
 
@@ -88,7 +90,11 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
   public PRColocatedEquiJoinDUnitTest(String name) {
     super(name);
   }
-
+  public void setCacheInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> PRQueryDUnitHelper.setCache(getCache()));
+    }
+  }
   /**
    * A very basic dunit test that <br>
    * 1. Creates two PR Data Stores with redundantCopies = 1.
@@ -100,7 +106,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
   {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-
+    setCacheInVMs(vm0);
     LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
@@ -146,8 +152,8 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final Portfolio[] portfolio = PRQHelp.createPortfoliosAndPositions(cntDest);
-    final NewPortfolio[] newPortfolio = PRQHelp.createNewPortfoliosAndPositions(cntDest);
+    final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
+    final NewPortfolio[] newPortfolio = createNewPortfoliosAndPositions(cntDest);
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio,
         cnt, cntDest));
@@ -179,7 +185,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     IgnoredException.addIgnoredException("UnsupportedOperationException");
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-
+    setCacheInVMs(vm0);
     LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
@@ -259,8 +265,8 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final Portfolio[] portfolio = PRQHelp.createPortfoliosAndPositions(cntDest);
-    final NewPortfolio[] newPortfolio = PRQHelp.createNewPortfoliosAndPositions(cntDest);
+    final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
+    final NewPortfolio[] newPortfolio = createNewPortfoliosAndPositions(cntDest);
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio,
         cnt, cntDest));
@@ -383,7 +389,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
 
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-
+    setCacheInVMs(vm0);
     LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
@@ -433,8 +439,8 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final Portfolio[] portfolio = PRQHelp.createPortfoliosAndPositions(cntDest);
-    final NewPortfolio[] newPortfolio = PRQHelp.createNewPortfoliosAndPositions(cntDest);
+    final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
+    final NewPortfolio[] newPortfolio = createNewPortfoliosAndPositions(cntDest);
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio,
         cnt, cntDest));
@@ -472,7 +478,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
 
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-
+    setCacheInVMs(vm0);
     LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
@@ -520,8 +526,8 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final Portfolio[] portfolio = PRQHelp.createPortfoliosAndPositions(cntDest);
-    final NewPortfolio[] newPortfolio = PRQHelp.createNewPortfoliosAndPositions(cntDest);
+    final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
+    final NewPortfolio[] newPortfolio = createNewPortfoliosAndPositions(cntDest);
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio,
         cnt, cntDest));
@@ -559,7 +565,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
   {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-
+    setCacheInVMs(vm0);
     LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
@@ -604,8 +610,8 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final Portfolio[] portfolio = PRQHelp.createPortfoliosAndPositions(cntDest);
-    final NewPortfolio[] newPortfolio = PRQHelp.createNewPortfoliosAndPositions(cntDest);
+    final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
+    final NewPortfolio[] newPortfolio = createNewPortfoliosAndPositions(cntDest);
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio,
         cnt, cntDest));
@@ -643,7 +649,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
 
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-
+    setCacheInVMs(vm0);
     LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
@@ -693,8 +699,8 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final Portfolio[] portfolio = PRQHelp.createPortfoliosAndPositions(cntDest);
-    final NewPortfolio[] newPortfolio = PRQHelp.createNewPortfoliosAndPositions(cntDest);
+    final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
+    final NewPortfolio[] newPortfolio = createNewPortfoliosAndPositions(cntDest);
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio,
         cnt, cntDest));
@@ -732,7 +738,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
 
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-
+    setCacheInVMs(vm0);
     LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
@@ -779,8 +785,8 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final Portfolio[] portfolio = PRQHelp.createPortfoliosAndPositions(cntDest);
-    final NewPortfolio[] newPortfolio = PRQHelp.createNewPortfoliosAndPositions(cntDest);
+    final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
+    final NewPortfolio[] newPortfolio = createNewPortfoliosAndPositions(cntDest);
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio,
         cnt, cntDest));
@@ -818,7 +824,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
   {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-
+    setCacheInVMs(vm0);
     LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
@@ -863,8 +869,8 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final Portfolio[] portfolio = PRQHelp.createPortfoliosAndPositions(cntDest);
-    final NewPortfolio[] newPortfolio = PRQHelp.createNewPortfoliosAndPositions(cntDest);
+    final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
+    final NewPortfolio[] newPortfolio = createNewPortfoliosAndPositions(cntDest);
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio,
         cnt, cntDest));
@@ -902,7 +908,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
 
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-
+    setCacheInVMs(vm0);
     LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
@@ -950,8 +956,8 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final Portfolio[] portfolio = PRQHelp.createPortfoliosAndPositions(cntDest);
-    final NewPortfolio[] newPortfolio = PRQHelp.createNewPortfoliosAndPositions(cntDest);
+    final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
+    final NewPortfolio[] newPortfolio = createNewPortfoliosAndPositions(cntDest);
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio,
         cnt, cntDest));
@@ -989,7 +995,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
 
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-
+    setCacheInVMs(vm0);
     LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
@@ -1036,8 +1042,8 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final Portfolio[] portfolio = PRQHelp.createPortfoliosAndPositions(cntDest);
-    final NewPortfolio[] newPortfolio = PRQHelp.createNewPortfoliosAndPositions(cntDest);
+    final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
+    final NewPortfolio[] newPortfolio = createNewPortfoliosAndPositions(cntDest);
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio,
         cnt, cntDest));
@@ -1076,7 +1082,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-
+    setCacheInVMs(vm0, vm1);
     LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
@@ -1124,8 +1130,8 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final Portfolio[] portfolio = PRQHelp.createPortfoliosAndPositions(cntDest);
-    final NewPortfolio[] newPortfolio = PRQHelp.createNewPortfoliosAndPositions(cntDest);
+    final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
+    final NewPortfolio[] newPortfolio = createNewPortfoliosAndPositions(cntDest);
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio,
         cnt, cntDest));
@@ -1233,7 +1239,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
 
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-
+    setCacheInVMs(vm0);
     LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
@@ -1283,8 +1289,8 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final Portfolio[] portfolio = PRQHelp.createPortfoliosAndPositions(cntDest);
-    final NewPortfolio[] newPortfolio = PRQHelp.createNewPortfoliosAndPositions(cntDest);
+    final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
+    final NewPortfolio[] newPortfolio = createNewPortfoliosAndPositions(cntDest);
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio,
         cnt, cntDest));
@@ -1316,7 +1322,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
 
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-
+    setCacheInVMs(vm0);
     LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
@@ -1366,8 +1372,8 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final Portfolio[] portfolio = PRQHelp.createPortfoliosAndPositions(cntDest);
-    final NewPortfolio[] newPortfolio = PRQHelp.createNewPortfoliosAndPositions(cntDest);
+    final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
+    final NewPortfolio[] newPortfolio = createNewPortfoliosAndPositions(cntDest);
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio,
         cnt, cntDest));
@@ -1398,7 +1404,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
 
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-
+    setCacheInVMs(vm0);
     LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
@@ -1448,8 +1454,8 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final Portfolio[] portfolio = PRQHelp.createPortfoliosAndPositions(cntDest);
-    final Portfolio[] newPortfolio = PRQHelp.createPortfoliosAndPositions(cntDest);
+    final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
+    final Portfolio[] newPortfolio = createPortfoliosAndPositions(cntDest);
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio,
         cnt, cntDest));
@@ -1480,7 +1486,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
 
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-
+    setCacheInVMs(vm0);
     LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
@@ -1533,8 +1539,8 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final Portfolio[] portfolio = PRQHelp.createPortfoliosAndPositions(cntDest);
-    final NewPortfolio[] newPortfolio = PRQHelp.createNewPortfoliosAndPositions(cntDest);
+    final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
+    final NewPortfolio[] newPortfolio = createNewPortfoliosAndPositions(cntDest);
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio,
         cnt, cntDest));
@@ -1577,7 +1583,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-
+    setCacheInVMs(vm0, vm1);
     LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
@@ -1611,8 +1617,8 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
-    final Portfolio[] portfolio = PRQHelp.createPortfoliosAndPositions(cntDest);
-    final NewPortfolio[] newPortfolio = PRQHelp.createNewPortfoliosAndPositions(cntDest);
+    final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
+    final NewPortfolio[] newPortfolio = createNewPortfoliosAndPositions(cntDest);
     
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRInvalidQueryDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRInvalidQueryDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRInvalidQueryDUnitTest.java
index aa7f1fe..4fa2b5a 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRInvalidQueryDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRInvalidQueryDUnitTest.java
@@ -22,6 +22,8 @@ package com.gemstone.gemfire.cache.query.partitioned;
  * that they generate various Exceptions
  */
 
+import static com.gemstone.gemfire.cache.query.Utils.createPortfolioData;
+
 import com.gemstone.gemfire.cache.query.data.PortfolioData;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
@@ -42,9 +44,15 @@ public class PRInvalidQueryDUnitTest extends PartitionedRegionDUnitTestCase
     super(name);
   }
 
+  public void setCacheInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> PRQueryDUnitHelper.setCache(getCache()));
+    }
+  }
+
   int totalNumBuckets = 100;
 
-  PRQueryDUnitHelper prq = new PRQueryDUnitHelper("");
+  PRQueryDUnitHelper prq = new PRQueryDUnitHelper();
 
   final String name = "Portfolios";
 
@@ -73,7 +81,7 @@ public class PRInvalidQueryDUnitTest extends PartitionedRegionDUnitTestCase
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
+    setCacheInVMs(vm0, vm1, vm2, vm3);
     // Creting PR's on the participating VM's
 
     // Creating Accessor node on the VM
@@ -81,7 +89,7 @@ public class PRInvalidQueryDUnitTest extends PartitionedRegionDUnitTestCase
         .info(
             "PRInvalidQueryDUnitTest#testPRDAckCreationAndQueryingWithInvalidQuery: Creating the Accessor node in the PR");
     vm0.invoke(prq.getCacheSerializableRunnableForPRAccessorCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRInvalidQueryDUnitTest#testPRDAckCreationAndQueryingWithInvalidQuery: Successfully created the Accessor node in the PR");
@@ -91,11 +99,11 @@ public class PRInvalidQueryDUnitTest extends PartitionedRegionDUnitTestCase
         .info(
             "PRInvalidQueryDUnitTest#testPRDAckCreationAndQueryingWithInvalidQuery: Creating the Datastore node in the PR");
     vm1.invoke(prq.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(prq.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(prq.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRInvalidQueryDUnitTest#testPRDAckCreationAndQueryingWithInvalidQuery: Successfully Created the Datastore node in the PR");
@@ -107,7 +115,7 @@ public class PRInvalidQueryDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final PortfolioData[] portfolio = prq.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
 
     // Putting the data into the PR's created
     vm0.invoke(prq.getCacheSerializableRunnableForPRPuts(name, portfolio, i, i
@@ -124,8 +132,8 @@ public class PRInvalidQueryDUnitTest extends PartitionedRegionDUnitTestCase
 
     final String invalidQuery = "Invalid Query";
     // querying the VM for data
-    vm0.invoke(prq.getCacheSerializableRunnableForPRInvalidQuery(name,
-        invalidQuery));
+    vm0.invoke(prq.getCacheSerializableRunnableForPRInvalidQuery(name
+    ));
     LogWriterUtils.getLogWriter()
         .info(
             "PRInvalidQueryDUnitTest#testPRDAckCreationAndQueryingWithInvalidQuery: *****Querying PR's Test with Expected Invalid Query Exception *****");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryCacheCloseDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryCacheCloseDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryCacheCloseDUnitTest.java
index af53e11..97021f5 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryCacheCloseDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryCacheCloseDUnitTest.java
@@ -14,15 +14,24 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package com.gemstone.gemfire.cache.query.partitioned;
 
+/**
+ * This test tests the PR query behaviour with respect to cache closure
+ * happening on one of the data stores. PR is configured with redundantCopies =
+ * 1, and cache close is done randomly on one of the data stores and then
+ * recreated, thus avoiding any data loss.
+ * 
+ */
+
+import static com.gemstone.gemfire.cache.query.Utils.createPortfolioData;
+
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Properties;
 import java.util.Random;
 
-import org.junit.experimental.categories.Category;
-
 import com.gemstone.gemfire.cache.query.data.PortfolioData;
 import com.gemstone.gemfire.internal.cache.ForceReattemptException;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
@@ -33,31 +42,32 @@ import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.test.dunit.Wait;
-import com.gemstone.gemfire.test.junit.categories.FlakyTest;
+import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
 
-/**
- * This test tests the PR query behaviour with respect to cache closure
- * happening on one of the data stores. PR is configured with redundantCopies =
- * 1, and cache close is done randomly on one of the data stores and then
- * recreated, thus avoiding any data loss.
- */
-public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase {
+public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase
+{
+
+  /**
+   * constructor *
+   * 
+   * @param name
+   */
 
   public PRQueryCacheCloseDUnitTest(String name) {
+
     super(name);
   }
-
-  static Properties props = new Properties();
-
-  int totalNumBuckets = 100;
+  public void setCacheInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> PRQueryDUnitHelper.setCache(getCache()));
+    }
+  }
 
   int threadSleepTime = 500;
 
-  int querySleepTime = 2000;
-
   int queryTestCycle = 10;
 
-  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
+  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper();
 
   final String name = "Portfolios";
 
@@ -90,7 +100,7 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase {
     VM accessor = host.getVM(0);
     VM datastore1 = host.getVM(1);
     VM datastore2 = host.getVM(2);
-
+    setCacheInVMs(accessor, datastore1, datastore2);
     List vmList = new LinkedList();
     vmList.add(datastore1);
     vmList.add(datastore2);
@@ -101,7 +111,7 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase {
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithDelay: Creating Accessor node on VM0");
     accessor.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithDelay: Successfully Created Accessor node on VM0");
@@ -110,9 +120,9 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase {
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithDelay: Creating PR's across all VM1 , VM2");
     datastore1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     datastore2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithDelay: Successfully Created PR on VM1 , VM2");
@@ -122,7 +132,7 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase {
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithDelay: Creating Local Region on VM0");
     accessor.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithDelay: Successfully Created Local Region on VM0");
@@ -130,7 +140,7 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase {
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
 
     // Putting the data into the accessor node
     LogWriterUtils.getLogWriter()
@@ -167,8 +177,12 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase {
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithDelay: Calling for cache close on either of the Datastores VM1 , VM2 at random and then recreating the cache, with a predefined Delay ");
     for (int j = 0; j < queryTestCycle; j++) {
       int k = (random.nextInt(vmList.size()));
+      LogWriterUtils.getLogWriter().info("PROperationWithQueryDUnitTest#getCacheSerializableRunnableForCacheClose: Closing cache");
+      ((VM)vmList.get(k)).invoke(() -> closeCache());
+      LogWriterUtils.getLogWriter().info("PROperationWithQueryDUnitTest#getCacheSerializableRunnableForCacheClose: Cache Closed");
+      setCacheInVMs(((VM)vmList.get(k)));
       ((VM)(vmList.get(k))).invoke(PRQHelp.getCacheSerializableRunnableForCacheClose(
-          name, redundancy));
+          name, redundancy, PortfolioData.class));
       Wait.pause(threadSleepTime);
     }
     ThreadUtils.join(async0, 5 * 60 * 1000);
@@ -208,7 +222,6 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase {
    * 6. then recreates the PR on the same VM <br>
    * 7. Verfies the size , type , contents of both the resultSets Obtained <br>
    */
-  @Category(FlakyTest.class) // GEODE-1239: uses PRQueryDUnitHelper, uses Random, async actions, time sensitive, complex retry loop, thread unsafe test hook
   public void testPRWithCacheCloseInOneDatastoreWithoutDelay() throws Exception
   {
     LogWriterUtils.getLogWriter()
@@ -218,7 +231,7 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase {
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
-
+    setCacheInVMs(vm0, vm1, vm2);
     List vmList = new LinkedList();
     vmList.add(vm1);
     vmList.add(vm2);
@@ -228,7 +241,7 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase {
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithoutDelay: Creating Accessor node on VM0");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithoutDelay: Successfully Created Accessor node on VM0");
@@ -237,9 +250,9 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase {
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithoutDelay: Creating PR's across all VM1 , VM2");
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithoutDelay: Successfully Created PR on VM1 , VM2");
@@ -249,7 +262,7 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase {
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithoutDelay: Creating Local Region on VM0");
     vm0.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithoutDelay: Successfully Created Local Region on VM0");
@@ -263,7 +276,7 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase {
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the accessor node
     LogWriterUtils.getLogWriter()
         .info(
@@ -300,8 +313,12 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase {
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithoutDelay: Calling for cache close on either of the Datastores VM1 , VM2 at random and then recreating the cache, with no delay ");
     for (int j = 0; j < queryTestCycle; j++) {
       int k = (random.nextInt(vmList.size()));
+      LogWriterUtils.getLogWriter().info("PROperationWithQueryDUnitTest#getCacheSerializableRunnableForCacheClose: Closing cache");
+      ((VM)vmList.get(k)).invoke(() -> closeCache());
+      LogWriterUtils.getLogWriter().info("PROperationWithQueryDUnitTest#getCacheSerializableRunnableForCacheClose: Cache Closed");
+      setCacheInVMs(((VM)vmList.get(k)));
       ((VM)(vmList.get(k))).invoke(PRQHelp.getCacheSerializableRunnableForCacheClose(
-          name, redundancy));
+          name, redundancy, PortfolioData.class));
     }
 
     ThreadUtils.join(async0, 5 * 60 * 1000);

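Condensed, the close-and-recreate loop that both cache-close tests now share reads roughly as follows (a sketch that mirrors the hunks above; Wait.pause() only appears in the with-delay variant, and the async query and validation threads are omitted):

    // Sketch: pick a random datastore VM, close its cache, hand it a fresh
    // cache via setCacheInVMs(), then let the helper recreate the PR there.
    for (int j = 0; j < queryTestCycle; j++) {
      VM target = (VM) vmList.get(random.nextInt(vmList.size()));
      target.invoke(() -> closeCache());                           // drop the old cache
      setCacheInVMs(target);                                        // install a new one
      target.invoke(PRQHelp.getCacheSerializableRunnableForCacheClose(
          name, redundancy, PortfolioData.class));                  // recreate the PR
      Wait.pause(threadSleepTime);                                  // with-delay variant only
    }
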

[12/63] [abbrv] incubator-geode git commit: GEODE-1072: Removing HDFS related code

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSFlushQueueFunction.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSFlushQueueFunction.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSFlushQueueFunction.java
deleted file mode 100644
index cdf7452..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSFlushQueueFunction.java
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.logging.log4j.Logger;
-
-import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueImpl;
-import com.gemstone.gemfire.cache.execute.Function;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.execute.FunctionException;
-import com.gemstone.gemfire.cache.execute.FunctionService;
-import com.gemstone.gemfire.cache.execute.RegionFunctionContext;
-import com.gemstone.gemfire.cache.hdfs.internal.FlushObserver.AsyncFlushResult;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
-import com.gemstone.gemfire.distributed.internal.ReplyProcessor21;
-import com.gemstone.gemfire.i18n.LogWriterI18n;
-import com.gemstone.gemfire.internal.InternalEntity;
-import com.gemstone.gemfire.internal.cache.ForceReattemptException;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-import com.gemstone.gemfire.internal.cache.execute.AbstractExecution;
-import com.gemstone.gemfire.internal.cache.execute.LocalResultCollector;
-import com.gemstone.gemfire.internal.cache.wan.AbstractGatewaySender;
-import com.gemstone.gemfire.internal.cache.wan.AbstractGatewaySenderEventProcessor;
-import com.gemstone.gemfire.internal.cache.wan.parallel.ConcurrentParallelGatewaySenderQueue;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
-
-public class HDFSFlushQueueFunction implements Function, InternalEntity{
-  private static final int MAX_RETRIES = Integer.getInteger("gemfireXD.maxFlushQueueRetries", 3);
-  private static final boolean VERBOSE = Boolean.getBoolean("hdfsFlushQueueFunction.VERBOSE");
-  private static final Logger logger = LogService.getLogger();
-  private static final String ID = HDFSFlushQueueFunction.class.getName();
-  
-  public static void flushQueue(PartitionedRegion pr, int maxWaitTime) {
-    
-    Set<Integer> buckets = new HashSet<Integer>(pr.getRegionAdvisor().getBucketSet());
-
-    maxWaitTime *= 1000;
-    long start = System.currentTimeMillis();
-    
-    int retries = 0;
-    long remaining = 0;
-    while (retries++ < MAX_RETRIES && (remaining = waitTime(start, maxWaitTime)) > 0) {
-      if (logger.isDebugEnabled() || VERBOSE) {
-        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Flushing buckets " + buckets 
-            + ", attempt = " + retries 
-            + ", remaining = " + remaining));
-      }
-      
-      HDFSFlushQueueArgs args = new HDFSFlushQueueArgs(buckets, remaining);
-      
-      HDFSFlushQueueResultCollector rc = new HDFSFlushQueueResultCollector(buckets);
-      AbstractExecution exec = (AbstractExecution) FunctionService
-          .onRegion(pr)
-          .withArgs(args)
-          .withCollector(rc);
-      exec.setWaitOnExceptionFlag(true);
-      
-      try {
-        exec.execute(ID);
-        if (rc.getResult()) {
-          if (logger.isDebugEnabled() || VERBOSE) {
-            logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Flushed all buckets successfully")); 
-          }
-          return;
-        }
-      } catch (FunctionException e) {
-        if (logger.isDebugEnabled() || VERBOSE) {
-          logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Encountered error flushing queue"), e); 
-        }
-      }
-      
-      buckets.removeAll(rc.getSuccessfulBuckets());
-      for (int bucketId : buckets) {
-        remaining = waitTime(start, maxWaitTime);
-        if (logger.isDebugEnabled() || VERBOSE) {
-          logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Waiting for bucket " + bucketId)); 
-        }
-        pr.getNodeForBucketWrite(bucketId, new PartitionedRegion.RetryTimeKeeper((int) remaining));
-      }
-    }
-    
-    pr.checkReadiness();
-    throw new FunctionException("Unable to flush the following buckets: " + buckets);
-  }
-  
-  private static long waitTime(long start, long max) {
-    if (max == 0) {
-      return Integer.MAX_VALUE;
-    }
-    return start + max - System.currentTimeMillis();
-  }
-  
-  @Override
-  public void execute(FunctionContext context) {
-    RegionFunctionContext rfc = (RegionFunctionContext) context;
-    PartitionedRegion pr = (PartitionedRegion) rfc.getDataSet();
-    
-    HDFSFlushQueueArgs args = (HDFSFlushQueueArgs) rfc.getArguments();
-    Set<Integer> buckets = new HashSet<Integer>(args.getBuckets());
-    buckets.retainAll(pr.getDataStore().getAllLocalPrimaryBucketIds());
-
-    Map<Integer, AsyncFlushResult> flushes = new HashMap<Integer, AsyncFlushResult>();
-    for (int bucketId : buckets) {
-      try {
-        HDFSBucketRegionQueue brq = getQueue(pr, bucketId);
-        if (brq != null) {
-          if (logger.isDebugEnabled() || VERBOSE) {
-            logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Flushing bucket " + bucketId)); 
-          }
-          flushes.put(bucketId, brq.flush());
-        }
-      } catch (ForceReattemptException e) {
-        if (logger.isDebugEnabled() || VERBOSE) {
-          logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Encountered error flushing bucket " + bucketId), e); 
-        }
-      }
-    }
-    
-    try {
-      long start = System.currentTimeMillis();
-      for (Map.Entry<Integer, AsyncFlushResult> flush : flushes.entrySet()) {
-        long remaining = waitTime(start, args.getMaxWaitTime());
-        if (logger.isDebugEnabled() || VERBOSE) {
-          logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Waiting for bucket " + flush.getKey() 
-              + " to complete flushing, remaining = " + remaining)); 
-        }
-        
-        if (flush.getValue().waitForFlush(remaining, TimeUnit.MILLISECONDS)) {
-          if (logger.isDebugEnabled() || VERBOSE) {
-            logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Bucket " + flush.getKey() + " flushed successfully")); 
-          }
-          rfc.getResultSender().sendResult(new FlushStatus(flush.getKey()));
-        }
-      }
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-    }
-    
-    if (logger.isDebugEnabled() || VERBOSE) {
-      logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Sending final flush result")); 
-    }
-    rfc.getResultSender().lastResult(FlushStatus.last());
-  }
-
-  private HDFSBucketRegionQueue getQueue(PartitionedRegion pr, int bucketId) 
-      throws ForceReattemptException {
-    AsyncEventQueueImpl aeq = pr.getHDFSEventQueue();
-    AbstractGatewaySender gw = (AbstractGatewaySender) aeq.getSender();
-    AbstractGatewaySenderEventProcessor ep = gw.getEventProcessor();
-    if (ep == null) {
-      return null;
-    }
-    
-    ConcurrentParallelGatewaySenderQueue queue = (ConcurrentParallelGatewaySenderQueue) ep.getQueue();
-    return queue.getBucketRegionQueue(pr, bucketId);
-  }
-  
-  @Override
-  public String getId() {
-    return ID;
-  }
-
-  @Override
-  public boolean hasResult() {
-    return true;
-  }
-
-  @Override
-  public boolean optimizeForWrite() {
-    return true;
-  }
-
-  @Override
-  public boolean isHA() {
-    return false;
-  }
-  
-  public static class HDFSFlushQueueResultCollector implements LocalResultCollector<Object, Boolean> {
-    private final CountDownLatch complete;
-    private final Set<Integer> expectedBuckets;
-    private final Set<Integer> successfulBuckets;
-
-    private volatile ReplyProcessor21 processor;
-    
-    public HDFSFlushQueueResultCollector(Set<Integer> expectedBuckets) {
-      this.expectedBuckets = expectedBuckets;
-      
-      complete = new CountDownLatch(1);
-      successfulBuckets = new HashSet<Integer>();
-    }
-    
-    public Set<Integer> getSuccessfulBuckets() {
-      synchronized (successfulBuckets) {
-        return new HashSet<Integer>(successfulBuckets);
-      }
-    }
-    
-    @Override
-    public Boolean getResult() throws FunctionException {
-      try {
-        complete.await();
-        synchronized (successfulBuckets) {
-          LogWriterI18n logger = InternalDistributedSystem.getLoggerI18n();
-          if (logger.fineEnabled() || VERBOSE) {
-            logger.info(LocalizedStrings.DEBUG, "Expected buckets: " + expectedBuckets);
-            logger.info(LocalizedStrings.DEBUG, "Successful buckets: " + successfulBuckets);
-          }
-          return expectedBuckets.equals(successfulBuckets);
-        }
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-        GemFireCacheImpl.getExisting().getCancelCriterion().checkCancelInProgress(e);
-        throw new FunctionException(e);
-      }
-    }
-
-    @Override
-    public Boolean getResult(long timeout, TimeUnit unit)
-        throws FunctionException, InterruptedException {
-      throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public synchronized void addResult(DistributedMember memberID, Object result) {
-      if (result instanceof FlushStatus) {
-        FlushStatus status = (FlushStatus) result;
-        if (!status.isLast()) {
-          synchronized (successfulBuckets) {
-            successfulBuckets.add(status.getBucketId());
-          }        
-        }
-      }
-    }
-
-    @Override
-    public void endResults() {    	
-      complete.countDown();
-    }
-
-    @Override
-    public void clearResults() {
-    }
-
-    @Override
-    public void setProcessor(ReplyProcessor21 processor) {
-      this.processor = processor;
-    }
-
-    @Override
-    public ReplyProcessor21 getProcessor() {
-      return processor;
-    }
-
-    @Override
-    public void setException(Throwable exception) {
-      // TODO Auto-generated method stub
-    }
-
-  }
-}
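For reference, a minimal sketch (not part of this commit) of the contract implemented by HDFSFlushQueueResultCollector above: getResult() blocks until endResults() and returns true only when every expected bucket has reported a FlushStatus. Only types visible in the removed code are used; the driving code and bucket ids are assumptions.

    Set<Integer> expected = new HashSet<Integer>(Arrays.asList(0, 1, 2));
    HDFSFlushQueueResultCollector rc = new HDFSFlushQueueResultCollector(expected);
    rc.addResult(null, new FlushStatus(0));  // the memberID argument is ignored by addResult
    rc.addResult(null, new FlushStatus(1));
    rc.addResult(null, FlushStatus.last());  // terminal marker, not counted as a bucket result
    rc.endResults();
    boolean allFlushed = rc.getResult();     // false here: bucket 2 never reported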

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionArgs.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionArgs.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionArgs.java
deleted file mode 100644
index ec0f9ff..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionArgs.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Set;
-
-import com.gemstone.gemfire.DataSerializer;
-import com.gemstone.gemfire.internal.VersionedDataSerializable;
-import com.gemstone.gemfire.internal.Version;
-
-/**
- * Arguments passed to the HDFSForceCompactionFunction
- * 
- */
-@SuppressWarnings("serial")
-public class HDFSForceCompactionArgs implements VersionedDataSerializable {
-
-  private static Version[] serializationVersions = new Version[]{ Version.GFE_81 };
-
-  private HashSet<Integer> buckets;
-
-  private boolean isMajor;
-
-  private int maxWaitTime;
-
-  public HDFSForceCompactionArgs() {
-  }
-
-  public HDFSForceCompactionArgs(Set<Integer> buckets, boolean isMajor, Integer maxWaitTime) {
-    this.buckets = new HashSet<Integer>(buckets);
-    this.isMajor = isMajor;
-    this.maxWaitTime = maxWaitTime;
-  }
-
-  @Override
-  public void toData(DataOutput out) throws IOException {
-    DataSerializer.writeHashSet(buckets, out);
-    out.writeBoolean(isMajor);
-    out.writeInt(maxWaitTime);
-  }
-
-  @Override
-  public void fromData(DataInput in) throws IOException,
-      ClassNotFoundException {
-    this.buckets = DataSerializer.readHashSet(in);
-    this.isMajor = in.readBoolean();
-    this.maxWaitTime = in.readInt();
-  }
-
-  @Override
-  public Version[] getSerializationVersions() {
-    return serializationVersions;
-  }
-
-  public Set<Integer> getBuckets() {
-    return (Set<Integer>) buckets;
-  }
-
-  public void setBuckets(Set<Integer> buckets) {
-    this.buckets = new HashSet<Integer>(buckets);
-  }
-
-  public boolean isMajor() {
-    return isMajor;
-  }
-
-  public void setMajor(boolean isMajor) {
-    this.isMajor = isMajor;
-  }
-
-  public boolean isSynchronous() {
-    return maxWaitTime == 0;
-  }
-
-  public int getMaxWaitTime() {
-    return this.maxWaitTime;
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append(getClass().getCanonicalName()).append("@")
-    .append(System.identityHashCode(this))
-    .append(" buckets:").append(buckets)
-    .append(" isMajor:").append(isMajor)
-    .append(" maxWaitTime:").append(maxWaitTime);
-    return sb.toString();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionFunction.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionFunction.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionFunction.java
deleted file mode 100644
index d26ac1b..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionFunction.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.logging.log4j.Logger;
-
-import com.gemstone.gemfire.cache.execute.Function;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.execute.RegionFunctionContext;
-import com.gemstone.gemfire.internal.InternalEntity;
-import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-import com.gemstone.gemfire.internal.logging.LogService;
-
-/**
- * Function responsible for forcing a compaction on all members
- * of the system
- *
- */
-@SuppressWarnings("serial")
-public class HDFSForceCompactionFunction implements Function, InternalEntity {
-
-  public static final int FORCE_COMPACTION_MAX_RETRIES = Integer.getInteger("gemfireXD.maxCompactionRetries", 3);
-
-  public static final int BUCKET_ID_FOR_LAST_RESULT = -1;
-
-  public static final String ID = "HDFSForceCompactionFunction";
-
-  private static final Logger logger = LogService.getLogger();
-  
-  @Override
-  public void execute(FunctionContext context) {
-    if (context.isPossibleDuplicate()) {
-      // do not re-execute the function, another function
-      // targeting the failed buckets will be invoked
-      context.getResultSender().lastResult(new CompactionStatus(BUCKET_ID_FOR_LAST_RESULT, false));
-      return;
-    }
-    RegionFunctionContext rfc = (RegionFunctionContext) context;
-    PartitionedRegion pr = (PartitionedRegion) rfc.getDataSet();
-    HDFSForceCompactionArgs args = (HDFSForceCompactionArgs) rfc.getArguments();
-    Set<Integer> buckets = new HashSet<Integer>(args.getBuckets()); // copying avoids race when the function coordinator
-                                                                    // also runs the function locally
-    buckets.retainAll(pr.getDataStore().getAllLocalPrimaryBucketIds());
-
-    List<Future<CompactionStatus>> futures =  pr.forceLocalHDFSCompaction(buckets, args.isMajor(), 0);
-    int waitFor = args.getMaxWaitTime();
-    for (Future<CompactionStatus> future : futures) {
-      long start = System.currentTimeMillis();
-      CompactionStatus status = null;
-      try {
-        // TODO use a CompletionService instead
-        if (!args.isSynchronous() && waitFor <= 0) {
-          break;
-        }
-        status = args.isSynchronous() ? future.get() : future.get(waitFor, TimeUnit.MILLISECONDS);
-        buckets.remove(status.getBucketId());
-        if (logger.isDebugEnabled()) {
-          logger.debug("HDFS: ForceCompaction sending result:"+status);
-        }
-        context.getResultSender().sendResult(status);
-        long elapsedTime = System.currentTimeMillis() - start;
-        waitFor -= elapsedTime;
-      } catch (InterruptedException e) {
-        // send a list of failed buckets after waiting for all buckets
-      } catch (ExecutionException e) {
-        // send a list of failed buckets after waiting for all buckets
-      } catch (TimeoutException e) {
-        // do not wait for other buckets to complete
-        break;
-      }
-    }
-    // for asynchronous invocation, the status is true for buckets that we did not wait for
-    boolean status = !args.isSynchronous();
-    for (Integer bucketId : buckets) {
-      if (logger.isDebugEnabled()) {
-        logger.debug("HDFS: ForceCompaction sending result for bucket:"+bucketId);
-      }
-      context.getResultSender().sendResult(new CompactionStatus(bucketId, status));
-    }
-    if (logger.isDebugEnabled()) {
-      logger.debug("HDFS: ForceCompaction sending last result");
-    }
-    context.getResultSender().lastResult(new CompactionStatus(BUCKET_ID_FOR_LAST_RESULT, true));
-  }
-
-  @Override
-  public String getId() {
-    return ID;
-  }
-
-  @Override
-  public boolean hasResult() {
-    return true;
-  }
-
-  @Override
-  public boolean optimizeForWrite() {
-    // run compaction on primary members
-    return true;
-  }
-
-  @Override
-  public boolean isHA() {
-    // so that we can target re-execution on failed buckets
-    return true;
-  }
-}
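A hypothetical caller-side sketch of how this function might have been driven, assuming the standard GemFire FunctionService execution API (com.gemstone.gemfire.cache.execute.FunctionService) of this era; the wrapper method is an illustration, not code from this commit.

  static List<CompactionStatus> forceMajorCompaction(Region<?, ?> region, Set<Integer> buckets) {
    // maxWaitTime of 0 requests a synchronous compaction (see HDFSForceCompactionArgs.isSynchronous)
    HDFSForceCompactionArgs args = new HDFSForceCompactionArgs(buckets, true, 0);
    HDFSForceCompactionResultCollector rc = new HDFSForceCompactionResultCollector();
    FunctionService.onRegion(region).withArgs(args).withCollector(rc)
        .execute(HDFSForceCompactionFunction.ID);
    List<CompactionStatus> statuses = rc.getResult();  // blocks until endResults()
    if (rc.shouldRetry()) {
      // re-target only the failed buckets, up to FORCE_COMPACTION_MAX_RETRIES attempts
    }
    return statuses;
  }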

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionResultCollector.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionResultCollector.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionResultCollector.java
deleted file mode 100644
index ee5e4aa..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionResultCollector.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-import com.gemstone.gemfire.cache.execute.FunctionException;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.distributed.internal.ReplyProcessor21;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.execute.LocalResultCollector;
-
-/**
- * Result collector that aggregates the per-bucket CompactionStatus replies of
- * HDFSForceCompactionFunction and tracks which buckets should be retried.
- */
-public class HDFSForceCompactionResultCollector implements LocalResultCollector<Object, List<CompactionStatus>> {
-
-  /** list of received replies*/
-  private List<CompactionStatus> reply = new ArrayList<CompactionStatus>();
-
-  /** semaphore to block the caller of getResult()*/
-  private CountDownLatch waitForResults = new CountDownLatch(1);
-
-  /** boolean to indicate if clearResults() was called to indicate a failure*/
-  private volatile boolean shouldRetry;
-
-  private ReplyProcessor21 processor;
-
-  @Override
-  public List<CompactionStatus> getResult() throws FunctionException {
-    try {
-      waitForResults.await();
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      GemFireCacheImpl.getExisting().getCancelCriterion().checkCancelInProgress(e);
-      throw new FunctionException(e);
-    }
-    return reply;
-  }
-
-  @Override
-  public List<CompactionStatus> getResult(long timeout, TimeUnit unit)
-      throws FunctionException, InterruptedException {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public void addResult(DistributedMember memberID,
-      Object resultOfSingleExecution) {
-    if (resultOfSingleExecution instanceof CompactionStatus) {
-      CompactionStatus status = (CompactionStatus) resultOfSingleExecution;
-      if (status.getBucketId() != HDFSForceCompactionFunction.BUCKET_ID_FOR_LAST_RESULT) {
-        reply.add(status);
-      }
-    }
-  }
-
-  @Override
-  public void endResults() {
-    waitForResults.countDown();
-  }
-
-  @Override
-  public void clearResults() {
-    this.shouldRetry = true;
-    waitForResults.countDown();
-  }
-
-  /**
-   * @return true if retry should be attempted
-   */
-  public boolean shouldRetry() {
-    return this.shouldRetry || !getFailedBucketIds().isEmpty();
-  }
-
-  private Set<Integer> getFailedBucketIds() {
-    Set<Integer> result = new HashSet<Integer>();
-    for (CompactionStatus status : reply) {
-      if (!status.isStatus()) {
-        result.add(status.getBucketId());
-      }
-    }
-    return result;
-  }
-
-  public Set<Integer> getSuccessfulBucketIds() {
-    Set<Integer> result = new HashSet<Integer>();
-    for (CompactionStatus status : reply) {
-      if (status.isStatus()) {
-        result.add(status.getBucketId());
-      }
-    }
-    return result;
-  }
-
-  @Override
-  public void setProcessor(ReplyProcessor21 processor) {
-    this.processor = processor;
-  }
-
-  @Override
-  public ReplyProcessor21 getProcessor() {
-    return this.processor;
-  }
-
-  @Override
-  public void setException(Throwable exception) {
-    // TODO Auto-generated method stub
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSLastCompactionTimeFunction.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSLastCompactionTimeFunction.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSLastCompactionTimeFunction.java
deleted file mode 100644
index 789fe4d..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSLastCompactionTimeFunction.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import com.gemstone.gemfire.cache.execute.FunctionAdapter;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.execute.RegionFunctionContext;
-import com.gemstone.gemfire.internal.InternalEntity;
-import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-
-/**
- * Function that returns the oldest timestamp among all the major
- * compacted buckets on the members
- *
- */
-@SuppressWarnings("serial")
-public class HDFSLastCompactionTimeFunction extends FunctionAdapter implements InternalEntity{
-
-  public static final String ID = "HDFSLastCompactionTimeFunction";
-
-  @Override
-  public void execute(FunctionContext context) {
-    RegionFunctionContext rfc = (RegionFunctionContext) context;
-    PartitionedRegion pr = (PartitionedRegion) rfc.getDataSet();
-    rfc.getResultSender().lastResult(pr.lastLocalMajorHDFSCompaction());
-  }
-
-  @Override
-  public String getId() {
-    return ID;
-  }
-
-  @Override
-  public boolean isHA() {
-    return true;
-  }
-
-  @Override
-  public boolean optimizeForWrite() {
-    return true;
-  }
-}
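A hypothetical sketch of reducing this function's per-member results to the oldest major-compaction timestamp; the default ResultCollector (which returns a List of the per-member values) and the numeric element type are assumptions.

  static long oldestMajorCompactionTime(Region<?, ?> region) {
    List<?> perMember = (List<?>) FunctionService.onRegion(region)
        .execute(HDFSLastCompactionTimeFunction.ID).getResult();
    long oldest = Long.MAX_VALUE;
    for (Object timestamp : perMember) {
      oldest = Math.min(oldest, ((Number) timestamp).longValue());
    }
    return oldest;
  }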

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSRegionDirector.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSRegionDirector.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSRegionDirector.java
deleted file mode 100644
index 6d70dce..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSRegionDirector.java
+++ /dev/null
@@ -1,480 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-import com.gemstone.gemfire.StatisticsFactory;
-import com.gemstone.gemfire.cache.GemFireCache;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.i18n.LogWriterI18n;
-import com.gemstone.gemfire.internal.SystemTimer;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
-
-import org.apache.logging.log4j.Logger;
-
-/**
- * Cache for hoplog organizers associated with the buckets of a region. The director creates an
- * organizer instance on the first get request and does not read HDFS in advance. Creation of an
- * organizer depends on file system initialization that takes place outside this class. This class also
- * provides utility methods to monitor usage and manage bucket sets.
- * 
- */
-public class HDFSRegionDirector {
-  /*
-   * Maps each region name to its listener and store objects. This map must be populated before file
-   * organizers of a bucket can be created
-   */
-  private final ConcurrentHashMap<String, HdfsRegionManager> regionManagerMap;
-  
-  /**
-   * regions of this Gemfire cache are managed by this director. TODO this
-   * should be final and be provided at the time of creation of this instance or
-   * through a cache directory
-   */
-  private GemFireCache cache;
-  
-  // singleton instance
-  private static HDFSRegionDirector instance;
-  
-  final ScheduledExecutorService janitor;
-  private JanitorTask janitorTask;
-  
-  private static final Logger logger = LogService.getLogger();
-  protected final static String logPrefix = "<" + "RegionDirector" + "> ";
-  
-  
-  private HDFSRegionDirector() {
-    regionManagerMap = new ConcurrentHashMap<String, HDFSRegionDirector.HdfsRegionManager>();
-    janitor = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
-      @Override
-      public Thread newThread(Runnable r) {
-        Thread thread = new Thread(r, "HDFSRegionJanitor");
-        thread.setDaemon(true);
-        return thread;
-      }
-    });
-    
-    long interval = Long.getLong(HoplogConfig.JANITOR_INTERVAL_SECS,
-        HoplogConfig.JANITOR_INTERVAL_SECS_DEFAULT);
-    
-    janitorTask = new JanitorTask();
-    janitor.scheduleWithFixedDelay(janitorTask, interval, interval,
-        TimeUnit.SECONDS);
-  }
-  
-  public synchronized static HDFSRegionDirector getInstance() {
-    if (instance == null) {
-      instance = new HDFSRegionDirector();
-    }
-    return instance;
-  }
-  
-  public HDFSRegionDirector setCache(GemFireCache cache) {
-    this.cache = cache;
-    return this;
-  }
-
-  public GemFireCache getCache() {
-    return this.cache;
-  }
-  /**
-   * Caches the listener, store object and list of organizers associated with a region.
-   * Subsequently, these objects are used each time an organizer is created.
-   */
-  public synchronized HdfsRegionManager manageRegion(LocalRegion region, String storeName,
-      HoplogListener listener) {
-    
-    HdfsRegionManager manager = regionManagerMap.get(region.getFullPath());
-    if (manager != null) {
-      // this is an attempt to re-register a region. Assuming this was required
-      // to modify listener or hdfs store impl associated with the region. Hence
-      // will clear the region first.
-
-      clear(region.getFullPath());
-    }
-    
-    HDFSStoreImpl store = HDFSStoreDirector.getInstance().getHDFSStore(storeName);
-    manager = new HdfsRegionManager(region, store, listener, getStatsFactory(), this);
-    regionManagerMap.put(region.getFullPath(), manager);
-    
-    if (logger.isDebugEnabled()) {
-      logger.debug("{}Now managing region " + region.getFullPath(), logPrefix);
-    }
-    
-    return manager;
-  }
-  
-  /**
-   * Find the regions that are part of a particular HDFS store.
-   */
-  public Collection<String> getRegionsInStore(HDFSStore store) {
-    TreeSet<String> regions = new TreeSet<String>();
-    for(Map.Entry<String, HdfsRegionManager> entry : regionManagerMap.entrySet()) {
-      if(entry.getValue().getStore().equals(store)) {
-        regions.add(entry.getKey());
-      }
-    }
-    return regions;
-  }
-  
-  public int getBucketCount(String regionPath) {
-    HdfsRegionManager manager = regionManagerMap.get(regionPath);
-    if (manager == null) {
-      throw new IllegalStateException("Region not initialized");
-    }
-
-    return manager.bucketOrganizerMap.size();
-  }
-  
-  public void closeWritersForRegion(String regionPath, int minSizeForFileRollover) throws IOException {
-    regionManagerMap.get(regionPath).closeWriters(minSizeForFileRollover);
-  }
-  /**
-   * Removes and closes all {@link HoplogOrganizer}s of this region. This call is expected when
-   * a PR disowns a region.
-   */
-  public synchronized void clear(String regionPath) {
-    HdfsRegionManager manager = regionManagerMap.remove(regionPath);
-    if (manager != null) {
-      if (logger.isDebugEnabled()) {
-        logger.debug("{}Closing hoplog region manager for " + regionPath, logPrefix);
-      }
-      manager.close();
-    }
-  }
-
-  /**
-   * Closes all region managers, organizers and hoplogs. This method should be
-   * called before closing the cache to gracefully release all resources
-   */
-  public static synchronized void reset() {
-    if (instance == null) {
-      // nothing to reset
-      return;
-    }
-    
-    instance.janitor.shutdownNow();
-    
-    for (String region : instance.regionManagerMap.keySet()) {
-      instance.clear(region);
-    }
-    instance.cache = null;
-    instance = null;
-  }
-  
-  /**
-   * Terminates current janitor task and schedules a new. The rate of the new
-   * task is based on the value of system property at that time
-   */
-  public static synchronized void resetJanitor() {
-    instance.janitorTask.terminate();
-    instance.janitorTask = instance.new JanitorTask();
-    long interval = Long.getLong(HoplogConfig.JANITOR_INTERVAL_SECS,
-        HoplogConfig.JANITOR_INTERVAL_SECS_DEFAULT);
-    instance.janitor.scheduleWithFixedDelay(instance.janitorTask, 0, interval,
-        TimeUnit.SECONDS);
-  }
-  
-  /**
-   * @param regionPath name of region for which stats object is desired
-   * @return {@link SortedOplogStatistics} instance associated with hdfs region
-   *         name. Null if region is not managed by director
-   */
-  public synchronized SortedOplogStatistics getHdfsRegionStats(String regionPath) {
-    HdfsRegionManager manager = regionManagerMap.get(regionPath);
-    return manager == null ? null : manager.getHdfsStats();
-  }
-  
-  private StatisticsFactory getStatsFactory() {
-    return cache.getDistributedSystem();
-  }
-
-  /**
-   * A helper class to manage region and its organizers
-   */
-  public static class HdfsRegionManager {
-    // name and store configuration of the region whose buckets are managed by this director.
-    private LocalRegion region;
-    private HDFSStoreImpl store;
-    private HoplogListener listener;
-    private volatile boolean closed = false;
-    private final int FILE_ROLLOVER_TASK_INTERVAL = Integer.parseInt
-        (System.getProperty("gemfire.HDFSRegionDirector.FILE_ROLLOVER_TASK_INTERVAL_SECONDS", "60"));
-    
-    private SystemTimer hoplogCloseTimer = null;
-    
-    // instance of hdfs statistics object for this hdfs based region. This
-    // object will collect usage and performance related statistics.
-    private final SortedOplogStatistics hdfsStats;
-
-    /*
-     * An instance of organizer is created for each bucket of regionName region residing on this
-     * node. This member maps bucket id with its corresponding organizer instance. A lock is used to
-     * manage concurrent writes to the map.
-     */
-    private ConcurrentMap<Integer, HoplogOrganizer> bucketOrganizerMap;
-    
-    private HDFSRegionDirector hdfsRegionDirector;
-
-    /**
-     * @param listener
-     *          listener of change events like file creation and deletion
-     * @param hdfsRegionDirector 
-     */
-    HdfsRegionManager(LocalRegion region, HDFSStoreImpl store,
-        HoplogListener listener, StatisticsFactory statsFactory, HDFSRegionDirector hdfsRegionDirector) {
-      bucketOrganizerMap = new ConcurrentHashMap<Integer, HoplogOrganizer>();
-      this.region = region;
-      this.listener = listener;
-      this.store = store;
-      this.hdfsStats = new SortedOplogStatistics(statsFactory, "HDFSRegionStatistics", region.getFullPath());
-      this.hdfsRegionDirector = hdfsRegionDirector;
-    }
-
-    public void closeWriters(int minSizeForFileRollover) throws IOException {
-      final long startTime = System.currentTimeMillis();
-      long elapsedTime = 0;
-        
-      Collection<HoplogOrganizer> organizers = bucketOrganizerMap.values();
-      
-      for (HoplogOrganizer organizer : organizers) {
-      
-        try {
-          this.getRegion().checkReadiness();
-        } catch (Exception e) {
-          break;
-        }
-        
-        ((HDFSUnsortedHoplogOrganizer)organizer).synchronizedCloseWriter(true, 0, 
-            minSizeForFileRollover);
-      }
-      
-    }
-
-    public synchronized <T extends PersistedEventImpl> HoplogOrganizer<T> create(int bucketId) throws IOException {
-      assert !bucketOrganizerMap.containsKey(bucketId);
-
-      HoplogOrganizer<?> organizer = region.getHDFSWriteOnly() 
-          ? new HDFSUnsortedHoplogOrganizer(this, bucketId) 
-          : new HdfsSortedOplogOrganizer(this, bucketId);
-
-      bucketOrganizerMap.put(bucketId, organizer);
-      // initialize a timer that periodically closes the hoplog writer if the 
-      // time for rollover has passed. It also has the responsibility to fix the files.  
-      if (this.region.getHDFSWriteOnly() && 
-          hoplogCloseTimer == null) {
-        hoplogCloseTimer = new SystemTimer(hdfsRegionDirector.
-            getCache().getDistributedSystem(), true);
-        
-        // schedule the task to fix the files that were not closed properly 
-        // last time. 
-        hoplogCloseTimer.scheduleAtFixedRate(new CloseTmpHoplogsTimerTask(this), 
-            1000, FILE_ROLLOVER_TASK_INTERVAL * 1000);
-        
-        if (logger.isDebugEnabled()) {
-          logger.debug("{}Scheduling hoplog rollover timer with interval " + FILE_ROLLOVER_TASK_INTERVAL + 
-              " for hoplog organizer for " + region.getFullPath()
-              + ":" + bucketId + " " + organizer, logPrefix);
-        }
-      }
-      
-      if (logger.isDebugEnabled()) {
-        logger.debug("{}Constructed hoplog organizer for " + region.getFullPath()
-            + ":" + bucketId + " " + organizer, logPrefix);
-      }
-      return (HoplogOrganizer<T>) organizer;
-    }
-    
-    public synchronized <T extends PersistedEventImpl> void addOrganizer(
-        int bucketId, HoplogOrganizer<T> organizer) {
-      if (bucketOrganizerMap.containsKey(bucketId)) {
-        throw new IllegalArgumentException();
-      }
-      if (logger.isDebugEnabled()) {
-        logger.debug("{}added pre constructed organizer " + region.getFullPath()
-            + ":" + bucketId + " " + organizer, logPrefix);
-      }
-      bucketOrganizerMap.put(bucketId, organizer);
-    }
-
-    public void close() {
-      closed = true;
-      
-      if (this.region.getHDFSWriteOnly() && 
-          hoplogCloseTimer != null) {
-        hoplogCloseTimer.cancel();
-        hoplogCloseTimer = null;
-      }
-      for (int bucket : bucketOrganizerMap.keySet()) {
-        close(bucket);
-      }
-    }
-    
-    public boolean isClosed() {
-      return closed;
-    }
-
-    public synchronized void close(int bucketId) {
-      try {
-        HoplogOrganizer organizer = bucketOrganizerMap.remove(bucketId);
-        if (organizer != null) {
-          if (logger.isDebugEnabled()) {
-            logger.debug("{}Closing hoplog organizer for " + region.getFullPath() + ":" + 
-                bucketId + " " + organizer, logPrefix);
-          }
-          organizer.close();
-        }
-      } catch (IOException e) {
-        if (logger.isDebugEnabled()) {
-          logger.debug(logPrefix + "Error closing hoplog organizer for " + region.getFullPath() + ":" + bucketId, e);
-        }
-      }
-      //TODO abort compaction and flush requests for this region
-    }
-    
-    public static String getRegionFolder(String regionPath) {
-      String folder = regionPath;
-      //Change any underscore into a double underscore
-      folder = folder.replace("_", "__");
-      //get rid of the leading slash
-      folder = folder.replaceFirst("^/", "");
-      //replace slashes with underscores
-      folder = folder.replace('/', '_');
-      return folder;
-    }
-
-    public String getRegionFolder() {
-      return getRegionFolder(region.getFullPath());
-    }
-
-    public HoplogListener getListener() {
-      return listener;
-    }
-
-    public HDFSStoreImpl getStore() {
-      return store;
-    }
-
-    public LocalRegion getRegion() {
-      return region;
-    }
-    
-    public SortedOplogStatistics getHdfsStats() {
-      return hdfsStats;
-    }
-    
-    public Collection<HoplogOrganizer> getBucketOrganizers(){
-      return this.bucketOrganizerMap.values();
-    }
-
-    /**
-     * get the HoplogOrganizers only for the given set of buckets
-     */
-    public Collection<HoplogOrganizer> getBucketOrganizers(Set<Integer> buckets){
-      Set<HoplogOrganizer> result = new HashSet<HoplogOrganizer>();
-      for (Integer bucketId : buckets) {
-        result.add(this.bucketOrganizerMap.get(bucketId));
-      }
-      return result;
-    }
-
-    /**
-     * Delete all files from HDFS for this region. This method
-     * should be called after all members have destroyed their
-     * region in gemfire, so there should be no threads accessing
-     * these files.
-     * @throws IOException 
-     */
-    public void destroyData() throws IOException {
-      //Make sure everything is shut down and closed.
-      close();
-      if (store == null) {
-        return;
-      }
-      Path regionPath = new Path(store.getHomeDir(), getRegionFolder());
-      
-      //Delete all files in HDFS.
-      FileSystem fs = getStore().getFileSystem();
-      if(!fs.delete(regionPath, true)) {
-        if(fs.exists(regionPath)) {
-          throw new IOException("Unable to delete " + regionPath);
-        }
-      }
-    }
-
-    public void performMaintenance() throws IOException {
-      Collection<HoplogOrganizer> buckets = getBucketOrganizers();
-      for (HoplogOrganizer bucket : buckets) {
-        bucket.performMaintenance();
-      }
-    }
-  }
-  
-  private class JanitorTask implements Runnable {
-    boolean terminated = false;
-    @Override
-    public void run() {
-      if (terminated) {
-        return;
-      }
-      fineLog("Executing HDFS Region janitor task", null);
-      
-      Collection<HdfsRegionManager> regions = regionManagerMap.values();
-      for (HdfsRegionManager region : regions) {
-        fineLog("Maintaining region:" + region.getRegionFolder(), null);
-        try {
-          region.performMaintenance();
-        } catch (Throwable e) {
-          logger.info(LocalizedMessage.create(LocalizedStrings.HOPLOG_IO_ERROR , region.getRegionFolder()));
-          logger.info(LocalizedMessage.create(LocalizedStrings.ONE_ARG, e.getMessage()));
-          fineLog(null, e);
-        }
-      }
-    }
-
-    public void terminate() {
-      terminated = true;
-    }
-  }
-  
-  protected static void fineLog(String message, Throwable e) {
-    if(logger.isDebugEnabled()) {
-      logger.debug(message, e);
-    }
-  }
-}
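A minimal sketch of the registration flow described in the class comment above: manage a region once, then create one organizer per bucket on demand. The cache, region, listener and bucketId variables and the store name literal are assumptions for illustration.

  HDFSRegionDirector director = HDFSRegionDirector.getInstance().setCache(cache);
  HdfsRegionManager manager = director.manageRegion(region, "myHdfsStore", listener);
  HoplogOrganizer<?> organizer = manager.create(bucketId);  // created lazily, one per bucket; throws IOException
  // ... later, when the PR disowns the region:
  director.clear(region.getFullPath());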

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSStoreDirector.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSStoreDirector.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSStoreDirector.java
deleted file mode 100644
index 880ef3e..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSStoreDirector.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.concurrent.ConcurrentHashMap;
-
-
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-
-/**
- * HDFSStoreDirector is created for managing all instances of HDFSStoreImpl.    
- *
- */
-public final class HDFSStoreDirector {
-  private final ConcurrentHashMap<String, HDFSStoreImpl> storeMap = new ConcurrentHashMap<String, HDFSStoreImpl>();
-
-  // singleton instance
-  private static volatile HDFSStoreDirector instance;
-  
-  private HDFSStoreDirector() {
-
-  }
-  
-  public static final HDFSStoreDirector getInstance() {
-    if (instance == null) {
-      synchronized (HDFSStoreDirector.class)  {
-        if (instance == null)
-          instance = new HDFSStoreDirector();
-      }
-    }
-    return instance;
-  }
-
-  // Called when the region is created.
-  public final void addHDFSStore(HDFSStoreImpl hdfsStore){
-    this.storeMap.put(hdfsStore.getName(), hdfsStore); 
-  }
-  
-  public final HDFSStoreImpl getHDFSStore(String hdfsStoreName) {
-    return this.storeMap.get(hdfsStoreName);
-  }
-  
-  public final void removeHDFSStore(String hdfsStoreName) {
-    this.storeMap.remove(hdfsStoreName);
-  } 
-  
-  public void closeHDFSStores() {
-    Iterator<HDFSStoreImpl> it = this.storeMap.values().iterator();
-    while (it.hasNext()) {
-      HDFSStoreImpl hsi = it.next();
-      hsi.close();
-    }
-    this.storeMap.clear();
-  }
-
-   public ArrayList<HDFSStoreImpl> getAllHDFSStores() {
-    ArrayList<HDFSStoreImpl> hdfsStores = new ArrayList<HDFSStoreImpl>();
-    hdfsStores.addAll(this.storeMap.values());
-    return hdfsStores;
-  }
-}
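For completeness, a small sketch of the store registry shown above; hdfsStore stands for an already-constructed HDFSStoreImpl and is an assumption here.

  HDFSStoreDirector stores = HDFSStoreDirector.getInstance();
  stores.addHDFSStore(hdfsStore);                          // registered when the store is created
  HDFSStoreImpl found = stores.getHDFSStore(hdfsStore.getName());
  stores.closeHDFSStores();                                // closes and clears all stores on cache close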

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizer.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizer.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizer.java
deleted file mode 100644
index cbb35cb..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizer.java
+++ /dev/null
@@ -1,447 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.concurrent.Callable;
-import java.util.concurrent.Future;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-
-import com.gemstone.gemfire.cache.CacheClosedException;
-import com.gemstone.gemfire.cache.hdfs.internal.QueuedPersistentEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.UnsortedHoplogPersistedEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog.HoplogWriter;
-import com.gemstone.gemfire.internal.HeapDataOutputStream;
-import com.gemstone.gemfire.internal.cache.ForceReattemptException;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
-import org.apache.hadoop.hbase.util.FSUtils;
-
-/**
- * Manages unsorted Hoplog files for a bucket (Streaming Ingest option). An instance per bucket 
- * will exist in each PR
- * 
- *
- */
-public class HDFSUnsortedHoplogOrganizer extends AbstractHoplogOrganizer<UnsortedHoplogPersistedEvent> {
-  public static final String HOPLOG_REGEX = HOPLOG_NAME_REGEX + "("
-      + SEQ_HOPLOG_EXTENSION + "|" + TEMP_HOPLOG_EXTENSION + ")";
-  public static final Pattern HOPLOG_PATTERN = Pattern.compile(HOPLOG_REGEX);
-  protected static String TMP_FILE_NAME_REGEX = HOPLOG_NAME_REGEX + SEQ_HOPLOG_EXTENSION + TEMP_HOPLOG_EXTENSION + "$";
-  protected static final Pattern patternForTmpHoplog = Pattern.compile(TMP_FILE_NAME_REGEX);
-  
-   volatile private HoplogWriter writer;
-   volatile private Hoplog currentHoplog;
-   
-   volatile private long lastFlushTime = System.currentTimeMillis();
-   
-   volatile private boolean abortFlush = false;
-   private FileSystem fileSystem;
-   
-   public HDFSUnsortedHoplogOrganizer(HdfsRegionManager region, int bucketId) throws IOException{
-    super(region, bucketId);
-    writer = null;
-    sequence = new AtomicInteger(0);
-
-    fileSystem = store.getFileSystem();
-    if (! fileSystem.exists(bucketPath)) {
-      return;
-    }
-    
-    FileStatus validHoplogs[] = FSUtils.listStatus(fileSystem, bucketPath, new PathFilter() {
-      @Override
-      public boolean accept(Path file) {
-        // All valid hoplog files must match the regex
-        Matcher matcher = HOPLOG_PATTERN.matcher(file.getName());
-        return matcher.matches();
-      }
-    });
-
-    if (validHoplogs != null && validHoplogs.length > 0) {
-      for (FileStatus file : validHoplogs) {
-        // account for the disk used by this file
-        incrementDiskUsage(file.getLen());
-      }
-    }
-
-  }
-  
-    @Override
-    public void close() throws IOException {
-      super.close();
-      if (logger.isDebugEnabled())
-        logger.debug("{}Closing the hoplog organizer and the open files", logPrefix);
-      // abort the flush so that we can immediately call the close current writer. 
-      abortFlush = true;
-      synchronizedCloseWriter(true, 0, 0);
-    }
-    
-    
-    /**
-     * Flushes the data to HDFS. 
-     * Synchronization ensures that the writer is not closed when flush is happening.
-     * To abort the flush, abortFlush needs to be set.  
-     * @throws ForceReattemptException 
-     */
-     @Override
-    public synchronized void flush(Iterator<? extends QueuedPersistentEvent> bufferIter, final int count)
-        throws IOException, ForceReattemptException {
-      assert bufferIter != null;
-      
-      if (abortFlush)
-        throw new CacheClosedException("Either the region has been cleared " +
-            "or closed. Aborting the ongoing flush operation.");
-      if (logger.isDebugEnabled())
-        logger.debug("{}Initializing flush operation", logPrefix);
-      
-      // variables for updating stats
-      long start = stats.getFlush().begin();
-      int byteCount = 0;
-      if (writer == null) {
-        // Hoplogs of sequence files are always created with a 0 sequence number
-        currentHoplog = getTmpSortedOplog(0, SEQ_HOPLOG_EXTENSION);
-        try {
-          writer = this.store.getSingletonWriter().runSerially(new Callable<Hoplog.HoplogWriter>() {
-            @Override
-            public HoplogWriter call() throws Exception {
-              return currentHoplog.createWriter(count);
-            }
-          });
-        } catch (Exception e) {
-          if (e instanceof IOException) {
-            throw (IOException)e;
-          }
-          throw new IOException(e);
-        }
-      }
-      long timeSinceLastFlush = (System.currentTimeMillis() - lastFlushTime)/1000 ;
-      
-      try {
-        /**MergeGemXDHDFSToGFE changed the following statement as the code of HeapDataOutputStream is not merged */
-        //HeapDataOutputStream out = new HeapDataOutputStream();
-        while (bufferIter.hasNext()) {
-          HeapDataOutputStream out = new HeapDataOutputStream(1024, null);
-          if (abortFlush) {
-            stats.getFlush().end(byteCount, start);
-            throw new CacheClosedException("Either the region has been cleared " +
-            		"or closed. Aborting the ongoing flush operation.");
-          }
-          QueuedPersistentEvent item = bufferIter.next();
-          item.toHoplogEventBytes(out);
-          byte[] valueBytes = out.toByteArray();
-          writer.append(item.getRawKey(), valueBytes);
-          // add key length and value length to stats byte counter
-          byteCount += (item.getRawKey().length + valueBytes.length);
-          /**MergeGemXDHDFSToGFE how to clear for reuse. Leaving it for Darrel to merge this change*/
-          //out.clearForReuse();
-        }
-        // ping secondaries before making the file a legitimate file to ensure 
-        // that in case of split brain, no other vm has taken up as primary. #50110. 
-        if (!abortFlush)
-          pingSecondaries();
-        // append completed. If the file is to be rolled over, 
-        // close writer and rename the file to a legitimate name.
-        // Else, sync the already written data with HDFS nodes. 
-        int maxFileSize = this.store.getWriteOnlyFileRolloverSize() * 1024 * 1024;  
-        int fileRolloverInterval = this.store.getWriteOnlyFileRolloverInterval(); 
-        if (writer.getCurrentSize() >= maxFileSize || 
-            timeSinceLastFlush >= fileRolloverInterval) {
-          closeCurrentWriter();
-        }
-        else {
-          // if flush is not aborted, hsync the batch. It ensures that 
-          // the batch has reached HDFS and we can discard it. 
-          if (!abortFlush)
-            writer.hsync();
-        }
-      } catch (IOException e) {
-        stats.getFlush().error(start);
-        // as there is an exception, it can be probably be a file specific problem.
-        // close the current file to avoid any file specific issues next time  
-        closeCurrentWriter();
-        // throw the exception so that async queue will dispatch the same batch again 
-        throw e;
-      } 
-      
-      stats.getFlush().end(byteCount, start);
-    }
-    
-    /**
-     * Synchronization ensures that the writer is not closed when flush is happening. 
-     */
-    synchronized void synchronizedCloseWriter(boolean forceClose, 
-        long timeSinceLastFlush, int minsizeforrollover) throws IOException { 
-      long writerSize = 0;
-      if (writer != null){
-        writerSize = writer.getCurrentSize();
-      }
-      
-      if (writerSize < (minsizeforrollover * 1024L))
-        return;
-      
-      int maxFileSize = this.store.getWriteOnlyFileRolloverSize() * 1024 * 1024;  
-      int fileRolloverInterval = this.store.getWriteOnlyFileRolloverInterval(); 
-      if (writerSize >= maxFileSize || 
-          timeSinceLastFlush >= fileRolloverInterval || forceClose) {
-        closeCurrentWriter();
-      }
-      }
-        
-    
-    /**
-     * Closes the current writer so that next time a new hoplog can 
-     * be created. Also, fixes any tmp hoplogs. 
-     * 
-     * @throws IOException
-     */
-    void closeCurrentWriter() throws IOException {
-      
-      if (writer != null) {
-        // If this organizer is closing, it is ok to ignore exceptions here
-        // because CloseTmpHoplogsTimerTask
-        // on another member may have already renamed the hoplog
-        // fixes bug 49141
-        boolean isClosing = abortFlush;
-        try {
-          incrementDiskUsage(writer.getCurrentSize());
-        } catch (IOException e) {
-          if (!isClosing) {
-            throw e;
-          }
-        }
-        if (logger.isDebugEnabled())
-          logger.debug("{}Closing hoplog " + currentHoplog.getFileName(), logPrefix);
-        try{
-          writer.close();
-          makeLegitimate(currentHoplog);
-        } catch (IOException e) {
-          if (!isClosing) {
-            logger.warn(LocalizedStrings.HOPLOG_FLUSH_OPERATION_FAILED, e);
-            throw e;
-          }
-        } finally {
-          writer = null;
-          lastFlushTime = System.currentTimeMillis();
-        }
-      }
-      else
-        lastFlushTime = System.currentTimeMillis();
-    }
-
-    @Override
-    public void clear() throws IOException {
-      boolean prevAbortFlushFlag = abortFlush;
-      // abort the flush so that we can immediately call the close current writer. 
-      abortFlush = true;
-      
-      // Close if there is any existing writer. 
-      try {
-        synchronizedCloseWriter(true, 0, 0);
-      } catch (IOException e) {
-        logger.warn(LocalizedStrings.HOPLOG_CLOSE_FAILED, e);
-      }
-      
-      // reenable the aborted flush
-      abortFlush = prevAbortFlushFlag;
-      
-      // Mark the hoplogs for deletion
-      markHoplogsForDeletion();
-      
-    }
-  
-    @Override
-    public void performMaintenance() {
-      // TODO remove the timer for tmp file conversion. Use this instead
-    }
-
-    @Override
-    public Future<CompactionStatus> forceCompaction(boolean isMajor) {
-      return null;
-    }
-
-    @Override
-    protected Hoplog getHoplog(Path hoplogPath) throws IOException {
-      Hoplog so = new SequenceFileHoplog(fileSystem, hoplogPath, stats);
-      return so;
-    }
-  
-  /**
-   * Fixes the size of hoplogs that were not closed properly last time.
-   * Such hoplogs are *.tmphop files. Identify them, then open and close
-   * them to fix their size, and finally rename them to *.hop.
-   * 
-   * @throws IOException
-   * @throws ForceReattemptException 
-   */
-  void identifyAndFixTmpHoplogs(FileSystem fs) throws IOException, ForceReattemptException {
-    if (logger.isDebugEnabled())
-      logger.debug("{}Fixing temporary hoplogs", logPrefix);
-    
-    // A different filesystem is passed to this function for the following reason: 
-    // For HDFS, if a file wasn't closed properly last time, 
-    // while calling FileSystem.append for this file, FSNamesystem.startFileInternal->
-    // FSNamesystem.recoverLeaseInternal function gets called. 
-    // This function throws AlreadyBeingCreatedException if there is an open handle, to any other file, 
-    // created using the same FileSystem object. This is a bug and is being tracked at: 
-    // https://issues.apache.org/jira/browse/HDFS-3848?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
-    // 
-    // The fix for this bug is not yet part of Pivotal HD. So to overcome the bug, 
-    // we create a new file system for the timer task so that it does not encounter the bug. 
-    
-    FileStatus tmpHoplogs[] = FSUtils.listStatus(fs, fs.makeQualified(bucketPath), new PathFilter() {
-      @Override
-      public boolean accept(Path file) {
-        // All valid hoplog files must match the regex
-        Matcher matcher = patternForTmpHoplog.matcher(file.getName());
-        return matcher.matches();
-      }
-    });
-    
-    if (tmpHoplogs == null || tmpHoplogs.length == 0) {
-      if (logger.isDebugEnabled())
-        logger.debug("{}No files to fix", logPrefix);
-      return;
-    }
-    // ping secondaries so that in case of split brain, no other vm has taken up 
-    // as primary. #50110. 
-    pingSecondaries();
-    if (logger.isDebugEnabled())
-      logger.debug("{}Files to fix " + tmpHoplogs.length, logPrefix);
-
-    String currentHoplogName = null;
-    // get the current hoplog name. We need to ignore current hoplog while fixing. 
-    if (currentHoplog != null) {
-      currentHoplogName = currentHoplog.getFileName();
-    }
-    
-    for (int i = 0; i < tmpHoplogs.length; i++) {
-      // Skip directories
-      if (tmpHoplogs[i].isDirectory()) {
-        continue;
-      }
-
-      final Path p = tmpHoplogs[i].getPath();
-      
-      if (tmpHoplogs[i].getPath().getName().equals(currentHoplogName)){
-        if (logger.isDebugEnabled())
-          logger.debug("Skipping current file: " + tmpHoplogs[i].getPath().getName(), logPrefix);
-        continue;
-      } 
-      
-      SequenceFileHoplog hoplog = new SequenceFileHoplog(fs, p, stats);
-      try {
-        makeLegitimate(hoplog);
-        logger.info (LocalizedMessage.create(LocalizedStrings.DEBUG, "Hoplog " + p + " was a temporary " +
-            "hoplog because the node managing it wasn't shutdown properly last time. Fixed the hoplog name."));
-      } catch (IOException e) {
-        logger.info (LocalizedMessage.create(LocalizedStrings.DEBUG, "Hoplog " + p + " is still a temporary " +
-            "hoplog because the node managing it wasn't shutdown properly last time. Failed to " +
-            "change the hoplog name because an exception was thrown while fixing it. " + e));
-      }
-    }
-  }
-  
-  private FileStatus[] getExpiredHoplogs() throws IOException {
-    FileStatus files[] = FSUtils.listStatus(fileSystem, bucketPath, new PathFilter() {
-      @Override
-      public boolean accept(Path file) {
-        // All expired hoplog end with expire extension and must match the valid file regex
-        String fileName = file.getName();
-        if (! fileName.endsWith(EXPIRED_HOPLOG_EXTENSION)) {
-          return false;
-        }
-        return true;
-      }
-    });
-    return files;
-  }
-  /**
-   * locks sorted oplogs collection, removes oplog and renames for deletion later
-   * @throws IOException 
-   */
-  private void markHoplogsForDeletion() throws IOException {
-    
-    ArrayList<IOException> errors = new ArrayList<IOException>();
-    FileStatus validHoplogs[] = FSUtils.listStatus(fileSystem, bucketPath, new PathFilter() {
-      @Override
-      public boolean accept(Path file) {
-        // All valid hoplog files must match the regex
-        Matcher matcher = HOPLOG_PATTERN.matcher(file.getName());
-        return matcher.matches();
-      }
-    });
-    
-    FileStatus[] expired = getExpiredHoplogs();
-    validHoplogs = filterValidHoplogs(validHoplogs, expired);
-
-    if (validHoplogs == null || validHoplogs.length == 0) {
-      return;
-    }
-    for (FileStatus fileStatus : validHoplogs) {
-      try {
-        addExpiryMarkerForAFile(getHoplog(fileStatus.getPath()));
-      } catch (IOException e) {
-        // even if there is an IO error continue removing other hoplogs and
-        // notify at the end
-        errors.add(e);
-      }
-    }
-    
-    if (!errors.isEmpty()) {
-      for (IOException e : errors) {
-        logger.warn(LocalizedStrings.HOPLOG_HOPLOG_REMOVE_FAILED, e);
-      }
-    }
-  }
-  
-  @Override
-  public Compactor getCompactor() {
-    throw new UnsupportedOperationException("Not supported for " + this.getClass().getSimpleName());
-  }
-  
-  @Override
-  public HoplogIterator<byte[], UnsortedHoplogPersistedEvent> scan(
-      long startOffset, long length) throws IOException {
-    throw new UnsupportedOperationException("Not supported for " + this.getClass().getSimpleName());
-  }
-
-  public long getLastFlushTime() {
-    return this.lastFlushTime;
-      }
-  
-  public long getfileRolloverInterval(){
-    int fileRolloverInterval = this.store.getWriteOnlyFileRolloverInterval(); 
-    return fileRolloverInterval;
-    }
-
-  @Override
-  public long getLastMajorCompactionTimestamp() {
-    throw new UnsupportedOperationException();
-  }
-
-}


[39/63] [abbrv] incubator-geode git commit: GEODE-17: move GeodeSecurityUtil and two other classes to internal package

Posted by kl...@apache.org.
GEODE-17: move GeodeSecurityUtil and two other classes to internal package


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/f04b6695
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/f04b6695
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/f04b6695

Branch: refs/heads/feature/GEODE-1276
Commit: f04b66956c8b98909b924f2ed648ba735610bebb
Parents: 7c38f0d
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Fri Apr 29 10:06:12 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Fri Apr 29 10:06:12 2016 -0700

----------------------------------------------------------------------
 .../internal/security/GeodeSecurityUtil.java    | 165 +++++++++++++++++
 .../security/shiro/CustomAuthRealm.java         | 177 +++++++++++++++++++
 .../security/shiro/JMXShiroAuthenticator.java   |  69 ++++++++
 .../management/internal/ManagementAgent.java    |   2 +-
 .../internal/SystemManagementService.java       |   2 +-
 .../internal/cli/commands/DataCommands.java     |   2 +-
 .../internal/cli/remote/CommandProcessor.java   |   2 +-
 .../internal/security/AccessControlMBean.java   |   2 +-
 .../internal/security/MBeanServerWrapper.java   |   2 +-
 .../controllers/AbstractCommandsController.java |   2 +-
 .../support/LoginHandlerInterceptor.java        |   2 +-
 .../gemfire/security/CustomAuthRealm.java       | 174 ------------------
 .../gemfire/security/GeodeSecurityUtil.java     | 163 -----------------
 .../gemfire/security/JMXShiroAuthenticator.java |  68 -------
 .../GeodeSecurityUtilCustomRealmJUnitTest.java  |   2 +-
 .../GeodeSecurityUtilWithIniFileJUnitTest.java  |   2 +-
 .../gemfire/tools/pulse/tests/Server.java       |   4 +-
 17 files changed, 423 insertions(+), 417 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f04b6695/geode-core/src/main/java/com/gemstone/gemfire/internal/security/GeodeSecurityUtil.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/security/GeodeSecurityUtil.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/security/GeodeSecurityUtil.java
new file mode 100644
index 0000000..4fd92ed
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/security/GeodeSecurityUtil.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.internal.security;
+
+import java.util.concurrent.Callable;
+
+import com.gemstone.gemfire.cache.operations.OperationContext;
+import com.gemstone.gemfire.cache.operations.OperationContext.OperationCode;
+import com.gemstone.gemfire.cache.operations.OperationContext.Resource;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.management.internal.security.ResourceOperation;
+import com.gemstone.gemfire.management.internal.security.ResourceOperationContext;
+import com.gemstone.gemfire.security.AuthenticationFailedException;
+import com.gemstone.gemfire.security.GemFireSecurityException;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.logging.log4j.Logger;
+import org.apache.shiro.SecurityUtils;
+import org.apache.shiro.ShiroException;
+import org.apache.shiro.UnavailableSecurityManagerException;
+import org.apache.shiro.authc.UsernamePasswordToken;
+import org.apache.shiro.subject.Subject;
+import org.apache.shiro.util.ThreadContext;
+
+public class GeodeSecurityUtil {
+
+  private static Logger logger = LogService.getLogger();
+
+  public static void login(String username, String password){
+    if(!isShiroConfigured())
+      return;
+
+    Subject currentUser = SecurityUtils.getSubject();
+
+    UsernamePasswordToken token =
+        new UsernamePasswordToken(username, password);
+    try {
+      logger.info("Logging in "+username+"/"+password);
+      currentUser.login(token);
+    } catch (ShiroException e) {
+      throw new AuthenticationFailedException(e.getMessage(), e);
+    }
+  }
+
+  public static void logout(){
+    if(!isShiroConfigured())
+      return;
+
+    Subject currentUser = SecurityUtils.getSubject();
+    try {
+      logger.info("Logging out "+currentUser.getPrincipal());
+      currentUser.logout();
+    }
+    catch(ShiroException e){
+      throw new AuthenticationFailedException(e.getMessage(), e);
+    }
+    // clean out Shiro's thread local content
+    ThreadContext.remove();
+  }
+
+  public static Callable associateWith(Callable callable){
+    if(!isShiroConfigured())
+      return callable;
+
+    Subject currentUser = SecurityUtils.getSubject();
+    return currentUser.associateWith(callable);
+  }
+
+  public static void authorize(ResourceOperation resourceOperation) {
+    if(resourceOperation==null)
+      return;
+
+    authorize(resourceOperation.resource().name(),
+      resourceOperation.operation().name(),
+      null);
+  }
+
+  public static void authorizeClusterManage(){
+    authorize("CLUSTER", "MANAGE");
+  }
+
+  public static void authorizeClusterWrite(){
+    authorize("CLUSTER", "WRITE");
+  }
+
+  public static void authorizeClusterRead(){
+    authorize("CLUSTER", "READ");
+  }
+
+  public static void authorizeDataManage(){
+    authorize("DATA", "MANAGE");
+  }
+
+  public static void authorizeDataWrite(){
+    authorize("DATA", "WRITE");
+  }
+
+  public static void authorizeDataRead(){
+    authorize("DATA", "READ");
+  }
+
+  public static void authorizeRegionWrite(String regionName){
+    authorize("DATA", "WRITE", regionName);
+  }
+
+  public static void authorizeRegionRead(String regionName){
+    authorize("DATA", "READ", regionName);
+  }
+
+  public static void authorize(String resource, String operation){
+    authorize(resource, operation, null);
+  }
+
+  private static void authorize(String resource, String operation, String regionName){
+    regionName = StringUtils.stripStart(regionName, "/");
+    authorize(new ResourceOperationContext(resource, operation, regionName));
+  }
+
+  public static void authorize(OperationContext context) {
+    if(context==null)
+      return;
+
+    if(context.getResource()== Resource.NULL && context.getOperationCode()== OperationCode.NULL)
+      return;
+
+    if(!isShiroConfigured())
+      return;
+
+
+    Subject currentUser = SecurityUtils.getSubject();
+    try {
+      currentUser.checkPermission(context);
+    }
+    catch(ShiroException e){
+      logger.info(currentUser.getPrincipal() + " not authorized for " + context);
+      throw new GemFireSecurityException(e.getMessage(), e);
+    }
+  }
+
+  private static boolean isShiroConfigured(){
+    try{
+      SecurityUtils.getSecurityManager();
+    }
+    catch(UnavailableSecurityManagerException e){
+      return false;
+    }
+    return true;
+  }
+
+}
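
For reference, the class added above is a set of static helpers that gate work on the Shiro subject bound to the calling thread: login/logout manage the subject, the authorize* methods throw GemFireSecurityException when the subject lacks the named permission, and associateWith propagates the subject to work handed to another thread. A minimal usage sketch follows; the ExampleSecuredTask class and its flow are hypothetical and only illustrate the API added in this commit, they are not part of the commit itself.

import java.util.concurrent.Callable;

import com.gemstone.gemfire.internal.security.GeodeSecurityUtil;

// Hypothetical caller, for illustration only.
public class ExampleSecuredTask {

  public Object run(String user, String password, Callable<Object> task) throws Exception {
    // No-op when no Shiro security manager is configured.
    GeodeSecurityUtil.login(user, password);
    try {
      // Throws GemFireSecurityException if the subject lacks DATA:READ.
      GeodeSecurityUtil.authorizeDataRead();
      // Carry the subject along if the task runs on another thread.
      return GeodeSecurityUtil.associateWith(task).call();
    } finally {
      GeodeSecurityUtil.logout();
    }
  }
}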

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f04b6695/geode-core/src/main/java/com/gemstone/gemfire/internal/security/shiro/CustomAuthRealm.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/security/shiro/CustomAuthRealm.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/security/shiro/CustomAuthRealm.java
new file mode 100644
index 0000000..afc3125
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/security/shiro/CustomAuthRealm.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.internal.security.shiro;
+
+import static com.gemstone.gemfire.management.internal.security.ResourceConstants.*;
+
+import java.lang.reflect.Method;
+import java.security.AccessControlContext;
+import java.security.AccessController;
+import java.security.Principal;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import javax.management.remote.JMXPrincipal;
+import javax.security.auth.Subject;
+
+import com.gemstone.gemfire.cache.operations.OperationContext;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.internal.ClassLoadUtil;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.internal.lang.StringUtils;
+import com.gemstone.gemfire.management.internal.security.ResourceConstants;
+import com.gemstone.gemfire.security.AccessControl;
+import com.gemstone.gemfire.security.AuthenticationFailedException;
+import com.gemstone.gemfire.security.Authenticator;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.shiro.authc.AuthenticationException;
+import org.apache.shiro.authc.AuthenticationInfo;
+import org.apache.shiro.authc.AuthenticationToken;
+import org.apache.shiro.authc.SimpleAuthenticationInfo;
+import org.apache.shiro.authc.UsernamePasswordToken;
+import org.apache.shiro.authz.AuthorizationInfo;
+import org.apache.shiro.authz.Permission;
+import org.apache.shiro.realm.AuthorizingRealm;
+import org.apache.shiro.subject.PrincipalCollection;
+
+public class CustomAuthRealm extends AuthorizingRealm{
+  public static final String REALM_NAME = "CUSTOMAUTHREALM";
+
+  private static final Logger logger = LogManager.getLogger(CustomAuthRealm.class);
+  private String authzFactoryName;
+  private String postAuthzFactoryName;
+  private String authenticatorFactoryName;
+  private Properties securityProps = null;
+  private ConcurrentMap<Principal, AccessControl> cachedAuthZCallback;
+  private ConcurrentMap<Principal, AccessControl> cachedPostAuthZCallback;
+
+  public CustomAuthRealm(Properties securityProps) {
+    this.securityProps = securityProps;
+    this.authzFactoryName = securityProps.getProperty(DistributionConfig.SECURITY_CLIENT_ACCESSOR_NAME);
+    this.postAuthzFactoryName = securityProps.getProperty(DistributionConfig.SECURITY_CLIENT_ACCESSOR_PP_NAME);
+    this.authenticatorFactoryName = securityProps.getProperty(DistributionConfig.SECURITY_CLIENT_AUTHENTICATOR_NAME);
+    this.cachedAuthZCallback = new ConcurrentHashMap<>();
+    this.cachedPostAuthZCallback = new ConcurrentHashMap<>();
+  }
+
+  @Override
+  protected AuthenticationInfo doGetAuthenticationInfo(AuthenticationToken token) throws AuthenticationException {
+    UsernamePasswordToken authToken = (UsernamePasswordToken) token;
+    String username = authToken.getUsername();
+    String password = new String(authToken.getPassword());
+
+    Properties credentialProps = new Properties();
+    credentialProps.put(ResourceConstants.USER_NAME, username);
+    credentialProps.put(ResourceConstants.PASSWORD, password);
+
+    Principal principal  = getAuthenticator(securityProps).authenticate(credentialProps);
+
+    return new SimpleAuthenticationInfo(principal, authToken.getPassword(), REALM_NAME);
+  }
+
+
+  @Override
+  protected AuthorizationInfo doGetAuthorizationInfo(PrincipalCollection principals) {
+    // we intercepted the call to this method by overriding the isPermitted call
+    return null;
+  }
+
+  @Override
+  public boolean isPermitted(PrincipalCollection principals, Permission permission) {
+    OperationContext context =(OperationContext)permission;
+    Principal principal = (Principal)principals.getPrimaryPrincipal();
+    // if no access control is specified, then we allow all
+    if(StringUtils.isBlank(authzFactoryName))
+      return true;
+    AccessControl accessControl = getAccessControl(principal, false);
+    return accessControl.authorizeOperation(context.getRegionName(), context);
+  }
+
+  public AccessControl getAccessControl(Principal principal, boolean isPost) {
+    if (!isPost) {
+      if (cachedAuthZCallback.containsKey(principal)) {
+        return cachedAuthZCallback.get(principal);
+      } else if (!StringUtils.isBlank(authzFactoryName)) {
+        try {
+          Method authzMethod = ClassLoadUtil.methodFromName(authzFactoryName);
+          AccessControl authzCallback = (AccessControl) authzMethod.invoke(null, (Object[]) null);
+          authzCallback.init(principal, null);
+          cachedAuthZCallback.put(principal, authzCallback);
+          return authzCallback;
+        } catch (Exception ex) {
+          throw new AuthenticationFailedException(
+              LocalizedStrings.HandShake_FAILED_TO_ACQUIRE_AUTHENTICATOR_OBJECT.toLocalizedString(), ex);
+        }
+      }
+    } else {
+      if (cachedPostAuthZCallback.containsKey(principal)) {
+        return cachedPostAuthZCallback.get(principal);
+      } else if (!StringUtils.isBlank(postAuthzFactoryName)) {
+        try {
+          Method authzMethod = ClassLoadUtil.methodFromName(postAuthzFactoryName);
+          AccessControl postAuthzCallback = (AccessControl) authzMethod.invoke(null, (Object[]) null);
+          postAuthzCallback.init(principal, null);
+          cachedPostAuthZCallback.put(principal, postAuthzCallback);
+          return postAuthzCallback;
+        } catch (Exception ex) {
+          throw new AuthenticationFailedException(
+              LocalizedStrings.HandShake_FAILED_TO_ACQUIRE_AUTHENTICATOR_OBJECT.toLocalizedString(), ex);
+        }
+      }
+    }
+    return null;
+  }
+
+  private Authenticator getAuthenticator(Properties gfSecurityProperties) throws AuthenticationFailedException {
+    Authenticator auth;
+    try {
+      Method instanceGetter = ClassLoadUtil.methodFromName(this.authenticatorFactoryName);
+      auth = (Authenticator) instanceGetter.invoke(null, (Object[]) null);
+    } catch (Exception ex) {
+      throw new AuthenticationFailedException(
+          LocalizedStrings.HandShake_FAILED_TO_ACQUIRE_AUTHENTICATOR_OBJECT.toLocalizedString(), ex);
+    }
+    if (auth == null) {
+      throw new AuthenticationFailedException(
+          LocalizedStrings.HandShake_AUTHENTICATOR_INSTANCE_COULD_NOT_BE_OBTAINED.toLocalizedString());
+    }
+    auth.init(gfSecurityProperties);
+    return auth;
+  }
+
+  public void postAuthorize(OperationContext context) {
+    if (StringUtils.isBlank(postAuthzFactoryName)){
+      return ;
+    }
+
+    AccessControlContext acc = AccessController.getContext();
+    Subject subject = Subject.getSubject(acc);
+    Set<JMXPrincipal> principals = subject.getPrincipals(JMXPrincipal.class);
+    if (principals == null || principals.isEmpty()) {
+      throw new SecurityException(ACCESS_DENIED_MESSAGE);
+    }
+    Principal principal = principals.iterator().next();
+    AccessControl accessControl = getAccessControl(principal, true);
+    if (!accessControl.authorizeOperation(null, context)) {
+      throw new SecurityException(ACCESS_DENIED_MESSAGE);
+    }
+  }
+
+}
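
CustomAuthRealm is a standard Shiro AuthorizingRealm, so besides being named in a shiro.ini it can be installed programmatically. A rough sketch of such wiring is below, assuming the caller already holds the GemFire security properties; this is illustrative only and is not code from this commit (SystemManagementService, updated further down, is where the realm is actually constructed).

import java.util.Properties;

import com.gemstone.gemfire.internal.security.shiro.CustomAuthRealm;

import org.apache.shiro.SecurityUtils;
import org.apache.shiro.mgt.DefaultSecurityManager;

public class ExampleShiroWiring {

  // Illustrative only: install CustomAuthRealm as the sole Shiro realm.
  public static void configure(Properties securityProps) {
    CustomAuthRealm realm = new CustomAuthRealm(securityProps);
    DefaultSecurityManager securityManager = new DefaultSecurityManager(realm);
    SecurityUtils.setSecurityManager(securityManager);
  }
}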

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f04b6695/geode-core/src/main/java/com/gemstone/gemfire/internal/security/shiro/JMXShiroAuthenticator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/security/shiro/JMXShiroAuthenticator.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/security/shiro/JMXShiroAuthenticator.java
new file mode 100644
index 0000000..4a4cc28
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/security/shiro/JMXShiroAuthenticator.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.internal.security.shiro;
+
+import static com.gemstone.gemfire.management.internal.security.ResourceConstants.*;
+
+import java.util.Collections;
+import java.util.Properties;
+import javax.management.Notification;
+import javax.management.NotificationListener;
+import javax.management.remote.JMXAuthenticator;
+import javax.management.remote.JMXConnectionNotification;
+import javax.management.remote.JMXPrincipal;
+import javax.security.auth.Subject;
+
+import com.gemstone.gemfire.internal.security.GeodeSecurityUtil;
+import com.gemstone.gemfire.management.internal.security.ResourceConstants;
+
+/**
+ * this will make JMX authentication to use Shiro for Authentication
+ */
+
+public class JMXShiroAuthenticator implements JMXAuthenticator, NotificationListener {
+
+  @Override
+  public Subject authenticate(Object credentials) {
+    String username = null, password = null;
+    if (credentials instanceof String[]) {
+      final String[] aCredentials = (String[]) credentials;
+      username = aCredentials[0];
+      password = aCredentials[1];
+    } else if (credentials instanceof Properties) {
+      username = ((Properties) credentials).getProperty(ResourceConstants.USER_NAME);
+      password = ((Properties) credentials).getProperty(ResourceConstants.PASSWORD);
+    } else {
+      throw new SecurityException(WRONGE_CREDENTIALS_MESSAGE);
+    }
+
+    GeodeSecurityUtil.login(username, password);
+
+    return new Subject(true, Collections.singleton(new JMXPrincipal(username)), Collections.EMPTY_SET,
+      Collections.EMPTY_SET);
+  }
+
+  @Override
+  public void handleNotification(Notification notification, Object handback) {
+    if (notification instanceof JMXConnectionNotification) {
+      JMXConnectionNotification cxNotification = (JMXConnectionNotification) notification;
+      String type = cxNotification.getType();
+      if (JMXConnectionNotification.CLOSED.equals(type)) {
+        GeodeSecurityUtil.logout();
+      }
+    }
+  }
+}
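
JMXShiroAuthenticator is intended to serve two roles: the JMXAuthenticator handed to the JMX connector server, and a NotificationListener so that a CLOSED connection notification triggers a Shiro logout. A simplified sketch of that wiring is below; the connector setup is a generic JDK example and is not copied from ManagementAgent (which is updated in the next hunk).

import java.lang.management.ManagementFactory;
import java.rmi.registry.LocateRegistry;
import java.util.HashMap;
import javax.management.MBeanServer;
import javax.management.remote.JMXConnectorServer;
import javax.management.remote.JMXConnectorServerFactory;
import javax.management.remote.JMXServiceURL;

import com.gemstone.gemfire.internal.security.shiro.JMXShiroAuthenticator;

public class ExampleJmxWiring {

  // Simplified, illustrative wiring; the real setup lives in ManagementAgent.
  public static JMXConnectorServer start(int port) throws Exception {
    LocateRegistry.createRegistry(port);
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    JMXShiroAuthenticator authenticator = new JMXShiroAuthenticator();

    HashMap<String, Object> env = new HashMap<>();
    env.put(JMXConnectorServer.AUTHENTICATOR, authenticator);

    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://localhost:" + port + "/jmxrmi");
    JMXConnectorServer server =
        JMXConnectorServerFactory.newJMXConnectorServer(url, env, mbs);

    // Log the Shiro subject out when a client connection closes.
    server.addNotificationListener(authenticator, null, null);
    server.start();
    return server;
  }
}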

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f04b6695/geode-core/src/main/java/com/gemstone/gemfire/management/internal/ManagementAgent.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/ManagementAgent.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/ManagementAgent.java
index d6c18df..adc69c4 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/ManagementAgent.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/ManagementAgent.java
@@ -61,7 +61,7 @@ import com.gemstone.gemfire.management.internal.security.AccessControlMBean;
 import com.gemstone.gemfire.management.internal.security.MBeanServerWrapper;
 import com.gemstone.gemfire.management.internal.security.ResourceConstants;
 import com.gemstone.gemfire.management.internal.unsafe.ReadOpFileAccessController;
-import com.gemstone.gemfire.security.JMXShiroAuthenticator;
+import com.gemstone.gemfire.internal.security.shiro.JMXShiroAuthenticator;
 
 import org.apache.logging.log4j.Logger;
 import org.eclipse.jetty.server.Server;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f04b6695/geode-core/src/main/java/com/gemstone/gemfire/management/internal/SystemManagementService.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/SystemManagementService.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/SystemManagementService.java
index 7fec9b7..dac016e 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/SystemManagementService.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/SystemManagementService.java
@@ -54,7 +54,7 @@ import com.gemstone.gemfire.management.RegionMXBean;
 import com.gemstone.gemfire.management.internal.beans.ManagementAdapter;
 import com.gemstone.gemfire.management.membership.MembershipEvent;
 import com.gemstone.gemfire.management.membership.MembershipListener;
-import com.gemstone.gemfire.security.CustomAuthRealm;
+import com.gemstone.gemfire.internal.security.shiro.CustomAuthRealm;
 import org.apache.logging.log4j.Logger;
 import org.apache.shiro.SecurityUtils;
 import org.apache.shiro.config.IniSecurityManagerFactory;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f04b6695/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/DataCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/DataCommands.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/DataCommands.java
index 61803fe..fafea9a 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/DataCommands.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/DataCommands.java
@@ -73,7 +73,7 @@ import com.gemstone.gemfire.management.internal.cli.result.ResultBuilder;
 import com.gemstone.gemfire.management.internal.cli.result.TabularResultData;
 import com.gemstone.gemfire.management.internal.cli.shell.Gfsh;
 import com.gemstone.gemfire.management.internal.security.ResourceOperation;
-import com.gemstone.gemfire.security.GeodeSecurityUtil;
+import com.gemstone.gemfire.internal.security.GeodeSecurityUtil;
 
 import org.springframework.shell.core.CommandMarker;
 import org.springframework.shell.core.annotation.CliAvailabilityIndicator;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f04b6695/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/remote/CommandProcessor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/remote/CommandProcessor.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/remote/CommandProcessor.java
index c3b0b7f..7edc3e4 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/remote/CommandProcessor.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/remote/CommandProcessor.java
@@ -31,7 +31,7 @@ import com.gemstone.gemfire.management.internal.cli.result.ResultBuilder;
 import com.gemstone.gemfire.management.internal.cli.util.CommentSkipHelper;
 import com.gemstone.gemfire.management.internal.security.ResourceOperation;
 import com.gemstone.gemfire.security.GemFireSecurityException;
-import com.gemstone.gemfire.security.GeodeSecurityUtil;
+import com.gemstone.gemfire.internal.security.GeodeSecurityUtil;
 
 import org.springframework.shell.core.Parser;
 import org.springframework.shell.event.ParseResult;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f04b6695/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/AccessControlMBean.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/AccessControlMBean.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/AccessControlMBean.java
index 33b80e2..1a7191b 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/AccessControlMBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/AccessControlMBean.java
@@ -17,7 +17,7 @@
 package com.gemstone.gemfire.management.internal.security;
 
 import com.gemstone.gemfire.security.GemFireSecurityException;
-import com.gemstone.gemfire.security.GeodeSecurityUtil;
+import com.gemstone.gemfire.internal.security.GeodeSecurityUtil;
 
 /**
  * AccessControlMBean Implementation. This retrieves JMXPrincipal from AccessController

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f04b6695/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/MBeanServerWrapper.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/MBeanServerWrapper.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/MBeanServerWrapper.java
index 8d1031a..99cbe2e 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/MBeanServerWrapper.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/MBeanServerWrapper.java
@@ -46,7 +46,7 @@ import javax.management.remote.MBeanServerForwarder;
 
 import com.gemstone.gemfire.management.internal.ManagementConstants;
 import com.gemstone.gemfire.security.GemFireSecurityException;
-import com.gemstone.gemfire.security.GeodeSecurityUtil;
+import com.gemstone.gemfire.internal.security.GeodeSecurityUtil;
 
 /**
  * This class intercepts all MBean requests for GemFire MBeans and passed it to

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f04b6695/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/AbstractCommandsController.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/AbstractCommandsController.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/AbstractCommandsController.java
index 08865b4..c411972 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/AbstractCommandsController.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/AbstractCommandsController.java
@@ -50,7 +50,7 @@ import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
 import com.gemstone.gemfire.management.internal.web.controllers.support.LoginHandlerInterceptor;
 import com.gemstone.gemfire.management.internal.web.controllers.support.MemberMXBeanAdapter;
 import com.gemstone.gemfire.management.internal.web.util.UriUtils;
-import com.gemstone.gemfire.security.GeodeSecurityUtil;
+import com.gemstone.gemfire.internal.security.GeodeSecurityUtil;
 
 import org.apache.logging.log4j.Logger;
 import org.springframework.beans.propertyeditors.StringArrayPropertyEditor;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f04b6695/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/LoginHandlerInterceptor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/LoginHandlerInterceptor.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/LoginHandlerInterceptor.java
index 5465ea3..e6cdbee 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/LoginHandlerInterceptor.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/LoginHandlerInterceptor.java
@@ -27,7 +27,7 @@ import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.internal.logging.LogService;
 import com.gemstone.gemfire.management.internal.security.ResourceConstants;
 import com.gemstone.gemfire.security.Authenticator;
-import com.gemstone.gemfire.security.GeodeSecurityUtil;
+import com.gemstone.gemfire.internal.security.GeodeSecurityUtil;
 
 import org.apache.logging.log4j.Logger;
 import org.springframework.web.servlet.handler.HandlerInterceptorAdapter;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f04b6695/geode-core/src/main/java/com/gemstone/gemfire/security/CustomAuthRealm.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/security/CustomAuthRealm.java b/geode-core/src/main/java/com/gemstone/gemfire/security/CustomAuthRealm.java
deleted file mode 100644
index 706a7cc..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/security/CustomAuthRealm.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.security;
-
-import static com.gemstone.gemfire.management.internal.security.ResourceConstants.*;
-
-import java.lang.reflect.Method;
-import java.security.AccessControlContext;
-import java.security.AccessController;
-import java.security.Principal;
-import java.util.Properties;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import javax.management.remote.JMXPrincipal;
-import javax.security.auth.Subject;
-
-import com.gemstone.gemfire.cache.operations.OperationContext;
-import com.gemstone.gemfire.distributed.internal.DistributionConfig;
-import com.gemstone.gemfire.internal.ClassLoadUtil;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.internal.lang.StringUtils;
-import com.gemstone.gemfire.management.internal.security.ResourceConstants;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.apache.shiro.authc.AuthenticationException;
-import org.apache.shiro.authc.AuthenticationInfo;
-import org.apache.shiro.authc.AuthenticationToken;
-import org.apache.shiro.authc.SimpleAuthenticationInfo;
-import org.apache.shiro.authc.UsernamePasswordToken;
-import org.apache.shiro.authz.AuthorizationInfo;
-import org.apache.shiro.authz.Permission;
-import org.apache.shiro.realm.AuthorizingRealm;
-import org.apache.shiro.subject.PrincipalCollection;
-
-public class CustomAuthRealm extends AuthorizingRealm{
-  public static final String REALM_NAME = "CUSTOMAUTHREALM";
-
-  private static final Logger logger = LogManager.getLogger(CustomAuthRealm.class);
-  private String authzFactoryName;
-  private String postAuthzFactoryName;
-  private String authenticatorFactoryName;
-  private Properties securityProps = null;
-  private ConcurrentMap<Principal, AccessControl> cachedAuthZCallback;
-  private ConcurrentMap<Principal, AccessControl> cachedPostAuthZCallback;
-
-  public CustomAuthRealm(Properties securityProps) {
-    this.securityProps = securityProps;
-    this.authzFactoryName = securityProps.getProperty(DistributionConfig.SECURITY_CLIENT_ACCESSOR_NAME);
-    this.postAuthzFactoryName = securityProps.getProperty(DistributionConfig.SECURITY_CLIENT_ACCESSOR_PP_NAME);
-    this.authenticatorFactoryName = securityProps.getProperty(DistributionConfig.SECURITY_CLIENT_AUTHENTICATOR_NAME);
-    this.cachedAuthZCallback = new ConcurrentHashMap<>();
-    this.cachedPostAuthZCallback = new ConcurrentHashMap<>();
-  }
-
-  @Override
-  protected AuthenticationInfo doGetAuthenticationInfo(AuthenticationToken token) throws AuthenticationException {
-    UsernamePasswordToken authToken = (UsernamePasswordToken) token;
-    String username = authToken.getUsername();
-    String password = new String(authToken.getPassword());
-
-    Properties credentialProps = new Properties();
-    credentialProps.put(ResourceConstants.USER_NAME, username);
-    credentialProps.put(ResourceConstants.PASSWORD, password);
-
-    Principal principal  = getAuthenticator(securityProps).authenticate(credentialProps);
-
-    return new SimpleAuthenticationInfo(principal, authToken.getPassword(), REALM_NAME);
-  }
-
-
-  @Override
-  protected AuthorizationInfo doGetAuthorizationInfo(PrincipalCollection principals) {
-    // we intercepted the call to this method by overriding the isPermitted call
-    return null;
-  }
-
-  @Override
-  public boolean isPermitted(PrincipalCollection principals, Permission permission) {
-    OperationContext context =(OperationContext)permission;
-    Principal principal = (Principal)principals.getPrimaryPrincipal();
-    // if no access control is specified, then we allow all
-    if(StringUtils.isBlank(authzFactoryName))
-      return true;
-    AccessControl accessControl = getAccessControl(principal, false);
-    return accessControl.authorizeOperation(context.getRegionName(), context);
-  }
-
-  public AccessControl getAccessControl(Principal principal, boolean isPost) {
-    if (!isPost) {
-      if (cachedAuthZCallback.containsKey(principal)) {
-        return cachedAuthZCallback.get(principal);
-      } else if (!StringUtils.isBlank(authzFactoryName)) {
-        try {
-          Method authzMethod = ClassLoadUtil.methodFromName(authzFactoryName);
-          AccessControl authzCallback = (AccessControl) authzMethod.invoke(null, (Object[]) null);
-          authzCallback.init(principal, null);
-          cachedAuthZCallback.put(principal, authzCallback);
-          return authzCallback;
-        } catch (Exception ex) {
-          throw new AuthenticationFailedException(
-              LocalizedStrings.HandShake_FAILED_TO_ACQUIRE_AUTHENTICATOR_OBJECT.toLocalizedString(), ex);
-        }
-      }
-    } else {
-      if (cachedPostAuthZCallback.containsKey(principal)) {
-        return cachedPostAuthZCallback.get(principal);
-      } else if (!StringUtils.isBlank(postAuthzFactoryName)) {
-        try {
-          Method authzMethod = ClassLoadUtil.methodFromName(postAuthzFactoryName);
-          AccessControl postAuthzCallback = (AccessControl) authzMethod.invoke(null, (Object[]) null);
-          postAuthzCallback.init(principal, null);
-          cachedPostAuthZCallback.put(principal, postAuthzCallback);
-          return postAuthzCallback;
-        } catch (Exception ex) {
-          throw new AuthenticationFailedException(
-              LocalizedStrings.HandShake_FAILED_TO_ACQUIRE_AUTHENTICATOR_OBJECT.toLocalizedString(), ex);
-        }
-      }
-    }
-    return null;
-  }
-
-  private Authenticator getAuthenticator(Properties gfSecurityProperties) throws AuthenticationFailedException {
-    Authenticator auth;
-    try {
-      Method instanceGetter = ClassLoadUtil.methodFromName(this.authenticatorFactoryName);
-      auth = (Authenticator) instanceGetter.invoke(null, (Object[]) null);
-    } catch (Exception ex) {
-      throw new AuthenticationFailedException(
-          LocalizedStrings.HandShake_FAILED_TO_ACQUIRE_AUTHENTICATOR_OBJECT.toLocalizedString(), ex);
-    }
-    if (auth == null) {
-      throw new AuthenticationFailedException(
-          LocalizedStrings.HandShake_AUTHENTICATOR_INSTANCE_COULD_NOT_BE_OBTAINED.toLocalizedString());
-    }
-    auth.init(gfSecurityProperties);
-    return auth;
-  }
-
-  public void postAuthorize(OperationContext context) {
-    if (StringUtils.isBlank(postAuthzFactoryName)){
-      return ;
-    }
-
-    AccessControlContext acc = AccessController.getContext();
-    Subject subject = Subject.getSubject(acc);
-    Set<JMXPrincipal> principals = subject.getPrincipals(JMXPrincipal.class);
-    if (principals == null || principals.isEmpty()) {
-      throw new SecurityException(ACCESS_DENIED_MESSAGE);
-    }
-    Principal principal = principals.iterator().next();
-    AccessControl accessControl = getAccessControl(principal, true);
-    if (!accessControl.authorizeOperation(null, context)) {
-      throw new SecurityException(ACCESS_DENIED_MESSAGE);
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f04b6695/geode-core/src/main/java/com/gemstone/gemfire/security/GeodeSecurityUtil.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/security/GeodeSecurityUtil.java b/geode-core/src/main/java/com/gemstone/gemfire/security/GeodeSecurityUtil.java
deleted file mode 100644
index 148a963..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/security/GeodeSecurityUtil.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.security;
-
-import java.util.concurrent.Callable;
-
-import com.gemstone.gemfire.cache.operations.OperationContext;
-import com.gemstone.gemfire.cache.operations.OperationContext.OperationCode;
-import com.gemstone.gemfire.cache.operations.OperationContext.Resource;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.management.internal.security.ResourceOperation;
-import com.gemstone.gemfire.management.internal.security.ResourceOperationContext;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.logging.log4j.Logger;
-import org.apache.shiro.SecurityUtils;
-import org.apache.shiro.ShiroException;
-import org.apache.shiro.UnavailableSecurityManagerException;
-import org.apache.shiro.authc.UsernamePasswordToken;
-import org.apache.shiro.subject.Subject;
-import org.apache.shiro.util.ThreadContext;
-
-public class GeodeSecurityUtil {
-
-  private static Logger logger = LogService.getLogger();
-
-  public static void login(String username, String password){
-    if(!isShiroConfigured())
-      return;
-
-    Subject currentUser = SecurityUtils.getSubject();
-
-    UsernamePasswordToken token =
-        new UsernamePasswordToken(username, password);
-    try {
-      logger.info("Logging in "+username+"/"+password);
-      currentUser.login(token);
-    } catch (ShiroException e) {
-      throw new AuthenticationFailedException(e.getMessage(), e);
-    }
-  }
-
-  public static void logout(){
-    if(!isShiroConfigured())
-      return;
-
-    Subject currentUser = SecurityUtils.getSubject();
-    try {
-      logger.info("Logging out "+currentUser.getPrincipal());
-      currentUser.logout();
-    }
-    catch(ShiroException e){
-      throw new AuthenticationFailedException(e.getMessage(), e);
-    }
-    // clean out Shiro's thread local content
-    ThreadContext.remove();
-  }
-
-  public static Callable associateWith(Callable callable){
-    if(!isShiroConfigured())
-      return callable;
-
-    Subject currentUser = SecurityUtils.getSubject();
-    return currentUser.associateWith(callable);
-  }
-
-  public static void authorize(ResourceOperation resourceOperation) {
-    if(resourceOperation==null)
-      return;
-
-    authorize(resourceOperation.resource().name(),
-      resourceOperation.operation().name(),
-      null);
-  }
-
-  public static void authorizeClusterManage(){
-    authorize("CLUSTER", "MANAGE");
-  }
-
-  public static void authorizeClusterWrite(){
-    authorize("CLUSTER", "WRITE");
-  }
-
-  public static void authorizeClusterRead(){
-    authorize("CLUSTER", "READ");
-  }
-
-  public static void authorizeDataManage(){
-    authorize("DATA", "MANAGE");
-  }
-
-  public static void authorizeDataWrite(){
-    authorize("DATA", "WRITE");
-  }
-
-  public static void authorizeDataRead(){
-    authorize("DATA", "READ");
-  }
-
-  public static void authorizeRegionWrite(String regionName){
-    authorize("DATA", "WRITE", regionName);
-  }
-
-  public static void authorizeRegionRead(String regionName){
-    authorize("DATA", "READ", regionName);
-  }
-
-  public static void authorize(String resource, String operation){
-    authorize(resource, operation, null);
-  }
-
-  private static void authorize(String resource, String operation, String regionName){
-    regionName = StringUtils.stripStart(regionName, "/");
-    authorize(new ResourceOperationContext(resource, operation, regionName));
-  }
-
-  public static void authorize(OperationContext context) {
-    if(context==null)
-      return;
-
-    if(context.getResource()== Resource.NULL && context.getOperationCode()== OperationCode.NULL)
-      return;
-
-    if(!isShiroConfigured())
-      return;
-
-
-    Subject currentUser = SecurityUtils.getSubject();
-    try {
-      currentUser.checkPermission(context);
-    }
-    catch(ShiroException e){
-      logger.info(currentUser.getPrincipal() + " not authorized for " + context);
-      throw new GemFireSecurityException(e.getMessage(), e);
-    }
-  }
-
-  private static boolean isShiroConfigured(){
-    try{
-      SecurityUtils.getSecurityManager();
-    }
-    catch(UnavailableSecurityManagerException e){
-      return false;
-    }
-    return true;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f04b6695/geode-core/src/main/java/com/gemstone/gemfire/security/JMXShiroAuthenticator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/security/JMXShiroAuthenticator.java b/geode-core/src/main/java/com/gemstone/gemfire/security/JMXShiroAuthenticator.java
deleted file mode 100644
index c55e700..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/security/JMXShiroAuthenticator.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.security;
-
-import static com.gemstone.gemfire.management.internal.security.ResourceConstants.*;
-
-import java.util.Collections;
-import java.util.Properties;
-import javax.management.Notification;
-import javax.management.NotificationListener;
-import javax.management.remote.JMXAuthenticator;
-import javax.management.remote.JMXConnectionNotification;
-import javax.management.remote.JMXPrincipal;
-import javax.security.auth.Subject;
-
-import com.gemstone.gemfire.management.internal.security.ResourceConstants;
-
-/**
- * this will make JMX authentication to use Shiro for Authentication
- */
-
-public class JMXShiroAuthenticator implements JMXAuthenticator, NotificationListener {
-
-  @Override
-  public Subject authenticate(Object credentials) {
-    String username = null, password = null;
-    if (credentials instanceof String[]) {
-      final String[] aCredentials = (String[]) credentials;
-      username = aCredentials[0];
-      password = aCredentials[1];
-    } else if (credentials instanceof Properties) {
-      username = ((Properties) credentials).getProperty(ResourceConstants.USER_NAME);
-      password = ((Properties) credentials).getProperty(ResourceConstants.PASSWORD);
-    } else {
-      throw new SecurityException(WRONGE_CREDENTIALS_MESSAGE);
-    }
-
-    GeodeSecurityUtil.login(username, password);
-
-    return new Subject(true, Collections.singleton(new JMXPrincipal(username)), Collections.EMPTY_SET,
-      Collections.EMPTY_SET);
-  }
-
-  @Override
-  public void handleNotification(Notification notification, Object handback) {
-    if (notification instanceof JMXConnectionNotification) {
-      JMXConnectionNotification cxNotification = (JMXConnectionNotification) notification;
-      String type = cxNotification.getType();
-      if (JMXConnectionNotification.CLOSED.equals(type)) {
-        GeodeSecurityUtil.logout();
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f04b6695/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GeodeSecurityUtilCustomRealmJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GeodeSecurityUtilCustomRealmJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GeodeSecurityUtilCustomRealmJUnitTest.java
index cc6af0e..0bf3cab 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GeodeSecurityUtilCustomRealmJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GeodeSecurityUtilCustomRealmJUnitTest.java
@@ -20,7 +20,7 @@ package com.gemstone.gemfire.management.internal.security;
 import java.util.Properties;
 
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
-import com.gemstone.gemfire.security.CustomAuthRealm;
+import com.gemstone.gemfire.internal.security.shiro.CustomAuthRealm;
 import com.gemstone.gemfire.test.junit.categories.UnitTest;
 
 import org.apache.shiro.SecurityUtils;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f04b6695/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GeodeSecurityUtilWithIniFileJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GeodeSecurityUtilWithIniFileJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GeodeSecurityUtilWithIniFileJUnitTest.java
index 4ad390d..fe80180 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GeodeSecurityUtilWithIniFileJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GeodeSecurityUtilWithIniFileJUnitTest.java
@@ -21,7 +21,7 @@ import static org.assertj.core.api.Assertions.*;
 
 import com.gemstone.gemfire.cache.operations.OperationContext;
 import com.gemstone.gemfire.security.GemFireSecurityException;
-import com.gemstone.gemfire.security.GeodeSecurityUtil;
+import com.gemstone.gemfire.internal.security.GeodeSecurityUtil;
 import com.gemstone.gemfire.test.junit.categories.UnitTest;
 
 import org.apache.shiro.SecurityUtils;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f04b6695/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/Server.java
----------------------------------------------------------------------
diff --git a/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/Server.java b/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/Server.java
index 0ae5d26..3759895 100644
--- a/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/Server.java
+++ b/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/Server.java
@@ -42,8 +42,8 @@ import com.gemstone.gemfire.management.internal.security.AccessControlMBean;
 import com.gemstone.gemfire.management.internal.security.JSONAuthorization;
 import com.gemstone.gemfire.management.internal.security.MBeanServerWrapper;
 import com.gemstone.gemfire.management.internal.security.ResourceConstants;
-import com.gemstone.gemfire.security.CustomAuthRealm;
-import com.gemstone.gemfire.security.JMXShiroAuthenticator;
+import com.gemstone.gemfire.internal.security.shiro.CustomAuthRealm;
+import com.gemstone.gemfire.internal.security.shiro.JMXShiroAuthenticator;
 import com.vmware.gemfire.tools.pulse.internal.data.PulseConstants;
 
 import org.apache.shiro.SecurityUtils;


[46/63] [abbrv] incubator-geode git commit: GEODE-17: do not initialize the JSONAuthorization with a default json

Posted by kl...@apache.org.
GEODE-17: do not initialize the JSONAuthorization with a default json


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/c06a7956
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/c06a7956
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/c06a7956

Branch: refs/heads/feature/GEODE-1276
Commit: c06a7956619284da55c4028f289f3b2c922e437c
Parents: 72be65f
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Fri Apr 29 14:25:14 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Fri Apr 29 14:25:14 2016 -0700

----------------------------------------------------------------------
 .../management/internal/security/ResourceOperationContext.java     | 2 +-
 .../gemfire/management/internal/security/JSONAuthorization.java    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c06a7956/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContext.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContext.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContext.java
index 50f9b78..7f6f72e 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContext.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContext.java
@@ -45,7 +45,7 @@ public class ResourceOperationContext extends OperationContext {
     if (operation != null) this.operation = OperationCode.valueOf(operation);
     if (regionName !=null ) this.regionName = regionName;
 
-    setParts(this.resource.name()+":"+this.operation.name()+":"+regionName);
+    setParts(this.resource.name()+":"+this.operation.name()+":"+this.regionName);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c06a7956/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java
index 9670822..7f1d2bf 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java
@@ -66,7 +66,7 @@ public class JSONAuthorization implements AccessControl, Authenticator {
 
   public JSONAuthorization() throws IOException, JSONException {
     // initialize with a default json file
-    setUpWithJsonFile("shiro-ini.json");
+    //setUpWithJsonFile("shiro-ini.json");
   }
 
   public JSONAuthorization(String jsonFileName) throws IOException, JSONException {
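
With the no-arg constructor no longer loading shiro-ini.json, tests that use JSONAuthorization now have to name the JSON ACL file they want explicitly. A hedged sketch of what that looks like with the String constructor shown above; "cacheServer.json" is a placeholder file name, not something specified by this commit.

import com.gemstone.gemfire.management.internal.security.JSONAuthorization;

public class ExampleAuthorizationSetup {

  // Placeholder file name; the point is that the ACL definition must now be
  // passed explicitly instead of relying on a built-in default.
  public static JSONAuthorization create() throws Exception {
    return new JSONAuthorization("cacheServer.json");
  }
}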


[54/63] [abbrv] incubator-geode git commit: GEODE-11: Added conditional for null case

Posted by kl...@apache.org.
GEODE-11: Added conditional for null case


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/1400eaed
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/1400eaed
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/1400eaed

Branch: refs/heads/feature/GEODE-1276
Commit: 1400eaed861288da69a1c0ab669e7cbedb3621c1
Parents: 2a786ee
Author: Barry Oglesby <bo...@pivotal.io>
Authored: Mon May 2 14:21:56 2016 -0700
Committer: Barry Oglesby <bo...@pivotal.io>
Committed: Mon May 2 14:21:56 2016 -0700

----------------------------------------------------------------------
 .../gemstone/gemfire/cache/lucene/internal/LuceneIndexImpl.java    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1400eaed/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexImpl.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexImpl.java b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexImpl.java
index f530f8c..1158fd1 100644
--- a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexImpl.java
+++ b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexImpl.java
@@ -91,7 +91,7 @@ public abstract class LuceneIndexImpl implements InternalLuceneIndex {
   }
 
   public void setFieldAnalyzers(Map<String, Analyzer> fieldAnalyzers) {
-    this.fieldAnalyzers = Collections.unmodifiableMap(fieldAnalyzers);
+    this.fieldAnalyzers = fieldAnalyzers == null ? null : Collections.unmodifiableMap(fieldAnalyzers);
   }
 
   protected abstract void initialize();
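
The conditional matters because Collections.unmodifiableMap rejects a null argument rather than returning null, so setFieldAnalyzers(null) previously threw a NullPointerException. A small standalone illustration of that JDK behaviour (plain Java, not Geode code):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class UnmodifiableMapNullDemo {

  public static void main(String[] args) {
    Map<String, String> wrapped = Collections.unmodifiableMap(new HashMap<>());
    System.out.println("Wrapping an empty map is fine: " + wrapped);

    try {
      Collections.unmodifiableMap(null); // throws NullPointerException
    } catch (NullPointerException expected) {
      System.out.println("Wrapping null throws NPE, hence the null check above");
    }
  }
}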


[25/63] [abbrv] incubator-geode git commit: GEODE-710: Replaced thread.sleep with Awaitility

Posted by kl...@apache.org.
GEODE-710: Replaced thread.sleep with Awaitility

* Replaced Thread.sleep with Awaitility.
* Added an Awaitility wait for the isCancelled flag to become true before
checking the counter values.

This closes #134


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/20117a80
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/20117a80
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/20117a80

Branch: refs/heads/feature/GEODE-1276
Commit: 20117a80387095194b80ddf9c8dc6c2cdba8c91a
Parents: 55d8b9f
Author: nabarun <nn...@pivotal.io>
Authored: Wed Apr 20 14:07:39 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Wed Apr 27 16:01:04 2016 -0700

----------------------------------------------------------------------
 ...ScheduledThreadPoolExecutorWithKeepAliveJUnitTest.java | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/20117a80/geode-core/src/test/java/com/gemstone/gemfire/internal/ScheduledThreadPoolExecutorWithKeepAliveJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/ScheduledThreadPoolExecutorWithKeepAliveJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/ScheduledThreadPoolExecutorWithKeepAliveJUnitTest.java
index 8cddfa6..5aaa124 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/ScheduledThreadPoolExecutorWithKeepAliveJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/ScheduledThreadPoolExecutorWithKeepAliveJUnitTest.java
@@ -35,6 +35,7 @@ import org.junit.experimental.categories.Category;
 
 import com.gemstone.gemfire.test.junit.categories.FlakyTest;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+import com.jayway.awaitility.Awaitility;
 
 @Category(IntegrationTest.class)
 public class ScheduledThreadPoolExecutorWithKeepAliveJUnitTest {
@@ -190,9 +191,12 @@ public class ScheduledThreadPoolExecutorWithKeepAliveJUnitTest {
       }
     };
     ScheduledFuture f = ex.scheduleAtFixedRate(run, 0, 1, TimeUnit.SECONDS);
-    Thread.sleep(5000);
-    f.cancel(true);
-    assertTrue("Task was not executed repeatedly", counter.get() > 1);
+    Awaitility.await().atMost(30,TimeUnit.SECONDS).until(() -> assertEquals("Task was not executed repeatedly"
+      ,true, counter.get() > 1));
+    Awaitility.await().atMost(30, TimeUnit.SECONDS).until(() -> assertEquals("The task could not be cancelled"
+      ,true, f.cancel(true)));
+    Awaitility.await().atMost(30,TimeUnit.SECONDS).until(() -> assertEquals("Task was not cancelled within 30 sec"
+      ,true,f.isCancelled()));
     int oldValue = counter.get();
     Thread.sleep(5000);
     assertEquals("Task was not cancelled", oldValue, counter.get());


[51/63] [abbrv] incubator-geode git commit: GEODE-1323: Adding SuppressWarnings for unchecked assignment

Posted by kl...@apache.org.
GEODE-1323: Adding SuppressWarnings for unchecked assignment


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/152ef59a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/152ef59a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/152ef59a

Branch: refs/heads/feature/GEODE-1276
Commit: 152ef59af96fd26f334d249e9d4b827413f1afe8
Parents: 9681329
Author: Jens Deppe <jd...@pivotal.io>
Authored: Mon May 2 09:41:27 2016 -0700
Committer: Jens Deppe <jd...@pivotal.io>
Committed: Mon May 2 09:41:27 2016 -0700

----------------------------------------------------------------------
 .../gemfire/modules/session/junit/PerTestClassLoaderRunner.java    | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/152ef59a/extensions/geode-modules-session/src/test/java/com/gemstone/gemfire/modules/session/junit/PerTestClassLoaderRunner.java
----------------------------------------------------------------------
diff --git a/extensions/geode-modules-session/src/test/java/com/gemstone/gemfire/modules/session/junit/PerTestClassLoaderRunner.java b/extensions/geode-modules-session/src/test/java/com/gemstone/gemfire/modules/session/junit/PerTestClassLoaderRunner.java
index dda5307..c56a259 100644
--- a/extensions/geode-modules-session/src/test/java/com/gemstone/gemfire/modules/session/junit/PerTestClassLoaderRunner.java
+++ b/extensions/geode-modules-session/src/test/java/com/gemstone/gemfire/modules/session/junit/PerTestClassLoaderRunner.java
@@ -179,6 +179,7 @@ public class PerTestClassLoaderRunner extends NamedRunner {
     return new RunBefores(statement, befores, target);
   }
 
+  @SuppressWarnings("unchecked")
   @Override
   protected List<MethodRule> rules(Object target) {
     List<MethodRule> result = testClassFromClassLoader.getAnnotatedMethodValues(target,
@@ -190,6 +191,7 @@ public class PerTestClassLoaderRunner extends NamedRunner {
     return result;
   }
 
+  @SuppressWarnings("unchecked")
   @Override
   protected List<TestRule> getTestRules(Object target) {
     List<TestRule> result = testClassFromClassLoader.getAnnotatedMethodValues(target,


[24/63] [abbrv] incubator-geode git commit: GEODE-935: Removed sleeps in SerialWANPropagationDUnitTest

Posted by kl...@apache.org.
GEODE-935: Removed sleeps in SerialWANPropagationDUnitTest

The sleeps in the test cases were removed because the Awaitility clause in
the subsequent region-size validation waits for at most 30 seconds.

This closes #133
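
A minimal sketch of the same polling idea applied to a region-size check (the method
below is a hypothetical helper, not WANTestBase.validateRegionSize itself):

    import java.util.concurrent.TimeUnit;

    import com.gemstone.gemfire.cache.Region;
    import com.jayway.awaitility.Awaitility;

    public class RegionSizeWaitSketch {
      // Poll until the region reaches the expected size instead of sleeping a fixed time.
      public static void awaitRegionSize(Region<?, ?> region, int expectedSize) {
        Awaitility.await().atMost(30, TimeUnit.SECONDS)
            .until(() -> region.size() == expectedSize);
      }
    }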


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/55d8b9fc
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/55d8b9fc
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/55d8b9fc

Branch: refs/heads/feature/GEODE-1276
Commit: 55d8b9fc8d11cb10d49e6c9c8cf414fb590f4041
Parents: 6fb84d9
Author: nabarun <nn...@pivotal.io>
Authored: Wed Apr 20 11:14:26 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Wed Apr 27 16:00:11 2016 -0700

----------------------------------------------------------------------
 .../internal/cache/wan/serial/SerialWANPropogationDUnitTest.java | 4 ----
 1 file changed, 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/55d8b9fc/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/serial/SerialWANPropogationDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/serial/SerialWANPropogationDUnitTest.java b/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/serial/SerialWANPropogationDUnitTest.java
index a9a6c29..65f5bca 100644
--- a/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/serial/SerialWANPropogationDUnitTest.java
+++ b/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/serial/SerialWANPropogationDUnitTest.java
@@ -137,8 +137,6 @@ public class SerialWANPropogationDUnitTest extends WANTestBase {
     vm2.invoke(() -> WANTestBase.createReceiver());
     vm3.invoke(() -> WANTestBase.createReceiver());
 
-    Thread.sleep(5000);
-
     vm4.invoke(() -> WANTestBase.validateRegionSize(
         getTestMethodName() + "_RR", 1000 ));
 
@@ -384,8 +382,6 @@ public class SerialWANPropogationDUnitTest extends WANTestBase {
       e.printStackTrace();
       fail();
     }
-    //sleep for some time to let all the events propagate to remote site
-    Thread.sleep(20);
     //vm4.invoke(() -> WANTestBase.verifyQueueSize( "ln", 0 ));
     vm2.invoke(() -> WANTestBase.validateRegionSize(
         getTestMethodName() + "_RR_1", 1000 ));


[40/63] [abbrv] incubator-geode git commit: GEODE-1321 Caught IllegalStateException and then ignored it

Posted by kl...@apache.org.
GEODE-1321 Caught IllegalStateException and then ignored it


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/bcae9065
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/bcae9065
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/bcae9065

Branch: refs/heads/feature/GEODE-1276
Commit: bcae906597b167c9bef3b14de5f1341a822f564a
Parents: a254c42
Author: Hitesh Khamesra <hk...@pivotal.io>
Authored: Thu Apr 28 15:04:23 2016 -0700
Committer: Hitesh Khamesra <hk...@pivotal.io>
Committed: Fri Apr 29 10:20:47 2016 -0700

----------------------------------------------------------------------
 .../internal/membership/gms/fd/GMSHealthMonitor.java          | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bcae9065/geode-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitor.java b/geode-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitor.java
index 2d0f039..f27e0b8 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitor.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitor.java
@@ -519,7 +519,12 @@ public class GMSHealthMonitor implements HealthMonitor, MessageHandler {
     catch (IOException e) {
       // this is expected if it is a connection-timeout or other failure
       // to connect
-    } 
+    }
+    catch (IllegalStateException e) {
+      if (!isStopping) {
+        logger.trace("Unexpected exception", e);
+      }
+    }
     finally {
       try {
         if (clientSocket != null) {


[55/63] [abbrv] incubator-geode git commit: GEODE-1329 auto-reconnect attempts cease if kicked out during boot-up of the cache

Posted by kl...@apache.org.
GEODE-1329 auto-reconnect attempts cease if kicked out during boot-up of the cache

InternalDistributedSystem.reconnect() now includes cache creation in its retry
loop; should the cache fail to start due to a CancelException, it will shut
down and try again.

While creating a new test in ReconnectDUnitTest I found and fixed problems
with the other tests in that class.  Notably, getDistributedSystemProperties()
wasn't returning the correct properties for many test cases because those
tests weren't setting the dsProperties variable and were using the getCache()
method.  This caused the current distributed system to be destroyed and a new
one created with different properties than the test wanted, which led to
periodic test failures.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/b893abe0
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/b893abe0
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/b893abe0

Branch: refs/heads/feature/GEODE-1276
Commit: b893abe094b2df73e51cdce1d0716fc984b1115c
Parents: 1400eae
Author: Bruce Schuchardt <bs...@pivotal.io>
Authored: Mon May 2 14:33:35 2016 -0700
Committer: Bruce Schuchardt <bs...@pivotal.io>
Committed: Mon May 2 14:35:38 2016 -0700

----------------------------------------------------------------------
 .../gemfire/distributed/DistributedSystem.java  |   7 +-
 .../internal/InternalDistributedSystem.java     | 240 +++++++++----------
 .../internal/cache/DistributedRegion.java       |  11 +-
 .../internal/cache/GemFireCacheImpl.java        |  20 +-
 .../gemfire/cache30/ReconnectDUnitTest.java     | 235 ++++++++++++------
 5 files changed, 308 insertions(+), 205 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b893abe0/geode-core/src/main/java/com/gemstone/gemfire/distributed/DistributedSystem.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/distributed/DistributedSystem.java b/geode-core/src/main/java/com/gemstone/gemfire/distributed/DistributedSystem.java
old mode 100644
new mode 100755
index 1de675d..3a52ee0
--- a/geode-core/src/main/java/com/gemstone/gemfire/distributed/DistributedSystem.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/distributed/DistributedSystem.java
@@ -1594,12 +1594,17 @@ public abstract class DistributedSystem implements StatisticsFactory {
 
       } else {
         boolean existingSystemDisconnecting = true;
-        while (!existingSystems.isEmpty() && existingSystemDisconnecting) {
+        boolean isReconnecting = false;
+        while (!existingSystems.isEmpty() && existingSystemDisconnecting && !isReconnecting) {
           Assert.assertTrue(existingSystems.size() == 1);
 
           InternalDistributedSystem existingSystem =
               (InternalDistributedSystem) existingSystems.get(0);
           existingSystemDisconnecting = existingSystem.isDisconnecting();
+          // a reconnecting DS will block on GemFireCache.class and a ReconnectThread
+          // holds that lock and invokes this method, so we break out of the loop
+          // if we detect this condition
+          isReconnecting = existingSystem.isReconnectingDS();
           if (existingSystemDisconnecting) {
             boolean interrupted = Thread.interrupted();
             try {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b893abe0/geode-core/src/main/java/com/gemstone/gemfire/distributed/internal/InternalDistributedSystem.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/distributed/internal/InternalDistributedSystem.java b/geode-core/src/main/java/com/gemstone/gemfire/distributed/internal/InternalDistributedSystem.java
old mode 100644
new mode 100755
index 3ef8e80..df85417
--- a/geode-core/src/main/java/com/gemstone/gemfire/distributed/internal/InternalDistributedSystem.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/distributed/internal/InternalDistributedSystem.java
@@ -1445,7 +1445,9 @@ public class InternalDistributedSystem
    * the attempt has been cancelled.
    */
   public boolean isReconnectCancelled() {
-    return this.reconnectCancelled;
+    synchronized(reconnectCancelledLock) {
+      return reconnectCancelled;
+    }
   }
 
   /**
@@ -2377,10 +2379,7 @@ public class InternalDistributedSystem
    * to reconnect and that failed.
    * */
   private volatile static int reconnectAttemptCounter = 0;
-  public static int getReconnectAttemptCounter() {
-    return reconnectAttemptCounter;
-  }
-  
+
   /**
    * The time at which reconnect attempts last began
    */
@@ -2420,9 +2419,21 @@ public class InternalDistributedSystem
    * this instance of the DS is now disconnected and unusable.
    */
   public boolean isReconnecting(){
-    return attemptingToReconnect || (reconnectDS != null);
+    InternalDistributedSystem rds = this.reconnectDS;
+    if (!attemptingToReconnect) {
+      return false;
+    }
+    if (reconnectCancelled) {
+      return false;
+    }
+    boolean newDsConnected = (rds == null || !rds.isConnected());
+    if (!newDsConnected) {
+      return false;
+    }
+    return true;
   }
-  
+
+
   /**
    * Returns true if we are reconnecting the distributed system
    * and this instance was created for one of the connection
@@ -2498,6 +2509,9 @@ public class InternalDistributedSystem
    */
   public boolean tryReconnect(boolean forcedDisconnect, String reason, GemFireCacheImpl oldCache) {
     final boolean isDebugEnabled = logger.isDebugEnabled();
+    if (this.isReconnectingDS && forcedDisconnect) {
+      return false;
+    }
     synchronized (CacheFactory.class) { // bug #51335 - deadlock with app thread trying to create a cache
       synchronized (GemFireCacheImpl.class) {
         // bug 39329: must lock reconnectLock *after* the cache
@@ -2535,7 +2549,7 @@ public class InternalDistributedSystem
    * Returns the value for the number of time reconnect has been tried.
    * Test method used by DUnit.
    * */
-  public static int getReconnectCount(){
+  public static int getReconnectAttemptCounter() {
     return reconnectAttemptCounter;
   }
   
@@ -2590,8 +2604,6 @@ public class InternalDistributedSystem
     int maxTries = oldConfig.getMaxNumReconnectTries();
 
     final boolean isDebugEnabled = logger.isDebugEnabled();
-    
-//    logger.info("reconnecting IDS@"+System.identityHashCode(this));
 
     if (Thread.currentThread().getName().equals("DisconnectThread")) {
       if (isDebugEnabled) {
@@ -2625,18 +2637,17 @@ public class InternalDistributedSystem
     }
     try {
       while (this.reconnectDS == null || !this.reconnectDS.isConnected()) {
-        synchronized(this.reconnectCancelledLock) {
-          if (this.reconnectCancelled) {
-            break;
-          }
+        if (isReconnectCancelled()) {
+          break;
         }
+
         if (!forcedDisconnect) {
           if (isDebugEnabled) {
             logger.debug("Max number of tries : {} and max time out : {}", maxTries, timeOut);
           }
           if(reconnectAttemptCounter >= maxTries){
             if (isDebugEnabled) {
-              logger.debug("Stopping the checkrequiredrole thread becuase reconnect : {} reached the max number of reconnect tries : {}", reconnectAttemptCounter, maxTries);
+              logger.debug("Stopping the checkrequiredrole thread because reconnect : {} reached the max number of reconnect tries : {}", reconnectAttemptCounter, maxTries);
             }
             throw new CacheClosedException(LocalizedStrings.InternalDistributedSystem_SOME_REQUIRED_ROLES_MISSING.toLocalizedString());
           }
@@ -2647,18 +2658,12 @@ public class InternalDistributedSystem
         }
         reconnectAttemptCounter++;
         
-        synchronized(this.reconnectCancelledLock) { 
-          if (this.reconnectCancelled) {
-            if (isDebugEnabled) {
-              logger.debug("reconnect can no longer be done because of an explicit disconnect");
-            }
-            return;
-          }
+        if (isReconnectCancelled()) {
+          return;
         }
     
         logger.info("Disconnecting old DistributedSystem to prepare for a reconnect attempt");
-//        logger.info("IDS@"+System.identityHashCode(this));
-        
+
         try {
           disconnect(true, reason, false);
         }
@@ -2667,7 +2672,6 @@ public class InternalDistributedSystem
         }
         
         try {
-  //        log.fine("waiting " + timeOut + " before reconnecting to the distributed system");
           reconnectLock.wait(timeOut);
         }
         catch (InterruptedException e) {
@@ -2675,13 +2679,9 @@ public class InternalDistributedSystem
           Thread.currentThread().interrupt();
           return;
         }
-        synchronized(this.reconnectCancelledLock) { 
-          if (this.reconnectCancelled) {
-            if (isDebugEnabled) {
-              logger.debug("reconnect can no longer be done because of an explicit disconnect");
-            }
-            return;
-          }
+
+        if (isReconnectCancelled()) {
+          return;
         }
         
     
@@ -2691,32 +2691,30 @@ public class InternalDistributedSystem
         try {
           // notify listeners of each attempt and then again after successful
           notifyReconnectListeners(this, this.reconnectDS, true);
+
           if (this.locatorDMTypeForced) {
             System.setProperty(InternalLocator.FORCE_LOCATOR_DM_TYPE, "true");
           }
-  //        log.fine("DistributedSystem@"+System.identityHashCode(this)+" reconnecting distributed system.  attempt #"+reconnectAttemptCounter);
+
           configProps.put(DistributionConfig.DS_RECONNECTING_NAME, Boolean.TRUE);
           if (quorumChecker != null) {
             configProps.put(DistributionConfig.DS_QUORUM_CHECKER_NAME, quorumChecker);
           }
+
           InternalDistributedSystem newDS = null;
-          synchronized(this.reconnectCancelledLock) { 
-            if (this.reconnectCancelled) {
-              if (isDebugEnabled) {
-                logger.debug("reconnect can no longer be done because of an explicit disconnect");
-              }
-              return;
-            }
+          if (isReconnectCancelled()) {
+            return;
           }
+
           try {
+
             newDS = (InternalDistributedSystem)connect(configProps);
-          } catch (DistributedSystemDisconnectedException e) {
-            synchronized(this.reconnectCancelledLock) {
-          	  if (this.reconnectCancelled) {
-          	    return;
-          	  } else {
-          	    throw e;
-          	  }
+
+          } catch (CancelException e) {
+            if (isReconnectCancelled()) {
+              return;
+            } else {
+              throw e;
             }
           } finally {
             if (newDS == null  &&  quorumChecker != null) {
@@ -2724,36 +2722,29 @@ public class InternalDistributedSystem
               quorumChecker.resume();
             }
           }
-          if (newDS != null) { // newDS will not be null here but findbugs requires this check
-            boolean cancelled;
-            synchronized(this.reconnectCancelledLock) { 
-              cancelled = this.reconnectCancelled;
-            }
-            if (cancelled) {
-              newDS.disconnect();
-            } else {
-              this.reconnectDS = newDS;
-              newDS.isReconnectingDS = false;
-              notifyReconnectListeners(this, this.reconnectDS, false);
-            }
+
+          if (this.reconnectCancelled) {
+            newDS.disconnect();
+            continue;
           }
+
+          this.reconnectDS = newDS;
         }
         catch (SystemConnectException e) {
-          // retry;
-          if (isDebugEnabled) {
-            logger.debug("Attempt to reconnect failed with SystemConnectException");
-          }
-          if (e.getMessage().contains("Rejecting the attempt of a member using an older version")
-              || e.getMessage().contains("15806")) { // 15806 is in the message if it's been localized to another language
+          logger.debug("Attempt to reconnect failed with SystemConnectException");
+
+          if (e.getMessage().contains("Rejecting the attempt of a member using an older version")) {
             logger.warn(LocalizedMessage.create(LocalizedStrings.InternalDistributedSystem_EXCEPTION_OCCURED_WHILE_TRYING_TO_CONNECT_THE_SYSTEM_DURING_RECONNECT), e);
             attemptingToReconnect = false;
             return;
           }
+          continue;
         }
         catch (GemFireConfigException e) {
           if (isDebugEnabled) {
             logger.debug("Attempt to reconnect failed with GemFireConfigException");
           }
+          continue;
         }
         catch (Exception ee) {
           logger.warn(LocalizedMessage.create(LocalizedStrings.InternalDistributedSystem_EXCEPTION_OCCURED_WHILE_TRYING_TO_CONNECT_THE_SYSTEM_DURING_RECONNECT), ee);
@@ -2766,9 +2757,64 @@ public class InternalDistributedSystem
           }
           reconnectAttemptCounter = savNumOfTries;
         }
+
+
+        DM newDM = this.reconnectDS.getDistributionManager();
+        if ( !inhibitCacheForSQLFire && (newDM instanceof DistributionManager) ) {
+          // sqlfire will have already replayed DDL and recovered.
+          // Admin systems don't carry a cache, but for others we can now create
+          // a cache
+          if (((DistributionManager)newDM).getDMType() != DistributionManager.ADMIN_ONLY_DM_TYPE) {
+            try {
+              CacheConfig config = new CacheConfig();
+              if (cacheXML != null) {
+                config.setCacheXMLDescription(cacheXML);
+              }
+              cache = GemFireCacheImpl.create(this.reconnectDS, config);
+
+              createAndStartCacheServers(cacheServerCreation, cache);
+
+              if (cache.getCachePerfStats().getReliableRegionsMissing() == 0){
+                reconnectAttemptCounter = 0;
+              }
+              else {
+                // this try failed. The new cache will call reconnect again
+              }
+            }
+            catch (CancelException ignor) {
+              logger.warn("Exception occured while trying to create the cache during reconnect",ignor);
+              reconnectDS.disconnect();
+              reconnectDS = null;
+            }
+            catch (Exception e) {
+              logger.warn(LocalizedMessage.create(LocalizedStrings.InternalDistributedSystem_EXCEPTION_OCCURED_WHILE_TRYING_TO_CREATE_THE_CACHE_DURING_RECONNECT), e);
+            }
+          }
+        }
+
+        if (reconnectDS != null && reconnectDS.isConnected()) {
+          // make sure the new DS and cache are stable before exiting this loop
+          try {
+            Thread.sleep(config.getMemberTimeout() * 3);
+          } catch (InterruptedException e) {
+            logger.info("Reconnect thread has been interrupted - exiting");
+            Thread.currentThread().interrupt();
+            return;
+          }
+        }
+
       } // while()
+
+      if (isReconnectCancelled()) {
+        reconnectDS.disconnect();
+      } else {
+        reconnectDS.isReconnectingDS = false;
+        notifyReconnectListeners(this, this.reconnectDS, false);
+      }
+
     } finally {
       systemAttemptingReconnect = null;
+      attemptingToReconnect = false;
       if (appendToLogFile == null) {
         System.getProperties().remove(APPEND_TO_LOG_FILE);
       } else {
@@ -2783,59 +2829,18 @@ public class InternalDistributedSystem
         mbrMgr.releaseQuorumChecker(quorumChecker);
       }
     }
-    
-    boolean cancelled;
-    synchronized(this.reconnectCancelledLock) { 
-      cancelled = this.reconnectCancelled;
-    }
-    if (cancelled) {
-      if (isDebugEnabled) {
-        logger.debug("reconnect can no longer be done because of an explicit disconnect");
-      }
+
+    if (isReconnectCancelled()) {
+      logger.debug("reconnect can no longer be done because of an explicit disconnect");
       if (reconnectDS != null) {
         reconnectDS.disconnect();
       }
       attemptingToReconnect = false;
       return;
+    } else {
+      logger.info("Reconnect completed.\nNew DistributedSystem is {}\nNew Cache is {}", reconnectDS, cache);
     }
 
-    try {
-      DM newDM = this.reconnectDS.getDistributionManager();
-      if ( !inhibitCacheForSQLFire && (newDM instanceof DistributionManager) ) {
-        // sqlfire will have already replayed DDL and recovered.
-        // Admin systems don't carry a cache, but for others we can now create
-        // a cache
-        if (((DistributionManager)newDM).getDMType() != DistributionManager.ADMIN_ONLY_DM_TYPE) {
-          try {
-            CacheConfig config = new CacheConfig();
-            if (cacheXML != null) {
-              config.setCacheXMLDescription(cacheXML);
-            }
-            cache = GemFireCacheImpl.create(this.reconnectDS, config);
-            
-            createAndStartCacheServers(cacheServerCreation, cache);
-
-            if (cache.getCachePerfStats().getReliableRegionsMissing() == 0){
-              reconnectAttemptCounter = 0;
-              logger.info("Reconnected properly");
-            }
-            else {
-              // this try failed. The new cache will call reconnect again
-            }
-          }
-          catch (CancelException ignor) {
-              //getLogWriter().warning("Exception occured while trying to create the cache during reconnect : "+ignor.toString());
-              throw ignor;
-              // this.reconnectDS.reconnect();
-          }
-          catch (Exception e) {
-            logger.warn(LocalizedMessage.create(LocalizedStrings.InternalDistributedSystem_EXCEPTION_OCCURED_WHILE_TRYING_TO_CREATE_THE_CACHE_DURING_RECONNECT), e);
-          }
-        }
-      }
-    } finally {
-      attemptingToReconnect = false;
-    }
   }
 
 
@@ -3017,11 +3022,8 @@ public class InternalDistributedSystem
     }
     synchronized(this.reconnectLock) {
       InternalDistributedSystem recon = this.reconnectDS;
-//      (new ManagerLogWriter(LogWriterImpl.FINE_LEVEL, System.out)).fine("IDS.waitUntilReconnected: reconnectCancelled = "+reconnectCancelled
-//          +"; reconnectDS="+reconnectDS);
 
-          
-      while (attemptingToReconnect && (recon == null || !recon.isConnected())) {
+      while (isReconnecting()) {
         synchronized(this.reconnectCancelledLock) {
           if (this.reconnectCancelled) {
             break;
@@ -3030,16 +3032,12 @@ public class InternalDistributedSystem
         if (time != 0) {
           this.reconnectLock.wait(sleepTime);
         }
-        if (recon == null) {
-          recon = this.reconnectDS;
-        }
         if (time == 0  ||  System.currentTimeMillis() > endTime) {
-//          (new ManagerLogWriter(LogWriterImpl.FINE_LEVEL, System.out)).fine("IDS.waitUntilReconnected timed out");
           break;
         }
       }
-//      (new ManagerLogWriter(LogWriterImpl.FINE_LEVEL, System.out)).fine("IDS.waitUntilReconnected finished & returning: attemptingToReconnect="
-//                +attemptingToReconnect+"; reconnectDS=" + recon);
+
+      recon = this.reconnectDS;
       return !attemptingToReconnect  &&  recon != null  &&  recon.isConnected();
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b893abe0/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedRegion.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedRegion.java
old mode 100644
new mode 100755
index 226d914..cc86e2c
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedRegion.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedRegion.java
@@ -944,8 +944,10 @@ public class DistributedRegion extends LocalRegion implements
   protected boolean lostReliability(final InternalDistributedMember id,
       final Set newlyMissingRoles)
   {
-    if (DistributedRegion.ignoreReconnect)
+    if (DistributedRegion.ignoreReconnect) { // test hook
       return false;
+    }
+
     boolean async = false;
     try {
       if (getMembershipAttributes().getLossAction().isReconnect()) {
@@ -998,12 +1000,11 @@ public class DistributedRegion extends LocalRegion implements
           public void run()
           {
             try {
-              // TODO: may need to check isReconnecting and checkReadiness...
-              if (logger.isDebugEnabled()) {
-                logger.debug("Reliability loss with policy of reconnect and membership thread doing reconnect");
-              }
+              logger.debug("Reliability loss with policy of reconnect and membership thread doing reconnect");
+
               initializationLatchAfterMemberTimeout.await();
               getSystem().tryReconnect(false, "Role Loss", getCache());
+
               synchronized (missingRequiredRoles) {
                 // any number of threads may be waiting on missingRequiredRoles
                 missingRequiredRoles.notifyAll();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b893abe0/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
index c477466..96b7bbc 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
@@ -337,6 +337,7 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
 
   private final ConcurrentMap pathToRegion = new ConcurrentHashMap();
 
+  protected volatile boolean isInitialized = false;
   protected volatile boolean isClosing = false;
   protected volatile boolean closingGatewaySendersByShutdownAll = false;
   protected volatile boolean closingGatewayReceiversByShutdownAll = false;
@@ -1187,6 +1188,7 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
         DEFAULT_CLIENT_FUNCTION_TIMEOUT);
     clientFunctionTimeout = time >= 0 ? time : DEFAULT_CLIENT_FUNCTION_TIMEOUT;
 
+    isInitialized = true;
   }
 
   /**
@@ -2344,7 +2346,15 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
 
   // see Cache.waitUntilReconnected(long, TimeUnit)
   public boolean waitUntilReconnected(long time, TimeUnit units) throws InterruptedException {
-    return this.system.waitUntilReconnected(time,  units);
+    boolean systemReconnected = this.system.waitUntilReconnected(time,  units);
+    if (!systemReconnected) {
+      return false;
+    }
+    GemFireCacheImpl cache = getInstance();
+    if (cache == null || !cache.isInitialized()) {
+      return false;
+    }
+    return true;
   }
   
   // see Cache.stopReconnecting()
@@ -2354,8 +2364,8 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
   
   // see Cache.getReconnectedCache()
   public Cache getReconnectedCache() {
-    Cache c = GemFireCacheImpl.getInstance();
-    if (c == this) {
+    GemFireCacheImpl c = GemFireCacheImpl.getInstance();
+    if (c == this || !c.isInitialized()) {
       c = null;
     }
     return c;
@@ -3502,6 +3512,10 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
     }
   }
 
+  public boolean isInitialized() {
+    return this.isInitialized;
+  }
+
   public boolean isClosed() {
     return this.isClosing;
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b893abe0/geode-core/src/test/java/com/gemstone/gemfire/cache30/ReconnectDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache30/ReconnectDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache30/ReconnectDUnitTest.java
old mode 100644
new mode 100755
index a4ba33d..fdbc96c
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache30/ReconnectDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache30/ReconnectDUnitTest.java
@@ -16,31 +16,10 @@
  */
 package com.gemstone.gemfire.cache30;
 
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.util.Iterator;
-import java.util.Properties;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-
 import com.gemstone.gemfire.CancelException;
 import com.gemstone.gemfire.SystemFailure;
-import com.gemstone.gemfire.cache.AttributesFactory;
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheException;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.DataPolicy;
-import com.gemstone.gemfire.cache.LossAction;
-import com.gemstone.gemfire.cache.MembershipAttributes;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionAttributes;
-import com.gemstone.gemfire.cache.RegionDestroyedException;
-import com.gemstone.gemfire.cache.RegionExistsException;
-import com.gemstone.gemfire.cache.ResumptionAction;
-import com.gemstone.gemfire.cache.Scope;
-import com.gemstone.gemfire.cache.TimeoutException;
+import com.gemstone.gemfire.cache.*;
+import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
 import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.Locator;
@@ -53,21 +32,18 @@ import com.gemstone.gemfire.distributed.internal.membership.gms.MembershipManage
 import com.gemstone.gemfire.distributed.internal.membership.gms.mgr.GMSMembershipManager;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.xmlcache.CacheXmlGenerator;
-import com.gemstone.gemfire.test.dunit.Assert;
-import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
-import com.gemstone.gemfire.test.dunit.Host;
-import com.gemstone.gemfire.test.dunit.IgnoredException;
-import com.gemstone.gemfire.test.dunit.Invoke;
-import com.gemstone.gemfire.test.dunit.LogWriterUtils;
-import com.gemstone.gemfire.test.dunit.SerializableCallable;
-import com.gemstone.gemfire.test.dunit.SerializableRunnable;
-import com.gemstone.gemfire.test.dunit.ThreadUtils;
-import com.gemstone.gemfire.test.dunit.VM;
-import com.gemstone.gemfire.test.dunit.Wait;
-import com.gemstone.gemfire.test.dunit.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.*;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.Iterator;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
 
 @SuppressWarnings("serial")
 public class ReconnectDUnitTest extends CacheTestCase
@@ -75,19 +51,21 @@ public class ReconnectDUnitTest extends CacheTestCase
   static int locatorPort;
   static Locator locator;
   static DistributedSystem savedSystem;
+  static GemFireCacheImpl savedCache;
   static int locatorVMNumber = 3;
   static Thread gfshThread;
   
-  Properties dsProperties;
-  
+  static Properties dsProperties;
+  static String fileSeparator = File.separator;
+
   public ReconnectDUnitTest(String name) {
     super(name);
   }
   
   @Override
   public final void postSetUp() throws Exception {
-    this.locatorPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final int locPort = this.locatorPort;
+    locatorPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
+    final int locPort = locatorPort;
     Host.getHost(0).getVM(locatorVMNumber)
       .invoke(new SerializableRunnable("start locator") {
       public void run() {
@@ -96,6 +74,7 @@ public class ReconnectDUnitTest extends CacheTestCase
           if (ds != null) {
             ds.disconnect();
           }
+          dsProperties = null;
           locatorPort = locPort;
           Properties props = getDistributedSystemProperties();
           locator = Locator.startLocatorAndDS(locatorPort, new File(""), props);
@@ -107,6 +86,16 @@ public class ReconnectDUnitTest extends CacheTestCase
       }
     });
 
+    SerializableRunnable setDistributedSystemProperties = new SerializableRunnable("set distributed system properties") {
+      public void run() {
+        dsProperties = null;
+        locatorPort = locPort;
+        getDistributedSystemProperties();
+      }
+    };
+    setDistributedSystemProperties.run();
+    Invoke.invokeInEveryVM(setDistributedSystemProperties);
+
     beginCacheXml();
     createRegion("myRegion", createAtts());
     finishCacheXml("MyDisconnect");
@@ -119,11 +108,12 @@ public class ReconnectDUnitTest extends CacheTestCase
   @Override
   public Properties getDistributedSystemProperties() {
     if (dsProperties == null) {
-      dsProperties = super.getDistributedSystemProperties();
+      dsProperties = new Properties();
       dsProperties.put(DistributionConfig.MAX_WAIT_TIME_FOR_RECONNECT_NAME, "20000");
       dsProperties.put(DistributionConfig.ENABLE_NETWORK_PARTITION_DETECTION_NAME, "true");
       dsProperties.put(DistributionConfig.DISABLE_AUTO_RECONNECT_NAME, "false");
-      dsProperties.put(DistributionConfig.LOCATORS_NAME, "localHost["+this.locatorPort+"]");
+      dsProperties.put(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
+      dsProperties.put(DistributionConfig.LOCATORS_NAME, "localHost["+locatorPort+"]");
       dsProperties.put(DistributionConfig.MCAST_PORT_NAME, "0");
       dsProperties.put(DistributionConfig.MEMBER_TIMEOUT_NAME, "1000");
       dsProperties.put(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
@@ -133,23 +123,25 @@ public class ReconnectDUnitTest extends CacheTestCase
   
   @Override
   public final void postTearDownCacheTestCase() throws Exception {
-    try {
-      Host.getHost(0).getVM(locatorVMNumber).invoke(new SerializableRunnable("stop locator") {
-        public void run() {
-          if (locator != null) {
-            LogWriterUtils.getLogWriter().info("stopping locator " + locator);
-            locator.stop();
-          }
+    System.out.println("entering postTearDownCacheTestCase");
+    SerializableRunnable disconnect = new SerializableRunnable("disconnect and clean up") {
+      public void run() {
+        if (savedSystem != null && savedSystem.isReconnecting()) {
+          savedSystem.stopReconnecting();
         }
-      });
-    } finally {
-      Invoke.invokeInEveryVM(new SerializableRunnable() {
-        public void run() {
-          ReconnectDUnitTest.savedSystem = null;
+        savedSystem = null;
+        savedCache = null;
+        dsProperties = null;
+        locator = null;
+        locatorPort = 0;
+        InternalDistributedSystem ds = InternalDistributedSystem.getAnyInstance();
+        if (ds != null) {
+          ds.disconnect();
         }
-      });
-      disconnectAllFromDS();
-    }
+      }
+    };
+    Invoke.invokeInEveryVM(disconnect);
+    disconnect.run();
   }
 
   /**
@@ -199,9 +191,9 @@ public class ReconnectDUnitTest extends CacheTestCase
         //      DebuggerSupport.waitForJavaDebugger(getLogWriter(), " about to create region");
         locatorPort = locPort;
         Properties props = getDistributedSystemProperties();
-        props.put("cache-xml-file", xmlFileLoc+"/MyDisconnect-cache.xml");
+        props.put("cache-xml-file", xmlFileLoc+ fileSeparator +"MyDisconnect-cache.xml");
         props.put("max-num-reconnect-tries", "2");
-        props.put("log-file", "autoReconnectVM"+VM.getCurrentVMNum()+"_"+getPID()+".log");
+//        props.put("log-file", "autoReconnectVM"+VM.getCurrentVMNum()+"_"+getPID()+".log");
         Cache cache = new CacheFactory(props).create();
         IgnoredException.addIgnoredException("com.gemstone.gemfire.ForcedDisconnectException||Possible loss of quorum");
         Region myRegion = cache.getRegion("root/myRegion");
@@ -249,7 +241,7 @@ public class ReconnectDUnitTest extends CacheTestCase
   /** bug #51335 - customer is also trying to recreate the cache */
   // this test is disabled due to a high failure rate during CI test runs.
   // see bug #52160
-  public void disabledtestReconnectCollidesWithApplication() throws Exception  {
+  public void testReconnectCollidesWithApplication() throws Exception  {
     doTestReconnectOnForcedDisconnect(true);
   }
   
@@ -278,7 +270,7 @@ public class ReconnectDUnitTest extends CacheTestCase
         //      DebuggerSupport.waitForJavaDebugger(getLogWriter(), " about to create region");
         locatorPort = locPort;
         Properties props = getDistributedSystemProperties();
-        props.put("cache-xml-file", xmlFileLoc+"/MyDisconnect-cache.xml");
+        props.put("cache-xml-file", xmlFileLoc+ fileSeparator +"MyDisconnect-cache.xml");
         props.put("max-wait-time-reconnect", "1000");
         props.put("max-num-reconnect-tries", "2");
 //        props.put("log-file", "autoReconnectVM"+VM.getCurrentVMNum()+"_"+getPID()+".log");
@@ -298,7 +290,7 @@ public class ReconnectDUnitTest extends CacheTestCase
         //            DebuggerSupport.waitForJavaDebugger(getLogWriter(), " about to create region");
         locatorPort = locPort;
         final Properties props = getDistributedSystemProperties();
-        props.put("cache-xml-file", xmlFileLoc+"/MyDisconnect-cache.xml");
+        props.put("cache-xml-file", xmlFileLoc+ fileSeparator +"MyDisconnect-cache.xml");
         props.put("max-wait-time-reconnect", "5000");
         props.put("max-num-reconnect-tries", "2");
         props.put("start-locator", "localhost["+secondLocPort+"]");
@@ -336,7 +328,10 @@ public class ReconnectDUnitTest extends CacheTestCase
 
     vm0.invoke(create1);
     DistributedMember dm = (DistributedMember)vm1.invoke(create2);
+
+    IgnoredException.addIgnoredException("ForcedDisconnectException");
     forceDisconnect(vm1);
+
     DistributedMember newdm = (DistributedMember)vm1.invoke(new SerializableCallable("wait for reconnect(1)") {
       public Object call() {
         final DistributedSystem ds = ReconnectDUnitTest.savedSystem;
@@ -393,6 +388,7 @@ public class ReconnectDUnitTest extends CacheTestCase
         DistributedSystem newDs = InternalDistributedSystem.getAnyInstance();
         if (newDs != null) {
           LogWriterUtils.getLogWriter().warning("expected distributed system to be disconnected: " + newDs);
+          newDs.disconnect();
           return false;
         }
         return true;
@@ -499,7 +495,7 @@ public class ReconnectDUnitTest extends CacheTestCase
         //      DebuggerSupport.waitForJavaDebugger(getLogWriter(), " about to create region");
         locatorPort = locPort;
         Properties props = getDistributedSystemProperties();
-        props.put("cache-xml-file", xmlFileLoc+"/MyDisconnect-cache.xml");
+        props.put("cache-xml-file", xmlFileLoc+ fileSeparator +"MyDisconnect-cache.xml");
         props.put("max-wait-time-reconnect", "1000");
         props.put("max-num-reconnect-tries", "2");
         ReconnectDUnitTest.savedSystem = getSystem(props);
@@ -664,17 +660,17 @@ public class ReconnectDUnitTest extends CacheTestCase
 
     SerializableRunnable roleLoss = new CacheSerializableRunnable(
         "ROLERECONNECTTESTS") {
-      public void run2() throws CacheException, RuntimeException
+      public void run2() throws RuntimeException
       {
         LogWriterUtils.getLogWriter().info("####### STARTING THE REAL TEST ##########");
         locatorPort = locPort;
         Properties props = getDistributedSystemProperties();
-        props.put("cache-xml-file", xmlFileLoc+File.separator+"RoleReconnect-cache.xml");
+        props.put("cache-xml-file", xmlFileLoc+ fileSeparator +"RoleReconnect-cache.xml");
         props.put("max-wait-time-reconnect", "200");
         final int timeReconnect = 3;
         props.put("max-num-reconnect-tries", "3");
         props.put(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
-        props.put("log-file", "roleLossVM0.log");
+//        props.put("log-file", "roleLossVM0.log");
 
         getSystem(props);
 
@@ -693,7 +689,7 @@ public class ReconnectDUnitTest extends CacheTestCase
           basicGetSystem().getLogWriter().info("<ExpectedException action=remove>"
               + "CacheClosedException" + "</ExpectedException");
         }
-        LogWriterUtils.getLogWriter().fine("roleLoss Sleeping SO call dumprun.sh");
+
         WaitCriterion ev = new WaitCriterion() {
           public boolean done() {
             return reconnectTries >= timeReconnect;
@@ -821,10 +817,7 @@ public class ReconnectDUnitTest extends CacheTestCase
           return true;
         }
         Object res = vm0.invoke(() -> ReconnectDUnitTest.reconnectTries());
-        if (((Integer)res).intValue() != 0) {
-          return true;
-        }
-        return false;
+        return ((Integer) res).intValue() != 0;
       }
       public String description() {
         return "waiting for event";
@@ -883,7 +876,7 @@ public class ReconnectDUnitTest extends CacheTestCase
           LogWriterUtils.getLogWriter().info(startupMessage);
           WaitCriterion ev = new WaitCriterion() {
             public boolean done() {
-              return ((Boolean)otherVM.invoke(() -> ReconnectDUnitTest.isInitialRolePlayerStarted())).booleanValue();
+              return otherVM.invoke(() -> ReconnectDUnitTest.isInitialRolePlayerStarted()).booleanValue();
             }
             public String description() {
               return null;
@@ -930,7 +923,7 @@ public class ReconnectDUnitTest extends CacheTestCase
           ev = new WaitCriterion() {
             String excuse;
             public boolean done() {
-              if (InternalDistributedSystem.getReconnectCount() != 0) {
+              if (InternalDistributedSystem.getReconnectAttemptCounter() != 0) {
                 excuse = "reconnectCount is " + reconnectTries
                     + " waiting for it to be zero";
                 return false;
@@ -1023,6 +1016,67 @@ public class ReconnectDUnitTest extends CacheTestCase
     }; // roleloss runnable
   }
 
+  /**
+   * auto-reconnect was found to stop attempting to reconnect and rebuild
+   * the cache if another forced-disconnect was triggered after reconnect
+   * but before cache creation was completed.  This test uses a region
+   * listener to crash the reconnecting distributed system during cache
+   * creation and asserts that it then reconnects and rebuilds the cache.
+   */
+  public void testReconnectFailsInCacheCreation() throws Exception {
+
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+
+    final int locPort = locatorPort;
+
+    final String xmlFileLoc = (new File(".")).getAbsolutePath();
+
+    SerializableRunnable createCache = new SerializableRunnable(
+            "Create Cache and Regions") {
+      public void run()  {
+        locatorPort = locPort;
+        final Properties props = getDistributedSystemProperties();
+        props.put("max-wait-time-reconnect", "1000");
+//        props.put("log-file", "");
+        dsProperties = props;
+        ReconnectDUnitTest.savedSystem = getSystem(props);
+        ReconnectDUnitTest.savedCache = (GemFireCacheImpl)getCache();
+        Region myRegion = createRegion("myRegion", createAtts());
+        myRegion.put("MyKey", "MyValue");
+        myRegion.getAttributesMutator().addCacheListener(new CacheKillingListener());
+      }
+    };
+
+    vm0.invoke(createCache);  // vm0 keeps the locator from losing quorum when vm1 crashes
+
+    vm1.invoke(createCache);
+    IgnoredException.addIgnoredException("DistributedSystemDisconnectedException|ForcedDisconnectException", vm1);
+    forceDisconnect(vm1);
+
+    vm1.invoke(new SerializableRunnable("wait for reconnect") {
+      public void run() {
+        final GemFireCacheImpl cache = ReconnectDUnitTest.savedCache;
+        Wait.waitForCriterion(new WaitCriterion() {
+          public boolean done() {
+            return cache.isReconnecting();
+          }
+          public String description() {
+            return "waiting for cache to begin reconnecting";
+          }
+        }, 30000, 100, true);
+        System.out.println("entering reconnect wait for " + cache);
+        try {
+          cache.waitUntilReconnected(20, TimeUnit.SECONDS);
+        } catch (InterruptedException e) {
+          fail("interrupted");
+        }
+        assertNotNull(cache.getReconnectedCache());
+      }
+    });
+  }
+
   private CacheSerializableRunnable getRoleAPlayerRunnable(
       final int locPort, final String regionName, final String myKey, final String myValue,
       final String startupMessage) {
@@ -1099,7 +1153,7 @@ public class ReconnectDUnitTest extends CacheTestCase
         LogWriterUtils.getLogWriter().info("STARTED THE REQUIREDROLES CACHE");
         initialRolePlayerStarted = true;
 
-        while(!((Boolean)otherVM.invoke(() -> ReconnectDUnitTest.isInitialized())).booleanValue()){
+        while(!otherVM.invoke(() -> ReconnectDUnitTest.isInitialized()).booleanValue()){
           try{
             Thread.sleep(15);
           }catch(InterruptedException ignor){
@@ -1169,4 +1223,35 @@ public class ReconnectDUnitTest extends CacheTestCase
     return 0;
   }
 
+  /**
+   * CacheKillingListener crashes the distributed system when it is invoked
+   * for the first time.  After that it ignores any notifications.
+   */
+  public static class CacheKillingListener extends CacheListenerAdapter implements Declarable {
+    public static int crashCount = 0;
+
+    @Override
+    public void afterRegionCreate(final RegionEvent event) {
+      if (crashCount == 0) {
+        crashCount += 1;
+        // we crash the system in a different thread than the ReconnectThread
+        // to simulate receiving a ForcedDisconnect from the membership manager
+        // in the UDP reader thread
+        Thread t = new Thread("crash reconnecting system (ReconnectDUnitTest)") {
+          public void run() {
+            System.out.println("crashing distributed system");
+            GemFireCacheImpl cache = (GemFireCacheImpl)event.getRegion().getCache();
+            MembershipManagerHelper.crashDistributedSystem(cache.getDistributedSystem());
+          }
+        };
+        t.setDaemon(true);
+        t.start();
+      }
+    }
+
+    @Override
+    public void init(Properties props) {
+    }
+
+  }
 }



[13/63] [abbrv] incubator-geode git commit: GEODE-1072: Removing HDFS related code

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/PersistedEventImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/PersistedEventImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/PersistedEventImpl.java
deleted file mode 100644
index 82e2bf9..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/PersistedEventImpl.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import com.gemstone.gemfire.DataSerializer;
-import com.gemstone.gemfire.cache.Operation;
-import com.gemstone.gemfire.internal.DataSerializableFixedID;
-import com.gemstone.gemfire.internal.cache.CachedDeserializable;
-import com.gemstone.gemfire.internal.cache.CachedDeserializableFactory;
-import com.gemstone.gemfire.internal.cache.EntryEventImpl;
-import com.gemstone.gemfire.internal.cache.lru.Sizeable;
-import com.gemstone.gemfire.internal.cache.versions.VersionTag;
-import com.gemstone.gemfire.internal.Version;
-
-/**
- * Event that is persisted in HDFS. As we need to persist some of the EntryEventImpl
- * variables, we have created this class and have overridden toData and fromData functions.  
- * 
- *  There are subclasses of this class of the different types of persisted events
- *  sorted vs. unsorted, and the persisted events we keep in the region
- *  queue, which need to hold the region key.
- *   
- *
- */
-public abstract class PersistedEventImpl {
-  protected Operation op = Operation.UPDATE;
-  
-  protected Object valueObject;
-
-  /**
-   * A field with flags decribing the event
-   */
-  protected byte flags;
-
-   //FLags indicating the type of value
-   //if the value is not a byte array or object, is is an internal delta.
-  private static final byte VALUE_IS_BYTE_ARRAY= 0x01;
-  private static final byte VALUE_IS_OBJECT= (VALUE_IS_BYTE_ARRAY << 1);
-  private static final byte POSSIBLE_DUPLICATE = (VALUE_IS_OBJECT << 1);
-  private static final byte HAS_VERSION_TAG = (POSSIBLE_DUPLICATE << 1);
-  
-
-  /** for deserialization */
-  public PersistedEventImpl() {
-  }
-  
-  public PersistedEventImpl(Object value, Operation op, byte valueIsObject,
-      boolean isPossibleDuplicate, boolean hasVersionTag) throws IOException,
-      ClassNotFoundException {
-    this.op = op;
-    this.valueObject = value;
-    setFlag(VALUE_IS_BYTE_ARRAY, valueIsObject == 0x00);
-    setFlag(VALUE_IS_OBJECT, valueIsObject == 0x01);
-    setFlag(POSSIBLE_DUPLICATE, isPossibleDuplicate);
-    setFlag(HAS_VERSION_TAG, hasVersionTag);
-  }
-  
-  private void setFlag(byte flag, boolean set) {
-    flags = (byte) (set ?  flags | flag :  flags & ~flag);
-  }
-  
-  private boolean getFlag(byte flag) {
-    return (flags & flag) != 0x0;
-  }
-
-  public void toData(DataOutput out) throws IOException {
-    out.writeByte(this.op.ordinal);
-    out.writeByte(this.flags);
-    
-    if (getFlag(VALUE_IS_BYTE_ARRAY)) { 
-      DataSerializer.writeByteArray((byte[])this.valueObject, out);
-    } else if (getFlag(VALUE_IS_OBJECT)) {
-      if(valueObject instanceof CachedDeserializable) {
-        CachedDeserializable cd = (CachedDeserializable)valueObject;
-        DataSerializer.writeObjectAsByteArray(cd.getValue(), out);
-      } else {
-        DataSerializer.writeObjectAsByteArray(valueObject, out);
-      }
-    }
-    else {
-      DataSerializer.writeObject(valueObject, out);
-    }
-  }
-
-  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
-    this.op = Operation.fromOrdinal(in.readByte());
-    this.flags = in.readByte();
-    
-    if (getFlag(VALUE_IS_BYTE_ARRAY)) { 
-      this.valueObject = DataSerializer.readByteArray(in);
-    } else if (getFlag(VALUE_IS_OBJECT)) {
-      byte[] newValueBytes = DataSerializer.readByteArray(in);
-      if(newValueBytes == null) {
-        this.valueObject = null;
-      } else {
-        if(CachedDeserializableFactory.preferObject()) {
-          this.valueObject =  EntryEventImpl.deserialize(newValueBytes);
-        } else {
-          this.valueObject = CachedDeserializableFactory.create(newValueBytes);
-        }
-      }
-    }
-    else {
-      this.valueObject = DataSerializer.readObject(in);
-    }
-    
-  }
-  
-  /**
-   * Return the timestamp of this event. Depending on the subclass,
-   * this may be part of the version tag, or a separate field.
-   */
-  public abstract long getTimstamp();
-
-  protected boolean hasVersionTag() {
-    return getFlag(HAS_VERSION_TAG);
-  }
-
-  public Operation getOperation()
-  {
-    return this.op;
-  }
-  
-  public Object getValue() {
-    return this.valueObject;
-  }
-  
-  public boolean isPossibleDuplicate()
-  {
-    return getFlag(POSSIBLE_DUPLICATE);
-  }
-
-  /**
-   * returns the deserialized value. 
-   * 
-   */
-  public Object getDeserializedValue() throws IOException, ClassNotFoundException {
-    Object retVal = null;
-    if (getFlag(VALUE_IS_BYTE_ARRAY)) { 
-      // value is a byte array
-      retVal = this.valueObject;
-    } else if (getFlag(VALUE_IS_OBJECT)) {
-      if(valueObject instanceof CachedDeserializable) {
-        retVal = ((CachedDeserializable)valueObject).getDeserializedForReading();
-      } else {
-        retVal = valueObject;
-      }
-    }
-    else {
-      // value is an object
-      retVal = this.valueObject;
-    }
-    return retVal;
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder str = new StringBuilder(PersistedEventImpl.class.getSimpleName());
-    str.append("@").append(System.identityHashCode(this))
-    .append(" op:").append(op)
-    .append(" valueObject:").append(valueObject)
-    .append(" isPossibleDuplicate:").append(getFlag(POSSIBLE_DUPLICATE));
-    return str.toString();
-  }
-
-  public void copy(PersistedEventImpl usersValue) {
-    this.op = usersValue.op;
-    this.valueObject = usersValue.valueObject;
-    this.flags = usersValue.flags;
-  }
-  
-  public static int getSizeInBytes(int keySize, int valueSize, VersionTag versionTag) {
-    int size = 0;
-    
-    // value length
-    size += valueSize; 
-
-    // one byte for op and one byte for flag
-    size += 2;
-    
-    return size;
-  }
-}
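
For reference, the byte-flag bookkeeping used by the removed PersistedEventImpl (shifted flag
constants plus bitwise set/get helpers) reduces to the standalone sketch below. The class and
method names here are illustrative only, not Geode APIs.

// Minimal sketch of the byte-flag pattern; illustrative, not part of this commit.
public class FlagByteDemo {
  private static final byte VALUE_IS_BYTE_ARRAY = 0x01;
  private static final byte VALUE_IS_OBJECT     = (VALUE_IS_BYTE_ARRAY << 1);
  private static final byte POSSIBLE_DUPLICATE  = (VALUE_IS_OBJECT << 1);

  private byte flags;

  // Set or clear a single flag bit without disturbing the others.
  private void setFlag(byte flag, boolean set) {
    flags = (byte) (set ? flags | flag : flags & ~flag);
  }

  // A flag is "on" if its bit survives the mask.
  private boolean getFlag(byte flag) {
    return (flags & flag) != 0;
  }

  public static void main(String[] args) {
    FlagByteDemo d = new FlagByteDemo();
    d.setFlag(VALUE_IS_OBJECT, true);
    d.setFlag(POSSIBLE_DUPLICATE, true);
    d.setFlag(POSSIBLE_DUPLICATE, false);
    System.out.println(d.getFlag(VALUE_IS_OBJECT));    // true
    System.out.println(d.getFlag(POSSIBLE_DUPLICATE)); // false
  }
}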

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/QueuedPersistentEvent.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/QueuedPersistentEvent.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/QueuedPersistentEvent.java
deleted file mode 100644
index bd7994c..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/QueuedPersistentEvent.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.DataOutput;
-import java.io.IOException;
-
-public interface QueuedPersistentEvent {
-  
-  public byte[] getRawKey();
-  
-  public void toHoplogEventBytes(DataOutput out) throws IOException;
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserver.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserver.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserver.java
deleted file mode 100644
index b97bdb7..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserver.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.ListIterator;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- * Tracks flushes using a queue of latches.
- * 
- */
-public class SignalledFlushObserver implements FlushObserver {
-  private static class FlushLatch extends CountDownLatch {
-    private final long seqnum;
-    
-    public FlushLatch(long seqnum) {
-      super(1);
-      this.seqnum = seqnum;
-    }
-    
-    public long getSequence() {
-      return seqnum;
-    }
-  }
-  
-  // assume the number of outstanding flush requests is small so we don't
-  // need to organize by seqnum
-  private final List<FlushLatch> signals;
-  
-  private final AtomicLong eventsReceived;
-  private final AtomicLong eventsDelivered;
-  
-  public SignalledFlushObserver() {
-    signals = new ArrayList<FlushLatch>();
-    eventsReceived = new AtomicLong(0);
-    eventsDelivered = new AtomicLong(0);
-  }
-  
-  @Override
-  public boolean shouldDrainImmediately() {
-    synchronized (signals) {
-      return !signals.isEmpty();
-    }
-  }
-  
-  @Override
-  public AsyncFlushResult flush() {
-    final long seqnum = eventsReceived.get();
-    synchronized (signals) {
-      final FlushLatch flush;
-      if (seqnum <= eventsDelivered.get()) {
-        flush = null;
-      } else {
-        flush = new FlushLatch(seqnum);
-        signals.add(flush);
-      }
-      
-      return new AsyncFlushResult() {
-        @Override
-        public boolean waitForFlush(long timeout, TimeUnit unit) throws InterruptedException {
-          return flush == null ? true : flush.await(timeout, unit);
-        }
-      };
-    }
-  }
-
-  /**
-   * Invoked when an event is received.
-   */
-  public void push() {
-    eventsReceived.incrementAndGet();
-  }
-
-  /**
-   * Invoked when a batch has been dispatched.
-   */
-  public void pop(int count) {
-    long highmark = eventsDelivered.addAndGet(count);
-    synchronized (signals) {
-      for (ListIterator<FlushLatch> iter = signals.listIterator(); iter.hasNext(); ) {
-        FlushLatch flush = iter.next();
-        if (flush.getSequence() <= highmark) {
-          flush.countDown();
-          iter.remove();
-        }
-      }
-    }
-  }
-  
-  /**
-   * Invoked when the queue is cleared.
-   */
-  public void clear() {
-    synchronized (signals) {
-      for (FlushLatch flush : signals) {
-        flush.countDown();
-      }
-
-      signals.clear();
-      eventsReceived.set(0);
-      eventsDelivered.set(0);
-    }
-  }
-}
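
The removed SignalledFlushObserver resolves each flush() call by registering a CountDownLatch
tagged with the event sequence number it must wait for; delivering a batch releases every latch
whose sequence has been reached. A simplified, self-contained sketch of that idea follows; the
names are hypothetical and it omits the Geode FlushObserver interface.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

// Simplified sketch of the latch-per-flush pattern; not the removed Geode class.
public class FlushTrackerDemo {
  private static final class FlushLatch extends CountDownLatch {
    final long seq;
    FlushLatch(long seq) { super(1); this.seq = seq; }
  }

  private final List<FlushLatch> waiters = new ArrayList<>();
  private final AtomicLong received = new AtomicLong();
  private final AtomicLong delivered = new AtomicLong();

  // Called when an event is queued.
  public void push() { received.incrementAndGet(); }

  // Called when a batch has been dispatched; releases every satisfied waiter.
  public void pop(int count) {
    long highMark = delivered.addAndGet(count);
    synchronized (waiters) {
      for (Iterator<FlushLatch> it = waiters.iterator(); it.hasNext(); ) {
        FlushLatch f = it.next();
        if (f.seq <= highMark) { f.countDown(); it.remove(); }
      }
    }
  }

  // Returns a latch that opens once everything received so far has been delivered.
  public CountDownLatch flush() {
    long target = received.get();
    synchronized (waiters) {
      if (target <= delivered.get()) return new CountDownLatch(0); // already flushed
      FlushLatch f = new FlushLatch(target);
      waiters.add(f);
      return f;
    }
  }

  public static void main(String[] args) throws InterruptedException {
    FlushTrackerDemo t = new FlushTrackerDemo();
    t.push(); t.push();
    CountDownLatch done = t.flush();
    t.pop(2);
    System.out.println(done.await(1, TimeUnit.SECONDS)); // true
  }
}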

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SortedHDFSQueuePersistedEvent.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SortedHDFSQueuePersistedEvent.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SortedHDFSQueuePersistedEvent.java
deleted file mode 100644
index c725ce5..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SortedHDFSQueuePersistedEvent.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import com.gemstone.gemfire.DataSerializer;
-import com.gemstone.gemfire.cache.Operation;
-import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
-import com.gemstone.gemfire.internal.DataSerializableFixedID;
-import com.gemstone.gemfire.internal.cache.versions.VersionTag;
-import com.gemstone.gemfire.internal.Version;
-
-/**
- * A persistent event that is stored in the hoplog queue. This class is only used
- * temporarily to copy the data from the HDFSGatewayEventImpl to the persisted
- * record in the file.
- * 
- *
- */
-public class SortedHDFSQueuePersistedEvent extends SortedHoplogPersistedEvent implements QueuedPersistentEvent {
-  
-  
-  /**key stored in serialized form*/
-  protected byte[] keyBytes = null;
-  
-  public SortedHDFSQueuePersistedEvent(HDFSGatewayEventImpl in) throws IOException,
-  ClassNotFoundException {
-    this(in.getSerializedValue(), in.getOperation(), in.getValueIsObject(), in
-        .getPossibleDuplicate(), in.getVersionTag(), in.getSerializedKey(), in
-        .getCreationTime());
-  }
-
-  public SortedHDFSQueuePersistedEvent(Object valueObject, Operation operation,
-      byte valueIsObject, boolean possibleDuplicate, VersionTag versionTag,
-      byte[] serializedKey, long timestamp) throws ClassNotFoundException, IOException {
-    super(valueObject, operation, valueIsObject, possibleDuplicate, versionTag, timestamp);
-    this.keyBytes = serializedKey;
-    // TODO Auto-generated constructor stub
-  }
-
-  @Override
-  public void toData(DataOutput out) throws IOException {
-    super.toData(out);
-    DataSerializer.writeByteArray(this.keyBytes, out);
-  }
-
-  @Override
-  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
-    super.fromData(in);
-    this.keyBytes = DataSerializer.readByteArray(in);
-  }
-
-  @Override
-  public void toHoplogEventBytes(DataOutput out) throws IOException {
-    super.toData(out);
-  }
-
-  public byte[] getRawKey() {
-    return this.keyBytes;
-  }
-  public static int getSizeInBytes(int keySize, int valueSize, VersionTag versionTag) {
-    
-    int size = SortedHoplogPersistedEvent.getSizeInBytes(keySize, valueSize, versionTag);
-    
-    size += keySize;
-    
-    return size;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SortedHoplogPersistedEvent.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SortedHoplogPersistedEvent.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SortedHoplogPersistedEvent.java
deleted file mode 100644
index e8be7b8..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SortedHoplogPersistedEvent.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import com.gemstone.gemfire.DataSerializer;
-import com.gemstone.gemfire.cache.Operation;
-import com.gemstone.gemfire.internal.ByteArrayDataInput;
-import com.gemstone.gemfire.internal.cache.versions.VersionTag;
-
-/**
- * A persistent event that is stored in a sorted hoplog. In addition
- * to the fields of PersistedEventImpl, this event has a version tag.
- * 
- * This class should only be serialized by directly calling toData,
- * which is why it does not implement DataSerializable
- * 
- */
-public class SortedHoplogPersistedEvent extends PersistedEventImpl {
-  /** version tag for concurrency checks */
-  protected VersionTag versionTag;
-
-  /** timestamp of the event. Used when version checks are disabled*/
-  protected long timestamp;
-
-  public SortedHoplogPersistedEvent(Object valueObject, Operation operation,
-      byte valueIsObject, boolean possibleDuplicate, VersionTag tag, long timestamp) throws ClassNotFoundException, IOException {
-    super(valueObject, operation, valueIsObject, possibleDuplicate, tag != null);
-    this.versionTag = tag;
-    this.timestamp = timestamp;
-  }
-
-  public SortedHoplogPersistedEvent() {
-    //for deserialization
-  }
-
-  @Override
-  public long getTimstamp() {
-    return versionTag == null ? timestamp : versionTag.getVersionTimeStamp();
-  }
-  
-  @Override
-  public void toData(DataOutput out) throws IOException {
-    super.toData(out);
-    if (versionTag == null) {
-      out.writeLong(timestamp);
-    } else {
-      //TODO optimize these
-      DataSerializer.writeObject(this.versionTag, out);
-    }
-  }
-
-  @Override
-  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
-    super.fromData(in);
-    if (hasVersionTag()) {
-      this.versionTag = (VersionTag)DataSerializer.readObject(in);
-    } else {
-      this.timestamp = in.readLong();
-    }
-  }
-  
-  /**
-   * @return the concurrency versioning tag for this event, if any
-   */
-  public VersionTag getVersionTag() {
-    return this.versionTag;
-  }
-  
-  public static SortedHoplogPersistedEvent fromBytes(byte[] val)
-      throws IOException, ClassNotFoundException {
-    ByteArrayDataInput in = new ByteArrayDataInput();
-    in.initialize(val, null);
-    SortedHoplogPersistedEvent event = new SortedHoplogPersistedEvent();
-    event.fromData(in);
-    return event;
-  }
-  
-  public void copy(PersistedEventImpl usersValue) {
-    super.copy(usersValue);
-    this.versionTag = ((SortedHoplogPersistedEvent) usersValue).versionTag;
-    this.timestamp = ((SortedHoplogPersistedEvent) usersValue).timestamp;
-  }
-  
-  public static int getSizeInBytes(int keySize, int valueSize, VersionTag versionTag) {
-    int size = PersistedEventImpl.getSizeInBytes(keySize, valueSize, versionTag);
-    
-    if (versionTag != null) {
-      size +=  versionTag.getSizeInBytes();
-    } else {
-      // size of Timestamp
-      size += 8;
-    }
-    
-    return size;
-  }
-}
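
The toData/fromData pair above writes either a VersionTag or a bare timestamp, with a flag
recorded beforehand so deserialization can mirror the choice. The sketch below shows that
"flag, then one of two payloads" round trip using only java.io streams; it is a hedged
illustration with made-up names, not the removed class, and a long stands in for the VersionTag.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Sketch of the "version tag or plain timestamp" serialization choice; illustrative only.
public class VersionedTimestampDemo {
  static byte[] write(boolean hasVersionTag, long timestamp, long versionStamp) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    out.writeBoolean(hasVersionTag);
    if (hasVersionTag) {
      out.writeLong(versionStamp);   // stand-in for the real VersionTag payload
    } else {
      out.writeLong(timestamp);
    }
    out.flush();
    return bytes.toByteArray();
  }

  static long readTimestamp(byte[] data) throws IOException {
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
    boolean hasVersionTag = in.readBoolean();
    if (hasVersionTag) {
      return in.readLong();          // stand-in for reading the VersionTag's timestamp
    }
    return in.readLong();            // plain timestamp written when no tag was present
  }

  public static void main(String[] args) throws IOException {
    byte[] plain = write(false, 1462400000000L, 0L);
    byte[] versioned = write(true, 0L, 1462400005000L);
    System.out.println(readTimestamp(plain));      // 1462400000000
    System.out.println(readTimestamp(versioned));  // 1462400005000
  }
}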

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/UnsortedHDFSQueuePersistedEvent.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/UnsortedHDFSQueuePersistedEvent.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/UnsortedHDFSQueuePersistedEvent.java
deleted file mode 100644
index 93d596b..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/UnsortedHDFSQueuePersistedEvent.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import com.gemstone.gemfire.DataSerializer;
-import com.gemstone.gemfire.internal.cache.versions.VersionTag;
-
-
-/**
- * A persistent event that is stored in the hoplog queue. This class is only used
- * temporarily to copy the data from the HDFSGatewayEventImpl to the persisted
- * record in the file. 
- * 
- *
- */
-public class UnsortedHDFSQueuePersistedEvent extends UnsortedHoplogPersistedEvent implements QueuedPersistentEvent {
-  
-  /**the bytes of the key for this entry */
-  protected byte[] keyBytes = null;
-  
-  public UnsortedHDFSQueuePersistedEvent(HDFSGatewayEventImpl in) throws IOException,
-  ClassNotFoundException {
-    super(in.getValue(), in.getOperation(), in.getValueIsObject(), in.getPossibleDuplicate(), 
-        in.getVersionTimeStamp() == 0 ? in.getCreationTime() : in.getVersionTimeStamp());
-    this.keyBytes = in.getSerializedKey();
-    
-  }
-
-  @Override
-  public void toData(DataOutput out) throws IOException {
-    super.toData(out);
-    DataSerializer.writeByteArray(this.keyBytes, out);
-  }
-
-  @Override
-  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
-    super.fromData(in);
-    this.keyBytes = DataSerializer.readByteArray(in);
-  }
-  
-  @Override
-  public void toHoplogEventBytes(DataOutput out) throws IOException {
-    super.toData(out);
-  }
-  
-  public byte[] getRawKey() {
-    return this.keyBytes;
-  }
-  
-  public static int getSizeInBytes(int keySize, int valueSize, VersionTag versionTag) {
-    
-    int size = UnsortedHoplogPersistedEvent.getSizeInBytes(keySize, valueSize, versionTag);
-    
-    size += keySize;
-    
-    return size;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/UnsortedHoplogPersistedEvent.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/UnsortedHoplogPersistedEvent.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/UnsortedHoplogPersistedEvent.java
deleted file mode 100644
index 9b9a04d..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/UnsortedHoplogPersistedEvent.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import com.gemstone.gemfire.DataSerializer;
-import com.gemstone.gemfire.cache.Operation;
-import com.gemstone.gemfire.internal.ByteArrayDataInput;
-import com.gemstone.gemfire.internal.cache.versions.VersionTag;
-
-/**
- * A persisted event that is stored in an unsorted (sequential) hoplog. This
- * does not have a version stamp, but just a timestamp for the entry.
- * 
- * This class should only be serialized by calling toData directly, which
- * is why it does not implement DataSerializable.
- * 
- *
- */
-public class UnsortedHoplogPersistedEvent extends PersistedEventImpl {
-  long timestamp;
-  
-  
-
-  public UnsortedHoplogPersistedEvent() {
-    //for deserialization
-  }
-
-  public UnsortedHoplogPersistedEvent(Object value, Operation op,
-      byte valueIsObject, boolean isPossibleDuplicate, long timestamp) throws IOException,
-      ClassNotFoundException {
-    super(value, op, valueIsObject, isPossibleDuplicate, false/*hasVersionTag*/);
-    this.timestamp = timestamp;
-  }
-
-  @Override
-  public long getTimstamp() {
-    return timestamp;
-  }
-
-  @Override
-  public void toData(DataOutput out) throws IOException {
-    super.toData(out);
-    DataSerializer.writeLong(timestamp, out);
-  }
-
-  @Override
-  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
-    super.fromData(in);
-    this.timestamp = DataSerializer.readLong(in);
-  }
-  
-  public static UnsortedHoplogPersistedEvent fromBytes(byte[] val)
-      throws IOException, ClassNotFoundException {
-    ByteArrayDataInput in = new ByteArrayDataInput();
-    in.initialize(val, null);
-    UnsortedHoplogPersistedEvent event = new UnsortedHoplogPersistedEvent();
-    event.fromData(in);
-    return event;
-  }
-  
-  public void copy(PersistedEventImpl usersValue) {
-    super.copy(usersValue);
-    this.timestamp = ((UnsortedHoplogPersistedEvent) usersValue).timestamp;
-  }
-  
-  public static int getSizeInBytes(int keySize, int valueSize, VersionTag versionTag) {
-    int size = PersistedEventImpl.getSizeInBytes(keySize, valueSize, versionTag);
-    
-    // size of Timestamp
-    size += 8;
-    
-    return size;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/AbstractHoplog.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/AbstractHoplog.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/AbstractHoplog.java
deleted file mode 100644
index d2fdbe7..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/AbstractHoplog.java
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.IOException;
-import java.util.regex.Matcher;
-
-import com.gemstone.gemfire.internal.hll.ICardinality;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.BytesWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.compress.CompressionCodec;
-import org.apache.hadoop.io.compress.GzipCodec;
-import org.apache.hadoop.io.compress.Lz4Codec;
-import org.apache.hadoop.io.compress.SnappyCodec;
-
-import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile;
-import com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.CompressionType;
-import com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.internal.Version;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
-import org.apache.hadoop.hbase.util.FSUtils;
-
-import org.apache.logging.log4j.Logger;
-
-/**
- * Abstract class for {@link Hoplog} with common functionality
- */
-public abstract class AbstractHoplog implements Hoplog {
-  protected final FSProvider fsProvider;
-  
-  // path of the oplog file
-  protected volatile Path path;
-  private volatile HoplogDescriptor hfd;
-  protected Configuration conf;
-  protected SortedOplogStatistics stats;
-  protected Long hoplogModificationTime;
-  protected Long hoplogSize;
-
-  protected HoplogReaderActivityListener readerListener;
-  
-  // logger instance
-  protected static final Logger logger = LogService.getLogger();
-  
-  protected static String logPrefix;
-  // THIS CONSTRUCTOR SHOULD BE USED FOR LONER ONLY
-  AbstractHoplog(FileSystem inputFS, Path filePath, SortedOplogStatistics stats)
-      throws IOException {
-    logPrefix = "<" + filePath.getName() + "> ";
-    this.fsProvider = new FSProvider(inputFS);
-    initialize(filePath, stats, inputFS);
-  }
-
-  public AbstractHoplog(HDFSStoreImpl store, Path filePath,
-      SortedOplogStatistics stats) throws IOException {
-    logPrefix = "<" + filePath.getName() + "> ";
-    this.fsProvider = new FSProvider(store);
-    initialize(filePath, stats, store.getFileSystem());
-  }
-
-  private void initialize(Path path, SortedOplogStatistics stats, FileSystem fs) {
-    this.conf = fs.getConf();
-    this.stats = stats;
-    this.path = fs.makeQualified(path);
-    this.hfd = new HoplogDescriptor(this.path.getName());
-  }
-  
-  @Override
-  public abstract void close() throws IOException; 
-  @Override
-  public abstract HoplogReader getReader() throws IOException;
-
-  @Override
-  public abstract HoplogWriter createWriter(int keys) throws IOException;
-
-  @Override
-  abstract public void close(boolean clearCache) throws IOException;
-
-  @Override
-  public void setReaderActivityListener(HoplogReaderActivityListener listener) {
-    this.readerListener = listener;
-  }
-  
-  @Override
-  public String getFileName() {
-    return this.hfd.getFileName();
-  }
-  
-  public final int compareTo(Hoplog o) {
-    return hfd.compareTo( ((AbstractHoplog)o).hfd);
-  }
-
-  @Override
-  public ICardinality getEntryCountEstimate() throws IOException {
-    return null;
-  }
-  
-  @Override
-  public synchronized void rename(String name) throws IOException {
-    if (logger.isDebugEnabled())
-      logger.debug("{}Renaming hoplog to " + name, logPrefix);
-    Path parent = path.getParent();
-    Path newPath = new Path(parent, name);
-    fsProvider.getFS().rename(path, new Path(parent, newPath));
-
-    // close the old reader and let the new one get created lazily
-    close();
-    
-    // update path to point to the new path
-    path = newPath;
-    this.hfd = new HoplogDescriptor(this.path.getName());
-    logPrefix = "<" + path.getName() + "> ";
-  }
-  
-  @Override
-  public synchronized void delete() throws IOException {
-    if (logger.isDebugEnabled())
-      logger.debug("{}Deleting hoplog", logPrefix);
-    close();
-    this.hoplogModificationTime = null;
-    this.hoplogSize = null;
-    fsProvider.getFS().delete(path, false);
-  }
-
-  @Override
-  public long getModificationTimeStamp() {
-    initHoplogSizeTimeInfo();
-
-    // modification time will not be null if this hoplog exists. Otherwise
-    // invocation of this method is invalid
-    if (hoplogModificationTime == null) {
-      throw new IllegalStateException();
-    }
-    
-    return hoplogModificationTime;
-  }
-
-  @Override
-  public long getSize() {
-    initHoplogSizeTimeInfo();
-    
-    // size will not be null if this hoplog exists. Otherwise
-    // invocation of this method is invalid
-    if (hoplogSize == null) {
-      throw new IllegalStateException();
-    }
-    
-    return hoplogSize;
-  }
-  
-  private synchronized void initHoplogSizeTimeInfo() {
-    if (hoplogSize != null && hoplogModificationTime != null) {
-      // time and size info is already initialized. no work needed here
-      return;
-    }
-
-    try {
-      FileStatus[] filesInfo = FSUtils.listStatus(fsProvider.getFS(), path, null);
-      if (filesInfo != null && filesInfo.length == 1) {
-        this.hoplogModificationTime = filesInfo[0].getModificationTime();
-        this.hoplogSize = filesInfo[0].getLen();
-      }
-      // TODO else condition may happen if user deletes hoplog from the file system.
-    } catch (IOException e) {
-      logger.error(LocalizedMessage.create(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE, path), e);
-      throw new HDFSIOException(
-          LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(path),e);
-    }
-  }
-  public static SequenceFile.Writer getSequenceFileWriter(Path path, 
-      Configuration conf, Logger logger) throws IOException {
-    return getSequenceFileWriter(path,conf, logger, null); 
-  }
-  
-  /**
-   * 
-   * @param path
-   * @param conf
-   * @param logger
-   * @param version - is being used only for testing. Should be passed as null for other purposes. 
-   * @return SequenceFile.Writer 
-   * @throws IOException
-   */
-  public static SequenceFile.Writer getSequenceFileWriter(Path path, 
-    Configuration conf, Logger logger, Version version) throws IOException {
-    Option optPath = SequenceFile.Writer.file(path);
-    Option optKey = SequenceFile.Writer.keyClass(BytesWritable.class);
-    Option optVal = SequenceFile.Writer.valueClass(BytesWritable.class);
-    Option optCom = withCompression(logger);
-    if (logger.isDebugEnabled())
-      logger.debug("{}Started creating hoplog " + path, logPrefix);
-    
-    if (version == null)
-      version = Version.CURRENT;
-    //Create a metadata option with the gemfire version, for future versioning
-    //of the key and value format
-    SequenceFile.Metadata metadata = new SequenceFile.Metadata();
-    metadata.set(new Text(Meta.GEMFIRE_VERSION.name()), new Text(String.valueOf(version.ordinal())));
-    Option optMeta = SequenceFile.Writer.metadata(metadata);
-    
-    SequenceFile.Writer writer = SequenceFile.createWriter(conf, optPath, optKey, optVal, optCom, optMeta);
-    
-    return writer;
-  }
-  
-  private static Option withCompression(Logger logger) {
-    String prop = System.getProperty(HoplogConfig.COMPRESSION);
-    if (prop != null) {
-      CompressionCodec codec;
-      if (prop.equalsIgnoreCase("SNAPPY")) {
-        codec = new SnappyCodec();
-      } else if (prop.equalsIgnoreCase("LZ4")) {
-        codec = new Lz4Codec();
-      } else if (prop.equals("GZ")) {
-        codec = new GzipCodec();
-      } else {
-        throw new IllegalStateException("Unsupported codec: " + prop);
-      }
-      if (logger.isDebugEnabled())
-        logger.debug("{}Using compression codec " + codec, logPrefix);
-      return SequenceFile.Writer.compression(CompressionType.BLOCK, codec);
-    }
-    return SequenceFile.Writer.compression(CompressionType.NONE, null);
-  }
-  
-  public static final class HoplogDescriptor implements Comparable<HoplogDescriptor> {
-     private final String fileName;
-     private final String bucket;
-     private final int sequence;
-     private final long timestamp;
-     private final String extension;
-     
-     HoplogDescriptor(final String fileName) {
-       this.fileName = fileName;
-       final Matcher matcher = AbstractHoplogOrganizer.HOPLOG_NAME_PATTERN.matcher(fileName);
-       final boolean matched = matcher.find();
-       assert matched;
-       this.bucket = matcher.group(1);
-       this.sequence = Integer.valueOf(matcher.group(3));
-       this.timestamp = Long.valueOf(matcher.group(2)); 
-       this.extension = matcher.group(4);
-     }
-     
-     public final String getFileName() {
-       return fileName;
-     }
-     
-     @Override
-     public boolean equals(Object o) {
-       if (this == o) {
-         return true;
-       }
-       
-       if (!(o instanceof HoplogDescriptor)) {
-         return false;
-       }
-       
-       final HoplogDescriptor other = (HoplogDescriptor)o;
-       // the two files should belong to same bucket
-       assert this.bucket.equals(other.bucket);
-       
-       // compare sequence first
-       if (this.sequence != other.sequence) {
-         return false;
-       }
-       
-       // sequence is same, compare timestamps
-       if (this.timestamp != other.timestamp) {
-         return false;
-       }
-       
-       return extension.equals(other.extension);
-     }
-
-    @Override
-    public int compareTo(HoplogDescriptor o) {
-      if (this == o) {
-        return 0;
-      }
-      
-      // the two files should belong to same bucket
-      assert this.bucket.equals(o.bucket);
-      
-      // compare sequence first
-      if (sequence > o.sequence) {
-        return -1;
-      } else if (sequence < o.sequence) {
-        return 1;
-      }
-      
-      // sequence is same, compare timestamps
-      if(timestamp > o.timestamp) {
-        return -1; 
-      } else if (timestamp < o.timestamp) {
-        return 1;
-      }
-      
-      //timestamp is the same, compare the file extension. It's
-      //possible a major compaction and minor compaction could finish
-      //at the same time and create the same timestamp and sequence number
-      //it doesn't matter which file we look at first in that case.
-      return extension.compareTo(o.extension);
-    }
-     
-     
-  }
-  
-  protected static final class FSProvider {
-    final FileSystem fs;
-    final HDFSStoreImpl store;
-    
-    // THIS METHOD IS FOR TESTING ONLY
-    FSProvider(FileSystem fs) {
-      this.fs = fs;
-      this.store = null;
-    }
-    
-    FSProvider(HDFSStoreImpl store) {
-      this.store = store;
-      fs = null;
-    }
-    
-    public FileSystem getFS() throws IOException {
-      if (store != null) {
-        return store.getFileSystem();
-      }
-      return fs;
-    }
-
-    public FileSystem checkFileSystem() {
-      store.checkAndClearFileSystem();
-      return store.getCachedFileSystem();
-    }
-  }
-}
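
HoplogDescriptor above parses a hoplog file name of the form <bucket>-<timestamp>-<sequence>.<extension>
(the HOPLOG_NAME_PATTERN also removed in this commit) and orders files newest first by sequence, then
timestamp, then extension. A standalone sketch of that parsing and ordering, with hypothetical names and
no Hadoop/Geode dependencies:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Sketch of hoplog-style file-name parsing and ordering; illustrative only.
public class HoplogNameDemo implements Comparable<HoplogNameDemo> {
  private static final Pattern NAME = Pattern.compile("(.+?)-(\\d+?)-(\\d+?)\\.(.*)");

  final String bucket;
  final long timestamp;
  final int sequence;
  final String extension;

  HoplogNameDemo(String fileName) {
    Matcher m = NAME.matcher(fileName);
    if (!m.matches()) throw new IllegalArgumentException(fileName);
    bucket = m.group(1);
    timestamp = Long.parseLong(m.group(2));
    sequence = Integer.parseInt(m.group(3));
    extension = m.group(4);
  }

  // A higher sequence or timestamp means a younger file, and younger files sort first.
  @Override
  public int compareTo(HoplogNameDemo o) {
    if (sequence != o.sequence) return sequence > o.sequence ? -1 : 1;
    if (timestamp != o.timestamp) return timestamp > o.timestamp ? -1 : 1;
    return extension.compareTo(o.extension);
  }

  public static void main(String[] args) {
    HoplogNameDemo a = new HoplogNameDemo("12-1462400000000-7.hop");
    HoplogNameDemo b = new HoplogNameDemo("12-1462400005000-8.hop");
    System.out.println(b.compareTo(a)); // -1: b is younger, so it sorts first
  }
}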

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/AbstractHoplogOrganizer.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/AbstractHoplogOrganizer.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/AbstractHoplogOrganizer.java
deleted file mode 100644
index 4f078d8..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/AbstractHoplogOrganizer.java
+++ /dev/null
@@ -1,430 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Comparator;
-import java.util.Iterator;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-import com.gemstone.gemfire.cache.Operation;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.QueuedPersistentEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.SortedHoplogPersistedEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplog.HoplogDescriptor;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
-import com.gemstone.gemfire.internal.Assert;
-import com.gemstone.gemfire.internal.cache.BucketRegion;
-import com.gemstone.gemfire.internal.cache.ForceReattemptException;
-import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.TrackedReference;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.internal.logging.LogService;
-import org.apache.logging.log4j.Logger;
-
-
-public abstract class AbstractHoplogOrganizer<T extends PersistedEventImpl> implements HoplogOrganizer<T> {
-
-  public static final String MINOR_HOPLOG_EXTENSION = ".ihop";
-  public static final String MAJOR_HOPLOG_EXTENSION = ".chop";
-  public static final String EXPIRED_HOPLOG_EXTENSION = ".exp";
-  public static final String TEMP_HOPLOG_EXTENSION = ".tmp";
-
-  public static final String FLUSH_HOPLOG_EXTENSION = ".hop";
-  public static final String SEQ_HOPLOG_EXTENSION = ".shop";
-
-  // all valid hoplogs will follow the following name pattern
-  public static final String HOPLOG_NAME_REGEX = "(.+?)-(\\d+?)-(\\d+?)";
-  public static final Pattern HOPLOG_NAME_PATTERN = Pattern.compile(HOPLOG_NAME_REGEX
-      + "\\.(.*)");
-  
-  public static boolean JUNIT_TEST_RUN = false; 
-
-  protected static final boolean ENABLE_INTEGRITY_CHECKS = Boolean
-      .getBoolean("gemfire.HdfsSortedOplogOrganizer.ENABLE_INTEGRITY_CHECKS")
-      || assertionsEnabled();
-
-  private static boolean assertionsEnabled() {
-    boolean enabled = false;
-    assert enabled = true;
-    return enabled;
-  }
-
-  protected HdfsRegionManager regionManager;
-  // name or id of bucket managed by this organizer
-  protected final String regionFolder;
-  protected final int bucketId;
-
-  // path of the region directory
-  protected final Path basePath;
-  // identifies path of directory containing a bucket's oplog files
-  protected final Path bucketPath;
-
-  protected final HDFSStoreImpl store;
-
-  // assigns a unique increasing number to each oplog file
-  protected AtomicInteger sequence;
-
-  //logger instance
-  protected static final Logger logger = LogService.getLogger();
-  protected final String logPrefix;
-
-  protected SortedOplogStatistics stats;
-  AtomicLong bucketDiskUsage = new AtomicLong(0);
-
-  // creation of new files and expiration of files will be synchronously
-  // notified to the listener
-  protected HoplogListener listener;
-
-  private volatile boolean closed = false;
-  
-  protected Object changePrimarylockObject = new Object();
-  
-  public AbstractHoplogOrganizer(HdfsRegionManager region, int bucketId) {
-
-    assert region != null;
-
-    this.regionManager = region;
-    this.regionFolder = region.getRegionFolder();
-    this.store = region.getStore();
-    this.listener = region.getListener();
-    this.stats = region.getHdfsStats();
-    
-    this.bucketId = bucketId;
-
-    this.basePath = new Path(store.getHomeDir());
-    this.bucketPath = new Path(basePath, regionFolder + "/" + bucketId);
-
-    this.logPrefix = "<" + getRegionBucketStr() + "> ";
-    
-  }
-
-  @Override
-  public boolean isClosed() {
-    return closed || regionManager.isClosed();
-  }
-  
-  @Override
-  public void close() throws IOException {
-    closed = true;
-    
-    // this bucket is closed and may be owned by a new node. So reduce the store
-    // usage stat, as the new owner adds the usage metric
-    incrementDiskUsage((-1) * bucketDiskUsage.get());
-  }
-
-  @Override
-  public abstract void flush(Iterator<? extends QueuedPersistentEvent> bufferIter,
-      int count) throws IOException, ForceReattemptException;
-
-  @Override
-  public abstract void clear() throws IOException;
-
-  protected abstract Hoplog getHoplog(Path hoplogPath) throws IOException;
-
-  @Override
-  public void hoplogCreated(String region, int bucketId, Hoplog... oplogs)
-      throws IOException {
-    throw new UnsupportedOperationException("Not supported for "
-        + this.getClass().getSimpleName());
-  }
-
-  @Override
-  public void hoplogDeleted(String region, int bucketId, Hoplog... oplogs)
-      throws IOException {
-    throw new UnsupportedOperationException("Not supported for "
-        + this.getClass().getSimpleName());
-  }
-
-  @Override
-  public void compactionCompleted(String region, int bucket, boolean isMajor) {
-    throw new UnsupportedOperationException("Not supported for "
-        + this.getClass().getSimpleName());
-  }
-  
-  @Override
-  public T read(byte[] key) throws IOException {
-    throw new UnsupportedOperationException("Not supported for "
-        + this.getClass().getSimpleName());
-  }
-
-  @Override
-  public HoplogIterator<byte[], T> scan() throws IOException {
-    throw new UnsupportedOperationException("Not supported for "
-        + this.getClass().getSimpleName());
-  }
-
-  @Override
-  public HoplogIterator<byte[], T> scan(byte[] from, byte[] to)
-      throws IOException {
-    throw new UnsupportedOperationException("Not supported for "
-        + this.getClass().getSimpleName());
-  }
-
-  @Override
-  public HoplogIterator<byte[], T> scan(byte[] from,
-      boolean fromInclusive, byte[] to, boolean toInclusive) throws IOException {
-    throw new UnsupportedOperationException("Not supported for "
-        + this.getClass().getSimpleName());
-  }
-
-  @Override
-  public long sizeEstimate() {
-    throw new UnsupportedOperationException("Not supported for "
-        + this.getClass().getSimpleName());
-  }
-
-  /**
-   * @return an oplog's full path after prefixing the bucket path to the file
-   *         name
-   */
-  protected String getPathStr(Hoplog oplog) {
-    return bucketPath.toString() + "/" + oplog.getFileName();
-  }
-
-  protected String getRegionBucketStr() {
-    return regionFolder + "/" + bucketId;
-  }
-
-  protected SortedHoplogPersistedEvent deserializeValue(byte[] val) throws IOException {
-    try {
-      return SortedHoplogPersistedEvent.fromBytes(val);
-    } catch (ClassNotFoundException e) {
-      logger
-          .error(
-              LocalizedStrings.GetMessage_UNABLE_TO_DESERIALIZE_VALUE_CLASSNOTFOUNDEXCEPTION,
-              e);
-      return null;
-    }
-  }
-
-  /**
-   * @return true if the entry belongs to a destroy event
-   */
-  protected boolean isDeletedEntry(byte[] value, int offset) throws IOException {
-    // Read only the first byte of PersistedEventImpl for the operation
-    assert value != null && value.length > 0 && offset >= 0 && offset < value.length;
-    Operation op = Operation.fromOrdinal(value[offset]);
-
-    if (op.isDestroy() || op.isInvalidate()) {
-      return true;
-    }
-    return false;
-  }
-
-  /**
-   * @param seqNum
-   *          desired sequence number of the hoplog. If null, the next number in
-   *          the sequence is chosen
-   * @param extension
-   *          file extension representing the type of file, e.g. ihop for
-   *          intermediate hoplog
-   * @return a new temporary file for a new sorted oplog. The name consists of
-   *         the bucket name, a timestamp and a sequence number for ordering the
-   *         files
-   */
-  Hoplog getTmpSortedOplog(Integer seqNum, String extension) throws IOException {
-    if (seqNum == null) {
-      seqNum = sequence.incrementAndGet();
-    }
-    String name = bucketId + "-" + System.currentTimeMillis() + "-" + seqNum 
-        + extension;
-    Path soplogPath = new Path(bucketPath, name + TEMP_HOPLOG_EXTENSION);
-    return getHoplog(soplogPath);
-  }
-  
-  /**
-   * renames a temporary hoplog file to a legitimate name.
-   */
-  static void makeLegitimate(Hoplog so) throws IOException {
-    String name = so.getFileName();
-    assert name.endsWith(TEMP_HOPLOG_EXTENSION);
-
-    int index = name.lastIndexOf(TEMP_HOPLOG_EXTENSION);
-    name = name.substring(0, index);
-    so.rename(name);
-  }
-
-  /**
-   * creates an expiry marker for a file on the file system
-   * 
-   * @param hoplog
-   * @throws IOException
-   */
-  protected void addExpiryMarkerForAFile(Hoplog hoplog) throws IOException {
-    FileSystem fs = store.getFileSystem();
-
-    // TODO optimization needed here. instead of creating expired marker
-    // file per file, create a meta file. the main thing to worry is
-    // compaction of meta file itself
-    Path expiryMarker = getExpiryMarkerPath(hoplog.getFileName());
-
-    // uh-oh, why are we trying to expire an already expired file?
-    if (ENABLE_INTEGRITY_CHECKS) {
-      Assert.assertTrue(!fs.exists(expiryMarker),
-          "Expiry marker already exists: " + expiryMarker);
-    }
-
-    FSDataOutputStream expiryMarkerFile = fs.create(expiryMarker);
-    expiryMarkerFile.close();
-
-    if (logger.isDebugEnabled())
-      logger.debug("Hoplog marked expired: " + getPathStr(hoplog));
-  }
-
-  protected Path getExpiryMarkerPath(String name) {
-    return new Path(bucketPath, name + EXPIRED_HOPLOG_EXTENSION);
-  }
-  
-  protected String truncateExpiryExtension(String name) {
-    if (name.endsWith(EXPIRED_HOPLOG_EXTENSION)) {
-      return name.substring(0, name.length() - EXPIRED_HOPLOG_EXTENSION.length());
-    }
-    
-    return name;
-  }
-  
-  /**
-   * updates region stats and a local copy of bucket level store usage metric.
-   * 
-   * @param delta
-   */
-  protected void incrementDiskUsage(long delta) {
-    long newSize = bucketDiskUsage.addAndGet(delta);
-    if (newSize < 0 && delta < 0) {
-      if (logger.isDebugEnabled()){
-        logger.debug("{}Invalid diskUsage size:" + newSize + " caused by delta:"
-            + delta + ", parallel del & close?" + isClosed(), logPrefix);
-      }
-      if (isClosed()) {
-        // avoid corrupting disk usage size during close by reducing residue
-        // size only
-        delta = delta + (-1 * newSize);
-      }
-    }
-    stats.incStoreUsageBytes(delta);
-  }
-
-  /**
-   * Utility method to remove a file from the valid file list if an expired marker
-   * for the file exists
-   * 
-   * @param valid
-   *          list of valid files
-   * @param expired
-   *          list of expired file markers
-   * @return list of valid files that do not have an expired file marker
-   */
-  public static FileStatus[] filterValidHoplogs(FileStatus[] valid,
-      FileStatus[] expired) {
-    if (valid == null) {
-      return null;
-    }
-
-    if (expired == null) {
-      return valid;
-    }
-
-    ArrayList<FileStatus> result = new ArrayList<FileStatus>();
-    for (FileStatus vs : valid) {
-      boolean found = false;
-      for (FileStatus ex : expired) {
-        if (ex
-            .getPath()
-            .getName()
-            .equals(
-                vs.getPath().getName()
-                    + HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION)) {
-          found = true;
-        }
-      }
-      if (!found) {
-        result.add(vs);
-      }
-    }
-
-    return result.toArray(new FileStatus[result.size()]);
-  }
-
-  protected void pingSecondaries() throws ForceReattemptException {
-
-    if (JUNIT_TEST_RUN)
-      return;
-    BucketRegion br = ((PartitionedRegion)this.regionManager.getRegion()).getDataStore().getLocalBucketById(this.bucketId);
-    boolean secondariesPingable = false;
-    try {
-      secondariesPingable = br.areSecondariesPingable();
-    } catch (Throwable e) {
-      throw new ForceReattemptException("Failed to ping secondary servers of bucket: " + 
-          this.bucketId + ", region: " + ((PartitionedRegion)this.regionManager.getRegion()), e);
-    }
-    if (!secondariesPingable)
-      throw new ForceReattemptException("Failed to ping secondary servers of bucket: " + 
-          this.bucketId + ", region: " + ((PartitionedRegion)this.regionManager.getRegion()));
-  }
-
-  
-
-  
-  /**
-   * A comparator for ordering soplogs based on the file name. The file names
-   * are assigned incrementally and hint at the age of the file
-   */
-  public static final class HoplogComparator implements
-      Comparator<TrackedReference<Hoplog>> {
-    /**
-     * a file with a higher sequence or timestamp is younger and hence
-     * sorts as the smaller of the two
-     */
-    @Override
-    public int compare(TrackedReference<Hoplog> o1, TrackedReference<Hoplog> o2) {
-      return o1.get().compareTo(o2.get());
-    }
-
-    /**
-     * Compares the age of files based on their file names and returns 1 if name1 is
-     * older, -1 if name1 is younger and 0 if the two files are the same age
-     */
-    public static int compareByName(String name1, String name2) {
-      HoplogDescriptor hd1 = new HoplogDescriptor(name1);
-      HoplogDescriptor hd2 = new HoplogDescriptor(name2);
-      
-      return hd1.compareTo(hd2);
-    }
-  }
-
-  /**
-   * @param matcher
-   *          A preinitialized / matched regex pattern
-   * @return Timestamp encoded in the matched hoplog file name
-   */
-  public static long getHoplogTimestamp(Matcher matcher) {
-    return Long.valueOf(matcher.group(2));
-  }
-}
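
filterValidHoplogs above drops any hoplog for which a companion "<name>.exp" expiry marker exists.
The same idea is sketched below with plain file-name strings instead of Hadoop FileStatus objects
(hypothetical names, not the removed method); a HashSet lookup replaces the nested loop, which is a
design choice for clarity rather than a claim about the original implementation.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Sketch of expiry-marker filtering over file names; illustrative only.
public class ExpiryFilterDemo {
  static final String EXPIRED_EXTENSION = ".exp";

  // Keep only files that have no matching "<name>.exp" marker.
  static List<String> filterValid(List<String> valid, List<String> expiredMarkers) {
    Set<String> markers = new HashSet<>(expiredMarkers);
    List<String> result = new ArrayList<>();
    for (String name : valid) {
      if (!markers.contains(name + EXPIRED_EXTENSION)) {
        result.add(name);
      }
    }
    return result;
  }

  public static void main(String[] args) {
    List<String> valid = Arrays.asList("0-100-1.hop", "0-200-2.hop");
    List<String> expired = Arrays.asList("0-100-1.hop.exp");
    System.out.println(filterValid(valid, expired)); // [0-200-2.hop]
  }
}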

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BloomFilter.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BloomFilter.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BloomFilter.java
deleted file mode 100644
index 86e66a1..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BloomFilter.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-public interface BloomFilter {
-  /**
-   * Returns true if the bloom filter might contain the supplied key. The nature of the bloom filter
-   * is such that false positives are allowed, but false negatives cannot occur.
-   */
-  boolean mightContain(byte[] key);
-
-  /**
-   * Returns true if the bloom filter might contain the supplied key. The nature of the bloom filter
-   * is such that false positives are allowed, but false negatives cannot occur.
-   */
-  boolean mightContain(byte[] key, int keyOffset, int keyLength);
-
-  /**
-   * @return Size of the bloom, in bytes
-   */
-  long getBloomSize();
-}
\ No newline at end of file
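
The BloomFilter interface above documents the usual contract: queries may return false positives
but never false negatives. The toy filter below illustrates why, using a BitSet and seeded hashing;
it is a minimal sketch with arbitrary hash choices, not the removed interface or any Geode
implementation.

import java.util.BitSet;

// Toy Bloom filter sketch; illustrative only.
public class BloomFilterDemo {
  private final BitSet bits;
  private final int size;
  private final int hashes;

  BloomFilterDemo(int size, int hashes) {
    this.bits = new BitSet(size);
    this.size = size;
    this.hashes = hashes;
  }

  // Derive a bit position from the key using a seeded FNV-style hash.
  private int position(byte[] key, int seed) {
    int h = 0x811C9DC5 ^ seed;
    for (byte b : key) {
      h ^= (b & 0xFF);
      h *= 0x01000193;
    }
    return Math.floorMod(h, size);
  }

  public void add(byte[] key) {
    for (int i = 0; i < hashes; i++) {
      bits.set(position(key, i));
    }
  }

  // False negatives are impossible: every bit set by add() stays set,
  // so a key that was added always passes all of its bit checks.
  public boolean mightContain(byte[] key) {
    for (int i = 0; i < hashes; i++) {
      if (!bits.get(position(key, i))) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    BloomFilterDemo bf = new BloomFilterDemo(1024, 3);
    bf.add("key-1".getBytes());
    System.out.println(bf.mightContain("key-1".getBytes()));  // always true
    System.out.println(bf.mightContain("key-42".getBytes())); // usually false, may be true
  }
}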

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CloseTmpHoplogsTimerTask.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CloseTmpHoplogsTimerTask.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CloseTmpHoplogsTimerTask.java
deleted file mode 100644
index 3f67de8..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CloseTmpHoplogsTimerTask.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.util.Collection;
-
-import org.apache.hadoop.fs.FileSystem;
-
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
-import com.gemstone.gemfire.internal.SystemTimer.SystemTimerTask;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.internal.logging.LogService;
-
-import org.apache.logging.log4j.Logger;
-
-/**
- * For the streaming case, if the bucket traffic goes down after writing a few batches of data,
- * the flush doesn't get called. In that case, the file is left in the tmp state
- * until the flush restarts. To avoid this issue, this timer task was added;
- * it periodically iterates over the buckets and closes their writers
- * if the time for rollover has passed.
- * 
- * It also has the extra responsibility of fixing the sizes of the files
- * that weren't closed properly last time.
- *
- *
- */
-class CloseTmpHoplogsTimerTask extends SystemTimerTask {
-  
-  private HdfsRegionManager hdfsRegionManager;
-  private static final Logger logger = LogService.getLogger();
-  private FileSystem filesystem; 
-  
-  public CloseTmpHoplogsTimerTask(HdfsRegionManager hdfsRegionManager) {
-    this.hdfsRegionManager = hdfsRegionManager;
-    
-    // Create a new filesystem 
-    // This is added for the following reason:
-    // For HDFS, if a file wasn't closed properly last time, 
-    // while calling FileSystem.append for this file, FSNamesystem.startFileInternal->
-    // FSNamesystem.recoverLeaseInternal function gets called. 
-    // This function throws AlreadyBeingCreatedException if there is an open handle to any other file
-    // created using the same FileSystem object. This is a bug and is being tracked at: 
-    // https://issues.apache.org/jira/browse/HDFS-3848?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
-    // 
-    // The fix for this bug is not yet part of Pivotal HD. So to overcome the bug, 
-    // we create a new file system for the timer task so that it does not encounter the bug. 
-    this.filesystem = this.hdfsRegionManager.getStore().createFileSystem();
-    if (logger.isDebugEnabled()) 
-      logger.debug("created a new file system specifically for timer task");
-  }
-
-  
-  /**
-   * Iterates over all the bucket organizers and closes their writer if the time for 
-   * rollover has passed. It also has the additional responsibility of fixing the tmp
-   * files that were left over in the last unsuccessful run. 
-   */
-  @Override
-  public void run2() {
-    Collection<HoplogOrganizer> organizers =  hdfsRegionManager.getBucketOrganizers();
-    if (logger.isDebugEnabled()) 
-      logger.debug("Starting the close temp logs run.");
-    
-    for (HoplogOrganizer organizer: organizers) {
-      
-      HDFSUnsortedHoplogOrganizer unsortedOrganizer = (HDFSUnsortedHoplogOrganizer)organizer;
-      long timeSinceLastFlush = (System.currentTimeMillis() - unsortedOrganizer.getLastFlushTime())/1000 ;
-      try {
-        this.hdfsRegionManager.getRegion().checkReadiness();
-      } catch (Exception e) {
-        break;
-      }
-      
-      try {
-        // the time since last flush has exceeded file rollover interval, roll over the 
-        // file. 
-        if (timeSinceLastFlush >= unsortedOrganizer.getfileRolloverInterval()) {
-          if (logger.isDebugEnabled()) 
-            logger.debug("Closing writer for bucket: " + unsortedOrganizer.bucketId);
-          unsortedOrganizer.synchronizedCloseWriter(false, timeSinceLastFlush, 0);
-        }
-        
-        // fix the tmp hoplogs, if any. Pass the new file system here. 
-        unsortedOrganizer.identifyAndFixTmpHoplogs(this.filesystem);
-        
-      } catch (Exception e) {
-        logger.warn(LocalizedStrings.HOPLOG_CLOSE_FAILED, e);
-      }
-    }
-    
-  }
-}
-
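
The class comment above boils down to a periodic sweep that rolls over any writer whose last flush is older than the rollover interval. A rough standalone sketch of that pattern with java.util.Timer (the bucket map, interval, and timings are invented stand-ins for the per-bucket writers):

import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.ConcurrentHashMap;

public class RolloverSweepDemo {
  // Stands in for the per-bucket writers: bucket id -> time of last flush.
  static final Map<Integer, Long> lastFlushTime = new ConcurrentHashMap<>();
  static final long rolloverIntervalMillis = 5_000L;

  public static void main(String[] args) throws InterruptedException {
    // One bucket that has been idle longer than the rollover interval.
    lastFlushTime.put(7, System.currentTimeMillis() - 2 * rolloverIntervalMillis);

    Timer timer = new Timer("close-tmp-hoplogs", true);
    timer.schedule(new TimerTask() {
      @Override
      public void run() {
        long now = System.currentTimeMillis();
        lastFlushTime.forEach((bucket, last) -> {
          if (now - last >= rolloverIntervalMillis) {
            // In the real task this would close the bucket's tmp file writer.
            System.out.println("Rolling over idle writer for bucket " + bucket);
            lastFlushTime.put(bucket, now);
          }
        });
      }
    }, 0L, 1_000L);

    Thread.sleep(2_000L); // let the daemon timer fire at least once before exiting
  }
}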

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CompactionStatus.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CompactionStatus.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CompactionStatus.java
deleted file mode 100644
index 55d8f87..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CompactionStatus.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import com.gemstone.gemfire.internal.VersionedDataSerializable;
-import com.gemstone.gemfire.internal.Version;
-
-/**
- * Status of the compaction task reported in the future
- * 
- */
-public class CompactionStatus implements VersionedDataSerializable {
-  /**MergeGemXDHDFSToGFE check and verify serializationversions **/
- 
-  private static Version[] serializationVersions = new Version[]{ Version.GFE_81 };
-  private int bucketId;
-  private boolean status;
-
-  public CompactionStatus() {
-  }
-
-  public CompactionStatus(int bucketId, boolean status) {
-    this.bucketId = bucketId;
-    this.status = status;
-  }
-  public int getBucketId() {
-    return bucketId;
-  }
-  public boolean isStatus() {
-    return status;
-  }
-  @Override
-  public void toData(DataOutput out) throws IOException {
-    out.writeInt(bucketId);
-    out.writeBoolean(status);
-  }
-  @Override
-  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
-    this.bucketId = in.readInt();
-    this.status = in.readBoolean();
-  }
-  @Override
-  public Version[] getSerializationVersions() {
-    return serializationVersions;
-  }
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append(getClass().getCanonicalName()).append("@")
-    .append(System.identityHashCode(this)).append(" Bucket:")
-    .append(bucketId).append(" status:").append(status);
-    return sb.toString();
-  }
-}
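
For reference, the toData/fromData pair above writes an int bucket id followed by a boolean status and reads them back in the same order. A hypothetical round trip of that layout using plain java.io streams (not Geode's DataSerializer machinery), just to show the ordering contract:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class CompactionStatusRoundTrip {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    out.writeInt(42);        // bucketId, written first as in toData
    out.writeBoolean(true);  // status, written second
    out.flush();

    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
    int bucketId = in.readInt();        // read back in the same order as fromData
    boolean status = in.readBoolean();
    System.out.println("bucket " + bucketId + " compacted: " + status);
  }
}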

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/FlushStatus.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/FlushStatus.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/FlushStatus.java
deleted file mode 100644
index 84beded..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/FlushStatus.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import com.gemstone.gemfire.internal.VersionedDataSerializable;
-import com.gemstone.gemfire.internal.Version;
-
-/**
- * Reports the result of a flush request.
- * 
- */
-public class FlushStatus implements VersionedDataSerializable {
-  private static Version[] serializationVersions = new Version[]{ Version.GFE_81 };
-  private int bucketId;
-
-  private final static int LAST = -1;
-  
-  public FlushStatus() {
-  }
-
-  public static FlushStatus last() {
-    return new FlushStatus(LAST);
-  }
-  
-  public FlushStatus(int bucketId) {
-    this.bucketId = bucketId;
-  }
-  public int getBucketId() {
-    return bucketId;
-  }
-  public boolean isLast() {
-    return bucketId == LAST;
-  }
-  @Override
-  public void toData(DataOutput out) throws IOException {
-    out.writeInt(bucketId);
-  }
-  @Override
-  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
-    this.bucketId = in.readInt();
-  }
-  @Override
-  public Version[] getSerializationVersions() {
-    return serializationVersions;
-  }
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append(getClass().getCanonicalName()).append("@")
-    .append(System.identityHashCode(this)).append(" Bucket:")
-    .append(bucketId);
-    return sb.toString();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManager.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManager.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManager.java
deleted file mode 100644
index ba191c2..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManager.java
+++ /dev/null
@@ -1,330 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.IOException;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.Future;
-import java.util.concurrent.LinkedBlockingDeque;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.logging.log4j.Logger;
-
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer.Compactor;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
-
-/**
- * A singleton which schedules compaction of hoplogs owned by this node as primary and manages
- * the executor of ongoing compactions. Ideally the number of pending requests will not exceed the
- * number of buckets on the node, as the hoplog organizer avoids creating a new request while
- * compaction on the bucket is active. Moreover, separate queues for major and minor compactions are
- * maintained to prevent long-running major compactions from blocking minor compactions.
- */
-public class HDFSCompactionManager {
-  /*
-   * Each hdfs store has its own concurrency configuration. The concurrency
-   * configuration is used by the compaction manager to manage threads. This member
-   * holds the hdfs-store to compaction-manager mapping.
-   */
-  private static final ConcurrentHashMap<String, HDFSCompactionManager> storeToManagerMap = 
-                                        new ConcurrentHashMap<String, HDFSCompactionManager>();
-
-  // hdfs store configuration used to initialize this instance
-  HDFSStore storeConfig;
-  
-  // Executor for ordered execution of minor compaction requests.
-  private final CompactionExecutor minorCompactor;
-  // Executor for ordered execution of major compaction requests.
-  private final CompactionExecutor majorCompactor;
-
-  private static final Logger logger = LogService.getLogger();
-  protected final static String logPrefix =  "<" + "HDFSCompactionManager" + "> ";;
-  
-  private HDFSCompactionManager(HDFSStore config) {
-    this.storeConfig = config;
-    // configure hdfs compaction manager
-    int capacity = Integer.getInteger(HoplogConfig.COMPCATION_QUEUE_CAPACITY,
-        HoplogConfig.COMPCATION_QUEUE_CAPACITY_DEFAULT);
-
-    minorCompactor = new CompactionExecutor(config.getMinorCompactionThreads(), capacity, "MinorCompactor_"
-        + config.getName());
-
-    majorCompactor = new CompactionExecutor(config.getMajorCompactionThreads(), capacity, "MajorCompactor_"
-        + config.getName());
-
-    minorCompactor.allowCoreThreadTimeOut(true);
-    majorCompactor.allowCoreThreadTimeOut(true);
-  }
-
-  public static synchronized HDFSCompactionManager getInstance(HDFSStore config) {
-    HDFSCompactionManager instance = storeToManagerMap.get(config.getName());
-    if (instance == null) {
-      instance = new HDFSCompactionManager(config);
-      storeToManagerMap.put(config.getName(), instance);
-    }
-    
-    return instance;
-  }
-
-  /**
-   * Accepts compaction request for asynchronous compaction execution.
-   * 
-   * @param request
-   *          compaction request with region and bucket id
-   * @return true if the request is accepted, false if the compactor is overloaded and there is a
-   *         long wait queue
-   */
-  public synchronized Future<CompactionStatus> submitRequest(CompactionRequest request) {
-    if (!request.isForced && request.compactor.isBusy(request.isMajor)) {
-      if (logger.isDebugEnabled()) {
-        fineLog("Compactor is busy. Ignoring ", request);
-      }
-      return null;
-    }
-    
-    CompactionExecutor executor = request.isMajor ? majorCompactor : minorCompactor;
-    
-    try {
-      return executor.submit(request);
-    } catch (Throwable e) {
-      if (e instanceof CompactionIsDisabled) {
-        if (logger.isDebugEnabled()) {
-          fineLog("{}" +e.getMessage(), logPrefix);
-        }
-      } else {
-        logger.info(LocalizedMessage.create(LocalizedStrings.ONE_ARG, "Compaction request submission failed"), e);
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Removes all pending compaction requests. Programmed for TESTING ONLY
-   */
-  public void reset() {
-    minorCompactor.shutdownNow();
-    majorCompactor.shutdownNow();
-    HDFSCompactionManager.storeToManagerMap.remove(storeConfig.getName());
-  }
-  
-  /**
-   * Returns minor compactor. Programmed for TESTING AND MONITORING ONLY  
-   */
-  public ThreadPoolExecutor getMinorCompactor() {
-    return minorCompactor;
-  }
-
-  /**
-   * Returns major compactor. Programmed for TESTING AND MONITORING ONLY  
-   */
-  public ThreadPoolExecutor getMajorCompactor() {
-    return majorCompactor;
-  }
-  
-  /**
-   * Contains important details needed for executing a compaction cycle.
-   */
-  public static class CompactionRequest implements Callable<CompactionStatus> {
-    String regionFolder;
-    int bucket;
-    Compactor compactor;
-    boolean isMajor;
-    final boolean isForced;
-    final boolean versionUpgrade;
-
-    public CompactionRequest(String regionFolder, int bucket, Compactor compactor, boolean major) {
-      this(regionFolder, bucket, compactor, major, false);
-    }
-
-    public CompactionRequest(String regionFolder, int bucket, Compactor compactor, boolean major, boolean isForced) {
-      this(regionFolder, bucket, compactor, major, isForced, false);
-    }
-
-    public CompactionRequest(String regionFolder, int bucket, Compactor compactor, boolean major, boolean isForced, boolean versionUpgrade) {
-      this.regionFolder = regionFolder;
-      this.bucket = bucket;
-      this.compactor = compactor;
-      this.isMajor = major;
-      this.isForced = isForced;
-      this.versionUpgrade = versionUpgrade;
-    }
-
-    @Override
-    public CompactionStatus call() throws Exception {
-      HDFSStore store = compactor.getHdfsStore();
-      if (!isForced) {
-        // this is an auto-generated compaction request. If auto compaction is
-        // disabled, ignore this call.
-        if (isMajor && !store.getMajorCompaction()) {
-          if (logger.isDebugEnabled()) {
-            logger.debug("{}Major compaction is disabled. Ignoring request",logPrefix);
-          }
-          return new CompactionStatus(bucket, false);
-        } else if (!isMajor && !store.getMinorCompaction()) {
-          if (logger.isDebugEnabled()) {
-            logger.debug("{}Minor compaction is disabled. Ignoring request", logPrefix);
-          }
-          return new CompactionStatus(bucket, false);
-        }
-      }
-
-      // all hurdles passed, execute compaction now
-      try {
-        boolean status = compactor.compact(isMajor, versionUpgrade);
-        return new CompactionStatus(bucket, status);
-      } catch (IOException e) {
-        logger.error(LocalizedMessage.create(LocalizedStrings.HOPLOG_HDFS_COMPACTION_ERROR, bucket), e);
-      }
-      return new CompactionStatus(bucket, false);
-    }
-
-    @Override
-    public int hashCode() {
-      final int prime = 31;
-      int result = 1;
-      result = prime * result + bucket;
-      result = prime * result
-          + ((regionFolder == null) ? 0 : regionFolder.hashCode());
-      return result;
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      if (this == obj)
-        return true;
-      if (obj == null)
-        return false;
-      if (getClass() != obj.getClass())
-        return false;
-      CompactionRequest other = (CompactionRequest) obj;
-      if (bucket != other.bucket)
-        return false;
-      if (regionFolder == null) {
-        if (other.regionFolder != null)
-          return false;
-      } else if (!regionFolder.equals(other.regionFolder))
-        return false;
-      return true;
-    }
-
-    @Override
-    public String toString() {
-      return "CompactionRequest [regionFolder=" + regionFolder + ", bucket="
-          + bucket + ", isMajor=" + isMajor + ", isForced="+isForced+"]";
-    }
-  }
-
-  /**
-   * Helper class for creating named instances of compaction threads and managing the compaction
-   * executor. All threads wait indefinitely.
-   */
-  private class CompactionExecutor extends ThreadPoolExecutor implements ThreadFactory {
-    final AtomicInteger count = new AtomicInteger(1);
-    private String name;
-
-    CompactionExecutor(int max, int capacity, String name) {
-      super(max, max, 5, TimeUnit.SECONDS, new LinkedBlockingDeque<Runnable>(capacity));
-      allowCoreThreadTimeOut(true);
-      setThreadFactory(this);
-      this.name = name;
-    }
-    
-    private void throwIfStopped(CompactionRequest req, HDFSStore storeConfig) {
-      // check whether compaction is enabled on every call. An alter operation may change this value,
-      // so this check is needed every time.
-      boolean isEnabled = true;
-      isEnabled = storeConfig.getMinorCompaction();
-      if (req.isMajor) {
-        isEnabled = storeConfig.getMajorCompaction();
-      }
-      if (isEnabled || req.isForced) {
-        return;
-      }
-      throw new CompactionIsDisabled(name + " is disabled");
-    }
-
-    private void throwIfPoolSizeChanged(CompactionRequest task, HDFSStore config) {
-      int threadCount = config.getMinorCompactionThreads();
-      if (task.isMajor) {
-        threadCount = config.getMajorCompactionThreads();
-      }
-      
-      if (getCorePoolSize() < threadCount) {
-        setCorePoolSize(threadCount);
-      } else if (getCorePoolSize() > threadCount) {
-        setCorePoolSize(threadCount);
-      }
-      
-      if (!task.isForced && getActiveCount() > threadCount) {
-        // the number of active threads is more than the new max pool size. Throw an
-        // error if this is a system-generated compaction request
-        throw new CompactionIsDisabled(
-            "Rejecting to reduce the number of threads for " + name
-            + ", currently:" + getActiveCount() + " target:"
-            + threadCount);
-      }
-    }
-    
-    @Override
-    public <T> Future<T> submit(Callable<T> task) {
-      throwIfStopped((CompactionRequest) task, HDFSCompactionManager.this.storeConfig);
-      throwIfPoolSizeChanged((CompactionRequest) task, HDFSCompactionManager.this.storeConfig);
-      
-      if (logger.isDebugEnabled()) {
-        fineLog("New:", task, " pool:", getPoolSize(), " active:", getActiveCount());
-      }
-      return super.submit(task);
-    }
-
-    @Override
-    public Thread newThread(Runnable r) {
-      Thread thread = new Thread(r, name + ":" + count.getAndIncrement());
-      thread.setDaemon(true);
-      if (logger.isDebugEnabled()) {
-        fineLog("New thread:", name, " poolSize:", getPoolSize(),
-            " active:", getActiveCount());
-      }
-      return thread;
-    }
-  }
-  
-  public static class CompactionIsDisabled extends RejectedExecutionException {
-    private static final long serialVersionUID = 1L;
-    public CompactionIsDisabled(String name) {
-      super(name);
-    }
-  }
-  
-  
-  private void fineLog(Object... strings) {
-    if (logger.isDebugEnabled()) {
-      StringBuffer sb = new StringBuffer();
-      for (Object str : strings) {
-        sb.append(str.toString());
-      }
-      logger.debug("{}"+sb.toString(), logPrefix);
-    }
-  }
-}
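
The isolation described in the class comment, separate bounded executors so long-running major compactions cannot crowd out minor ones, is easy to see in miniature. A stripped-down sketch with invented pool sizes and queue capacity (not the removed manager itself):

import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class TwoLaneCompactionPools {
  private static ThreadPoolExecutor newPool(String name, int threads, int queueCapacity) {
    AtomicInteger count = new AtomicInteger(1);
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        threads, threads, 5, TimeUnit.SECONDS,
        new LinkedBlockingDeque<>(queueCapacity),
        r -> {
          Thread t = new Thread(r, name + ":" + count.getAndIncrement());
          t.setDaemon(true);
          return t;
        });
    pool.allowCoreThreadTimeOut(true); // idle lanes release their threads
    return pool;
  }

  public static void main(String[] args) {
    ThreadPoolExecutor minor = newPool("MinorCompactor", 2, 50);
    ThreadPoolExecutor major = newPool("MajorCompactor", 1, 50);

    // Each lane has its own bounded queue, so a backlog of slow major work
    // never blocks the submission or execution of minor work.
    major.submit(() -> System.out.println("major compaction running"));
    minor.submit(() -> System.out.println("minor compaction running"));

    major.shutdown();
    minor.shutdown();
  }
}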

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSFlushQueueArgs.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSFlushQueueArgs.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSFlushQueueArgs.java
deleted file mode 100644
index 36e171b..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSFlushQueueArgs.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Set;
-
-import com.gemstone.gemfire.DataSerializer;
-import com.gemstone.gemfire.internal.VersionedDataSerializable;
-import com.gemstone.gemfire.internal.Version;
-
-/**
- * Defines the arguments to the flush queue request.
- * 
- */
-@SuppressWarnings("serial")
-public class HDFSFlushQueueArgs implements VersionedDataSerializable {
-
-  private static Version[] serializationVersions = new Version[]{ Version.GFE_81 };
-
-  private HashSet<Integer> buckets;
-
-  private long maxWaitTimeMillis;
-
-  public HDFSFlushQueueArgs() {
-  }
-
-  public HDFSFlushQueueArgs(Set<Integer> buckets, long maxWaitTime) {
-    this.buckets = new HashSet<Integer>(buckets);
-    this.maxWaitTimeMillis = maxWaitTime;
-  }
-
-  @Override
-  public void toData(DataOutput out) throws IOException {
-    DataSerializer.writeHashSet(buckets, out);
-    out.writeLong(maxWaitTimeMillis);
-  }
-
-  @Override
-  public void fromData(DataInput in) throws IOException,
-      ClassNotFoundException {
-    this.buckets = DataSerializer.readHashSet(in);
-    this.maxWaitTimeMillis = in.readLong();
-  }
-
-  @Override
-  public Version[] getSerializationVersions() {
-    return serializationVersions;
-  }
-
-  public Set<Integer> getBuckets() {
-    return (Set<Integer>) buckets;
-  }
-
-  public void setBuckets(Set<Integer> buckets) {
-    this.buckets = new HashSet<Integer>(buckets);
-  }
-
-  public boolean isSynchronous() {
-    return maxWaitTimeMillis == 0;
-  }
-
-  public long getMaxWaitTime() {
-    return this.maxWaitTimeMillis;
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append(getClass().getCanonicalName()).append("@")
-    .append(System.identityHashCode(this))
-    .append(" buckets:").append(buckets)
-    .append(" maxWaitTime:").append(maxWaitTimeMillis);
-    return sb.toString();
-  }
-}


[47/63] [abbrv] incubator-geode git commit: Merge branch 'develop' of https://git-wip-us.apache.org/repos/asf/incubator-geode into develop

Posted by kl...@apache.org.
Merge branch 'develop' of https://git-wip-us.apache.org/repos/asf/incubator-geode into develop


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/a3f308af
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/a3f308af
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/a3f308af

Branch: refs/heads/feature/GEODE-1276
Commit: a3f308af174996cb964853ec1787cbe3a353dd8a
Parents: c06a795 ce43082
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Fri Apr 29 14:26:18 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Fri Apr 29 14:26:18 2016 -0700

----------------------------------------------------------------------
 .../cli/commands/FunctionCommandsDUnitTest.java | 72 +++++++++++++++++++-
 .../cli/commands/ToUpperResultCollector.java    | 65 ++++++++++++++++++
 2 files changed, 135 insertions(+), 2 deletions(-)
----------------------------------------------------------------------



[20/63] [abbrv] incubator-geode git commit: GEODE-1059: PRQueryDUnitHelper no longer inherits PartitionedRegionDUnitTestCase class

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryPerfDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryPerfDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryPerfDUnitTest.java
deleted file mode 100755
index c67cefe..0000000
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryPerfDUnitTest.java
+++ /dev/null
@@ -1,504 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.query.partitioned;
-
-import java.io.Serializable;
-
-import com.gemstone.gemfire.LogWriter;
-import com.gemstone.gemfire.cache.query.data.PortfolioData;
-import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
-import com.gemstone.gemfire.test.dunit.Host;
-import com.gemstone.gemfire.test.dunit.VM;
-
-/**
- * This test executes an array of queries over the PR,
- * benchmarking them against the time taken by the same queries when executed over the
- * Local Region.
- * The performance difference is reported for scenarios
- * encompassing various permutations of the PR attributes,
- * such as redundancy / number of datastores / number of accessors, etc.
- *
- */
-
-public class PRQueryPerfDUnitTest extends PartitionedRegionDUnitTestCase {
-  public static final int SLEEP = 0;
-
-  /**
-   * Constructor
-   * 
-   * @param name
-   */
-
-  public PRQueryPerfDUnitTest(String name) {
-    super(name);
-  }
-
-  int totalNumBuckets = 100;
-
-  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
-
-  final String name = "Portfolios";
-
-  final String localName = "LocalPortfolios";
-
-  final int cnt = 0, cntDest = 5000;
-
-  /**
-   * A nuthing test to make DUnit happy.
-   * The rest of these tests shouldn't be run 
-   * as part of our CruiseControl or precheckin
-   * since they don't assert any behavior.
-   */
-  public void testNuthin() {}
-    
-  /**
-   * This tests executes an array of queries to be executed over the PR ,
-   * benchmarking them over the time taken by the same when executed over the
-   * Local Region with
-   * One Accessor
-   * One Datastore
-   * Redundancy =0
-   *  
-   */
-  public void norun_testBenchmarkingQueryingOneAccessorOneDS_Redundancy0()
-  throws Exception
- {
-
-    LogWriter log = com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter();
-    log.info("BenchMarking PR Querying Test Started*****");
-    Host host = Host.getHost(0);
-
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-
-    int redundancy = 0;
-    
-    try {
-      setVMInfoLogLevel();
-  
-      // Creating Accessor PR's on the participating VM's
-      log.info("Creating Accessor node on VM0");
-      vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-          redundancy));
-      log.info("Successfully Created Accessor node on VM0");
-  
-      log.info("Creating Datastores across  VM1");
-      vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, redundancy));
-      log.info("Successfully Created Datastores on VM1");
-  
-      // creating a local region on one of the JVM's
-      log.info("Creating Local Region on VM0");
-      vm0.invoke(PRQHelp
-          .getCacheSerializableRunnableForLocalRegionCreation(localName));
-      log.info("Successfully Created Local Region on VM0");
-  
-      // Generating portfolio object array to be populated across the PR's & Local
-      // Regions
-  
-      final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
-  
-      // Putting the data into the accessor node
-      log.info("Inserting Portfolio data through the accessor node");
-      vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
-          cnt, cntDest));
-      log.info("Successfully Inserted Portfolio data through the accessor node");
-  
-      // Putting the same data in the local region created
-      log
-          .info("Inserting Portfolio data on local node  VM0 for result " +
-                        "set comparison");
-      vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
-          portfolio, cnt, cntDest));
-      log
-          .info("Successfully Inserted Portfolio data on local node  VM0 for" +
-                        " result set comparison");
-  
-      ResultsObject perfR = new ResultsObject();
-      perfR.OperationDescription = "PR with 1 Accessor, 1 D.S., Redundancy =0,";
-      perfR.NumberOfAccessors = 1;
-      perfR.NumberOfDataStores = 1;
-      perfR.redundancy = 0;
-      
-      if (SLEEP > 0) {
-        Thread.sleep(SLEEP);
-      }
-      
-      // querying the VM for data
-      log.info("Querying on VM0 both on PR Region & local, also comparing the " +
-                "Results sets from both");
-      vm0.invoke(PRQHelp.PRQueryingVsLocalQuerying(name, localName, perfR));
-  
-      log.info("Benchmarking Querying between PR & local  ENDED*****");
-    }
-    finally {
-      resetVMLogLevel();
-    }
-}
-  
-  
-  /**
-   * This tests executes an array of queries to be executed over the PR ,
-   * benchmarking them over the time taken by the same when executed over the
-   * Local Region with
-   * One Accessor
-   * Two Datastore
-   * Redundancy =0
-   *  
-   */
-
-
-  public void norun_testBenchmarkingQueryingOneAccessorTwoDS_Redundancy0()
-      throws Exception
-  {
-    LogWriter log = com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter();
-
-    log.info("BenchMarking PR Querying Test Started*****");
-    Host host = Host.getHost(0);
-
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    VM vm2 = host.getVM(2);
-
-    int redundancy = 0;
-
-    try {
-      setVMInfoLogLevel();
-      
-      // Creating Accessor PR's on the participating VM's
-      log.info("Creating Accessor node on VM0");
-      vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-          redundancy));
-      log.info("Successfully Created Accessor node on VM0");
-  
-      log.info("Creating Datastores across  VM1 , VM2");
-      vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, redundancy));
-      vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, redundancy));
-      log.info("Successfully Created Datastores on VM1 , VM2");
-  
-      // creating a local region on one of the JVM's
-      log.info("Creating Local Region on VM0");
-      vm0.invoke(PRQHelp
-          .getCacheSerializableRunnableForLocalRegionCreation(localName));
-      log.info("Successfully Created Local Region on VM0");
-  
-      // Generating portfolio object array to be populated across the PR's & Local
-      // Regions
-  
-      final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
-  
-      // Putting the data into the accessor node
-      log.info("Inserting Portfolio data through the accessor node");
-      vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
-          cnt, cntDest));
-      log.info("Successfully Inserted Portfolio data through the accessor node");
-  
-      // Putting the same data in the local region created
-      log
-          .info("Inserting Portfolio data on local node  VM0 for result set comparison");
-      vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
-          portfolio, cnt, cntDest));
-      log.info("Successfully Inserted Portfolio data on local node  VM0 for " +
-                "result set comparison");
-  
-      ResultsObject perfR = new ResultsObject();
-      perfR.OperationDescription = "PR with 1 Accessor, 2 D.S., Redundancy=0";
-      perfR.NumberOfAccessors = 1;
-      perfR.NumberOfDataStores = 2;
-      perfR.redundancy = 0;
-
-      if (SLEEP > 0) {
-        Thread.sleep(SLEEP);
-      }
-      
-      
-      // querying the VM for data
-      log.info("Querying on VM0 both on PR Region & local, also comparing the " +
-                "results sets from both");
-      vm0.invoke(PRQHelp.PRQueryingVsLocalQuerying(name, localName, perfR));
-  
-      log.info("Benchmarking Querying between PR & local  ENDED*****");
-    }
-    finally {
-      resetVMLogLevel();
-    }
-  }
-  
-
-  /**
-   * This tests executes an array of queries to be executed over the PR ,
-   * benchmarking them over the time taken by the same when executed over the
-   * Local Region with One Accessor Two Datastore  Redundancy =1
-   * 
-   */
-
-  public void norun_testBenchmarkingQueryingOneAccessorTwoDS_D_ACK_Redundancy1()
-      throws Exception
-  {
-    LogWriter log = com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter();
-    log.info("BenchMarking PR Querying Test Started*****");
-    Host host = Host.getHost(0);
-
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    VM vm2 = host.getVM(2);
-
-    int redundancy = 1;
-
-    try {
-      setVMInfoLogLevel();
-
-      // Creating Accessor PR's on the participating VM'sw
-      log.info("Creating Accessor node on VM0");
-      vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-          redundancy));
-      log.info("Successfully Created Accessor node on VM0");
-  
-      log.info("Creating Datastores across  VM1 , VM2");
-      vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, redundancy));
-      vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, redundancy));
-      log.info("Successfully Created Datastores on VM1 , VM2");
-  
-      // creating a local region on one of the JVM's
-      log.info("Creating Local Region on VM0");
-      vm0.invoke(PRQHelp
-          .getCacheSerializableRunnableForLocalRegionCreation(localName));
-      log.info("Successfully Created Local Region on VM0");
-  
-      // Generating portfolio object array to be populated across the PR's & Local
-      // Regions
-  
-      final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
-  
-      // Putting the data into the accessor node
-      log.info("Inserting Portfolio data through the accessor node");
-      vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
-          cnt, cntDest));
-      log.info("Successfully Inserted Portfolio data through the accessor node");
-  
-      // Putting the same data in the local region created
-      log.info("Inserting Portfolio data on local node VM0 for result " +
-                "set comparison");
-      vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
-          portfolio, cnt, cntDest));
-      log.info("Successfully Inserted Portfolio data on local node VM0 for " +
-                "result set comparison");
-  
-      ResultsObject perfR = new ResultsObject();
-      perfR.OperationDescription = "PR with 1 Accessor, 2 D.S., Redundancy=1";
-      perfR.NumberOfAccessors = 1;
-      perfR.NumberOfDataStores = 2;
-      perfR.redundancy = 1;
-
-      if (SLEEP > 0) {
-        Thread.sleep(SLEEP);
-      }
-      
-      // querying the VM for data
-      log.info("Querying on VM0 both on PR Region & local, also comparing the " +
-                "results sets from both");
-      vm0.invoke(PRQHelp.PRQueryingVsLocalQuerying(name, localName, perfR));
-  
-      log.info("Benchmarking Querying between PR & local  ENDED*****");
-    }
-    finally {
-      resetVMLogLevel();
-    }
-  }
-
-  /**
-   * This tests executes an array of queries to be executed over the PR ,
-   * benchmarking them over the time taken by the same when executed over the
-   * Local Region with One Accessor Three Datastore Redundancy =1
-   * 
-   */
-
-  public void norun_testBenchmarkingQueryingOneAccessorThreeDS_Redundancy1()
-      throws Exception
-  {
-    LogWriter log = com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter();
-    log.info("BenchMarking PR Querying Test Started*****");
-    Host host = Host.getHost(0);
-
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    VM vm2 = host.getVM(2);
-    VM vm3 = host.getVM(3);
-
-    int redundancy = 1;
-
-    try {
-      setVMInfoLogLevel();
-
-      // Creating Accessor PR's on the participating VM's
-      log.info("Creating Accessor node on VM0");
-      vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-          redundancy));
-      log.info("Successfully Created Accessor node on VM0");
-  
-      log.info("Creating Datastores across  VM1 , VM2 , VM3");
-      vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, redundancy));
-      vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, redundancy));
-      vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, redundancy));
-  
-      log.info("Successfully Created Datastores on VM1 , VM2 ,VM3");
-  
-      // creating a local region on one of the JVM's
-      log.info("Creating Local Region on VM0");
-      vm0.invoke(PRQHelp
-          .getCacheSerializableRunnableForLocalRegionCreation(localName));
-      log.info("Successfully Created Local Region on VM0");
-  
-      // Generating portfolio object array to be populated across the PR's & Local
-      // Regions
-  
-      final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
-  
-      // Putting the data into the accessor node
-      log.info("Inserting Portfolio data through the accessor node");
-      vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
-          cnt, cntDest));
-      log.info("Successfully Inserted Portfolio data through the accessor node");
-  
-      // Putting the same data in the local region created
-      log.info("Inserting Portfolio data on local node  VM0 for result set comparison");
-      vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
-          portfolio, cnt, cntDest));
-      log.info("Successfully Inserted Portfolio data on local node VM0 for " +
-                "result set comparison");
-  
-      ResultsObject perfR = new ResultsObject();
-      perfR.OperationDescription = "PR with 1 Accessor, 3 D.S., Redundancy=1";
-      perfR.NumberOfAccessors = 1;
-      perfR.NumberOfDataStores = 3;
-      perfR.redundancy = 1;
-      
-      if (SLEEP > 0) {
-        Thread.sleep(SLEEP);
-      }
-      
-      
-      // querying the VM for data
-      log.info("Querying on VM0 both on PR Region & local, also comparing the " +
-                "results sets from both");
-      vm0.invoke(PRQHelp.PRQueryingVsLocalQuerying(name, localName, perfR));
-  
-      log.info("Benchmarking Querying between PR & local  ENDED*****");
-    }
-    finally {
-      resetVMLogLevel();
-    }
-  }
-  
-  
-  /**
-   * This tests executes an array of queries to be executed over the PR ,
-   * benchmarking them over the time taken by the same when executed over the
-   * Local Region with One Accessor Three Datastore  Redundancy =2
-   * 
-   */
-
-  public void norun_testBenchmarkingQueryingOneAccessorThreeDS_Redundancy2()
-      throws Exception
-  {
-    LogWriter log = com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter();
-    log.info("BenchMarking PR Querying Test Started*****");
-    Host host = Host.getHost(0);
-
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    VM vm2 = host.getVM(2);
-    VM vm3 = host.getVM(3);
-
-    int redundancy = 2;
-
-    try {
-      setVMInfoLogLevel();
-
-      // Creating Accessor PR's on the participating VM's
-      log.info("Creating Accessor node on VM0");
-      vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-          redundancy));
-      log.info("Successfully Created Accessor node on VM0");
-  
-      log.info("Creating Datastores across  VM1 , VM2 , VM3");
-      vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, redundancy));
-      vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, redundancy));
-      vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, redundancy));
-  
-      log.info("Successfully Created Datastores on VM1 , VM2 , VM3");
-  
-      // creating a local region on one of the JVM's
-      log.info("Creating Local Region on VM0");
-      vm0.invoke(PRQHelp
-          .getCacheSerializableRunnableForLocalRegionCreation(localName));
-      log.info("Successfully Created Local Region on VM0");
-  
-      // Generating portfolio object array to be populated across the PR's & Local
-      // Regions
-  
-      final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
-  
-      // Putting the data into the accessor node
-      log.info("Inserting Portfolio data through the accessor node");
-      vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
-          cnt, cntDest));
-      log.info("Successfully Inserted Portfolio data through the accessor node");
-  
-      // Putting the same data in the local region created
-      log.info("Inserting Portfolio data on local node  VM0 for result set comparison");
-      vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
-          portfolio, cnt, cntDest));
-      log.info("Successfully Inserted Portfolio data on local node VM0 for " +
-                "result set comparison");
-  
-      ResultsObject perfR = new ResultsObject();
-      perfR.OperationDescription = "PR with 1 Accessor, 3 D.S., Redundancy=2";
-      perfR.NumberOfAccessors = 1;
-      perfR.NumberOfDataStores = 3;
-      perfR.redundancy = 2;
-      
-      if (SLEEP > 0) {
-        Thread.sleep(SLEEP);
-      }
-      
-      
-      // querying the VM for data
-      log.info("Querying on VM0 both on PR Region & local, also comparing the " +
-                "results sets from both");
-      vm0.invoke(PRQHelp.PRQueryingVsLocalQuerying(name, localName, perfR));
-  
-      log.info("Benchmarking Querying between PR & local  ENDED*****");
-    }
-    finally {
-      resetVMLogLevel();
-    }
-  }
-  
-  /*
-   * Inner class to for the ResultObject , displaying various attributes of the
-   * Performance Report
-   */
-  class ResultsObject implements Serializable {
-    String OperationDescription;
-    long QueryingTimeLocal;
-    long QueryingTimePR;
-    int NumberOfDataStores = 0;
-    int NumberOfAccessors = 0;
-    int redundancy = 0;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionCloseDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionCloseDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionCloseDUnitTest.java
index 5240ebc..c8975db 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionCloseDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionCloseDUnitTest.java
@@ -24,6 +24,8 @@ package com.gemstone.gemfire.cache.query.partitioned;
  * 
  */
 
+import static com.gemstone.gemfire.cache.query.Utils.createPortfolioData;
+
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Properties;
@@ -50,21 +52,21 @@ public class PRQueryRegionCloseDUnitTest extends PartitionedRegionDUnitTestCase
    */
 
   public PRQueryRegionCloseDUnitTest(String name) {
-
     super(name);
   }
 
-  static Properties props = new Properties();
+  public void setCacheInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> PRQueryDUnitHelper.setCache(getCache()));
+    }
+  }
 
-  int totalNumBuckets = 100;
 
   int threadSleepTime = 500;
 
-  int querySleepTime = 2000;
-
   int queryTestCycle = 10;
 
-  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
+  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper();
 
   final String name = "Portfolios";
 
@@ -98,7 +100,7 @@ public class PRQueryRegionCloseDUnitTest extends PartitionedRegionDUnitTestCase
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
-
+    setCacheInVMs(vm0,vm1,vm2);
     List vmList = new LinkedList();
     vmList.add(vm1);
     vmList.add(vm2);
@@ -107,7 +109,7 @@ public class PRQueryRegionCloseDUnitTest extends PartitionedRegionDUnitTestCase
         .info(
             "PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Creating Accessor node on VM0");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Successfully Created Accessor node on VM0");
@@ -116,9 +118,9 @@ public class PRQueryRegionCloseDUnitTest extends PartitionedRegionDUnitTestCase
         .info(
             "PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Creating PR's across all VM1 , VM2");
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Successfully Created PR on VM1 , VM2");
@@ -128,7 +130,7 @@ public class PRQueryRegionCloseDUnitTest extends PartitionedRegionDUnitTestCase
         .info(
             "PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Creating Local Region on VM0");
     vm0.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Successfully Created Local Region on VM0");
@@ -136,7 +138,7 @@ public class PRQueryRegionCloseDUnitTest extends PartitionedRegionDUnitTestCase
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
 
     // Putting the data into the accessor node
     LogWriterUtils.getLogWriter()
@@ -175,7 +177,7 @@ public class PRQueryRegionCloseDUnitTest extends PartitionedRegionDUnitTestCase
       int k = (random.nextInt(vmList.size()));
       if( 0 != k ) {
       ((VM)(vmList.get(k))).invoke(PRQHelp.getCacheSerializableRunnableForRegionClose(
-          name, redundancy));
+          name, redundancy, PortfolioData.class));
       Wait.pause(threadSleepTime);
       }
     }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionDestroyedDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionDestroyedDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionDestroyedDUnitTest.java
index 40bac7b..1892be9 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionDestroyedDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionDestroyedDUnitTest.java
@@ -24,6 +24,8 @@ package com.gemstone.gemfire.cache.query.partitioned;
  * 
  */
 
+import static com.gemstone.gemfire.cache.query.Utils.createPortfolioData;
+
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Properties;
@@ -53,18 +55,12 @@ public class PRQueryRegionDestroyedDUnitTest extends PartitionedRegionDUnitTestC
 
     super(name);
   }
-
-  static Properties props = new Properties();
-
-  int totalNumBuckets = 100;
-
-  int threadSleepTime = 500;
-
-  int querySleepTime = 2000;
-
-  int queryTestCycle = 10;
-
-  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
+  public void setCacheInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> PRQueryDUnitHelper.setCache(getCache()));
+    }
+  }
+  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper();
 
   final String name = "Portfolios";
 
@@ -98,7 +94,7 @@ public class PRQueryRegionDestroyedDUnitTest extends PartitionedRegionDUnitTestC
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-    
+    setCacheInVMs(vm0,vm1,vm2,vm3);
     List vmList = new LinkedList();
     vmList.add(vm1);
     vmList.add(vm2);
@@ -108,7 +104,7 @@ public class PRQueryRegionDestroyedDUnitTest extends PartitionedRegionDUnitTestC
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithRegionDestroyInOneDatastoreWithDelay: Creating Accessor node on VM0");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithRegionDestroyInOneDatastoreWithDelay: Successfully Created Accessor node on VM0");
@@ -117,11 +113,11 @@ public class PRQueryRegionDestroyedDUnitTest extends PartitionedRegionDUnitTestC
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithRegionDestroyInOneDatastoreWithDelay: Creating PR's across all VM1 , VM2, VM3");
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     
     
     LogWriterUtils.getLogWriter()
@@ -133,7 +129,7 @@ public class PRQueryRegionDestroyedDUnitTest extends PartitionedRegionDUnitTestC
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithRegionDestroyInOneDatastoreWithDelay: Creating Local Region on VM0");
     vm0.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithRegionDestroyInOneDatastoreWithDelay: Successfully Created Local Region on VM0");
@@ -141,7 +137,7 @@ public class PRQueryRegionDestroyedDUnitTest extends PartitionedRegionDUnitTestC
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
  
 
     // Putting the data into the accessor node
@@ -192,7 +188,7 @@ public class PRQueryRegionDestroyedDUnitTest extends PartitionedRegionDUnitTestC
       int k = (random.nextInt(vmList.size()));
       
       ((VM)(vmList.get(k))).invoke(PRQHelp.getCacheSerializableRunnableForRegionClose(
-          name, redundancy));
+          name, redundancy, PortfolioData.class));
     
     
       ThreadUtils.join(async0, 30 * 1000);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRemoteNodeExceptionDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRemoteNodeExceptionDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRemoteNodeExceptionDUnitTest.java
index a112e2a..2fc1bc5 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRemoteNodeExceptionDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRemoteNodeExceptionDUnitTest.java
@@ -19,6 +19,8 @@
  */
 package com.gemstone.gemfire.cache.query.partitioned;
 
+import static com.gemstone.gemfire.cache.query.Utils.createPortfolioData;
+
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Properties;
@@ -66,17 +68,13 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     super(name);
   }
 
-  static Properties props = new Properties();
-
-  int totalNumBuckets = 100;
-
-  int threadSleepTime = 500;
-
-  int querySleepTime = 2000;
-
-  int queryTestCycle = 10;
+  public void setCacheInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> PRQueryDUnitHelper.setCache(getCache()));
+    }
+  }
 
-  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
+  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper();
 
   final String name = "Portfolios";
 
@@ -114,7 +112,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-
+    setCacheInVMs(vm0,vm1);
     List vmList = new LinkedList();
     vmList.add(vm1);
     vmList.add(vm0);
@@ -136,7 +134,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating Local Region on VM0");
     vm0.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created Local Region on VM0");
@@ -144,7 +142,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
 
     // Putting the data into the accessor node
     LogWriterUtils.getLogWriter()
@@ -244,7 +242,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-
+    setCacheInVMs(vm0,vm1);
     List vmList = new LinkedList();
     vmList.add(vm1);
     vmList.add(vm0);
@@ -266,7 +264,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating Local Region on VM0");
     vm0.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created Local Region on VM0");
@@ -274,7 +272,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
 
     // Putting the data into the accessor node
     LogWriterUtils.getLogWriter()
@@ -375,7 +373,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-
+    setCacheInVMs(vm0,vm1);
     List vmList = new LinkedList();
     vmList.add(vm1);
     vmList.add(vm0);
@@ -397,7 +395,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating Local Region on VM0");
     vm0.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created Local Region on VM0");
@@ -405,7 +403,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
 
     // Putting the data into the accessor node
     LogWriterUtils.getLogWriter()
@@ -508,7 +506,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-
+    setCacheInVMs(vm0,vm1);
     List vmList = new LinkedList();
     vmList.add(vm1);
     vmList.add(vm0);
@@ -530,7 +528,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating Local Region on VM0");
     vm0.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created Local Region on VM0");
@@ -538,7 +536,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
 
     // Putting the data into the accessor node
     LogWriterUtils.getLogWriter()
@@ -652,7 +650,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
-
+    setCacheInVMs(vm0,vm1,vm2);
     List vmList = new LinkedList();
     vmList.add(vm1);
     vmList.add(vm0);
@@ -677,7 +675,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating Local Region on VM0");
     vm0.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
     LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created Local Region on VM0");
@@ -685,7 +683,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     // Generating portfolio object array to be populated across the PR's & Local
     // Regions
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
 
     // Putting the data into the accessor node
     LogWriterUtils.getLogWriter()

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/management/QueryDataDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/QueryDataDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/QueryDataDUnitTest.java
index 10caa3c..ad26a05 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/QueryDataDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/QueryDataDUnitTest.java
@@ -16,6 +16,8 @@
  */
 package com.gemstone.gemfire.management;
 
+import static com.gemstone.gemfire.cache.query.Utils.createPortfoliosAndPositions;
+
 import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
@@ -111,8 +113,6 @@ public class QueryDataDUnitTest extends ManagementTestBase {
   static String repRegionName4 = "TestRepRegion4"; // default name
   static String localRegionName = "TestLocalRegion"; // default name
 
-  private static PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
-
   public static String[] queries = new String[] {
       "select * from /" + PartitionedRegionName1 + " where ID>=0",
       "Select * from /" + PartitionedRegionName1 + " r1, /" + PartitionedRegionName2 + " r2 where r1.ID = r2.ID",
@@ -203,7 +203,7 @@ public class QueryDataDUnitTest extends ManagementTestBase {
 
   public void fillValuesInRegions() {
     // Create common Portflios and NewPortfolios
-    final Portfolio[] portfolio = PRQHelp.createPortfoliosAndPositions(cntDest);
+    final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
 
     // Fill local region
     managedNode1.invoke(getCacheSerializableRunnableForPRPuts(localRegionName, portfolio, cnt, cntDest));



[60/63] [abbrv] incubator-geode git commit: GEODE-11: Refactoring the LuceneFunctionReadPathDUnitTest

Posted by kl...@apache.org.
GEODE-11: Refactoring the LuceneFunctionReadPathDUnitTest

Refactoring this test into a framework so that more tests can be added through a
set of subclasses.
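
[Editor's note] Each concrete test in the new framework only supplies the region-creation hooks;
index creation, puts, and query verification live in LuceneQueriesBase/LuceneQueriesPRBase below.
As a minimal sketch (not part of this commit), a hypothetical persistent-PR variant could look like
the following, modeled on LuceneQueriesPeerPRDUnitTest below; the class name is illustrative, and
PARTITION_PERSISTENT is assumed here only because the old test also exercised that shortcut:

package com.gemstone.gemfire.cache.lucene;

import com.gemstone.gemfire.cache.RegionShortcut;
import com.gemstone.gemfire.test.dunit.SerializableRunnableIF;
import com.gemstone.gemfire.test.junit.categories.DistributedTest;

import org.junit.experimental.categories.Category;

@Category(DistributedTest.class)
public class LuceneQueriesPeerPersistentPRDUnitTest extends LuceneQueriesPRBase {

  @Override protected void initDataStore(final SerializableRunnableIF createIndex) throws Exception {
    // Create the index first, then a persistent partitioned region named REGION_NAME.
    createIndex.run();
    getCache().createRegionFactory(RegionShortcut.PARTITION_PERSISTENT).create(REGION_NAME);
  }

  @Override protected void initAccessor(final SerializableRunnableIF createIndex) throws Exception {
    // Peer topology: the accessor is configured the same way as a data store.
    initDataStore(createIndex);
  }
}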


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/0481732f
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/0481732f
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/0481732f

Branch: refs/heads/feature/GEODE-1276
Commit: 0481732f0223a38adf4084bc2afb977e20db364f
Parents: 4a6c779
Author: Dan Smith <up...@apache.org>
Authored: Mon May 2 13:56:10 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Tue May 3 16:41:29 2016 -0700

----------------------------------------------------------------------
 .../gemfire/cache/lucene/LuceneQueriesBase.java | 148 ++++++++++++
 .../cache/lucene/LuceneQueriesPRBase.java       |  75 ++++++
 .../lucene/LuceneQueriesPeerPRDUnitTest.java    |  36 +++
 .../LuceneQueriesPeerPROverflowDUnitTest.java   |  41 ++++
 .../LuceneFunctionReadPathDUnitTest.java        | 237 -------------------
 5 files changed, 300 insertions(+), 237 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/0481732f/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/LuceneQueriesBase.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/LuceneQueriesBase.java b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/LuceneQueriesBase.java
new file mode 100644
index 0000000..c467a18
--- /dev/null
+++ b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/LuceneQueriesBase.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package com.gemstone.gemfire.cache.lucene;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.SerializableRunnableIF;
+import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
+
+import org.junit.Test;
+
+/**
+  * This test class is intended to contain basic integration tests
+  * of the lucene query class that should be executed against a number
+  * of different regions types and topologies.
+  *
+  */
+public abstract class LuceneQueriesBase extends JUnit4CacheTestCase {
+
+  protected static final String INDEX_NAME = "index";
+  protected static final String REGION_NAME = "index";
+  private static final long serialVersionUID = 1L;
+  protected VM dataStore1;
+  protected VM dataStore2;
+  protected VM accessor;
+
+  @Override
+  public final void postSetUp() throws Exception {
+    Host host = Host.getHost(0);
+    dataStore1 = host.getVM(0);
+    dataStore2 = host.getVM(1);
+    accessor = host.getVM(3);
+  }
+
+  protected abstract void initDataStore(SerializableRunnableIF createIndex) throws Exception;
+
+  protected abstract void initAccessor(SerializableRunnableIF createIndex) throws Exception;
+
+  @Test
+  public void returnCorrectResultsFromStringQueryWithDefaultAnalyzer() {
+    SerializableRunnableIF createIndex = () -> {
+      LuceneService luceneService = LuceneServiceProvider.get(getCache());
+      luceneService.createIndex(INDEX_NAME, REGION_NAME, "text");
+    };
+    dataStore1.invoke(() -> initDataStore(createIndex));
+    dataStore2.invoke(() -> initDataStore(createIndex));
+    accessor.invoke(() -> initAccessor(createIndex));
+
+    putDataInRegion(accessor);
+    executeTextSearch(accessor);
+  }
+
+  protected void executeTextSearch(VM vm) {
+    vm.invoke(() -> {
+      Cache cache = getCache();
+      Region<Object, Object> region = cache.getRegion(REGION_NAME);
+
+      LuceneService service = LuceneServiceProvider.get(cache);
+      LuceneQuery<Integer, TestObject> query;
+      query = service.createLuceneQueryFactory().create(INDEX_NAME, REGION_NAME, "text:world");
+      LuceneQueryResults<Integer, TestObject> results = query.search();
+      assertEquals(3, results.size());
+      List<LuceneResultStruct<Integer, TestObject>> page = results.getNextPage();
+
+      Map<Integer, TestObject> data = new HashMap<Integer, TestObject>();
+      for (LuceneResultStruct<Integer, TestObject> row : page) {
+        data.put(row.getKey(), row.getValue());
+      }
+
+      assertEquals(new HashMap(region),data);
+      return null;
+    });
+  }
+
+  protected void putDataInRegion(VM vm) {
+    vm.invoke(() -> {
+      final Cache cache = getCache();
+      Region<Object, Object> region = cache.getRegion(REGION_NAME);
+      region.put(1, new TestObject("hello world"));
+      region.put(113, new TestObject("hi world"));
+      region.put(2, new TestObject("goodbye world"));
+    });
+  }
+
+  private static class TestObject implements Serializable {
+    private static final long serialVersionUID = 1L;
+    private String text;
+
+    public TestObject(String text) {
+      this.text = text;
+    }
+
+    @Override
+    public int hashCode() {
+      final int prime = 31;
+      int result = 1;
+      result = prime * result + ((text == null) ? 0 : text.hashCode());
+      return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (this == obj)
+        return true;
+      if (obj == null)
+        return false;
+      if (getClass() != obj.getClass())
+        return false;
+      TestObject other = (TestObject) obj;
+      if (text == null) {
+        if (other.text != null)
+          return false;
+      } else if (!text.equals(other.text))
+        return false;
+      return true;
+    }
+
+    @Override
+    public String toString() {
+      return "TestObject[" + text + "]";
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/0481732f/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/LuceneQueriesPRBase.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/LuceneQueriesPRBase.java b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/LuceneQueriesPRBase.java
new file mode 100644
index 0000000..fbd101e
--- /dev/null
+++ b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/LuceneQueriesPRBase.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.gemstone.gemfire.cache.lucene;
+
+import static org.junit.Assert.*;
+
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Consumer;
+
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.control.RebalanceOperation;
+import com.gemstone.gemfire.cache.control.RebalanceResults;
+import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.SerializableRunnableIF;
+import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.cache.internal.JUnit4CacheTestCase;
+import com.gemstone.gemfire.test.junit.categories.DistributedTest;
+
+/**
+ * This test class adds more basic tests of lucene functionality
+ * for partitioned regions. These tests should work across all types
+ * of PRs and topologies.
+ *
+ */
+public abstract class LuceneQueriesPRBase extends LuceneQueriesBase {
+
+  @Test
+  public void returnCorrectResultsAfterRebalance() {
+    SerializableRunnableIF createIndex = () -> {
+      LuceneService luceneService = LuceneServiceProvider.get(getCache());
+      luceneService.createIndex(INDEX_NAME, REGION_NAME, "text");
+    };
+    dataStore1.invoke(() -> initDataStore(createIndex));
+    accessor.invoke(() -> initAccessor(createIndex));
+    putDataInRegion(accessor);
+    dataStore2.invoke(() -> initDataStore(createIndex));
+
+    rebalanceRegion(dataStore1);
+    executeTextSearch(accessor);
+  }
+
+  private void rebalanceRegion(VM vm) {
+    // Do a rebalance
+    vm.invoke(() -> {
+        RebalanceOperation op = getCache().getResourceManager().createRebalanceFactory().start();
+        RebalanceResults results = op.getResults();
+        assertTrue("Transferred " + results.getTotalBucketTransfersCompleted(), 1 < results.getTotalBucketTransfersCompleted());
+    });
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/0481732f/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/LuceneQueriesPeerPRDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/LuceneQueriesPeerPRDUnitTest.java b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/LuceneQueriesPeerPRDUnitTest.java
new file mode 100644
index 0000000..51d0a33
--- /dev/null
+++ b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/LuceneQueriesPeerPRDUnitTest.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.lucene;
+
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.test.dunit.SerializableRunnableIF;
+import com.gemstone.gemfire.test.junit.categories.DistributedTest;
+
+import org.junit.experimental.categories.Category;
+
+@Category(DistributedTest.class)
+public class LuceneQueriesPeerPRDUnitTest extends LuceneQueriesPRBase {
+
+  @Override protected void initDataStore(final SerializableRunnableIF createIndex) throws Exception {
+    createIndex.run();
+    getCache().createRegionFactory(RegionShortcut.PARTITION).create(REGION_NAME);
+  }
+
+  @Override protected void initAccessor(final SerializableRunnableIF createIndex) throws Exception {
+    initDataStore(createIndex);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/0481732f/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/LuceneQueriesPeerPROverflowDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/LuceneQueriesPeerPROverflowDUnitTest.java b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/LuceneQueriesPeerPROverflowDUnitTest.java
new file mode 100644
index 0000000..cf2bac7
--- /dev/null
+++ b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/LuceneQueriesPeerPROverflowDUnitTest.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.lucene;
+
+import com.gemstone.gemfire.cache.EvictionAction;
+import com.gemstone.gemfire.cache.EvictionAttributes;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.test.dunit.SerializableRunnableIF;
+import com.gemstone.gemfire.test.junit.categories.DistributedTest;
+
+import org.junit.experimental.categories.Category;
+
+@Category(DistributedTest.class)
+public class LuceneQueriesPeerPROverflowDUnitTest extends LuceneQueriesPRBase {
+
+  @Override protected void initDataStore(final SerializableRunnableIF createIndex) throws Exception {
+    createIndex.run();
+    EvictionAttributes evicAttr = EvictionAttributes.createLRUEntryAttributes(1, EvictionAction.OVERFLOW_TO_DISK);
+    getCache().createRegionFactory(RegionShortcut.PARTITION_OVERFLOW)
+      .setEvictionAttributes(evicAttr)
+      .create(REGION_NAME);
+  }
+
+  @Override protected void initAccessor(final SerializableRunnableIF createIndex) throws Exception {
+    initDataStore(createIndex);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/0481732f/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/distributed/LuceneFunctionReadPathDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/distributed/LuceneFunctionReadPathDUnitTest.java b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/distributed/LuceneFunctionReadPathDUnitTest.java
deleted file mode 100644
index bc62578..0000000
--- a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/distributed/LuceneFunctionReadPathDUnitTest.java
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.gemstone.gemfire.cache.lucene.internal.distributed;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.EvictionAction;
-import com.gemstone.gemfire.cache.EvictionAlgorithm;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionFactory;
-import com.gemstone.gemfire.cache.RegionShortcut;
-import com.gemstone.gemfire.cache.control.RebalanceOperation;
-import com.gemstone.gemfire.cache.control.RebalanceResults;
-import com.gemstone.gemfire.cache.lucene.LuceneQuery;
-import com.gemstone.gemfire.cache.lucene.LuceneQueryResults;
-import com.gemstone.gemfire.cache.lucene.LuceneResultStruct;
-import com.gemstone.gemfire.cache.lucene.LuceneService;
-import com.gemstone.gemfire.cache.lucene.LuceneServiceProvider;
-import com.gemstone.gemfire.cache.lucene.internal.LuceneServiceImpl;
-import com.gemstone.gemfire.cache30.CacheTestCase;
-import com.gemstone.gemfire.internal.cache.EvictionAttributesImpl;
-import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-import com.gemstone.gemfire.test.dunit.Host;
-import com.gemstone.gemfire.test.dunit.SerializableCallable;
-import com.gemstone.gemfire.test.dunit.VM;
-import com.gemstone.gemfire.test.junit.categories.DistributedTest;
-import org.junit.Assert;
-import org.junit.experimental.categories.Category;
-
-import java.io.Serializable;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.CancellationException;
-
-@Category(DistributedTest.class)
-public class LuceneFunctionReadPathDUnitTest extends CacheTestCase {
-  private static final String INDEX_NAME = "index";
-
-  private static final long serialVersionUID = 1L;
-
-  private VM server1;
-  private VM server2;
-
-  public LuceneFunctionReadPathDUnitTest(String name) {
-    super(name);
-  }
-
-  @Override
-  public final void postSetUp() throws Exception {
-    Host host = Host.getHost(0);
-    server1 = host.getVM(0);
-    server2 = host.getVM(1);
-  }
-
-  public void testEnd2EndFunctionExecution() {
-    e2eTextSearchForRegionType(RegionShortcut.PARTITION);
-    e2eTextSearchForRegionType(RegionShortcut.PARTITION_PERSISTENT);
-    e2eTextSearchForRegionType(RegionShortcut.PARTITION_OVERFLOW);
-    e2eTextSearchForRegionType(RegionShortcut.PARTITION_PERSISTENT_OVERFLOW);
-  }
-
-  private void e2eTextSearchForRegionType(RegionShortcut type) {
-    final String regionName = type.toString();
-    createRegionAndIndex(server1, regionName, type);
-    putDataInRegion(server1, regionName);
-    createRegionAndIndex(server2, regionName, type);
-    // Make sure we can search from both members
-    executeTextSearch(server1, regionName);
-    executeTextSearch(server2, regionName);
-
-    rebalanceRegion(server1);
-    // Make sure the search still works
-    executeTextSearch(server1, regionName);
-    executeTextSearch(server2, regionName);
-    destroyPartitionRegion(server2, regionName);
-  }
-
-  private void rebalanceRegion(VM vm) {
-    // Do a rebalance
-    vm.invoke(new SerializableCallable<Object>() {
-      private static final long serialVersionUID = 1L;
-
-      @Override
-      public Object call() throws CancellationException, InterruptedException {
-        RebalanceOperation op = getCache().getResourceManager().createRebalanceFactory().start();
-        RebalanceResults results = op.getResults();
-        assertTrue(1 < results.getTotalBucketTransfersCompleted());
-        return null;
-      }
-    });
-  }
-
-  private void executeTextSearch(VM vm, final String regionName) {
-    SerializableCallable<Object> executeSearch = new SerializableCallable<Object>("executeSearch") {
-      private static final long serialVersionUID = 1L;
-
-      public Object call() throws Exception {
-        Cache cache = getCache();
-        assertNotNull(cache);
-        Region<Object, Object> region = cache.getRegion(regionName);
-        Assert.assertNotNull(region);
-
-        LuceneService service = LuceneServiceProvider.get(cache);
-        LuceneQuery<Integer, TestObject> query;
-        query = service.createLuceneQueryFactory().create(INDEX_NAME, regionName, "text:world");
-        LuceneQueryResults<Integer, TestObject> results = query.search();
-        assertEquals(3, results.size());
-        List<LuceneResultStruct<Integer, TestObject>> page = results.getNextPage();
-
-        Map<Integer, TestObject> data = new HashMap<Integer, TestObject>();
-        for (LuceneResultStruct<Integer, TestObject> row : page) {
-          data.put(row.getKey(), row.getValue());
-        }
-
-        assertEquals(data, region);
-        return null;
-      }
-    };
-
-    vm.invoke(executeSearch);
-  }
-
-  private void putDataInRegion(VM vm, final String regionName) {
-    SerializableCallable<Object> createSomeData = new SerializableCallable<Object>("putDataInRegion") {
-      private static final long serialVersionUID = 1L;
-
-      public Object call() throws Exception {
-        final Cache cache = getCache();
-        Region<Object, Object> region = cache.getRegion(regionName);
-        assertNotNull(region);
-        region.put(1, new TestObject("hello world"));
-        region.put(113, new TestObject("hi world"));
-        region.put(2, new TestObject("goodbye world"));
-
-        return null;
-      }
-    };
-
-    vm.invoke(createSomeData);
-  }
-
-  private void createRegionAndIndex(VM vm, final String regionName, final RegionShortcut type) {
-    SerializableCallable<Object> createRegion = new SerializableCallable<Object>("createRegionAndIndex") {
-      private static final long serialVersionUID = 1L;
-
-      public Object call() throws Exception {
-        final Cache cache = getCache();
-        assertNotNull(cache);
-        LuceneService service = LuceneServiceProvider.get(cache);
-        service.createIndex(INDEX_NAME, regionName, "text");
-        RegionFactory<Object, Object> regionFactory = cache.createRegionFactory(type);
-        if (regionName.contains("OVERFLOW")) {
-          EvictionAttributesImpl evicAttr = new EvictionAttributesImpl().setAction(EvictionAction.OVERFLOW_TO_DISK);
-          evicAttr.setAlgorithm(EvictionAlgorithm.LRU_ENTRY).setMaximum(1);
-          regionFactory.setEvictionAttributes(evicAttr);
-        }
-        regionFactory.create(regionName);
-        return null;
-      }
-    };
-    vm.invoke(createRegion);
-  }
-
-  private void destroyPartitionRegion(VM vm, final String regionName) {
-    SerializableCallable<Object> createPartitionRegion = new SerializableCallable<Object>("destroyPartitionRegion") {
-      private static final long serialVersionUID = 1L;
-
-      public Object call() throws Exception {
-        final Cache cache = getCache();
-        assertNotNull(cache);
-        String aeqId = LuceneServiceImpl.getUniqueIndexName(INDEX_NAME, regionName);
-        PartitionedRegion chunkRegion = (PartitionedRegion) cache.getRegion(aeqId + ".chunks");
-        assertNotNull(chunkRegion);
-        chunkRegion.destroyRegion();
-        PartitionedRegion fileRegion = (PartitionedRegion) cache.getRegion(aeqId + ".files");
-        assertNotNull(fileRegion);
-        fileRegion.destroyRegion();
-        Region<Object, Object> region = cache.getRegion(regionName);
-        assertNotNull(region);
-        region.destroyRegion();
-        return null;
-      }
-    };
-    vm.invoke(createPartitionRegion);
-  }
-
-  private static class TestObject implements Serializable {
-    private static final long serialVersionUID = 1L;
-    private String text;
-
-    public TestObject(String text) {
-      this.text = text;
-    }
-
-    @Override
-    public int hashCode() {
-      final int prime = 31;
-      int result = 1;
-      result = prime * result + ((text == null) ? 0 : text.hashCode());
-      return result;
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      if (this == obj)
-        return true;
-      if (obj == null)
-        return false;
-      if (getClass() != obj.getClass())
-        return false;
-      TestObject other = (TestObject) obj;
-      if (text == null) {
-        if (other.text != null)
-          return false;
-      } else if (!text.equals(other.text))
-        return false;
-      return true;
-    }
-  }
-}


[43/63] [abbrv] incubator-geode git commit: GEODE-17: have a more readable error message when authentication failed.

Posted by kl...@apache.org.
GEODE-17: have a more readable error message when authentication failed.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/53760ec8
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/53760ec8
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/53760ec8

Branch: refs/heads/feature/GEODE-1276
Commit: 53760ec866e22357b7d0acc9d612573ffd94d2ad
Parents: f04b669
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Fri Apr 29 12:19:35 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Fri Apr 29 12:19:35 2016 -0700

----------------------------------------------------------------------
 .../internal/security/GeodeSecurityUtil.java       |  2 ++
 .../internal/security/shiro/CustomAuthRealm.java   | 17 ++++++++---------
 .../internal/security/JSONAuthorization.java       |  4 +++-
 .../com/gemstone/gemfire/util/test/TestUtil.java   |  8 +-------
 4 files changed, 14 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/53760ec8/geode-core/src/main/java/com/gemstone/gemfire/internal/security/GeodeSecurityUtil.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/security/GeodeSecurityUtil.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/security/GeodeSecurityUtil.java
index 4fd92ed..1f1f4eb 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/security/GeodeSecurityUtil.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/security/GeodeSecurityUtil.java
@@ -53,6 +53,7 @@ public class GeodeSecurityUtil {
       logger.info("Logging in "+username+"/"+password);
       currentUser.login(token);
     } catch (ShiroException e) {
+      logger.info(e.getMessage(), e);
       throw new AuthenticationFailedException(e.getMessage(), e);
     }
   }
@@ -67,6 +68,7 @@ public class GeodeSecurityUtil {
       currentUser.logout();
     }
     catch(ShiroException e){
+      logger.info(e.getMessage(), e);
       throw new AuthenticationFailedException(e.getMessage(), e);
     }
     // clean out Shiro's thread local content

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/53760ec8/geode-core/src/main/java/com/gemstone/gemfire/internal/security/shiro/CustomAuthRealm.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/security/shiro/CustomAuthRealm.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/security/shiro/CustomAuthRealm.java
index afc3125..cb2b66b 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/security/shiro/CustomAuthRealm.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/security/shiro/CustomAuthRealm.java
@@ -36,7 +36,6 @@ import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
 import com.gemstone.gemfire.internal.lang.StringUtils;
 import com.gemstone.gemfire.management.internal.security.ResourceConstants;
 import com.gemstone.gemfire.security.AccessControl;
-import com.gemstone.gemfire.security.AuthenticationFailedException;
 import com.gemstone.gemfire.security.Authenticator;
 
 import org.apache.logging.log4j.LogManager;
@@ -116,8 +115,8 @@ public class CustomAuthRealm extends AuthorizingRealm{
           cachedAuthZCallback.put(principal, authzCallback);
           return authzCallback;
         } catch (Exception ex) {
-          throw new AuthenticationFailedException(
-              LocalizedStrings.HandShake_FAILED_TO_ACQUIRE_AUTHENTICATOR_OBJECT.toLocalizedString(), ex);
+          throw new AuthenticationException(
+              ex.toString(), ex);
         }
       }
     } else {
@@ -131,25 +130,25 @@ public class CustomAuthRealm extends AuthorizingRealm{
           cachedPostAuthZCallback.put(principal, postAuthzCallback);
           return postAuthzCallback;
         } catch (Exception ex) {
-          throw new AuthenticationFailedException(
-              LocalizedStrings.HandShake_FAILED_TO_ACQUIRE_AUTHENTICATOR_OBJECT.toLocalizedString(), ex);
+          throw new AuthenticationException(
+              ex.toString(), ex);
         }
       }
     }
     return null;
   }
 
-  private Authenticator getAuthenticator(Properties gfSecurityProperties) throws AuthenticationFailedException {
+  private Authenticator getAuthenticator(Properties gfSecurityProperties) throws AuthenticationException {
     Authenticator auth;
     try {
       Method instanceGetter = ClassLoadUtil.methodFromName(this.authenticatorFactoryName);
       auth = (Authenticator) instanceGetter.invoke(null, (Object[]) null);
     } catch (Exception ex) {
-      throw new AuthenticationFailedException(
-          LocalizedStrings.HandShake_FAILED_TO_ACQUIRE_AUTHENTICATOR_OBJECT.toLocalizedString(), ex);
+      throw new AuthenticationException(
+          ex.toString(), ex);
     }
     if (auth == null) {
-      throw new AuthenticationFailedException(
+      throw new AuthenticationException(
           LocalizedStrings.HandShake_AUTHENTICATOR_INSTANCE_COULD_NOT_BE_OBTAINED.toLocalizedString());
     }
     auth.init(gfSecurityProperties);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/53760ec8/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java
index 83f4876..9670822 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java
@@ -64,7 +64,9 @@ public class JSONAuthorization implements AccessControl, Authenticator {
     return new JSONAuthorization();
   }
 
-  public JSONAuthorization() {
+  public JSONAuthorization() throws IOException, JSONException {
+    // initialize with a default json file
+    setUpWithJsonFile("shiro-ini.json");
   }
 
   public JSONAuthorization(String jsonFileName) throws IOException, JSONException {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/53760ec8/geode-core/src/test/java/com/gemstone/gemfire/util/test/TestUtil.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/util/test/TestUtil.java b/geode-core/src/test/java/com/gemstone/gemfire/util/test/TestUtil.java
index 45369be..7d402d6 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/util/test/TestUtil.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/util/test/TestUtil.java
@@ -21,8 +21,6 @@ import java.io.IOException;
 import java.net.URISyntaxException;
 import java.net.URL;
 
-import junit.framework.AssertionFailedError;
-
 import com.gemstone.gemfire.internal.FileUtil;
 
 public class TestUtil {
@@ -42,7 +40,7 @@ public class TestUtil {
   public static String getResourcePath(Class<?> clazz, String name) {
     URL resource = clazz.getResource(name);
     if(resource == null) {
-      throw new AssertionFailedError("Could not find resource " + name);
+      throw new RuntimeException("Could not find resource " + name);
     }
     try {
       String path = resource.toURI().getPath();
@@ -58,8 +56,4 @@ public class TestUtil {
       throw new RuntimeException("Failed getting path to resource " + name, e);
     }
   }
-
-  private TestUtil() {
-    
-  }
 }


[45/63] [abbrv] incubator-geode git commit: GEODE-1326: Add test for gfsh function execution with ResultCollector

Posted by kl...@apache.org.
GEODE-1326: Add test for gfsh function execution with ResultCollector
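
[Editor's note] The gfsh command the new tests assemble has roughly the following shape (taken
from the test code below; <functionId> and <regionName> stand for whatever the test registers,
and arg1,arg2 are the sample arguments):

  execute function --id=<functionId> --region=<regionName> --arguments=arg1,arg2 --result-collector=com.gemstone.gemfire.management.internal.cli.commands.ToUpperResultCollector

The --result-collector option names a ResultCollector implementation by its fully qualified class;
the added ToUpperResultCollector upper-cases each member's result, which is why the tests assert
that "ARG1" appears in the command output.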


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/ce43082f
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/ce43082f
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/ce43082f

Branch: refs/heads/feature/GEODE-1276
Commit: ce43082f9cf0cd1702bd0de5e6be6b314e063396
Parents: 72be65f
Author: Jens Deppe <jd...@pivotal.io>
Authored: Fri Apr 29 09:23:09 2016 -0700
Committer: Jens Deppe <jd...@pivotal.io>
Committed: Fri Apr 29 14:13:36 2016 -0700

----------------------------------------------------------------------
 .../cli/commands/FunctionCommandsDUnitTest.java | 72 +++++++++++++++++++-
 .../cli/commands/ToUpperResultCollector.java    | 65 ++++++++++++++++++
 2 files changed, 135 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ce43082f/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/FunctionCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/FunctionCommandsDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/FunctionCommandsDUnitTest.java
index b4a2cef..86c0273 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/FunctionCommandsDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/FunctionCommandsDUnitTest.java
@@ -163,7 +163,38 @@ public class FunctionCommandsDUnitTest extends CliCommandTestBase {
       getLogWriter().info("testExecuteFunctionOnRegion cmdResult=" + cmdResult);
       String stringResult = commandResultToString(cmdResult);
       getLogWriter().info("testExecuteFunctionOnRegion stringResult=" + stringResult);
-      assert (stringResult.contains("Execution summary"));
+      assertTrue(stringResult.contains("Execution summary"));
+    } else {
+      fail("testExecuteFunctionOnRegion did not return CommandResult");
+    }
+  }
+
+  @Test
+  public void testExecuteFunctionOnRegionWithCustomResultCollector() {
+    createDefaultSetup(null);
+
+    final Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_RETURN_ARGS);
+    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
+      public void run() {
+        RegionFactory<Integer, Integer> dataRegionFactory = getCache().createRegionFactory(RegionShortcut.REPLICATE);
+        Region region = dataRegionFactory.create(REGION_NAME);
+        assertNotNull(region);
+        FunctionService.registerFunction(function);
+      }
+    });
+
+    String command = "execute function --id=" + function.getId() + " --region=" + REGION_NAME +
+        " --arguments=arg1,arg2" +
+        " --result-collector=" + ToUpperResultCollector.class.getName();
+    getLogWriter().info("testExecuteFunctionOnRegion command=" + command);
+    CommandResult cmdResult = executeCommand(command);
+    if (cmdResult != null) {
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      getLogWriter().info("testExecuteFunctionOnRegion cmdResult=" + cmdResult);
+      String stringResult = commandResultToString(cmdResult);
+      getLogWriter().info("testExecuteFunctionOnRegion stringResult=" + stringResult);
+      assertTrue(stringResult.contains("Execution summary"));
+      assertTrue(stringResult.contains("ARG1"));
     } else {
       fail("testExecuteFunctionOnRegion did not return CommandResult");
     }
@@ -240,7 +271,7 @@ public class FunctionCommandsDUnitTest extends CliCommandTestBase {
       assertEquals(Result.Status.OK, cmdResult.getStatus());
       String stringResult = commandResultToString(cmdResult);
       getLogWriter().info("testExecuteFunctionOnRegionBug51480 stringResult=" + stringResult);
-      assert (stringResult.contains("Execution summary"));
+      assertTrue(stringResult.contains("Execution summary"));
     } else {
       fail("testExecuteFunctionOnRegionBug51480 did not return CommandResult");
 
@@ -349,6 +380,43 @@ public class FunctionCommandsDUnitTest extends CliCommandTestBase {
   }
 
   @Test
+  public void testExecuteFunctionOnMembersWithArgsAndCustomResultCollector() {
+    Properties localProps = new Properties();
+    localProps.setProperty(DistributionConfig.NAME_NAME, "Manager");
+    localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group1");
+    createDefaultSetup(localProps);
+    Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_RETURN_ARGS);
+    FunctionService.registerFunction(function);
+
+
+    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
+      public void run() {
+        RegionFactory<Integer, Integer> dataRegionFactory = getCache().createRegionFactory(RegionShortcut.REPLICATE);
+        Region region = dataRegionFactory.create(REGION_NAME);
+        Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_RETURN_ARGS);
+        assertNotNull(region);
+        FunctionService.registerFunction(function);
+      }
+    });
+
+    String command = "execute function --id=" + function.getId() + " --arguments=\"arg1,arg2\"" +
+        " --result-collector=" + ToUpperResultCollector.class.getName();
+
+    getLogWriter().info("testExecuteFunctionOnMembersWithArgs command=" + command);
+    CommandResult cmdResult = executeCommand(command);
+    if (cmdResult != null) {
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      getLogWriter().info("testExecuteFunctionOnMembersWithArgs cmdResult:" + cmdResult);
+      String stringResult = commandResultToString(cmdResult);
+      getLogWriter().info("testExecuteFunctionOnMembersWithArgs stringResult:" + stringResult);
+      assertTrue(stringResult.contains("Execution summary"));
+      assertTrue(stringResult.contains("ARG1"));
+    } else {
+      fail("testExecuteFunctionOnMembersWithArgs did not return CommandResult");
+    }
+  }
+
+  @Test
   public void testExecuteFunctionOnGroups() {
     Properties localProps = new Properties();
     localProps.setProperty(DistributionConfig.NAME_NAME, "Manager");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ce43082f/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ToUpperResultCollector.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ToUpperResultCollector.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ToUpperResultCollector.java
new file mode 100644
index 0000000..613463a
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ToUpperResultCollector.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.cache.execute.FunctionException;
+import com.gemstone.gemfire.cache.execute.ResultCollector;
+import com.gemstone.gemfire.distributed.DistributedMember;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+public class ToUpperResultCollector implements ResultCollector {
+
+  private List<Object> results = new ArrayList<>();
+
+  private CountDownLatch latch = new CountDownLatch(1);
+
+  @Override
+  public Object getResult() throws FunctionException {
+    try {
+      latch.await();
+    } catch (InterruptedException e) {
+      throw new FunctionException("Interrupted waiting for results", e);
+    }
+    return results;
+  }
+
+  @Override
+  public Object getResult(long timeout, TimeUnit unit) throws FunctionException, InterruptedException {
+    latch.await(timeout, unit);
+    return results;
+  }
+
+  @Override
+  public void addResult(DistributedMember memberID, Object resultOfSingleExecution) {
+    results.add(resultOfSingleExecution.toString().toUpperCase());
+  }
+
+  @Override
+  public void endResults() {
+    latch.countDown();
+  }
+
+  @Override
+  public void clearResults() {
+    results.clear();
+    latch = new CountDownLatch(1);
+  }
+}


[58/63] [abbrv] incubator-geode git commit: GEODE-17 - use null instead of "NULL" for regionName

Posted by kl...@apache.org.
GEODE-17 - use null instead of "NULL" for regionName

* create an example JSONAuthorization that initializes with a default security.json file.
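
[Editor's note] For illustration only, a minimal security.json in the shape that
ExampleJSONAuthorization's readRoles/readUsers (added below) can parse looks roughly like this;
the role names, user names, and resource:operation tokens are placeholders, not values shipped
with the commit:

  {
    "roles": [
      { "name": "admin", "operationsAllowed": ["CLUSTER:MANAGE", "DATA:READ"] },
      { "name": "regionUser", "regions": "region1", "operationsAllowed": ["DATA:WRITE"] }
    ],
    "users": [
      { "name": "admin", "password": "secret", "roles": ["admin"] },
      { "name": "alice", "roles": ["regionUser"] }
    ]
  }

When "password" is omitted the parser falls back to the user name, and when "regions" is omitted
each operationsAllowed entry is granted with a "*" region.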


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/b8fc3c70
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/b8fc3c70
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/b8fc3c70

Branch: refs/heads/feature/GEODE-1276
Commit: b8fc3c706ef672c48a04f7c6ec4bf593414c6494
Parents: 51e4e71
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Mon May 2 07:26:27 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Tue May 3 07:55:25 2016 -0700

----------------------------------------------------------------------
 .../security/ResourceOperationContext.java      |   9 +-
 .../security/ExampleJSONAuthorization.java      | 197 +++++++++++++++++++
 .../internal/security/JSONAuthorization.java    |   5 +-
 .../ResourceOperationContextJUnitTest.java      |   8 +-
 4 files changed, 203 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b8fc3c70/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContext.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContext.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContext.java
index 7f6f72e..2e46104 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContext.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContext.java
@@ -18,8 +18,6 @@ package com.gemstone.gemfire.management.internal.security;
 
 import com.gemstone.gemfire.cache.operations.OperationContext;
 
-import org.apache.shiro.authz.Permission;
-
 /**
  * This is base class for OperationContext for resource (JMX and CLI) operations
  */
@@ -30,7 +28,7 @@ public class ResourceOperationContext extends OperationContext {
   private Resource resource = Resource.NULL;
   private OperationCode operation = OperationCode.NULL;
 
-  private String regionName = "NULL";
+  private String regionName = null;
 
   public ResourceOperationContext() {
     this(null, null, null);
@@ -81,9 +79,4 @@ public class ResourceOperationContext extends OperationContext {
   public Object getOperationResult() {
     return this.opResult;
   }
-
-  @Override
-  public boolean implies(Permission p){
-    return super.implies(p);
-  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b8fc3c70/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/ExampleJSONAuthorization.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/ExampleJSONAuthorization.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/ExampleJSONAuthorization.java
new file mode 100644
index 0000000..f34be0b
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/ExampleJSONAuthorization.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.security;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.StringWriter;
+import java.security.Principal;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import javax.management.remote.JMXPrincipal;
+
+import com.gemstone.gemfire.LogWriter;
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.operations.OperationContext;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.security.AccessControl;
+import com.gemstone.gemfire.security.AuthenticationFailedException;
+import com.gemstone.gemfire.security.Authenticator;
+import com.gemstone.gemfire.security.NotAuthorizedException;
+
+import org.apache.commons.io.IOUtils;
+import org.json.JSONArray;
+import org.json.JSONException;
+import org.json.JSONObject;
+
+public class ExampleJSONAuthorization implements AccessControl, Authenticator {
+
+  public static class Role {
+    List<OperationContext> permissions = new ArrayList<>();
+    String name;
+    String serverGroup;
+  }
+
+  public static class User {
+    String name;
+    Set<Role> roles = new HashSet<>();
+    String pwd;
+  }
+
+  private static Map<String, User> acl = null;
+
+  public static ExampleJSONAuthorization create() throws IOException, JSONException {
+    return new ExampleJSONAuthorization();
+  }
+
+  public ExampleJSONAuthorization() throws IOException, JSONException {
+    setUpWithJsonFile("security.json");
+  }
+
+  public static void setUpWithJsonFile(String jsonFileName) throws IOException, JSONException {
+    InputStream input = ExampleJSONAuthorization.class.getResourceAsStream(jsonFileName);
+    if(input==null){
+      throw new RuntimeException("Could not find resource " + jsonFileName);
+    }
+
+    StringWriter writer = new StringWriter();
+    IOUtils.copy(input, writer, "UTF-8");
+    String json = writer.toString();
+    readSecurityDescriptor(json);
+  }
+
+  private static void readSecurityDescriptor(String json) throws IOException, JSONException {
+    JSONObject jsonBean = new JSONObject(json);
+    acl = new HashMap<>();
+    Map<String, Role> roleMap = readRoles(jsonBean);
+    readUsers(acl, jsonBean, roleMap);
+  }
+
+  private static void readUsers(Map<String, User> acl, JSONObject jsonBean, Map<String, Role> roleMap)
+      throws JSONException {
+    JSONArray array = jsonBean.getJSONArray("users");
+    for (int i = 0; i < array.length(); i++) {
+      JSONObject obj = array.getJSONObject(i);
+      User user = new User();
+      user.name = obj.getString("name");
+      if (obj.has("password")) {
+        user.pwd = obj.getString("password");
+      } else {
+        user.pwd = user.name;
+      }
+
+      JSONArray ops = obj.getJSONArray("roles");
+      for (int j = 0; j < ops.length(); j++) {
+        String roleName = ops.getString(j);
+        user.roles.add(roleMap.get(roleName));
+      }
+      acl.put(user.name, user);
+    }
+  }
+
+  private static Map<String, Role> readRoles(JSONObject jsonBean) throws JSONException {
+    Map<String, Role> roleMap = new HashMap<>();
+    JSONArray array = jsonBean.getJSONArray("roles");
+    for (int i = 0; i < array.length(); i++) {
+      JSONObject obj = array.getJSONObject(i);
+      Role role = new Role();
+      role.name = obj.getString("name");
+      String regionNames = null;
+      if(obj.has("regions")) {
+        regionNames = obj.getString("regions");
+      }
+      JSONArray ops = obj.getJSONArray("operationsAllowed");
+      for (int j = 0; j < ops.length(); j++) {
+        String[] parts = ops.getString(j).split(":");
+        if(regionNames!=null) {
+          role.permissions.add(new ResourceOperationContext(parts[0], parts[1], regionNames));
+        }
+        else
+          role.permissions.add(new ResourceOperationContext(parts[0], parts[1], "*"));
+      }
+
+      roleMap.put(role.name, role);
+
+      if (obj.has("serverGroup")) {
+        role.serverGroup = obj.getString("serverGroup");
+      }
+    }
+
+    return roleMap;
+  }
+
+  public static Map<String, User> getAcl() {
+    return acl;
+  }
+
+  private Principal principal = null;
+
+  @Override
+  public void close() {
+
+  }
+
+  @Override
+  public boolean authorizeOperation(String region, OperationContext context) {
+    if (principal == null)
+      return false;
+
+    User user = acl.get(principal.getName());
+    if(user == null)
+      return false; // this user is not authorized to do anything
+
+    // check if the user has this permission defined in the context
+    for(Role role:acl.get(user.name).roles) {
+      for (OperationContext permitted : role.permissions) {
+        if (permitted.implies(context)) {
+          return true;
+        }
+      }
+    }
+
+    return false;
+  }
+
+  @Override
+  public void init(Principal principal, DistributedMember arg1, Cache arg2) throws NotAuthorizedException {
+    this.principal = principal;
+  }
+
+  @Override
+  public Principal authenticate(Properties props, DistributedMember arg1) throws AuthenticationFailedException {
+    String user = props.getProperty(ResourceConstants.USER_NAME);
+    String pwd = props.getProperty(ResourceConstants.PASSWORD);
+    User userObj = acl.get(user);
+    if (userObj == null) throw new AuthenticationFailedException("Wrong username/password");
+    LogService.getLogger().info("User=" + user + " pwd=" + pwd);
+    if (user != null && !userObj.pwd.equals(pwd) && !"".equals(user))
+      throw new AuthenticationFailedException("Wrong username/password");
+    return new JMXPrincipal(user);
+  }
+
+  @Override
+  public void init(Properties arg0, LogWriter arg1, LogWriter arg2) throws AuthenticationFailedException {
+
+  }
+
+}
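
For reference, a minimal sketch of the security.json layout that ExampleJSONAuthorization expects on the classpath next to the class (it is loaded via getResourceAsStream("security.json") above). The user, role, and region names here are hypothetical, and each operationsAllowed entry uses the "RESOURCE:OPERATION" form that readRoles splits on ':'; the resource and operation tokens must match the Resource and OperationCode enum names (DATA and MANAGE appear in the tests in this commit, READ and CLUSTER are assumptions here):

    {
      "roles": [
        { "name": "admin",  "operationsAllowed": ["DATA:MANAGE", "CLUSTER:MANAGE"] },
        { "name": "reader", "regions": "RegionA", "operationsAllowed": ["DATA:READ"] }
      ],
      "users": [
        { "name": "alice", "password": "alice-pwd", "roles": ["admin"] },
        { "name": "bob",   "roles": ["reader"] }
      ]
    }

Per the parser above, users without a "password" entry (bob here) fall back to their user name as the password, and roles without a "regions" entry are granted the wildcard "*" region.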

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b8fc3c70/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java
index 7f1d2bf..e14d1de 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java
@@ -64,10 +64,7 @@ public class JSONAuthorization implements AccessControl, Authenticator {
     return new JSONAuthorization();
   }
 
-  public JSONAuthorization() throws IOException, JSONException {
-    // initialize with a default json file
-    //setUpWithJsonFile("shiro-ini.json");
-  }
+  public JSONAuthorization() throws IOException, JSONException {}
 
   public JSONAuthorization(String jsonFileName) throws IOException, JSONException {
     setUpWithJsonFile(jsonFileName);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b8fc3c70/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContextJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContextJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContextJUnitTest.java
index 318d327..9e2e41a 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContextJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContextJUnitTest.java
@@ -37,7 +37,7 @@ public class ResourceOperationContextJUnitTest {
     context = new ResourceOperationContext();
     assertEquals(Resource.NULL, context.getResource());
     assertEquals(OperationCode.NULL, context.getOperationCode());
-    assertEquals("NULL", context.getRegionName());
+    assertEquals(null, context.getRegionName());
   }
 
   @Test
@@ -51,17 +51,17 @@ public class ResourceOperationContextJUnitTest {
     context = new ResourceOperationContext(null, null, null);
     assertEquals(Resource.NULL, context.getResource());
     assertEquals(OperationCode.NULL, context.getOperationCode());
-    assertEquals("NULL", context.getRegionName());
+    assertEquals(null, context.getRegionName());
 
     context = new ResourceOperationContext(null, null);
     assertEquals(Resource.NULL, context.getResource());
     assertEquals(OperationCode.NULL, context.getOperationCode());
-    assertEquals("NULL", context.getRegionName());
+    assertEquals(null, context.getRegionName());
 
     context = new ResourceOperationContext("DATA", null, null);
     assertEquals(Resource.DATA, context.getResource());
     assertEquals(OperationCode.NULL, context.getOperationCode());
-    assertEquals("NULL", context.getRegionName());
+    assertEquals(null, context.getRegionName());
 
     context = new ResourceOperationContext(null, "MANAGE", "REGIONA");
     assertEquals(Resource.NULL, context.getResource());


[62/63] [abbrv] incubator-geode git commit: GEODE-11: Closing cache in @After of LuceneIndexXmlGenerator...Test

Posted by kl...@apache.org.
GEODE-11: Closing cache in @After of LuceneIndexXmlGenerator...Test

This test case was not cleaning up the cache during tear down,
which could leave stale state behind and cause problems for later tests.
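
A minimal sketch of the pattern this fix applies, for readers skimming the diff below (the test class name and the null guard are illustrative additions, not part of the actual change):

    import org.junit.After;
    import org.junit.Test;

    import com.gemstone.gemfire.cache.Cache;
    import com.gemstone.gemfire.cache.CacheFactory;

    public class CacheCleanupExampleTest {

      private Cache cache;

      @After
      public void closeCache() {
        // Close the cache created by a test method; guard against tests that never created one.
        if (cache != null) {
          cache.close();
        }
      }

      @Test
      public void createsAndCleansUpCache() {
        // Assign to the field so the @After method can release it after the test runs.
        cache = new CacheFactory().set("mcast-port", "0").create();
      }
    }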


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/8dc2d303
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/8dc2d303
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/8dc2d303

Branch: refs/heads/feature/GEODE-1276
Commit: 8dc2d30317d6592af95ba62b95eba784a68a626f
Parents: b3ef791
Author: Dan Smith <up...@apache.org>
Authored: Tue May 3 15:54:35 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Tue May 3 16:48:42 2016 -0700

----------------------------------------------------------------------
 .../xml/LuceneIndexXmlGeneratorIntegrationJUnitTest.java  | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8dc2d303/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlGeneratorIntegrationJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlGeneratorIntegrationJUnitTest.java b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlGeneratorIntegrationJUnitTest.java
index 8272522..0c9d500 100644
--- a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlGeneratorIntegrationJUnitTest.java
+++ b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlGeneratorIntegrationJUnitTest.java
@@ -26,6 +26,7 @@ import java.io.ByteArrayOutputStream;
 import java.io.PrintWriter;
 import java.nio.charset.Charset;
 
+import org.junit.After;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -40,13 +41,20 @@ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 @Category(IntegrationTest.class)
 public class LuceneIndexXmlGeneratorIntegrationJUnitTest {
+
+  private Cache cache;
+
+  @After
+  public void closeCache() {
+    cache.close();
+  }
   
   /**
    * Test of generating and reading cache configuration back in.
    */
   @Test
   public void generateWithFields() {
-    Cache cache = new CacheFactory().set("mcast-port", "0").create();
+    cache = new CacheFactory().set("mcast-port", "0").create();
     LuceneService service = LuceneServiceProvider.get(cache);
     service.createIndex("index", "region", "a", "b", "c");
     cache.createRegionFactory(RegionShortcut.PARTITION).create("region");


[02/63] [abbrv] incubator-geode git commit: GEODE-1072: Removing HDFS related code

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java
index e6c0b60..f344938 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java
@@ -87,7 +87,6 @@ import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueueFactory;
 import com.gemstone.gemfire.cache.client.ClientCache;
 import com.gemstone.gemfire.cache.client.PoolFactory;
 import com.gemstone.gemfire.cache.execute.Function;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreCreation;
 import com.gemstone.gemfire.cache.partition.PartitionListener;
 import com.gemstone.gemfire.cache.query.IndexType;
 import com.gemstone.gemfire.cache.query.internal.index.IndexCreationData;
@@ -1020,161 +1019,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
 
     stack.push(attrs);
   }
-  /**
-   * When a <code>hdfs-store</code> element is first encountered, we
-   * create a {@link HDFSStoreCreation}, populate it accordingly, and
-   * push it on the stack.
-   * <pre>
-   * {@code
-   * <hdfs-store name="" gemfire-home-dir="" namenode-url="" hdfs-client-config-file="">
-   * ...
-   * </hdfs-store>
-   * }
-   * 
-   */
-  private void startHDFSStore(Attributes atts) {
-    // this is the only place to create DSAC objects
-    HDFSStoreCreation attrs = new HDFSStoreCreation();
-    String name = atts.getValue(NAME);
-    if (name == null) {
-      throw new InternalGemFireException(
-          LocalizedStrings.CacheXmlParser_NULL_DiskStoreName.toLocalizedString());
-    } else {
-      attrs.setName(name);
-    }
 
-    String namenode = atts.getValue(HDFS_NAMENODE_URL);
-    if (namenode == null) {
-      throw new InternalGemFireException(
-          LocalizedStrings.CacheXmlParser_NULL_DiskStoreName.toLocalizedString());
-    } else {
-      attrs.setNameNodeURL(namenode);
-    }
-
-    String clientConfig = atts.getValue(HDFS_CLIENT_CONFIG_FILE);
-    if (clientConfig != null) {
-      attrs.setHDFSClientConfigFile(clientConfig);
-    }
-    
-    String folderPath = atts.getValue(HDFS_HOME_DIR);
-    if (folderPath != null) {
-      attrs.setHomeDir(folderPath);
-    }
-   
-    String readCacheSize = atts.getValue(HDFS_READ_CACHE_SIZE);
-    if (readCacheSize != null) {
-      try {
-        attrs.setBlockCacheSize(Float.valueOf(readCacheSize));
-      } catch (NumberFormatException e) {
-        throw new CacheXmlException(
-            LocalizedStrings.DistributedSystemConfigImpl_0_IS_NOT_A_VALID_INTEGER_1
-            .toLocalizedString(new Object[] { readCacheSize, HDFS_READ_CACHE_SIZE }),
-            e);
-      }
-    }
-    
-    Integer maxMemory = getIntValue(atts, HDFS_MAX_MEMORY);
-    if (maxMemory != null) {
-      attrs.setMaxMemory(maxMemory);
-    }
-    
-    Integer batchSize = getIntValue(atts, HDFS_BATCH_SIZE);
-    if (batchSize != null) {
-      attrs.setBatchSize(batchSize);
-    }
-    
-    Integer batchInterval = getIntValue(atts, HDFS_BATCH_INTERVAL);
-    if (batchInterval != null) {
-      attrs.setBatchInterval(batchInterval);
-    }
-    
-    Integer dispatcherThreads = getIntValue(atts, HDFS_DISPATCHER_THREADS);
-    if (dispatcherThreads != null) {
-      attrs.setDispatcherThreads(dispatcherThreads);
-    }
-    
-    Boolean bufferPersistent = getBoolean(atts, HDFS_BUFFER_PERSISTENT);
-    if (bufferPersistent != null) {
-      attrs.setBufferPersistent(bufferPersistent);
-    }
-    
-    Boolean synchronousDiskWrite = getBoolean(atts, HDFS_SYNCHRONOUS_DISK_WRITE);
-    if (synchronousDiskWrite != null) {
-      attrs.setSynchronousDiskWrite(synchronousDiskWrite);
-    }
-    
-    String diskstoreName = atts.getValue(HDFS_DISK_STORE);
-    if (diskstoreName != null) {
-      attrs.setDiskStoreName(diskstoreName);
-    }
-    
-    Integer purgeInterval = getInteger(atts, HDFS_PURGE_INTERVAL);
-    if (purgeInterval != null) {
-      attrs.setPurgeInterval(purgeInterval);
-    }
-    Boolean majorCompaction = getBoolean(atts, HDFS_MAJOR_COMPACTION);
-    if (majorCompaction != null) {
-      attrs.setMajorCompaction(Boolean.valueOf(majorCompaction));
-    }
-    
-    // configure major compaction interval
-    Integer majorCompactionInterval = getIntValue(atts, HDFS_MAJOR_COMPACTION_INTERVAL);
-    if (majorCompactionInterval != null) {
-      attrs.setMajorCompactionInterval(majorCompactionInterval);
-    }
-    
-    // configure compaction concurrency
-    Integer value = getIntValue(atts, HDFS_MAJOR_COMPACTION_THREADS);
-    if (value != null)
-      attrs.setMajorCompactionThreads(value);
-    
-    Boolean minorCompaction = getBoolean(atts, HDFS_MINOR_COMPACTION);
-    if (minorCompaction != null) {
-      attrs.setMinorCompaction(Boolean.valueOf(minorCompaction));
-    }
-    
-    // configure compaction concurrency
-    value = getIntValue(atts, HDFS_MINOR_COMPACTION_THREADS);
-    if (value != null)
-      attrs.setMinorCompactionThreads(value);
-    
-    String maxFileSize = atts.getValue(HDFS_MAX_WRITE_ONLY_FILE_SIZE);
-    if (maxFileSize != null) {
-      attrs.setWriteOnlyFileRolloverSize(parseInt(maxFileSize));
-    }
-    
-    String fileRolloverInterval = atts.getValue(HDFS_WRITE_ONLY_FILE_ROLLOVER_INTERVAL);
-    if (fileRolloverInterval != null) {
-      attrs.setWriteOnlyFileRolloverInterval(parseInt(fileRolloverInterval));
-    }
-    stack.push(name);
-    stack.push(attrs);
-  }
-  
-  /**
-   * After popping the current <code>HDFSStoreCreation</code> off the
-   * stack, we add it to the <code>HDFSStoreCreation</code> that should be on the
-   * top of the stack.
-   */
-  private void endHDFSStore() {
-    HDFSStoreCreation hsc = (HDFSStoreCreation) stack.pop();
-    String name = (String) stack.pop();
-    CacheCreation cache;
-    Object top = stack.peek();
-    if (top instanceof CacheCreation) {
-      cache = (CacheCreation) top;
-    }
-    else {
-      String s = "Did not expect a " + top.getClass().getName()
-          + " on top of the stack.";
-      Assert.assertTrue(false, s);
-      cache = null; // Dead code
-    }
-    if (name != null) {
-      cache.addHDFSStore(name, hsc);
-    }
-  }
-	
   private Integer getIntValue(Attributes atts, String param) {
     String maxInputFileSizeMB = atts.getValue(param);
     if (maxInputFileSizeMB != null) {
@@ -1389,16 +1234,7 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     if(offHeapStr != null) {
       attrs.setOffHeap(Boolean.valueOf(offHeapStr).booleanValue());
     }
-    String hdfsStoreName = atts.getValue(HDFS_STORE_NAME);
-    if (hdfsStoreName != null) {
-      attrs.setHDFSStoreName(hdfsStoreName);
-    }
-    String hdfsWriteOnly= atts.getValue(HDFS_WRITE_ONLY);
-    if (hdfsWriteOnly != null) {
-      attrs.setHDFSWriteOnly(Boolean.valueOf(hdfsWriteOnly).booleanValue());
-    }
 
-    
     stack.push(attrs);
   }
   
@@ -3000,9 +2836,6 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     } else if(qName.equals(PDX_SERIALIZER)) {
       //do nothing
     }
-	else if (qName.equals(HDFS_STORE)) {
-        startHDFSStore(atts);
-    }
     else if (qName.equals(COMPRESSOR)) {
     }
     else {
@@ -3411,9 +3244,6 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
       else if (qName.equals(PDX_SERIALIZER)) {
         endPdxSerializer();
       }
-      else if (qName.equals(HDFS_STORE)) {
-          endHDFSStore();
-      }
       else if (qName.equals(COMPRESSOR)) {
         endCompressor();
       }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/RegionAttributesCreation.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/RegionAttributesCreation.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/RegionAttributesCreation.java
index d0f5676..4dfe6ae 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/RegionAttributesCreation.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/RegionAttributesCreation.java
@@ -28,7 +28,6 @@ import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.CacheListener;
 import com.gemstone.gemfire.cache.CacheLoader;
 import com.gemstone.gemfire.cache.CacheWriter;
-import com.gemstone.gemfire.cache.CustomEvictionAttributes;
 import com.gemstone.gemfire.cache.CustomExpiry;
 import com.gemstone.gemfire.cache.DataPolicy;
 import com.gemstone.gemfire.cache.DiskStoreFactory;
@@ -123,8 +122,6 @@ public class RegionAttributesCreation extends UserSpecifiedRegionAttributes impl
   * @since prPersistPrint2 
   * */
   private String diskStoreName;
-  private String hdfsStoreName;
-  private boolean hdfsWriteOnly = false;
   private boolean isDiskSynchronous = AttributesFactory.DEFAULT_DISK_SYNCHRONOUS;
   
   private boolean cloningEnabled = false;
@@ -271,8 +268,7 @@ public class RegionAttributesCreation extends UserSpecifiedRegionAttributes impl
     this.poolName = attrs.getPoolName();
     this.multicastEnabled = attrs.getMulticastEnabled();
     this.cloningEnabled = attrs.getCloningEnabled();
-	this.hdfsStoreName = attrs.getHDFSStoreName();
-    
+
     this.compressor = attrs.getCompressor();
     this.offHeap = attrs.getOffHeap();
     if (attrs instanceof UserSpecifiedRegionAttributes) {
@@ -500,10 +496,6 @@ public class RegionAttributesCreation extends UserSpecifiedRegionAttributes impl
     if(this.cloningEnabled != other.getCloningEnabled()){
       throw new RuntimeException(LocalizedStrings.RegionAttributesCreation__CLONING_ENABLE_IS_NOT_THE_SAME_THIS_0_OTHER_1.toLocalizedString(new Object[] {Boolean.valueOf(this.cloningEnabled), Boolean.valueOf(other.getCloningEnabled())}));
     }
- 	if (! equal(this.hdfsStoreName, other.getHDFSStoreName())) {
-      //TODO:HDFS write a new exception string
-      throw new RuntimeException(" HDFS Store name does not match");
-    }
     if(! equal(this.compressor, other.getCompressor())) {
       throw new RuntimeException("Compressors are not the same.");
     }
@@ -1448,25 +1440,7 @@ public class RegionAttributesCreation extends UserSpecifiedRegionAttributes impl
         setDiskSynchronous(parent.isDiskSynchronous());
       }
     }
-    if (!hasHDFSStoreName()) {
-      if (parentIsUserSpecified) {
-        if (parentWithHas.hasHDFSStoreName()) {
-          setHDFSStoreName(parent.getHDFSStoreName());
-        }
-      } else {
-        setHDFSStoreName(parent.getHDFSStoreName());
-      }
-    }
-    if (!hasHDFSWriteOnly()) {
-      if (parentIsUserSpecified) {
-        if (parentWithHas.hasHDFSWriteOnly()) {
-          setHDFSWriteOnly(parent.getHDFSWriteOnly());
-        }
-      } else {
-        setHDFSWriteOnly(parent.getHDFSWriteOnly());
-      }
-    }
-    
+
     if(!hasCompressor()) {
       if (parentIsUserSpecified) {
         if (parentWithHas.hasCompressor()) {
@@ -1554,15 +1528,6 @@ public class RegionAttributesCreation extends UserSpecifiedRegionAttributes impl
     return this.evictionAttributes;
   }
 
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public CustomEvictionAttributes getCustomEvictionAttributes() {
-    // TODO: HDFS: no support for configuring this from XML yet
-    return null;
-  }
-
   public void setPoolName(String poolName) {
     if ("".equals(poolName)) {
       poolName = null;
@@ -1655,20 +1620,4 @@ public class RegionAttributesCreation extends UserSpecifiedRegionAttributes impl
   public Set<String> getGatewaySenderIds() {
     return this.gatewaySenderIds;
   }
-  public String getHDFSStoreName() {
-    return this.hdfsStoreName;
-  }
-  public void setHDFSStoreName(String hdfsStoreName) {
-    //TODO:HDFS : throw an exception if a disk store is already configured
-    // and vice versa
-    this.hdfsStoreName = hdfsStoreName;
-    setHasHDFSStoreName(true);
-  }
-  public void setHDFSWriteOnly(boolean writeOnly) {
-    this.hdfsWriteOnly= writeOnly;
-    setHasHDFSWriteOnly(true);
-  }
-  public boolean getHDFSWriteOnly() {
-    return hdfsWriteOnly;
-  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/i18n/LocalizedStrings.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/i18n/LocalizedStrings.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/i18n/LocalizedStrings.java
index 2a939b4..ff960ca 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/i18n/LocalizedStrings.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/i18n/LocalizedStrings.java
@@ -1997,32 +1997,6 @@ public class LocalizedStrings extends ParentLocalizedStrings {
   public static final StringId SnappyCompressor_UNABLE_TO_LOAD_NATIVE_SNAPPY_LIBRARY = new StringId(5502, "Unable to load native Snappy library.");
   public static final StringId SnappyCompressor_UNABLE_TO_LOAD_NATIVE_SNAPPY_LIBRARY_MISSING_LIBRARY = new StringId(5503, "Unable to load native Snappy library from: {0}");
   
-  /** HOPLOG STRINGS, 5505 - 5600 **/
-  public static final StringId HOPLOG_REGION_CLOSE_FAILED = new StringId(5505, "IO error while trying to close region and release hdfs connection: {0}");
-  public static final StringId HOPLOG_HDFS_CLIENT_CONFIG_FILE_ABSENT = new StringId(5506, "HDFS client config file does not exist: {0}");
-  public static final StringId HOPLOG_IO_ERROR = new StringId(5507, "IO Exception while executing HDFS operation: {0}");
-  public static final StringId HOPLOG_UNABLE_TO_DELETE_FILE = new StringId(5508, "Unable to delete file: {0}");
-  public static final StringId HOPLOG_UNABLE_TO_DELETE_HDFS_DATA = new StringId(5509, "Unable to delete HDFS data while destroying region");
-  public static final StringId HOPLOG_CLOSE_FAILED = new StringId(5510, "IO error while trying to close hoplog.");
-  public static final StringId HOPLOG_FLUSH_FOR_BATCH_FAILED = new StringId(5511, "A batch of data could not be persisted on HDFS. It will be retried.");
-  public static final StringId HOPLOG_HDFS_STORE_NOT_FOUND = new StringId(5512, "HDFS store ''{0}'' does not exist.");
-  public static final StringId HOPLOG_TRYING_TO_CREATE_STANDALONE_SYSTEM = new StringId(5513, "The namenode url {0} is not valid. Please use the format hdfs://HOST:PORT");
-  public static final StringId HOPLOG_DOES_NOT_USE_HDFSSTORE = new StringId(5514, "{0} does not use HDFSSTORE");
-  public static final StringId HOPLOG_CONFIGURED_AS_WRITEONLY = new StringId(5515, "{0} is defined as WRITEONLY");
-  public static final StringId HOPLOG_MISSING_IN_BUCKET_FORCED_CLOSED = new StringId(5516, "A hoplog file, {0}, was not found in bucket lists. Closing it now, it may impact active reads.");
-  public static final StringId HOPLOG_MIN_IS_MORE_THAN_MAX = new StringId(5517, "Value of {0} is {1}. It should not be more than {2} value {3}");
-  public static final StringId HOPLOG_NOT_STARTED_YET = new StringId(5518, "HDFS store is not started yet. Gemfire is running without HDFS.");
-  public static final StringId HOPLOG_0_COLOCATE_WITH_REGION_1_NOT_INITIALIZED_YET = new StringId(5519, "Current region: {0} colocated with region {1} is yet initialized.");
-  public static final StringId HOPLOG_SUSPEND_OF_0_FAILED_IN_1 = new StringId(5520, "Failed to suspend active {0} in {1}");
-  public static final StringId HOPLOG_CLEANED_UP_BY_JANITOR = new StringId(5521, "Hoplog is cleaned up by janitor task.");
-  public static final StringId HOPLOG_HDFS_UNREACHABLE = new StringId(5522, "HDFS at {0} is unreachable.");
-  public static final StringId HOPLOG_MAJOR_COMPACTION_SCHEDULED_FOR_BETTER_ESTIMATE = new StringId(5523, "A major compaction has been automatically scheduled for better accuracy of count_estimate() function");
-  public static final StringId HOPLOG_FAILED_TO_READ_HDFS_FILE = new StringId(5524, "Exception while reading file on HDFS: {0}");
-  public static final StringId HOPLOG_HDFS_COMPACTION_ERROR = new StringId(5525, "Error while compacting files of bucket {0}");
-  public static final StringId HOPLOG_HDFS_COMPACTION_OVERLOADED = new StringId(5526, "Too many pending tasks for {0}. Skipping compaction request for {1}");
-  public static final StringId HOPLOG_FLUSH_OPERATION_FAILED = new StringId(5527, "IO error while trying to flush buffer and create hoplog.");
-  public static final StringId HOPLOG_HOPLOG_REMOVE_FAILED = new StringId(5528, "IO error while trying to remove hoplog.");
-  /** HOPLOG STRINGS, 5505 - 5600 **/
 
   public static final StringId PartitionAttributesImpl_CANNOT_DETERMINE_LOCAL_MAX_MEMORY_FOR_PARTITION_ATTRIBUTE_SINCE_NO_CACHE_IS_AVAILABLE_FROM_WHICH_TO_FETCH_THE_OFF_HEAP_MEMORY_ALLOCATOR = new StringId(5600, "Cannot determine local max memory for partition attribute since no cache is available from which to fetch the off-heap memory allocator");
 
@@ -2096,10 +2070,6 @@ public class LocalizedStrings extends ParentLocalizedStrings {
   public static final StringId ParallelAsyncEventQueue_0_CAN_NOT_BE_USED_WITH_REPLICATED_REGION_1 = new StringId(5716,"Parallel Async Event Queue {0} can not be used with replicated region {1}");
   public static final StringId ParallelGatewaySender_0_CAN_NOT_BE_USED_WITH_REPLICATED_REGION_1 = new StringId(5717,"Parallel gateway sender {0} can not be used with replicated region {1}");
 
-  public static final StringId HDFSSTORE_IS_USED_IN_NONHDFS_REGION = new StringId(5808, "Only regions with HDFS_PARTITION or HDFS_PERSISTENT_PARTITION data policies can specify a HDFS Store");
-  public static final StringId EVICTORSERVICE_CAUGHT_EXCEPTION_0 = new StringId(5809, "Evictor Service caught following exception : {0}");
-  public static final StringId HDFSSTORE_IS_USED_IN_REPLICATED_TABLE = new StringId(5810, "HDFS Store cannot be used for REPLICATED TABLE");
-  public static final StringId HDFS_USER_IS_SAME_AS_GF_USER = new StringId(5811, "Gemfire user is the same as HDFS user, may cause security risks: {0}");
   public static final StringId GF_KERBEROS_KEYTAB_FILE_ABSENT = new StringId(5812, "Gemfire kerberos keytab file is missing: {0}");
   public static final StringId GF_KERBEROS_NAMENODE_PRINCIPAL_UNDEF = new StringId(5813, "Namenode principal must be configured when using kerberos authentication");
   public static final StringId GF_KERBEROS_KEYTAB_UNDEF = new StringId(5814, "Gemfire kerberos keytab file is not configured");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedRegionMXBean.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedRegionMXBean.java b/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedRegionMXBean.java
index 3003827..f087c89 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedRegionMXBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedRegionMXBean.java
@@ -130,7 +130,6 @@ public interface DistributedRegionMXBean {
   /**
    * Returns the number of entries in the Region.
    * 
-   * For HDFS regions it will be count of only in memory data.
    */
   public long getSystemRegionEntryCount();
 
@@ -305,14 +304,4 @@ public interface DistributedRegionMXBean {
    * Returns the number of members whose entry count is 0.
    */
   public int getEmptyNodes();
-  
-  
-  /**
-   * An estimated entry count for HDFS Read-Write region.This may not be accurate but acts
-   * as an indicative value.
-   * 
-   * For other regions it will be -1 ( Not Available)
-   */
-  public long getEstimatedSizeForHDFSRegion();
-
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedSystemMXBean.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedSystemMXBean.java b/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedSystemMXBean.java
index a6f65d4..88c4058 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedSystemMXBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedSystemMXBean.java
@@ -130,14 +130,6 @@ public interface DistributedSystemMXBean {
    */
   public Map<String, String[]> listMemberDiskstore();
 
-  
-  /**
-   *  @return A map of all {@link DistributedMember}s and their HDFSStore's.
-   */
-  
-  public Map<String, String[]> listMemberHDFSStore();
-  
-  
   /**
    * Returns a list of IDs for all gateway senders.
    */

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/management/MemberMXBean.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/MemberMXBean.java b/geode-core/src/main/java/com/gemstone/gemfire/management/MemberMXBean.java
index ed27569..4b849e0 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/MemberMXBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/MemberMXBean.java
@@ -227,13 +227,6 @@ public interface MemberMXBean {
   public String[] listDiskStores(boolean includeRegionOwned);
 
   /**
-   * 
-   * @return  list of HDFSStore's present in the Cache
-   */
-  
-  public String[] getHDFSStores();
-
-  /**
    * Returns the GemFire specific properties for this member.
    */
   public GemFireProperties listGemFireProperties();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/management/RegionMXBean.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/RegionMXBean.java b/geode-core/src/main/java/com/gemstone/gemfire/management/RegionMXBean.java
index 8c11d00..a913105 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/RegionMXBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/RegionMXBean.java
@@ -127,8 +127,6 @@ public interface RegionMXBean {
    * partitioned regions it will be the entry count for the primary buckets
    * hosted within this member.
    *
-   * For HDFS regions it will be count of only in memory data.
-   * 
    */
   public long getEntryCount();
 
@@ -350,12 +348,4 @@ public interface RegionMXBean {
    */
   public int getLocalMaxMemory();
   
-  /**
-   * Estimated entry count for HDFS Read-Write regions.This may not be accurate but
-   * acts as an indicative value. All HDFS Read-Write regions regions are PartitionedRegions. Hence
-   * the estimated value will be for primary buckets hosted within the member.
-   * 
-   * For other regions it will be -1 ( Not Available)
-   */
-  public long getEstimatedSizeForHDFSRegion();
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedRegionBridge.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedRegionBridge.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedRegionBridge.java
index 5fbbc61..48b899b 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedRegionBridge.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedRegionBridge.java
@@ -674,9 +674,4 @@ public class DistributedRegionBridge {
       return false;
     }
   }
-  
-  public long getEstimatedSizeForHDFSRegion() {
-    return monitor.getEstimatedSizeForHDFSRegion();
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedRegionMBean.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedRegionMBean.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedRegionMBean.java
index 549acc7..4580e7f 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedRegionMBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedRegionMBean.java
@@ -321,9 +321,4 @@ public class DistributedRegionMBean implements DistributedRegionMXBean {
     return bridge.getEntrySize();
   }
 
-  @Override
-  public long getEstimatedSizeForHDFSRegion() {
-    return bridge.getEstimatedSizeForHDFSRegion();
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedSystemBridge.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedSystemBridge.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedSystemBridge.java
index bcacc41..632415a 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedSystemBridge.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedSystemBridge.java
@@ -821,25 +821,6 @@ public class DistributedSystemBridge {
     return Collections.emptyMap();
   }
   
-  
-  /**
-   *  @return A map of all {@link DistributedMember}s and their HDFSStore's.
-   */  
-  
-  public Map<String, String[]> getMemberHDFSStoreMap() {
-    Iterator<MemberMXBean> memberIterator = mapOfMembers.values().iterator();    
-    if (memberIterator != null) {
-      Map<String, String[]> mapOfHdfs = new HashMap<String, String[]>();
-      while (memberIterator.hasNext()) {
-        MemberMXBean bean = memberIterator.next();
-        mapOfHdfs.put(bean.getMember(), bean.getHDFSStores());
-      }
-
-      return mapOfHdfs;
-    }
-    return Collections.emptyMap();
-  }
-
   /**
    *
    * @param member

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedSystemMBean.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedSystemMBean.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedSystemMBean.java
index bd92f9f..3458bf5 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedSystemMBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedSystemMBean.java
@@ -450,11 +450,4 @@ public class DistributedSystemMBean extends NotificationBroadcasterSupport
   public void setQueryCollectionsDepth(int queryCollectionsDepth) {
     bridge.setQueryCollectionsDepth(queryCollectionsDepth);;
   }
-
-  @Override
-  public Map<String, String[]> listMemberHDFSStore() {
-    return bridge.getMemberHDFSStoreMap();
-  }
-
-
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/HDFSRegionBridge.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/HDFSRegionBridge.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/HDFSRegionBridge.java
deleted file mode 100644
index 29bc246..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/HDFSRegionBridge.java
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.management.internal.beans;
-
-import java.util.Map;
-import java.util.Set;
-
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector;
-import com.gemstone.gemfire.internal.cache.BucketRegion;
-import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-import com.gemstone.gemfire.internal.cache.PartitionedRegion.SizeEntry;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
-import com.gemstone.gemfire.management.internal.ManagementConstants;
-import com.gemstone.gemfire.management.internal.beans.stats.MBeanStatsMonitor;
-import com.gemstone.gemfire.management.internal.beans.stats.StatType;
-import com.gemstone.gemfire.management.internal.beans.stats.StatsRate;
-
-/**
- * 
- * 
- * MBean Bridge for HDFS region which is a type of Partitioned Region
- */
-public class HDFSRegionBridge<K, V> extends PartitionedRegionBridge<K, V> {
-
-  private SortedOplogStatistics soplogStats;
-
-  private MBeanStatsMonitor hdfsRegionMonitor;
-
-  private static final String WRITTEN_BYTES = "writeBytes";
-
-  private static final String READ_BYTES = "readBytes";
-
-  private static final String SCANNED_BYTES = "scanBytes";
-
-  public static final String HDFS_REGION_MONITOR = "HDFSRegionMonitor";
-
-  private StatsRate diskWritesRate;
-
-  private StatsRate diskReadsRate;
-  
-  private PartitionedRegion parRegion;
-
-  public HDFSRegionBridge(Region<K, V> region) {
-    super(region);
-
-    HDFSRegionDirector director = HDFSRegionDirector.getInstance();
-
-    String regionFullPath = region.getFullPath();
-    this.soplogStats = director.getHdfsRegionStats(regionFullPath);
-    this.hdfsRegionMonitor = new MBeanStatsMonitor(HDFS_REGION_MONITOR + "_" + regionFullPath);
-    hdfsRegionMonitor.addStatisticsToMonitor(soplogStats.getStats());
-    this.parRegion = (PartitionedRegion)region;
-    configureHDFSRegionMetrics();
-  }
-
-  private void configureHDFSRegionMetrics() {
-
-    diskWritesRate = new StatsRate(WRITTEN_BYTES, StatType.INT_TYPE, hdfsRegionMonitor);
-
-    String[] readsRates = new String[] { READ_BYTES, SCANNED_BYTES };
-
-    diskReadsRate = new StatsRate(readsRates, StatType.INT_TYPE, hdfsRegionMonitor);
-  }
-
-  
-  private long estimatedEntryCount = 0;
-  
-
-  /**
-   * Initialized skipCount to 10 as for the first time we want to compute size
-   * of HDFS region.
-   */
-  private int skipCount = 10;
-
-  /**
-   * 
-   * An estimated entry count for HDFS region.This may not be accurate but acts
-   * as an indicative value.
-   * 
-   * 
-   * Even for estimating size we need to iterate over all BucketRegions and call
-   * BucketRegion.size(). This is expensive as compared to reading directly from
-   * a statistics value. Hence we are skipping 10 samples.
-   * 
-   */
-  public long getEstimatedSizeForHDFSRegion() {
-    if(parRegion.isHDFSReadWriteRegion()){
-      if(skipCount % 10 == 0) {
-        computeEntryCount();
-        skipCount = 1;
-      } else {
-        skipCount++;
-      }
-      return estimatedEntryCount;
-    }else{
-      return ManagementConstants.NOT_AVAILABLE_LONG;
-    }
-    
-  }
-  
-  private void computeEntryCount() {
-
-    if (parRegion.isDataStore()) { //if not a DataStore do nothing and keep the entryCount as 0;
-      int numLocalEntries = 0;
-      Map<Integer, SizeEntry> localPrimaryBucketRegions = parRegion.getDataStore()
-          .getSizeEstimateForLocalPrimaryBuckets();
-      if (localPrimaryBucketRegions != null && localPrimaryBucketRegions.size() > 0) {
-        for (Map.Entry<Integer, SizeEntry> me : localPrimaryBucketRegions.entrySet()) {
-          numLocalEntries += me.getValue().getSize();
-
-        }
-      }
-      this.estimatedEntryCount = numLocalEntries;
-    }
-  }
-  
-  @Override
-  public long getEntryCount() {
-    if (parRegion.isDataStore()) {
-      int numLocalEntries = 0;
-      Set<BucketRegion> localPrimaryBucketRegions = parRegion.getDataStore().getAllLocalPrimaryBucketRegions();
-      if (localPrimaryBucketRegions != null && localPrimaryBucketRegions.size() > 0) {
-        for (BucketRegion br : localPrimaryBucketRegions) {
-          // TODO soplog, fix this for griddb regions
-          numLocalEntries += br.getRegionMap().sizeInVM() - br.getTombstoneCount();
-
-        }
-      }
-      return numLocalEntries;
-    } else {
-      return  ManagementConstants.ZERO;
-    }
-  }
-
-
-  @Override
-  public long getEntrySize() {
-    return ManagementConstants.NOT_AVAILABLE_LONG;
-  }
-
-  @Override
-  public long getDiskUsage() {
-    if (soplogStats != null) {
-      return soplogStats.getStoreUsageBytes();
-    }
-    return ManagementConstants.NOT_AVAILABLE_LONG;
-  }
-
-  @Override
-  public float getDiskReadsRate() {
-    return diskReadsRate.getRate();
-  }
-
-  @Override
-  public float getDiskWritesRate() {
-    return diskWritesRate.getRate();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MemberMBean.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MemberMBean.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MemberMBean.java
index 21d7140..b82b94d 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MemberMBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MemberMBean.java
@@ -455,11 +455,6 @@ public class MemberMBean extends NotificationBroadcasterSupport implements
   }
 
   @Override
-  public String[] getHDFSStores() {
-    return bridge.getHDFSStores();
-  }
-  
-  @Override
   public long getGetsAvgLatency() {
     return bridge.getGetsAvgLatency();
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MemberMBeanBridge.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MemberMBeanBridge.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MemberMBeanBridge.java
index 1425572..638ba06 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MemberMBeanBridge.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MemberMBeanBridge.java
@@ -49,7 +49,6 @@ import com.gemstone.gemfire.cache.CacheClosedException;
 import com.gemstone.gemfire.cache.DiskStore;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.execute.FunctionService;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
 import com.gemstone.gemfire.cache.persistence.PersistentID;
 import com.gemstone.gemfire.cache.wan.GatewayReceiver;
 import com.gemstone.gemfire.cache.wan.GatewaySender;
@@ -1010,32 +1009,6 @@ public class MemberMBeanBridge {
     return listDiskStores(true);
   }
 
-  
-
-  
-  /**
-   * @return list all the HDFSStore's name at cache level
-   */
-  
-  public String[] getHDFSStores() {
-    GemFireCacheImpl cacheImpl = (GemFireCacheImpl) cache;
-    String[] retStr = null;
-    Collection<HDFSStoreImpl> hdfsStoreCollection = null;
-    hdfsStoreCollection = cacheImpl.getHDFSStores();
-      
-    if (hdfsStoreCollection != null && hdfsStoreCollection.size() > 0) {
-      retStr = new String[hdfsStoreCollection.size()];
-      Iterator<HDFSStoreImpl> it = hdfsStoreCollection.iterator();
-      int i = 0;
-      while (it.hasNext()) {
-        retStr[i] = it.next().getName();
-        i++;
-
-      }
-    }
-    return retStr;
-  }
-      
   /**
    * 
    * @return log of the member.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/PartitionedRegionBridge.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/PartitionedRegionBridge.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/PartitionedRegionBridge.java
index 3a8440a..7450746 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/PartitionedRegionBridge.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/PartitionedRegionBridge.java
@@ -76,14 +76,7 @@ public class PartitionedRegionBridge<K, V>  extends RegionMBeanBridge<K, V> {
   
   
   public static <K, V> PartitionedRegionBridge<K, V> getInstance(Region<K, V> region) {
-
-    if (region.getAttributes().getDataPolicy().withHDFS()) {
-      PartitionedRegionBridge<K, V> bridge = new HDFSRegionBridge<K, V>(region);
-      return bridge;
-    } else {
-      return new PartitionedRegionBridge<K, V> (region);
-    }
-
+    return new PartitionedRegionBridge<K, V> (region);
   }
   
   
@@ -309,8 +302,4 @@ public class PartitionedRegionBridge<K, V>  extends RegionMBeanBridge<K, V> {
   public int getLocalMaxMemory() {
     return partitionAttributesData.getLocalMaxMemory();
   }
-
-  public long getEstimatedSizeForHDFSRegion() {
-    return -1;
-  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/RegionMBean.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/RegionMBean.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/RegionMBean.java
index 1c7dcf7..86fe73e 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/RegionMBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/RegionMBean.java
@@ -314,9 +314,4 @@ public class RegionMBean<K, V> extends NotificationBroadcasterSupport implements
     return bridge.getLocalMaxMemory(); 
   }
 
-  @Override
-  public long getEstimatedSizeForHDFSRegion() {
-    return bridge.getEstimatedSizeForHDFSRegion();
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/RegionMBeanBridge.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/RegionMBeanBridge.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/RegionMBeanBridge.java
index cd3cb90..66f61e2 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/RegionMBeanBridge.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/RegionMBeanBridge.java
@@ -590,9 +590,4 @@ public class RegionMBeanBridge<K, V> {
   public int getLocalMaxMemory() {
     return -1;
   }
-
-  
-  public long getEstimatedSizeForHDFSRegion() {
-    return -1;
-  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/stats/RegionClusterStatsMonitor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/stats/RegionClusterStatsMonitor.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/stats/RegionClusterStatsMonitor.java
index c855171..7a4d9b4 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/stats/RegionClusterStatsMonitor.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/stats/RegionClusterStatsMonitor.java
@@ -111,8 +111,6 @@ public class RegionClusterStatsMonitor {
 
   private static final String PERSISTENT_ENABLED = "PersistentEnabled";
   
-  private static final String ESTIMATED_SIZE_FOR_HDFS_REGION = "EstimatedSizeForHDFSRegion";
-
   private volatile long lastAccessedTime = 0;
 
   private volatile long lastModifiedTime = 0;
@@ -192,7 +190,6 @@ public class RegionClusterStatsMonitor {
     typeMap.put(AVERAGE_READS, Float.TYPE);
     typeMap.put(AVERAGE_WRITES, Float.TYPE);
     typeMap.put(ENTRY_SIZE, Long.TYPE);
-    typeMap.put(ESTIMATED_SIZE_FOR_HDFS_REGION, Long.TYPE);
 
   }
 
@@ -336,10 +333,6 @@ public class RegionClusterStatsMonitor {
   public long getTotalEntriesOnlyOnDisk() {
     return aggregator.getLongValue(TOTAL_ENTRIES_ONLY_ON_DISK);
   }
-  
-  public long getEstimatedSizeForHDFSRegion() {
-    return aggregator.getLongValue(ESTIMATED_SIZE_FOR_HDFS_REGION);
-  }
 
   public int getAvgBucketSize() {
     int bucketNum = getBucketCount();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/domain/RegionAttributesInfo.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/domain/RegionAttributesInfo.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/domain/RegionAttributesInfo.java
index c4588f6..5a51b62 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/domain/RegionAttributesInfo.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/domain/RegionAttributesInfo.java
@@ -83,11 +83,7 @@ public class RegionAttributesInfo implements Serializable{
 	private String regionIdleTimeoutAction = ExpirationAction.INVALIDATE.toString();
 	
 	private boolean offHeap;
-	private String hdfsStoreName;
-	private Boolean hdfsWriteOnly;
-	
-	
-	
+
 	/***
 	 * Non-default-attribute map in the constructor
 	 */
@@ -183,8 +179,6 @@ public class RegionAttributesInfo implements Serializable{
 		
 		}
 		this.offHeap = ra.getOffHeap();
-		this.hdfsStoreName = ra.getHDFSStoreName();
-		this.hdfsWriteOnly = ra.getHDFSWriteOnly();
 	}
 	
 	
@@ -314,15 +308,6 @@ public class RegionAttributesInfo implements Serializable{
 	  return this.offHeap;
 	}
 	
-	public String getHdfsStoreName() {
-		return hdfsStoreName;
-	}
-
-
-	public Boolean getHdfsWriteOnly() {
-		return hdfsWriteOnly;
-	}
-	
 	@Override
 	public boolean equals(Object arg0) {
 		return super.equals(arg0);
@@ -482,10 +467,6 @@ public class RegionAttributesInfo implements Serializable{
             if (this.offHeap != RegionAttributesDefault.OFF_HEAP) {
                 nonDefaultAttributes.put(RegionAttributesNames.OFF_HEAP, Boolean.toString(this.offHeap));
              }            
-            if (this.hdfsStoreName != null ) {
-                nonDefaultAttributes.put(RegionAttributesNames.HDFSSTORE, this.hdfsStoreName);
-                nonDefaultAttributes.put(RegionAttributesNames.HDFS_WRITEONLY, Boolean.toString(this.hdfsWriteOnly));
-             }
 		}
 		return this.nonDefaultAttributes;
 	}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunction.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunction.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunction.java
deleted file mode 100644
index e6828bc..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunction.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.management.internal.cli.functions;
-
-import org.apache.logging.log4j.Logger;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.execute.FunctionAdapter;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.InternalEntity;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.InternalCache;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.management.internal.cli.util.HDFSStoreNotFoundException;
-
-/**
- *  Function used by the 'describe hdfs-store' gfsh command to collect information
- * and details about a particular hdfs store for a particular GemFire distributed system member.
- * 
- */
-public class DescribeHDFSStoreFunction extends FunctionAdapter implements InternalEntity {
-  private static final long serialVersionUID = 1L;
-
-  private static final Logger logger = LogService.getLogger();
-
-  public static DescribeHDFSStoreFunction INSTANCE = new DescribeHDFSStoreFunction();
-
-  private static final String ID = DescribeHDFSStoreFunction.class.getName();
-  
-  protected Cache getCache() {
-    return CacheFactory.getAnyInstance();
-  }
-  
-  protected DistributedMember getDistributedMemberId(Cache cache){
-    return ((InternalCache)cache).getMyId();
-  }
-  
-  public void execute(final FunctionContext context) {
-    try {
-      Cache cache = getCache();
-      final DistributedMember member = getDistributedMemberId(cache);      
-      if (cache instanceof GemFireCacheImpl) {
-        GemFireCacheImpl cacheImpl = (GemFireCacheImpl)cache;
-        final String hdfsStoreName = (String)context.getArguments();
-        final String memberName = member.getName();
-        HDFSStoreImpl hdfsStore = cacheImpl.findHDFSStore(hdfsStoreName);        
-        if (hdfsStore != null) {
-          HDFSStoreConfigHolder configHolder = new HDFSStoreConfigHolder (hdfsStore);
-          context.getResultSender().lastResult(configHolder);
-        }
-        else {
-          context.getResultSender().sendException(
-              new HDFSStoreNotFoundException(
-                  String.format("A hdfs store with name (%1$s) was not found on member (%2$s).",
-                  hdfsStoreName, memberName)));
-        }
-      }  
-    } catch (Exception e) {
-      logger.error("Error occurred while executing 'describe hdfs-store': {}!", e.getMessage(), e);
-      context.getResultSender().sendException(e);
-    }
-  }
-
-  @Override
-  public String getId() {
-    return ID;
-  }	
-}
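
The deleted function follows the usual Geode function pattern: do the work on the member, then hand back either a single result through the result sender or an exception the caller can surface. A minimal sketch of that contract, using hypothetical stand-in interfaces rather than the real com.gemstone.gemfire.cache.execute API:

    // Hypothetical stand-ins for the result-sender contract used above; this is a
    // sketch of the control flow only, not the Geode execute API.
    public class DescribeStoreSketch {
      interface ResultSender { void lastResult(Object result); void sendException(Exception e); }
      interface StoreRegistry { Object findStore(String name); }

      static void describe(String storeName, StoreRegistry registry, ResultSender sender) {
        try {
          Object store = registry.findStore(storeName);
          if (store != null) {
            sender.lastResult(store);       // success: exactly one result goes back to the caller
          } else {
            sender.sendException(new Exception("store " + storeName + " not found"));
          }
        } catch (Exception e) {
          sender.sendException(e);          // unexpected failures are also returned, not swallowed
        }
      }
    }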

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/util/HDFSStoreNotFoundException.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/util/HDFSStoreNotFoundException.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/util/HDFSStoreNotFoundException.java
deleted file mode 100644
index ad569f0..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/util/HDFSStoreNotFoundException.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.management.internal.cli.util;
-
-import com.gemstone.gemfire.GemFireException;
-
-/**
- * The HDFSStoreNotFoundException is a GemFireException indicating that an HDFS store with the given name could not be
- * found on the specified member.
- * </p>
- * @see com.gemstone.gemfire.GemFireException
- */
-// TODO this GemFireException should be moved to a more appropriate package!
-  @SuppressWarnings("unused")
-public class HDFSStoreNotFoundException extends GemFireException {
-
-  public HDFSStoreNotFoundException() {
-  }
-
-  public HDFSStoreNotFoundException(final String message) {
-    super(message);
-  }
-
-  public HDFSStoreNotFoundException(final Throwable cause) {
-    super(cause);
-  }
-
-  public HDFSStoreNotFoundException(final String message, final Throwable cause) {
-    super(message, cause);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/util/RegionAttributesNames.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/util/RegionAttributesNames.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/util/RegionAttributesNames.java
index e842bee..e0db821 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/util/RegionAttributesNames.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/util/RegionAttributesNames.java
@@ -46,9 +46,7 @@ public class RegionAttributesNames {
 	public static final String POOL_NAME =  "pool-name";
 	public static final String COMPRESSOR = "compressor";
     public static final String OFF_HEAP = "off-heap";
-    public static final String HDFSSTORE = "hdfs-store";
-    public static final String HDFS_WRITEONLY = "hdfs-write-only";
-	
+
 	//Partition attributes
 	public static final String LOCAL_MAX_MEMORY =  "local-max-memory";
 	public static final String REDUNDANT_COPIES =  "redundant-copies";

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/MemberMXBeanAdapter.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/MemberMXBeanAdapter.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/MemberMXBeanAdapter.java
index f1e0d7a..a3d4cd0 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/MemberMXBeanAdapter.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/MemberMXBeanAdapter.java
@@ -243,11 +243,6 @@ public class MemberMXBeanAdapter implements MemberMXBean {
   }
 
   @Override
-  public String[] getHDFSStores() {
-    throw new UnsupportedOperationException("Not Implemented!");
-  }  
-  
-  @Override
   public String[] getRootRegionNames() {
     throw new UnsupportedOperationException("Not Implemented!");
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/resources/META-INF/schemas/geode.apache.org/schema/cache/cache-1.0.xsd
----------------------------------------------------------------------
diff --git a/geode-core/src/main/resources/META-INF/schemas/geode.apache.org/schema/cache/cache-1.0.xsd b/geode-core/src/main/resources/META-INF/schemas/geode.apache.org/schema/cache/cache-1.0.xsd
index 5ecd67d..cc6d189 100755
--- a/geode-core/src/main/resources/META-INF/schemas/geode.apache.org/schema/cache/cache-1.0.xsd
+++ b/geode-core/src/main/resources/META-INF/schemas/geode.apache.org/schema/cache/cache-1.0.xsd
@@ -274,7 +274,6 @@ declarative caching XML file elements unless indicated otherwise.
         </xsd:element>
         <xsd:element maxOccurs="unbounded" minOccurs="0" name="pool" type="gf:pool-type" />
         <xsd:element maxOccurs="unbounded" minOccurs="0" name="disk-store" type="gf:disk-store-type" />
-        <xsd:element maxOccurs="unbounded" minOccurs="0" name="hdfs-store" type="gf:hdfs-store-type" />
         <xsd:element maxOccurs="1" minOccurs="0" name="pdx" type="gf:pdx-type" />
         <xsd:element maxOccurs="unbounded" minOccurs="0" name="region-attributes" type="gf:region-attributes-type" />
         <xsd:choice maxOccurs="unbounded" minOccurs="0">
@@ -826,8 +825,6 @@ As of 6.5 disk-dirs is deprecated on region-attributes. Use disk-store-name inst
     <xsd:attribute name="pool-name" type="xsd:string" use="optional" />
     <xsd:attribute name="disk-store-name" type="xsd:string" use="optional" />
     <xsd:attribute name="disk-synchronous" type="xsd:boolean" use="optional" />
-    <xsd:attribute name="hdfs-store-name" type="xsd:string" use="optional" />
-    <xsd:attribute name="hdfs-write-only" type="xsd:boolean" use="optional" />
     <xsd:attribute name="publisher" type="xsd:boolean" use="optional" />
     <xsd:attribute name="refid" type="xsd:string" use="optional" />
     <xsd:attribute name="scope" type="gf:region-attributesScope" use="optional" />
@@ -1133,34 +1130,6 @@ As of 6.5 disk-dirs is deprecated on region-attributes. Use disk-store-name inst
     <xsd:attribute name="disk-usage-critical-percentage" type="xsd:string" use="optional" />
   </xsd:complexType>
 
-  <xsd:complexType mixed="true" name="hdfs-store-type">
-    <xsd:annotation>
-      <xsd:documentation>
-        A "hdfs-store" element specifies a HdfsStore for persistence.
-      </xsd:documentation>
-    </xsd:annotation>
-    <xsd:attribute name="name" type="xsd:string" use="required" />
-    <xsd:attribute name="namenode-url" type="xsd:string" use="optional" />
-    <xsd:attribute name="home-dir" type="xsd:string" use="optional" />
-    <xsd:attribute name="max-memory" type="xsd:string" use="optional" />
-    <xsd:attribute name="read-cache-size" type="xsd:string" use="optional" />
-    <xsd:attribute name="batch-size" type="xsd:string" use="optional" />
-    <xsd:attribute name="batch-interval" type="xsd:string" use="optional" />
-    <xsd:attribute name="dispatcher-threads" type="xsd:string" use="optional" />
-    <xsd:attribute name="buffer-persistent" type="xsd:boolean" use="optional" />
-    <xsd:attribute name="disk-store" type="xsd:string" use="optional" />
-    <xsd:attribute name="synchronous-disk-write" type="xsd:string" use="optional" />
-    <xsd:attribute name="hdfs-client-config-file" type="xsd:string" use="optional" />
-    <xsd:attribute name="purge-interval" type="xsd:string" use="optional" />
-    <xsd:attribute name="major-compaction" type="xsd:string" use="optional" />
-    <xsd:attribute name="major-compaction-interval" type="xsd:string" use="optional" />
-    <xsd:attribute name="major-compaction-threads" type="xsd:integer" use="optional" />
-    <xsd:attribute name="minor-compaction" type="xsd:string" use="optional" />
-    <xsd:attribute name="minor-compaction-threads" type="xsd:integer" use="optional" />
-    <xsd:attribute name="max-write-only-file-size" type="xsd:integer" use="optional" />
-    <xsd:attribute name="write-only-file-rollover-interval" type="xsd:string" use="optional" />    
-  </xsd:complexType>
-
   <xsd:complexType name="pdx-type">
     <xsd:annotation>
       <xsd:documentation>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserverJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserverJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserverJUnitTest.java
deleted file mode 100644
index 6f69427..0000000
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserverJUnitTest.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.hdfs.internal.FlushObserver.AsyncFlushResult;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest
-;
-
-import junit.framework.TestCase;
-
-@Category({IntegrationTest.class})
-public class SignalledFlushObserverJUnitTest extends TestCase {
-  private AtomicInteger events;
-  private AtomicInteger delivered;
-  
-  private SignalledFlushObserver sfo;
-  
-  public void testEmpty() throws InterruptedException {
-    assertFalse(sfo.shouldDrainImmediately());
-    assertTrue(sfo.flush().waitForFlush(0, TimeUnit.NANOSECONDS));
-    assertFalse(sfo.shouldDrainImmediately());
-  }
-  
-  public void testSingle() throws InterruptedException {
-    sfo.push();
-    AsyncFlushResult result = sfo.flush();
-
-    assertTrue(sfo.shouldDrainImmediately());
-    sfo.pop(1);
-    
-    assertTrue(result.waitForFlush(0, TimeUnit.MILLISECONDS));
-    assertFalse(sfo.shouldDrainImmediately());
-  }
-
-  public void testDouble() throws InterruptedException {
-    sfo.push();
-    sfo.push();
-
-    AsyncFlushResult result = sfo.flush();
-    assertTrue(sfo.shouldDrainImmediately());
-
-    sfo.pop(1);
-    assertFalse(result.waitForFlush(0, TimeUnit.MILLISECONDS));
-
-    sfo.pop(1);
-    assertTrue(result.waitForFlush(0, TimeUnit.MILLISECONDS));
-    assertFalse(sfo.shouldDrainImmediately());
-  }
-
-  public void testTimeout() throws InterruptedException {
-    sfo.push();
-    AsyncFlushResult result = sfo.flush();
-
-    assertTrue(sfo.shouldDrainImmediately());
-    assertFalse(result.waitForFlush(100, TimeUnit.MILLISECONDS));
-    sfo.pop(1);
-    
-    assertTrue(result.waitForFlush(0, TimeUnit.MILLISECONDS));
-    assertFalse(sfo.shouldDrainImmediately());
-  }
-  
-  @Override
-  protected void setUp() {
-    events = new AtomicInteger(0);
-    delivered = new AtomicInteger(0);
-    sfo = new SignalledFlushObserver();
-    AbstractHoplogOrganizer.JUNIT_TEST_RUN = true;
-  }
-  
-  private int push() {
-    return events.incrementAndGet();
-  }
-  
-  private int pop() {
-    return delivered.incrementAndGet();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SortedListForAsyncQueueJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SortedListForAsyncQueueJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SortedListForAsyncQueueJUnitTest.java
deleted file mode 100644
index fc0232f..0000000
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SortedListForAsyncQueueJUnitTest.java
+++ /dev/null
@@ -1,564 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.concurrent.ConcurrentSkipListSet;
-
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.Operation;
-import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionFactory;
-import com.gemstone.gemfire.cache.RegionShortcut;
-import com.gemstone.gemfire.cache.asyncqueue.internal.ParallelAsyncEventQueueImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue.KeyToSeqNumObject;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue.MultiRegionSortedQueue;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue.SortedEventQueue;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.cache.EntryEventImpl;
-import com.gemstone.gemfire.internal.cache.EnumListenerEvent;
-import com.gemstone.gemfire.internal.cache.EventID;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerHelper;
-import com.gemstone.gemfire.internal.cache.wan.GatewaySenderAttributes;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest
-;
-
-import junit.framework.TestCase;
-
-/**
- * A test class for testing the functionality of the sorted async queue.
- * 
- */
-@Category({IntegrationTest.class})
-public class SortedListForAsyncQueueJUnitTest extends TestCase {
-  
-  public SortedListForAsyncQueueJUnitTest() {
-    super();
-  }
-
-  private GemFireCacheImpl c;
-
-  @Override
-  public void setUp() {
-    
-    System.setProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP, "true");
- // make it a loner
-    this.c = createCache();
-    AbstractHoplogOrganizer.JUNIT_TEST_RUN = true;
-  }
-
-  protected GemFireCacheImpl createCache() {
-    return (GemFireCacheImpl) new CacheFactory().set("mcast-port", "0").set("log-level", "warning")
-        .create();
-  }
-
-  @Override
-  public void tearDown() {
-    this.c.close();
-  }
-  
-  public void testHopQueueWithOneBucket() throws Exception {
-    this.c.close();
-    this.c = createCache();
-    PartitionAttributesFactory paf = new PartitionAttributesFactory();
-    paf.setTotalNumBuckets(1);
-    
-    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
-    PartitionedRegion r1 = (PartitionedRegion) rf1.setPartitionAttributes(paf.create()).create("r1");
-    r1.put("K9", "x1");
-    r1.put("K8", "x2");
-    // hack to get the queue. 
-    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
-    HDFSBucketRegionQueue hdfsBQ = (HDFSBucketRegionQueue)((PartitionedRegion)hopqueue.getRegion()).getDataStore().getLocalBucketById(0);
-    
-    EntryEventImpl ev1 = EntryEventImpl.create((LocalRegion)r1, Operation.CREATE,
-        (Object)"K1", (Object)"V1", null,
-        false, (DistributedMember)c.getMyId());
-    // put some keys with multiple updates.
-    hopqueue.put(getNewEvent("K2", "V2", r1, 0, 2) );
-    hopqueue.put(getNewEvent("K3", "V3a", r1, 0, 8) );
-    hopqueue.put(getNewEvent("K3", "V3", r1, 0, 7) );
-    hopqueue.put(getNewEvent("K1", "V1", r1, 0, 3) );
-    hopqueue.put(getNewEvent("K2", "V2a", r1, 0, 6) );
-    hopqueue.put(getNewEvent("K3", "V3b", r1, 0, 9) );
-    
-    assertTrue(" skip list size should be  6 ", getSortedEventQueue(hdfsBQ).currentSkipList.size() == 6);
-    
-    
-    // peek a key; it should be the lowest
-    Object[] l = hopqueue.peek(1, 0).toArray();
-    
-    assertTrue("First key should be K1 but is " + ((HDFSGatewayEventImpl)l[0]).getKey(), ((HDFSGatewayEventImpl)l[0]).getKey().equals("K1"));
-    assertTrue(" Peeked skip list size should be  0 ", getSortedEventQueue(hdfsBQ).getPeeked().size() == 6);
-    assertTrue(" skip list size should be  6 ", getSortedEventQueue(hdfsBQ).currentSkipList.size() == 0);
-    
-    // try to fetch the key. it would be in peeked skip list but still available
-    Object o = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);
-    assertTrue("First key should be K1", ((HDFSGatewayEventImpl)o).getKey().equals("K1"));
-    
-    assertTrue(" skip lists size should be  6"  , ( getSortedEventQueue(hdfsBQ).getPeeked().size() + getSortedEventQueue(hdfsBQ).currentSkipList.size() ) == 6);
-    
-    o = hopqueue.get(r1, CacheServerHelper.serialize("K2"), 0);
-    Object v = ((HDFSGatewayEventImpl)o).getDeserializedValue();
-    assertTrue(" key should K2 with value V2a but the value was " + v , ((String)v).equals("V2a"));
-    
-    o = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);
-    v = ((HDFSGatewayEventImpl)o).getDeserializedValue();
-    assertTrue(" key should K3 with value V3b but the value was " + v , ((String)v).equals("V3b"));
-  }
-
-  protected SortedEventQueue getSortedEventQueue(HDFSBucketRegionQueue hdfsBQ) {
-    MultiRegionSortedQueue multiQueue = (MultiRegionSortedQueue)(hdfsBQ.hdfsEventQueue);
-    return multiQueue.regionToEventQueue.values().iterator().next();
-  }
-  
-  public void testPeekABatch() throws Exception {
-    this.c.close();
-    this.c = createCache();
-    PartitionAttributesFactory paf = new PartitionAttributesFactory();
-    paf.setTotalNumBuckets(1);
-    
-    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
-    PartitionedRegion r1 = (PartitionedRegion) rf1.setPartitionAttributes(paf.create()).create("r1");
-    r1.put("K9", "x1");
-    r1.put("K8", "x2");
-    // hack to get the queue. 
-    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
-    HDFSBucketRegionQueue hdfsBQ = (HDFSBucketRegionQueue)((PartitionedRegion)hopqueue.getRegion()).getDataStore().getLocalBucketById(0);
-    
-    
-    // put some keys with multiple updates.
-    hopqueue.put(getNewEvent("K2", "V2", r1, 0, 2) );
-    hopqueue.put(getNewEvent("K3", "V3a", r1, 0, 8) );
-    hopqueue.put(getNewEvent("K3", "V3", r1, 0, 7) );
-    hopqueue.put(getNewEvent("K1", "V1", r1, 0, 3) );
-    hopqueue.put(getNewEvent("K2", "V2a", r1, 0, 6) );
-    hopqueue.put(getNewEvent("K3", "V3b", r1, 0, 9) );
-    
-    getSortedEventQueue(hdfsBQ).rollover(true);
-    
-    hopqueue.put(getNewEvent("K1", "V12", r1, 0, 11) );
-    hopqueue.put(getNewEvent("K5", "V3a", r1, 0, 12) );
-    hopqueue.put(getNewEvent("K5", "V3b", r1, 0, 13) );
-    
-    assertTrue(" skip list size should be  3 but is " + getSortedEventQueue(hdfsBQ).currentSkipList.size(), getSortedEventQueue(hdfsBQ).currentSkipList.size() == 3);
-    assertTrue(" skip list size should be  6 but is " + getSortedEventQueue(hdfsBQ).queueOfLists.peek().size(), getSortedEventQueue(hdfsBQ).queueOfLists.peek().size() == 6);
-    
-    Object o1 = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);;
-    Object o2 = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);;
-    Object v1 = ((HDFSGatewayEventImpl)o1).getDeserializedValue();
-    Object v2 = ((HDFSGatewayEventImpl)o2).getDeserializedValue();
-    assertTrue(" key should K3 with value V3b but the value was " + v1 , ((String)v1).equals("V3b"));
-    assertTrue(" key should K1 with value V12 but the value was " + v2 , ((String)v2).equals("V12"));
-    
-    
-    ArrayList a = hdfsBQ.peekABatch();
-    assertTrue("First key should be K1 but is " + ((HDFSGatewayEventImpl)a.get(0)).getKey(), ((HDFSGatewayEventImpl)a.get(0)).getKey().equals("K1"));
-    assertTrue("Second key should be K2 but is " + ((HDFSGatewayEventImpl)a.get(1)).getKey(), ((HDFSGatewayEventImpl)a.get(1)).getKey().equals("K2"));
-    assertTrue("Third key should be K2 but is " + ((HDFSGatewayEventImpl)a.get(2)).getKey(), ((HDFSGatewayEventImpl)a.get(2)).getKey().equals("K2"));
-    
-    
-    assertTrue(" Peeked skip list size should be 6 ", getSortedEventQueue(hdfsBQ).getPeeked().size() == 6);
-    assertTrue(" queueOfLists size should be  2 ", getSortedEventQueue(hdfsBQ).queueOfLists.size() == 2);
-    
-    assertTrue(" skip list size should be  3 ", getSortedEventQueue(hdfsBQ).currentSkipList.size() == 3);
-    
-    o1 = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);;
-    o2 = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);;
-    v1 = ((HDFSGatewayEventImpl)o1).getDeserializedValue();
-    v2 = ((HDFSGatewayEventImpl)o2).getDeserializedValue();
-    assertTrue(" key should K3 with value V3b but the value was " + v1 , ((String)v1).equals("V3b"));
-    assertTrue(" key should K1 with value V12 but the value was " + v2 , ((String)v2).equals("V12"));
-    
-    
-    java.util.Iterator<KeyToSeqNumObject> iter1 = getSortedEventQueue(hdfsBQ).getPeeked().iterator();
-    assertTrue("key in peeked list should be 3 ", iter1.next().getSeqNum() == 3);
-    assertTrue("key in peeked list should be 6 ", iter1.next().getSeqNum() == 6);
-    assertTrue("key in peeked list should be 2 ", iter1.next().getSeqNum() == 2);
-    assertTrue("key in peeked list should be 9 ", iter1.next().getSeqNum() == 9);
-    assertTrue("key in peeked list should be 8 ", iter1.next().getSeqNum() == 8);
-    assertTrue("key in peeked list should be 7 ", iter1.next().getSeqNum() == 7);
-    assertTrue(" Peeked list should not have any more elements. ", iter1.hasNext() == false);
-    
-    
-    java.util.Iterator<KeyToSeqNumObject> iter2 = getSortedEventQueue(hdfsBQ).currentSkipList.iterator();
-    assertTrue("key in peeked list should be 11", iter2.next().getSeqNum() == 11);
-    assertTrue("key in peeked list should be 13", iter2.next().getSeqNum() == 13);
-    assertTrue("key in peeked list should be 12 ", iter2.next().getSeqNum() == 12);
-    
-    iter2 = getSortedEventQueue(hdfsBQ).currentSkipList.iterator();
-    HashSet<Long> hs = new HashSet<Long>();
-    hs.add((long) 11);
-    hs.add((long) 13);
-    hs.add((long) 12);
-    hs.add((long) 3);
-    hs.add((long) 6);
-    hs.add((long) 2);
-    hs.add((long) 9);
-    hs.add((long) 8);
-    hs.add((long) 7);
-    
-    hdfsBQ.hdfsEventQueue.handleRemainingElements(hs);
-    
-    ArrayList a1 = hdfsBQ.peekABatch();
-    o1 = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);;
-    o2 = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);;
-    v2 = ((HDFSGatewayEventImpl)o2).getDeserializedValue();
-    assertTrue(" key should K3 should not have been found ",  o1 ==null);
-    assertTrue(" key should K1 with value V12 but the value was " + v2 , ((String)v2).equals("V12"));
-    
-    assertTrue("First key should be K1 but is " + ((HDFSGatewayEventImpl)a1.get(0)).getKey(), ((HDFSGatewayEventImpl)a1.get(0)).getKey().equals("K1"));
-    assertTrue("Second key should be K5 but is " + ((HDFSGatewayEventImpl)a1.get(1)).getKey(), ((HDFSGatewayEventImpl)a1.get(1)).getKey().equals("K5"));
-    assertTrue("Third key should be K5 but is " + ((HDFSGatewayEventImpl)a1.get(2)).getKey(), ((HDFSGatewayEventImpl)a1.get(2)).getKey().equals("K5"));
-    
-    assertTrue(" Peeked skip list size should be  3 ", getSortedEventQueue(hdfsBQ).getPeeked().size() == 3);
-    assertTrue(" skip list size should be  0 but is " + getSortedEventQueue(hdfsBQ).currentSkipList.size(), getSortedEventQueue(hdfsBQ).currentSkipList.size() == 0);
-    assertTrue(" skip list size should be  3 but is " + getSortedEventQueue(hdfsBQ).queueOfLists.peek().size(), getSortedEventQueue(hdfsBQ).queueOfLists.peek().size() == 3);
-    assertTrue(" skip list size should be  2 but is " + getSortedEventQueue(hdfsBQ).queueOfLists.size(), getSortedEventQueue(hdfsBQ).queueOfLists.size() == 2);
-    
-  }
-  
-  private HDFSGatewayEventImpl getNewEvent(Object key, Object value, Region r1, int bid, int tailKey) throws Exception {
-    EntryEventImpl ev1 = EntryEventImpl.create((LocalRegion)r1, Operation.CREATE,
-        key, value, null,
-        false, (DistributedMember)c.getMyId());
-    ev1.setEventId(new EventID(this.c.getDistributedSystem()));
-    HDFSGatewayEventImpl event = null;
-    event = new HDFSGatewayEventImpl(EnumListenerEvent.AFTER_CREATE, ev1, null , true, bid);
-    event.setShadowKey((long)tailKey);
-    return event;
-  }
-  
-  /**
-   * Creates the HDFS Queue instance for a region (this skips the creation of 
-   * event processor)
-   */
-  private HDFSParallelGatewaySenderQueue getHDFSQueue(Region region, Cache c) {
-    GatewaySenderAttributes gattrs = new GatewaySenderAttributes();
-    gattrs.isHDFSQueue = true;
-    gattrs.id = "SortedListForAsyncQueueJUnitTest_test";
-    ParallelAsyncEventQueueImpl gatewaySender = new ParallelAsyncEventQueueImpl(c, gattrs);
-    HashSet<Region> set = new HashSet<Region>();
-    set.add(region);
-    HDFSParallelGatewaySenderQueue queue = new HDFSParallelGatewaySenderQueue(gatewaySender, set, 0, 1);
-    queue.start();
-    return queue;
-  }
-  
- // A test verifying that the KeyToSeqNumObject compare function orders elements correctly.
-  public void testIfTheKeyToSeqNumIsKeptSortedWithoutConflation() throws Exception {
-    byte[] k1 = new byte[] { 1};
-    byte[] k2 = new byte[] { 2};
-    byte[] k3 = new byte[] { 3};
-    byte[] k4 = new byte[] { 4};
-    
-    KeyToSeqNumObject keyToSeq1 = new KeyToSeqNumObject(k1, new Long(2));
-    KeyToSeqNumObject keyToSeq2 = new KeyToSeqNumObject(k1, new Long(5));
-    KeyToSeqNumObject keyToSeq3 = new KeyToSeqNumObject(k1, new Long(8));
-    KeyToSeqNumObject keyToSeq4 = new KeyToSeqNumObject(k2, new Long(3));
-    KeyToSeqNumObject keyToSeq5 = new KeyToSeqNumObject(k2, new Long(7));
-    
-    ConcurrentSkipListSet<KeyToSeqNumObject> list = new ConcurrentSkipListSet<HDFSBucketRegionQueue.KeyToSeqNumObject>();
-    list.add(keyToSeq4);
-    list.add(keyToSeq3);
-    list.add(keyToSeq5);
-    list.add(keyToSeq1);
-    list.add(keyToSeq2);
-    list.add(keyToSeq5);
-    KeyToSeqNumObject k = list.pollFirst();
-    this.c.getLoggerI18n().fine(" KeyToSeqNumObject  byte: " + k.getRegionkey()[0] + " seq num: " + k.getSeqNum());
-    assertTrue ("Order of elements in Concurrent list is not correct ", k.equals(keyToSeq3));
-    list.remove(k);
-    
-    k = list.pollFirst();
-    this.c.getLoggerI18n().fine(" KeyToSeqNumObject  byte: " + k.getRegionkey()[0] + " seq num: " + k.getSeqNum());
-    assertTrue ("Order of elements in Concurrent list is not correct ", k.equals(keyToSeq2));
-    list.remove(k);
-    
-    k = list.pollFirst();
-    this.c.getLoggerI18n().fine(" KeyToSeqNumObject  byte: " + k.getRegionkey()[0] + " seq num: " + k.getSeqNum());
-    assertTrue ("Order of elements in Concurrent list is not correct ", k.equals(keyToSeq1));
-    list.remove(k);
-    
-    list.add(keyToSeq4);
-    list.add(keyToSeq3);
-    list.add(keyToSeq5);
-    list.add(keyToSeq1);
-    k = list.pollFirst();
-    this.c.getLoggerI18n().fine(" KeyToSeqNumObject  byte: " + k.getRegionkey()[0] + " seq num: " + k.getSeqNum());
-    assertTrue ("Order of elements in Concurrent list is not correct ", k.equals(keyToSeq3));
-    list.remove(k);
-    
-    k = list.pollFirst();
-    this.c.getLoggerI18n().fine(" KeyToSeqNumObject  byte: " + k.getRegionkey()[0] + " seq num: " + k.getSeqNum());
-    assertTrue ("Order of elements in Concurrent list is not correct ", k.equals(keyToSeq1));
-    list.remove(k);
-    
-    k = list.pollFirst();
-    this.c.getLoggerI18n().fine(" KeyToSeqNumObject  byte: " + k.getRegionkey()[0] + " seq num: " + k.getSeqNum());
-    assertTrue ("Order of elements in Concurrent list is not correct ", k.equals(keyToSeq5));
-    list.remove(k);
-    
-    k = list.pollFirst();
-    this.c.getLoggerI18n().fine(" KeyToSeqNumObject  byte: " + k.getRegionkey()[0] + " seq num: " + k.getSeqNum());
-    assertTrue ("Order of elements in Concurrent list is not correct ", k.equals(keyToSeq4));
-    
-    list.remove(k);
-  }
-  
-  public void testSingleGet() throws Exception {
-    checkQueueGet("K1", new KeyValue("K1", "V1"), "K1-V1");
-  }
-  
-  public void testMissingGet() throws Exception {
-    checkQueueGet("K1", null, 
-        "K0-V0",
-        "K2-V2");
-  }
-
-  public void testMultipleGet() throws Exception {
-    checkQueueGet("K1", new KeyValue("K1", "V1"), 
-        "K0-V0",
-        "K1-V1",
-        "K2-V2");
-  }
-
-  public void testDuplicateGet() throws Exception {
-    checkQueueGet("K1", new KeyValue("K1", "V1.4"), 
-        "K0-V0",
-        "K1-V1.0",
-        "K1-V1.1",
-        "K1-V1.2",
-        "K1-V1.3",
-        "K1-V1.4",
-        "K2-V2");
-  }
-
-  public void testEmptyIterator() throws Exception {
-    checkQueueIteration(Collections.<KeyValue>emptyList());
-  }
-  
-  public void testSingleIterator() throws Exception {
-    checkQueueIteration(getExpected(), 
-        "K0-V0",
-        "K1-V1",
-        "K2-V2",
-        "K3-V3",
-        "K4-V4",
-        "K5-V5",
-        "K6-V6",
-        "K7-V7",
-        "K8-V8",
-        "K9-V9"
-        );
-  }
-
-  public void testMultipleIterator() throws Exception {
-    checkQueueIteration(getExpected(), 
-        "K0-V0",
-        "K1-V1",
-        "K2-V2",
-        "roll",
-        "K3-V3",
-        "K4-V4",
-        "K5-V5",
-        "K6-V6",
-        "roll",
-        "K7-V7",
-        "K8-V8",
-        "K9-V9"
-        );
-  }
-
-  public void testMixedUpIterator() throws Exception {
-    checkQueueIteration(getExpected(), 
-        "K0-V0",
-        "K5-V5",
-        "K9-V9",
-        "roll",
-        "K3-V3",
-        "K2-V2",
-        "K6-V6",
-        "roll",
-        "K4-V4",
-        "K7-V7",
-        "K8-V8",
-        "K1-V1"
-        );
-  }
-
-  public void testMixedUpIterator2() throws Exception {
-    List<KeyValue> expected = new ArrayList<KeyValue>();
-    expected.add(new KeyValue("K0", "V0"));
-    expected.add(new KeyValue("K1", "V1.2"));
-    expected.add(new KeyValue("K2", "V2.1"));
-    expected.add(new KeyValue("K3", "V3.1"));
-    expected.add(new KeyValue("K4", "V4.2"));
-    expected.add(new KeyValue("K5", "V5.2"));
-    expected.add(new KeyValue("K6", "V6"));
-    expected.add(new KeyValue("K7", "V7"));
-    expected.add(new KeyValue("K8", "V8"));
-    expected.add(new KeyValue("K9", "V9"));
-    
-    checkQueueIteration(expected, 
-        "K1-V1.0",
-        "K2-V2.0",
-        "K3-V3.0",
-        "K4-V4.0",
-        "roll",
-        "K2-V2.1",
-        "K4-V4.1",
-        "K6-V6",
-        "K8-V8",
-        "roll",
-        "K1-V1.1",
-        "K3-V3.1",
-        "K5-V5.0",
-        "K7-V7",
-        "K9-V9",
-        "roll",
-        "K0-V0",
-        "K1-V1.2",
-        "K4-V4.2",
-        "K5-V5.1",
-        "K5-V5.2"
-        );
-  }
-
-  private List<KeyValue> getExpected() {
-    List<KeyValue> expected = new ArrayList<KeyValue>();
-    expected.add(new KeyValue("K0", "V0"));
-    expected.add(new KeyValue("K1", "V1"));
-    expected.add(new KeyValue("K2", "V2"));
-    expected.add(new KeyValue("K3", "V3"));
-    expected.add(new KeyValue("K4", "V4"));
-    expected.add(new KeyValue("K5", "V5"));
-    expected.add(new KeyValue("K6", "V6"));
-    expected.add(new KeyValue("K7", "V7"));
-    expected.add(new KeyValue("K8", "V8"));
-    expected.add(new KeyValue("K9", "V9"));
-    
-    return expected;
-  }
-  
-  private void checkQueueGet(String key, KeyValue expected, String... entries) throws Exception {
-    PartitionAttributesFactory paf = new PartitionAttributesFactory();
-    paf.setTotalNumBuckets(1);
-    
-    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
-    PartitionedRegion r1 = (PartitionedRegion) rf1.setPartitionAttributes(paf.create()).create("r1");
-
-    // create the buckets
-    r1.put("blah", "blah");
-
-    // hack to get the queue. 
-    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
-    HDFSBucketRegionQueue brq = (HDFSBucketRegionQueue)((PartitionedRegion)hopqueue.getRegion()).getDataStore().getLocalBucketById(0);
-
-    
-    int seq = 0;
-    for (String s : entries) {
-      if (s.equals("roll")) {
-        brq.rolloverSkipList();
-      } else {
-        String[] kv = s.split("-");
-        hopqueue.put(getNewEvent(kv[0], kv[1], r1, 0, seq++));
-      }
-    }
-
-    byte[] bkey = EntryEventImpl.serialize(key);
-    HDFSGatewayEventImpl evt = hopqueue.get(r1, bkey, 0);
-    if (expected == null) {
-      assertNull(evt);
-      
-    } else {
-      assertEquals(expected.key, evt.getKey());
-      assertEquals(expected.value, evt.getDeserializedValue());
-    }
-  }
-  
-  private void checkQueueIteration(List<KeyValue> expected, String... entries) throws Exception {
-    PartitionAttributesFactory paf = new PartitionAttributesFactory();
-    paf.setTotalNumBuckets(1);
-    
-    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
-    Region r1 = rf1.setPartitionAttributes(paf.create()).create("r1");
-
-    // create the buckets
-    r1.put("blah", "blah");
-
-    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
-    HDFSBucketRegionQueue brq = (HDFSBucketRegionQueue)((PartitionedRegion)hopqueue.getRegion()).getDataStore().getLocalBucketById(0);
-    
-    int seq = 0;
-    for (String s : entries) {
-      if (s.equals("roll")) {
-        brq.rolloverSkipList();
-      } else {
-        String[] kv = s.split("-");
-        hopqueue.put(getNewEvent(kv[0], kv[1], r1, 0, seq++));
-        getSortedEventQueue(brq).rollover(true);
-      }
-    }
-    
-    Iterator<HDFSGatewayEventImpl> iter = brq.iterator(r1);
-    List<KeyValue> actual = new ArrayList<KeyValue>();
-    while (iter.hasNext()) {
-      HDFSGatewayEventImpl evt = iter.next();
-      actual.add(new KeyValue((String) evt.getKey(), (String) evt.getDeserializedValue()));
-    }
-    
-    assertEquals(expected, actual);
-  }
-  
-  public static class KeyValue {
-    public final String key;
-    public final String value;
-    
-    public KeyValue(String key, String value) {
-      this.key = key;
-      this.value = value;
-    }
-    
-    @Override
-    public boolean equals(Object o) {
-      if (o == null)
-        return false;
-
-      KeyValue obj = (KeyValue) o;
-      return key.equals(obj.key) && value.equals(obj.value);
-    }
-    
-    @Override
-    public String toString() {
-      return key + "=" + value;
-    }
-  }
-}
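
The assertions in the deleted testIfTheKeyToSeqNumIsKeptSortedWithoutConflation imply an ordering of key bytes ascending and, within the same key, sequence number descending, so the newest update for a key is polled first. A minimal sketch that reproduces that ordering with a plain ConcurrentSkipListSet; KeySeq and its comparator are assumptions drawn from those assertions, not the removed KeyToSeqNumObject implementation:

    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.concurrent.ConcurrentSkipListSet;

    // Hypothetical stand-in for KeyToSeqNumObject: ordered by key bytes ascending,
    // then by sequence number descending, matching the order the deleted test
    // asserts (k1/8, k1/5, k1/2, k2/7, k2/3).
    public class KeySeqOrderingSketch {
      static final class KeySeq {
        final byte[] key; final long seq;
        KeySeq(byte[] key, long seq) { this.key = key; this.seq = seq; }
        public String toString() { return Arrays.toString(key) + "/" + seq; }
      }

      static int compareBytes(byte[] a, byte[] b) {
        int n = Math.min(a.length, b.length);
        for (int i = 0; i < n; i++) {
          int c = Byte.compare(a[i], b[i]);
          if (c != 0) return c;
        }
        return Integer.compare(a.length, b.length);
      }

      public static void main(String[] args) {
        Comparator<KeySeq> byKeyThenNewest = (a, b) -> {
          int byKey = compareBytes(a.key, b.key);
          return byKey != 0 ? byKey : Long.compare(b.seq, a.seq);  // newer seq first within a key
        };
        ConcurrentSkipListSet<KeySeq> set = new ConcurrentSkipListSet<>(byKeyThenNewest);
        set.add(new KeySeq(new byte[] {1}, 2));
        set.add(new KeySeq(new byte[] {1}, 5));
        set.add(new KeySeq(new byte[] {1}, 8));
        set.add(new KeySeq(new byte[] {2}, 3));
        set.add(new KeySeq(new byte[] {2}, 7));
        System.out.println(set);   // [[1]/8, [1]/5, [1]/2, [2]/7, [2]/3]
      }
    }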

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/test/java/com/gemstone/gemfire/cache30/Bug38741DUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache30/Bug38741DUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache30/Bug38741DUnitTest.java
index f1b9746..7e4acbf 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache30/Bug38741DUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache30/Bug38741DUnitTest.java
@@ -308,7 +308,7 @@ public class Bug38741DUnitTest extends ClientServerTestCase {
             BucketRegion br = (BucketRegion) r;
             try {
               KeyInfo keyInfo = new KeyInfo(k1, null, bucketId);
-              RawValue rv = br.getSerialized(keyInfo, false, false, null, null, false, false);
+              RawValue rv = br.getSerialized(keyInfo, false, false, null, null, false);
               Object val = rv.getRawValue();
               assertTrue(val instanceof CachedDeserializable);
               CachedDeserializable cd = (CachedDeserializable)val;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderQueueJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderQueueJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderQueueJUnitTest.java
index a7daf98..b2399fd 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderQueueJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderQueueJUnitTest.java
@@ -67,7 +67,7 @@ public class ParallelGatewaySenderQueueJUnitTest {
     PartitionedRegionDataStore dataStore = mock(PartitionedRegionDataStore.class);
     when(mockMetaRegion.getDataStore()).thenReturn(dataStore);
     when(dataStore.getSizeOfLocalPrimaryBuckets()).thenReturn(3); 
-    when(metaRegionFactory.newMetataRegion(any(), any(), any(), any(), anyBoolean())).thenReturn(mockMetaRegion);
+    when(metaRegionFactory.newMetataRegion(any(), any(), any(), any())).thenReturn(mockMetaRegion);
     when(cache.createVMRegion(any(), any(), any())).thenReturn(mockMetaRegion);
     
     queue.addShadowPartitionedRegionForUserPR(mockPR("region1"));
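
The hunk above drops the anyBoolean() matcher because the stubbed factory method lost its boolean parameter; Mockito requires the argument matchers in when(...) to line up with the stubbed method's arity. A minimal sketch of that rule with a hypothetical four-argument factory, not the Geode MetaRegionFactory:

    import static org.mockito.Mockito.*;

    // Hypothetical factory used only to illustrate matcher arity; adding a fifth
    // matcher to the four-argument method below would make Mockito report an
    // invalid use of argument matchers at stubbing time.
    public class MatcherAritySketch {
      public interface MetaRegion { }
      public interface MetaFactory {
        MetaRegion newMetaRegion(Object cache, Object attributes, Object sender, Object name);
      }

      public static void main(String[] args) {
        MetaFactory factory = mock(MetaFactory.class);
        MetaRegion region = mock(MetaRegion.class);
        // Four parameters, four matchers.
        when(factory.newMetaRegion(any(), any(), any(), any())).thenReturn(region);
        System.out.println(factory.newMetaRegion("c", "a", "s", "n") == region); // prints true
      }
    }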

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/test/java/com/gemstone/gemfire/management/internal/configuration/domain/CacheElementJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/configuration/domain/CacheElementJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/configuration/domain/CacheElementJUnitTest.java
index dca5d0b..57d1c7e 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/configuration/domain/CacheElementJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/configuration/domain/CacheElementJUnitTest.java
@@ -120,7 +120,6 @@ public class CacheElementJUnitTest {
     assertEntry("cache-server", order++, entries.next());
     assertEntry("pool", order++, entries.next());
     assertEntry("disk-store", order++, entries.next());
-    assertEntry("hdfs-store", order++, entries.next());
     assertEntry("pdx", order++, entries.next());
     assertEntry("region-attributes", order++, entries.next());
     assertEntry("jndi-bindings", order++, entries.next());



[07/63] [abbrv] incubator-geode git commit: GEODE-1072: Removing HDFS related code

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/org/apache/hadoop/io/SequenceFile.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/org/apache/hadoop/io/SequenceFile.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/org/apache/hadoop/io/SequenceFile.java
deleted file mode 100644
index b13f499..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/org/apache/hadoop/io/SequenceFile.java
+++ /dev/null
@@ -1,3726 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io;
-
-import java.io.*;
-import java.util.*;
-import java.rmi.server.UID;
-import java.security.MessageDigest;
-import org.apache.commons.logging.*;
-import org.apache.hadoop.util.Options;
-import org.apache.hadoop.fs.*;
-import org.apache.hadoop.fs.Options.CreateOpts;
-import org.apache.hadoop.io.compress.CodecPool;
-import org.apache.hadoop.io.compress.CompressionCodec;
-import org.apache.hadoop.io.compress.CompressionInputStream;
-import org.apache.hadoop.io.compress.CompressionOutputStream;
-import org.apache.hadoop.io.compress.Compressor;
-import org.apache.hadoop.io.compress.Decompressor;
-import org.apache.hadoop.io.compress.DefaultCodec;
-import org.apache.hadoop.io.compress.GzipCodec;
-import org.apache.hadoop.io.compress.zlib.ZlibFactory;
-import org.apache.hadoop.io.serializer.Deserializer;
-import org.apache.hadoop.io.serializer.Serializer;
-import org.apache.hadoop.io.serializer.SerializationFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.*;
-import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.util.Progress;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.util.NativeCodeLoader;
-import org.apache.hadoop.util.MergeSort;
-import org.apache.hadoop.util.PriorityQueue;
-import org.apache.hadoop.util.Time;
-// ** Pivotal Changes Begin
-import org.apache.hadoop.io.DataInputBuffer;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.RawComparator;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.UTF8;
-import org.apache.hadoop.io.VersionMismatchException;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.io.WritableComparator;
-import org.apache.hadoop.io.WritableName;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
-import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
-//** Pivotal Changes End
-
-/** 
- * <code>SequenceFile</code>s are flat files consisting of binary key/value 
- * pairs.
- * 
- * <p><code>SequenceFile</code> provides {@link Writer}, {@link Reader} and
- * {@link Sorter} classes for writing, reading and sorting respectively.</p>
- * 
- * There are three <code>SequenceFile</code> <code>Writer</code>s based on the 
- * {@link CompressionType} used to compress key/value pairs:
- * <ol>
- *   <li>
- *   <code>Writer</code> : Uncompressed records.
- *   </li>
- *   <li>
- *   <code>RecordCompressWriter</code> : Record-compressed files, only compress 
- *                                       values.
- *   </li>
- *   <li>
- *   <code>BlockCompressWriter</code> : Block-compressed files, both keys & 
- *                                      values are collected in 'blocks' 
- *                                      separately and compressed. The size of 
- *                                      the 'block' is configurable.
- * </ol>
- * 
- * <p>The actual compression algorithm used to compress key and/or values can be
- * specified by using the appropriate {@link CompressionCodec}.</p>
- * 
- * <p>The recommended way is to use the static <tt>createWriter</tt> methods
- * provided by the <code>SequenceFile</code> to chose the preferred format.</p>
- *
- * <p>The {@link Reader} acts as the bridge and can read any of the above 
- * <code>SequenceFile</code> formats.</p>
- *
- * <h4 id="Formats">SequenceFile Formats</h4>
- * 
- * <p>Essentially there are 3 different formats for <code>SequenceFile</code>s
- * depending on the <code>CompressionType</code> specified. All of them share a
- * <a href="#Header">common header</a> described below.
- * 
- * <h5 id="Header">SequenceFile Header</h5>
- * <ul>
- *   <li>
- *   version - 3 bytes of magic header <b>SEQ</b>, followed by 1 byte of actual 
- *             version number (e.g. SEQ4 or SEQ6)
- *   </li>
- *   <li>
- *   keyClassName -key class
- *   </li>
- *   <li>
- *   valueClassName - value class
- *   </li>
- *   <li>
- *   compression - A boolean which specifies if compression is turned on for 
- *                 keys/values in this file.
- *   </li>
- *   <li>
- *   blockCompression - A boolean which specifies if block-compression is 
- *                      turned on for keys/values in this file.
- *   </li>
- *   <li>
- *   compression codec - <code>CompressionCodec</code> class which is used for  
- *                       compression of keys and/or values (if compression is 
- *                       enabled).
- *   </li>
- *   <li>
- *   metadata - {@link Metadata} for this file.
- *   </li>
- *   <li>
- *   sync - A sync marker to denote end of the header.
- *   </li>
- * </ul>
- * 
- * <h5 id="#UncompressedFormat">Uncompressed SequenceFile Format</h5>
- * <ul>
- * <li>
- * <a href="#Header">Header</a>
- * </li>
- * <li>
- * Record
- *   <ul>
- *     <li>Record length</li>
- *     <li>Key length</li>
- *     <li>Key</li>
- *     <li>Value</li>
- *   </ul>
- * </li>
- * <li>
- * A sync-marker every few <code>100</code> bytes or so.
- * </li>
- * </ul>
- *
- * <h5 id="#RecordCompressedFormat">Record-Compressed SequenceFile Format</h5>
- * <ul>
- * <li>
- * <a href="#Header">Header</a>
- * </li>
- * <li>
- * Record
- *   <ul>
- *     <li>Record length</li>
- *     <li>Key length</li>
- *     <li>Key</li>
- *     <li><i>Compressed</i> Value</li>
- *   </ul>
- * </li>
- * <li>
- * A sync-marker every few <code>100</code> bytes or so.
- * </li>
- * </ul>
- * 
- * <h5 id="#BlockCompressedFormat">Block-Compressed SequenceFile Format</h5>
- * <ul>
- * <li>
- * <a href="#Header">Header</a>
- * </li>
- * <li>
- * Record <i>Block</i>
- *   <ul>
- *     <li>Uncompressed number of records in the block</li>
- *     <li>Compressed key-lengths block-size</li>
- *     <li>Compressed key-lengths block</li>
- *     <li>Compressed keys block-size</li>
- *     <li>Compressed keys block</li>
- *     <li>Compressed value-lengths block-size</li>
- *     <li>Compressed value-lengths block</li>
- *     <li>Compressed values block-size</li>
- *     <li>Compressed values block</li>
- *   </ul>
- * </li>
- * <li>
- * A sync-marker every block.
- * </li>
- * </ul>
- * 
- * <p>The compressed blocks of key lengths and value lengths consist of the 
- * actual lengths of individual keys/values encoded in ZeroCompressedInteger 
- * format.</p>
- * 
- * @see CompressionCodec
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class SequenceFile {
-  private static final Log LOG = LogFactory.getLog(SequenceFile.class);
-
-  private SequenceFile() {}                         // no public ctor
-
-  private static final byte BLOCK_COMPRESS_VERSION = (byte)4;
-  private static final byte CUSTOM_COMPRESS_VERSION = (byte)5;
-  private static final byte VERSION_WITH_METADATA = (byte)6;
-  private static byte[] VERSION = new byte[] {
-    (byte)'S', (byte)'E', (byte)'Q', VERSION_WITH_METADATA
-  };
-
-  private static final int SYNC_ESCAPE = -1;      // "length" of sync entries
-  private static final int SYNC_HASH_SIZE = 16;   // number of bytes in hash 
-  private static final int SYNC_SIZE = 4+SYNC_HASH_SIZE; // escape + hash
-
-  /** The number of bytes between sync points.*/
-  public static final int SYNC_INTERVAL = 100*SYNC_SIZE; 
-
-  /** 
-   * The compression type used to compress key/value pairs in the 
-   * {@link SequenceFile}.
-   * 
-   * @see SequenceFile.Writer
-   */
-  public static enum CompressionType {
-    /** Do not compress records. */
-    NONE, 
-    /** Compress values only, each separately. */
-    RECORD,
-    /** Compress sequences of records together in blocks. */
-    BLOCK
-  }
-
-  /**
-   * Get the compression type for the reduce outputs
-   * @param job the job config to look in
-   * @return the kind of compression to use
-   */
-  static public CompressionType getDefaultCompressionType(Configuration job) {
-    String name = job.get("io.seqfile.compression.type");
-    return name == null ? CompressionType.RECORD : 
-      CompressionType.valueOf(name);
-  }
-  
-  /**
-   * Set the default compression type for sequence files.
-   * @param job the configuration to modify
-   * @param val the new compression type (none, block, record)
-   */
-  static public void setDefaultCompressionType(Configuration job, 
-                                               CompressionType val) {
-    job.set("io.seqfile.compression.type", val.toString());
-  }
-
-  /**
-   * Create a new Writer with the given options.
-   * @param conf the configuration to use
-   * @param opts the options to create the file with
-   * @return a new Writer
-   * @throws IOException
-   */
-  public static Writer createWriter(Configuration conf, Writer.Option... opts
-                                    ) throws IOException {
-    Writer.CompressionOption compressionOption = 
-      Options.getOption(Writer.CompressionOption.class, opts);
-    CompressionType kind;
-    if (compressionOption != null) {
-      kind = compressionOption.getValue();
-    } else {
-      kind = getDefaultCompressionType(conf);
-      opts = Options.prependOptions(opts, Writer.compression(kind));
-    }
-    switch (kind) {
-      default:
-      case NONE:
-        return new Writer(conf, opts);
-      case RECORD:
-        return new RecordCompressWriter(conf, opts);
-      case BLOCK:
-        return new BlockCompressWriter(conf, opts);
-    }
-  }
-
-  /**
-   * Construct the preferred type of SequenceFile Writer.
-   * @param fs The configured filesystem. 
-   * @param conf The configuration.
-   * @param name The name of the file. 
-   * @param keyClass The 'key' type.
-   * @param valClass The 'value' type.
-   * @return Returns the handle to the constructed SequenceFile Writer.
-   * @throws IOException
-   * @deprecated Use {@link #createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)}
-   *     instead.
-   */
-  @Deprecated
-  public static Writer 
-    createWriter(FileSystem fs, Configuration conf, Path name, 
-                 Class keyClass, Class valClass) throws IOException {
-    return createWriter(conf, Writer.filesystem(fs),
-                        Writer.file(name), Writer.keyClass(keyClass),
-                        Writer.valueClass(valClass));
-  }
-  
-  /**
-   * Construct the preferred type of SequenceFile Writer.
-   * @param fs The configured filesystem. 
-   * @param conf The configuration.
-   * @param name The name of the file. 
-   * @param keyClass The 'key' type.
-   * @param valClass The 'value' type.
-   * @param compressionType The compression type.
-   * @return Returns the handle to the constructed SequenceFile Writer.
-   * @throws IOException
-   * @deprecated Use {@link #createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)}
-   *     instead.
-   */
-  @Deprecated
-  public static Writer 
-    createWriter(FileSystem fs, Configuration conf, Path name, 
-                 Class keyClass, Class valClass, 
-                 CompressionType compressionType) throws IOException {
-    return createWriter(conf, Writer.filesystem(fs),
-                        Writer.file(name), Writer.keyClass(keyClass),
-                        Writer.valueClass(valClass), 
-                        Writer.compression(compressionType));
-  }
-  
-  /**
-   * Construct the preferred type of SequenceFile Writer.
-   * @param fs The configured filesystem. 
-   * @param conf The configuration.
-   * @param name The name of the file. 
-   * @param keyClass The 'key' type.
-   * @param valClass The 'value' type.
-   * @param compressionType The compression type.
-   * @param progress The Progressable object to track progress.
-   * @return Returns the handle to the constructed SequenceFile Writer.
-   * @throws IOException
-   * @deprecated Use {@link #createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)}
-   *     instead.
-   */
-  @Deprecated
-  public static Writer
-    createWriter(FileSystem fs, Configuration conf, Path name, 
-                 Class keyClass, Class valClass, CompressionType compressionType,
-                 Progressable progress) throws IOException {
-    return createWriter(conf, Writer.file(name),
-                        Writer.filesystem(fs),
-                        Writer.keyClass(keyClass),
-                        Writer.valueClass(valClass), 
-                        Writer.compression(compressionType),
-                        Writer.progressable(progress));
-  }
-
-  /**
-   * Construct the preferred type of SequenceFile Writer.
-   * @param fs The configured filesystem. 
-   * @param conf The configuration.
-   * @param name The name of the file. 
-   * @param keyClass The 'key' type.
-   * @param valClass The 'value' type.
-   * @param compressionType The compression type.
-   * @param codec The compression codec.
-   * @return Returns the handle to the constructed SequenceFile Writer.
-   * @throws IOException
-   * @deprecated Use {@link #createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)}
-   *     instead.
-   */
-  @Deprecated
-  public static Writer 
-    createWriter(FileSystem fs, Configuration conf, Path name, 
-                 Class keyClass, Class valClass, CompressionType compressionType, 
-                 CompressionCodec codec) throws IOException {
-    return createWriter(conf, Writer.file(name),
-                        Writer.filesystem(fs),
-                        Writer.keyClass(keyClass),
-                        Writer.valueClass(valClass), 
-                        Writer.compression(compressionType, codec));
-  }
-  
-  /**
-   * Construct the preferred type of SequenceFile Writer.
-   * @param fs The configured filesystem. 
-   * @param conf The configuration.
-   * @param name The name of the file. 
-   * @param keyClass The 'key' type.
-   * @param valClass The 'value' type.
-   * @param compressionType The compression type.
-   * @param codec The compression codec.
-   * @param progress The Progressable object to track progress.
-   * @param metadata The metadata of the file.
-   * @return Returns the handle to the constructed SequenceFile Writer.
-   * @throws IOException
-   * @deprecated Use {@link #createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)}
-   *     instead.
-   */
-  @Deprecated
-  public static Writer
-    createWriter(FileSystem fs, Configuration conf, Path name, 
-                 Class keyClass, Class valClass, 
-                 CompressionType compressionType, CompressionCodec codec,
-                 Progressable progress, Metadata metadata) throws IOException {
-    return createWriter(conf, Writer.file(name),
-                        Writer.filesystem(fs),
-                        Writer.keyClass(keyClass),
-                        Writer.valueClass(valClass),
-                        Writer.compression(compressionType, codec),
-                        Writer.progressable(progress),
-                        Writer.metadata(metadata));
-  }
-
-  /**
-   * Construct the preferred type of SequenceFile Writer.
-   * @param fs The configured filesystem.
-   * @param conf The configuration.
-   * @param name The name of the file.
-   * @param keyClass The 'key' type.
-   * @param valClass The 'value' type.
-   * @param bufferSize buffer size for the underlying output stream.
-   * @param replication replication factor for the file.
-   * @param blockSize block size for the file.
-   * @param compressionType The compression type.
-   * @param codec The compression codec.
-   * @param progress The Progressable object to track progress.
-   * @param metadata The metadata of the file.
-   * @return Returns the handle to the constructed SequenceFile Writer.
-   * @throws IOException
-   * @deprecated Use {@link #createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)}
-   *     instead.
-   */
-  @Deprecated
-  public static Writer
-    createWriter(FileSystem fs, Configuration conf, Path name,
-                 Class keyClass, Class valClass, int bufferSize,
-                 short replication, long blockSize,
-                 CompressionType compressionType, CompressionCodec codec,
-                 Progressable progress, Metadata metadata) throws IOException {
-    return createWriter(conf, Writer.file(name),
-                        Writer.filesystem(fs),
-                        Writer.keyClass(keyClass),
-                        Writer.valueClass(valClass), 
-                        Writer.bufferSize(bufferSize), 
-                        Writer.replication(replication),
-                        Writer.blockSize(blockSize),
-                        Writer.compression(compressionType, codec),
-                        Writer.progressable(progress),
-                        Writer.metadata(metadata));
-  }
-
-  /**
-   * Construct the preferred type of SequenceFile Writer.
-   * @param fs The configured filesystem.
-   * @param conf The configuration.
-   * @param name The name of the file.
-   * @param keyClass The 'key' type.
-   * @param valClass The 'value' type.
-   * @param bufferSize buffer size for the underlying output stream.
-   * @param replication replication factor for the file.
-   * @param blockSize block size for the file.
-   * @param createParent create parent directory if non-existent
-   * @param compressionType The compression type.
-   * @param codec The compression codec.
-   * @param metadata The metadata of the file.
-   * @return Returns the handle to the constructed SequenceFile Writer.
-   * @throws IOException
-   */
-  @Deprecated
-  public static Writer
-  createWriter(FileSystem fs, Configuration conf, Path name,
-               Class keyClass, Class valClass, int bufferSize,
-               short replication, long blockSize, boolean createParent,
-               CompressionType compressionType, CompressionCodec codec,
-               Metadata metadata) throws IOException {
-    return createWriter(FileContext.getFileContext(fs.getUri(), conf),
-        conf, name, keyClass, valClass, compressionType, codec,
-        metadata, EnumSet.of(CreateFlag.CREATE,CreateFlag.OVERWRITE),
-        CreateOpts.bufferSize(bufferSize),
-        createParent ? CreateOpts.createParent()
-                     : CreateOpts.donotCreateParent(),
-        CreateOpts.repFac(replication),
-        CreateOpts.blockSize(blockSize)
-      );
-  }
-
-  /**
-   * Construct the preferred type of SequenceFile Writer.
-   * @param fc The context for the specified file.
-   * @param conf The configuration.
-   * @param name The name of the file.
-   * @param keyClass The 'key' type.
-   * @param valClass The 'value' type.
-   * @param compressionType The compression type.
-   * @param codec The compression codec.
-   * @param metadata The metadata of the file.
-   * @param createFlag gives the semantics of create: overwrite, append etc.
-   * @param opts file creation options; see {@link CreateOpts}.
-   * @return Returns the handle to the constructed SequenceFile Writer.
-   * @throws IOException
-   */
-  public static Writer
-  createWriter(FileContext fc, Configuration conf, Path name,
-               Class keyClass, Class valClass,
-               CompressionType compressionType, CompressionCodec codec,
-               Metadata metadata,
-               final EnumSet<CreateFlag> createFlag, CreateOpts... opts)
-               throws IOException {
-    return createWriter(conf, fc.create(name, createFlag, opts),
-          keyClass, valClass, compressionType, codec, metadata).ownStream();
-  }
-
-  /**
-   * Construct the preferred type of SequenceFile Writer.
-   * @param fs The configured filesystem. 
-   * @param conf The configuration.
-   * @param name The name of the file. 
-   * @param keyClass The 'key' type.
-   * @param valClass The 'value' type.
-   * @param compressionType The compression type.
-   * @param codec The compression codec.
-   * @param progress The Progressable object to track progress.
-   * @return Returns the handle to the constructed SequenceFile Writer.
-   * @throws IOException
-   * @deprecated Use {@link #createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)}
-   *     instead.
-   */
-  @Deprecated
-  public static Writer
-    createWriter(FileSystem fs, Configuration conf, Path name, 
-                 Class keyClass, Class valClass, 
-                 CompressionType compressionType, CompressionCodec codec,
-                 Progressable progress) throws IOException {
-    return createWriter(conf, Writer.file(name),
-                        Writer.filesystem(fs),
-                        Writer.keyClass(keyClass),
-                        Writer.valueClass(valClass),
-                        Writer.compression(compressionType, codec),
-                        Writer.progressable(progress));
-  }
-
-  /**
-   * Construct the preferred type of 'raw' SequenceFile Writer.
-   * @param conf The configuration.
-   * @param out The stream on top which the writer is to be constructed.
-   * @param keyClass The 'key' type.
-   * @param valClass The 'value' type.
-   * @param compressionType The compression type.
-   * @param codec The compression codec.
-   * @param metadata The metadata of the file.
-   * @return Returns the handle to the constructed SequenceFile Writer.
-   * @throws IOException
-   * @deprecated Use {@link #createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)}
-   *     instead.
-   */
-  @Deprecated
-  public static Writer
-    createWriter(Configuration conf, FSDataOutputStream out, 
-                 Class keyClass, Class valClass,
-                 CompressionType compressionType,
-                 CompressionCodec codec, Metadata metadata) throws IOException {
-    return createWriter(conf, Writer.stream(out), Writer.keyClass(keyClass),
-                        Writer.valueClass(valClass), 
-                        Writer.compression(compressionType, codec),
-                        Writer.metadata(metadata));
-  }
-  
-  /**
-   * Construct the preferred type of 'raw' SequenceFile Writer.
-   * @param conf The configuration.
-   * @param out The stream on top which the writer is to be constructed.
-   * @param keyClass The 'key' type.
-   * @param valClass The 'value' type.
-   * @param compressionType The compression type.
-   * @param codec The compression codec.
-   * @return Returns the handle to the constructed SequenceFile Writer.
-   * @throws IOException
-   * @deprecated Use {@link #createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)}
-   *     instead.
-   */
-  @Deprecated
-  public static Writer
-    createWriter(Configuration conf, FSDataOutputStream out, 
-                 Class keyClass, Class valClass, CompressionType compressionType,
-                 CompressionCodec codec) throws IOException {
-    return createWriter(conf, Writer.stream(out), Writer.keyClass(keyClass),
-                        Writer.valueClass(valClass),
-                        Writer.compression(compressionType, codec));
-  }
-  
-
-  /** The interface to 'raw' values of SequenceFiles. */
-  public static interface ValueBytes {
-
-    /** Writes the uncompressed bytes to the outStream.
-     * @param outStream : Stream to write uncompressed bytes into.
-     * @throws IOException
-     */
-    public void writeUncompressedBytes(DataOutputStream outStream)
-      throws IOException;
-
-    /** Write compressed bytes to outStream. 
-     * Note that it will NOT compress the bytes if they are not already compressed.
-     * @param outStream : Stream to write compressed bytes into.
-     */
-    public void writeCompressedBytes(DataOutputStream outStream) 
-      throws IllegalArgumentException, IOException;
-
-    /**
-     * Size of stored data.
-     */
-    public int getSize();
-  }
-  
-  private static class UncompressedBytes implements ValueBytes {
-    private int dataSize;
-    private byte[] data;
-    
-    private UncompressedBytes() {
-      data = null;
-      dataSize = 0;
-    }
-    
-    private void reset(DataInputStream in, int length) throws IOException {
-      if (data == null) {
-        data = new byte[length];
-      } else if (length > data.length) {
-        data = new byte[Math.max(length, data.length * 2)];
-      }
-      dataSize = -1;
-      in.readFully(data, 0, length);
-      dataSize = length;
-    }
-    
-    @Override
-    public int getSize() {
-      return dataSize;
-    }
-    
-    @Override
-    public void writeUncompressedBytes(DataOutputStream outStream)
-      throws IOException {
-      outStream.write(data, 0, dataSize);
-    }
-
-    @Override
-    public void writeCompressedBytes(DataOutputStream outStream) 
-      throws IllegalArgumentException, IOException {
-      throw 
-        new IllegalArgumentException("UncompressedBytes cannot be compressed!");
-    }
-
-  } // UncompressedBytes
-  
-  private static class CompressedBytes implements ValueBytes {
-    private int dataSize;
-    private byte[] data;
-    DataInputBuffer rawData = null;
-    CompressionCodec codec = null;
-    CompressionInputStream decompressedStream = null;
-
-    private CompressedBytes(CompressionCodec codec) {
-      data = null;
-      dataSize = 0;
-      this.codec = codec;
-    }
-
-    private void reset(DataInputStream in, int length) throws IOException {
-      if (data == null) {
-        data = new byte[length];
-      } else if (length > data.length) {
-        data = new byte[Math.max(length, data.length * 2)];
-      } 
-      dataSize = -1;
-      in.readFully(data, 0, length);
-      dataSize = length;
-    }
-    
-    @Override
-    public int getSize() {
-      return dataSize;
-    }
-    
-    @Override
-    public void writeUncompressedBytes(DataOutputStream outStream)
-      throws IOException {
-      if (decompressedStream == null) {
-        rawData = new DataInputBuffer();
-        decompressedStream = codec.createInputStream(rawData);
-      } else {
-        decompressedStream.resetState();
-      }
-      rawData.reset(data, 0, dataSize);
-
-      byte[] buffer = new byte[8192];
-      int bytesRead = 0;
-      while ((bytesRead = decompressedStream.read(buffer, 0, 8192)) != -1) {
-        outStream.write(buffer, 0, bytesRead);
-      }
-    }
-
-    @Override
-    public void writeCompressedBytes(DataOutputStream outStream) 
-      throws IllegalArgumentException, IOException {
-      outStream.write(data, 0, dataSize);
-    }
-
-  } // CompressedBytes
-  
-  /**
-   * The class encapsulating with the metadata of a file.
-   * The metadata of a file is a list of attribute name/value
-   * pairs of Text type.
-   *
-   */
-  public static class Metadata implements Writable {
-
-    private TreeMap<Text, Text> theMetadata;
-    
-    public Metadata() {
-      this(new TreeMap<Text, Text>());
-    }
-    
-    public Metadata(TreeMap<Text, Text> arg) {
-      if (arg == null) {
-        this.theMetadata = new TreeMap<Text, Text>();
-      } else {
-        this.theMetadata = arg;
-      }
-    }
-    
-    public Text get(Text name) {
-      return this.theMetadata.get(name);
-    }
-    
-    public void set(Text name, Text value) {
-      this.theMetadata.put(name, value);
-    }
-    
-    public TreeMap<Text, Text> getMetadata() {
-      return new TreeMap<Text, Text>(this.theMetadata);
-    }
-    
-    @Override
-    public void write(DataOutput out) throws IOException {
-      out.writeInt(this.theMetadata.size());
-      Iterator<Map.Entry<Text, Text>> iter =
-        this.theMetadata.entrySet().iterator();
-      while (iter.hasNext()) {
-        Map.Entry<Text, Text> en = iter.next();
-        en.getKey().write(out);
-        en.getValue().write(out);
-      }
-    }
-
-    @Override
-    public void readFields(DataInput in) throws IOException {
-      int sz = in.readInt();
-      if (sz < 0) throw new IOException("Invalid size: " + sz + " for file metadata object");
-      this.theMetadata = new TreeMap<Text, Text>();
-      for (int i = 0; i < sz; i++) {
-        Text key = new Text();
-        Text val = new Text();
-        key.readFields(in);
-        val.readFields(in);
-        this.theMetadata.put(key, val);
-      }    
-    }
-
-    @Override
-    public boolean equals(Object other) {
-      if (other == null) {
-        return false;
-      }
-      if (other.getClass() != this.getClass()) {
-        return false;
-      } else {
-        return equals((Metadata)other);
-      }
-    }
-    
-    public boolean equals(Metadata other) {
-      if (other == null) return false;
-      if (this.theMetadata.size() != other.theMetadata.size()) {
-        return false;
-      }
-      Iterator<Map.Entry<Text, Text>> iter1 =
-        this.theMetadata.entrySet().iterator();
-      Iterator<Map.Entry<Text, Text>> iter2 =
-        other.theMetadata.entrySet().iterator();
-      while (iter1.hasNext() && iter2.hasNext()) {
-        Map.Entry<Text, Text> en1 = iter1.next();
-        Map.Entry<Text, Text> en2 = iter2.next();
-        if (!en1.getKey().equals(en2.getKey())) {
-          return false;
-        }
-        if (!en1.getValue().equals(en2.getValue())) {
-          return false;
-        }
-      }
-      if (iter1.hasNext() || iter2.hasNext()) {
-        return false;
-      }
-      return true;
-    }
-
-    @Override
-    public int hashCode() {
-      assert false : "hashCode not designed";
-      return 42; // any arbitrary constant will do 
-    }
-    
-    @Override
-    public String toString() {
-      StringBuilder sb = new StringBuilder();
-      sb.append("size: ").append(this.theMetadata.size()).append("\n");
-      Iterator<Map.Entry<Text, Text>> iter =
-        this.theMetadata.entrySet().iterator();
-      while (iter.hasNext()) {
-        Map.Entry<Text, Text> en = iter.next();
-        sb.append("\t").append(en.getKey().toString()).append("\t").append(en.getValue().toString());
-        sb.append("\n");
-      }
-      return sb.toString();
-    }
-  }
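
A short sketch of how the Metadata block is attached at write time and read back from the file header; again written against the upstream org.apache.hadoop.io.SequenceFile API that this copy mirrors, with illustrative attribute names and paths.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Text;

    public class SequenceFileMetadataExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path("/tmp/with-metadata.seq");   // illustrative path

        // Metadata is an ordered map of Text name/value pairs stored in the header.
        SequenceFile.Metadata metadata = new SequenceFile.Metadata();
        metadata.set(new Text("created-by"), new Text("example"));

        try (SequenceFile.Writer writer = SequenceFile.createWriter(conf,
            SequenceFile.Writer.file(path),
            SequenceFile.Writer.keyClass(Text.class),
            SequenceFile.Writer.valueClass(Text.class),
            SequenceFile.Writer.metadata(metadata))) {
          writer.append(new Text("k"), new Text("v"));
        }

        // The header metadata is available without scanning any records.
        try (SequenceFile.Reader reader =
                 new SequenceFile.Reader(conf, SequenceFile.Reader.file(path))) {
          System.out.println(reader.getMetadata().get(new Text("created-by")));
        }
      }
    }
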
-  
-  /** Write key/value pairs to a sequence-format file. */
-  public static class Writer implements java.io.Closeable, Syncable {
-    private Configuration conf;
-    FSDataOutputStream out;
-    boolean ownOutputStream = true;
-    DataOutputBuffer buffer = new DataOutputBuffer();
-
-    Class keyClass;
-    Class valClass;
-
-    private final CompressionType compress;
-    CompressionCodec codec = null;
-    CompressionOutputStream deflateFilter = null;
-    DataOutputStream deflateOut = null;
-    Metadata metadata = null;
-    Compressor compressor = null;
-    
-    protected Serializer keySerializer;
-    protected Serializer uncompressedValSerializer;
-    protected Serializer compressedValSerializer;
-    
-    // Insert a globally unique 16-byte value every few entries, so that one
-    // can seek into the middle of a file and then synchronize with record
-    // starts and ends by scanning for this value.
-    long lastSyncPos;                     // position of last sync
-    byte[] sync;                          // 16 random bytes
-    {
-      try {                                       
-        MessageDigest digester = MessageDigest.getInstance("MD5");
-        long time = Time.now();
-        digester.update((new UID()+"@"+time).getBytes());
-        sync = digester.digest();
-      } catch (Exception e) {
-        throw new RuntimeException(e);
-      }
-    }
-
-    public static interface Option {}
-    
-    static class FileOption extends Options.PathOption 
-                                    implements Option {
-      FileOption(Path path) {
-        super(path);
-      }
-    }
-
-    /**
-     * @deprecated only used for backwards-compatibility in the createWriter methods
-     * that take FileSystem.
-     */
-    @Deprecated
-    private static class FileSystemOption implements Option {
-      private final FileSystem value;
-      protected FileSystemOption(FileSystem value) {
-        this.value = value;
-      }
-      public FileSystem getValue() {
-        return value;
-      }
-    }
-
-    static class StreamOption extends Options.FSDataOutputStreamOption 
-                              implements Option {
-      StreamOption(FSDataOutputStream stream) {
-        super(stream);
-      }
-    }
-
-    static class BufferSizeOption extends Options.IntegerOption
-                                  implements Option {
-      BufferSizeOption(int value) {
-        super(value);
-      }
-    }
-    
-    static class BlockSizeOption extends Options.LongOption implements Option {
-      BlockSizeOption(long value) {
-        super(value);
-      }
-    }
-
-    static class ReplicationOption extends Options.IntegerOption
-                                   implements Option {
-      ReplicationOption(int value) {
-        super(value);
-      }
-    }
-
-    static class KeyClassOption extends Options.ClassOption implements Option {
-      KeyClassOption(Class<?> value) {
-        super(value);
-      }
-    }
-
-    static class ValueClassOption extends Options.ClassOption
-                                          implements Option {
-      ValueClassOption(Class<?> value) {
-        super(value);
-      }
-    }
-
-    static class MetadataOption implements Option {
-      private final Metadata value;
-      MetadataOption(Metadata value) {
-        this.value = value;
-      }
-      Metadata getValue() {
-        return value;
-      }
-    }
-
-    static class ProgressableOption extends Options.ProgressableOption
-                                    implements Option {
-      ProgressableOption(Progressable value) {
-        super(value);
-      }
-    }
-
-    private static class CompressionOption implements Option {
-      private final CompressionType value;
-      private final CompressionCodec codec;
-      CompressionOption(CompressionType value) {
-        this(value, null);
-      }
-      CompressionOption(CompressionType value, CompressionCodec codec) {
-        this.value = value;
-        this.codec = (CompressionType.NONE != value && null == codec)
-          ? new DefaultCodec()
-          : codec;
-      }
-      CompressionType getValue() {
-        return value;
-      }
-      CompressionCodec getCodec() {
-        return codec;
-      }
-    }
-    
-    public static Option file(Path value) {
-      return new FileOption(value);
-    }
-
-    /**
-     * @deprecated only used for backwards-compatibility in the createWriter methods
-     * that take FileSystem.
-     */
-    @Deprecated
-    private static Option filesystem(FileSystem fs) {
-      return new SequenceFile.Writer.FileSystemOption(fs);
-    }
-    
-    public static Option bufferSize(int value) {
-      return new BufferSizeOption(value);
-    }
-    
-    public static Option stream(FSDataOutputStream value) {
-      return new StreamOption(value);
-    }
-    
-    public static Option replication(short value) {
-      return new ReplicationOption(value);
-    }
-    
-    public static Option blockSize(long value) {
-      return new BlockSizeOption(value);
-    }
-    
-    public static Option progressable(Progressable value) {
-      return new ProgressableOption(value);
-    }
-
-    public static Option keyClass(Class<?> value) {
-      return new KeyClassOption(value);
-    }
-    
-    public static Option valueClass(Class<?> value) {
-      return new ValueClassOption(value);
-    }
-    
-    public static Option metadata(Metadata value) {
-      return new MetadataOption(value);
-    }
-
-    public static Option compression(CompressionType value) {
-      return new CompressionOption(value);
-    }
-
-    public static Option compression(CompressionType value,
-        CompressionCodec codec) {
-      return new CompressionOption(value, codec);
-    }
-    
-    /**
-     * Construct an uncompressed writer from a set of options.
-     * @param conf the configuration to use
-     * @param opts the options used when creating the writer
-     * @throws IOException if it fails
-     */
-    Writer(Configuration conf, 
-           Option... opts) throws IOException {
-      BlockSizeOption blockSizeOption = 
-        Options.getOption(BlockSizeOption.class, opts);
-      BufferSizeOption bufferSizeOption = 
-        Options.getOption(BufferSizeOption.class, opts);
-      ReplicationOption replicationOption = 
-        Options.getOption(ReplicationOption.class, opts);
-      ProgressableOption progressOption = 
-        Options.getOption(ProgressableOption.class, opts);
-      FileOption fileOption = Options.getOption(FileOption.class, opts);
-      FileSystemOption fsOption = Options.getOption(FileSystemOption.class, opts);
-      StreamOption streamOption = Options.getOption(StreamOption.class, opts);
-      KeyClassOption keyClassOption = 
-        Options.getOption(KeyClassOption.class, opts);
-      ValueClassOption valueClassOption = 
-        Options.getOption(ValueClassOption.class, opts);
-      MetadataOption metadataOption = 
-        Options.getOption(MetadataOption.class, opts);
-      CompressionOption compressionTypeOption =
-        Options.getOption(CompressionOption.class, opts);
-      // check consistency of options
-      if ((fileOption == null) == (streamOption == null)) {
-        throw new IllegalArgumentException("file or stream must be specified");
-      }
-      if (fileOption == null && (blockSizeOption != null ||
-                                 bufferSizeOption != null ||
-                                 replicationOption != null ||
-                                 progressOption != null)) {
-        throw new IllegalArgumentException("file modifier options not " +
-                                           "compatible with stream");
-      }
-
-      FSDataOutputStream out;
-      boolean ownStream = fileOption != null;
-      if (ownStream) {
-        Path p = fileOption.getValue();
-        FileSystem fs;
-        if (fsOption != null) {
-          fs = fsOption.getValue();
-        } else {
-          fs = p.getFileSystem(conf);
-        }
-        int bufferSize = bufferSizeOption == null ? getBufferSize(conf) :
-          bufferSizeOption.getValue();
-        short replication = replicationOption == null ? 
-          fs.getDefaultReplication(p) :
-          (short) replicationOption.getValue();
-        long blockSize = blockSizeOption == null ? fs.getDefaultBlockSize(p) :
-          blockSizeOption.getValue();
-        Progressable progress = progressOption == null ? null :
-          progressOption.getValue();
-        out = fs.create(p, true, bufferSize, replication, blockSize, progress);
-      } else {
-        out = streamOption.getValue();
-      }
-      Class<?> keyClass = keyClassOption == null ?
-          Object.class : keyClassOption.getValue();
-      Class<?> valueClass = valueClassOption == null ?
-          Object.class : valueClassOption.getValue();
-      Metadata metadata = metadataOption == null ?
-          new Metadata() : metadataOption.getValue();
-      this.compress = compressionTypeOption.getValue();
-      final CompressionCodec codec = compressionTypeOption.getCodec();
-      if (codec != null &&
-          (codec instanceof GzipCodec) &&
-          !NativeCodeLoader.isNativeCodeLoaded() &&
-          !ZlibFactory.isNativeZlibLoaded(conf)) {
-        throw new IllegalArgumentException("SequenceFile doesn't work with " +
-                                           "GzipCodec without native-hadoop " +
-                                           "code!");
-      }
-      init(conf, out, ownStream, keyClass, valueClass, codec, metadata);
-    }
-
-    /** Create the named file.
-     * @deprecated Use 
-     *   {@link SequenceFile#createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)} 
-     *   instead.
-     */
-    @Deprecated
-    public Writer(FileSystem fs, Configuration conf, Path name, 
-                  Class keyClass, Class valClass) throws IOException {
-      this.compress = CompressionType.NONE;
-      init(conf, fs.create(name), true, keyClass, valClass, null, 
-           new Metadata());
-    }
-    
-    /** Create the named file with write-progress reporter.
-     * @deprecated Use 
-     *   {@link SequenceFile#createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)} 
-     *   instead.
-     */
-    @Deprecated
-    public Writer(FileSystem fs, Configuration conf, Path name, 
-                  Class keyClass, Class valClass,
-                  Progressable progress, Metadata metadata) throws IOException {
-      this.compress = CompressionType.NONE;
-      init(conf, fs.create(name, progress), true, keyClass, valClass,
-           null, metadata);
-    }
-    
-    /** Create the named file with write-progress reporter. 
-     * @deprecated Use 
-     *   {@link SequenceFile#createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)} 
-     *   instead.
-     */
-    @Deprecated
-    public Writer(FileSystem fs, Configuration conf, Path name,
-                  Class keyClass, Class valClass,
-                  int bufferSize, short replication, long blockSize,
-                  Progressable progress, Metadata metadata) throws IOException {
-      this.compress = CompressionType.NONE;
-      init(conf,
-           fs.create(name, true, bufferSize, replication, blockSize, progress),
-           true, keyClass, valClass, null, metadata);
-    }
-
-    boolean isCompressed() { return compress != CompressionType.NONE; }
-    boolean isBlockCompressed() { return compress == CompressionType.BLOCK; }
-    
-    Writer ownStream() { this.ownOutputStream = true; return this;  }
-
-    /** Write and flush the file header. */
-    private void writeFileHeader() 
-      throws IOException {
-      out.write(VERSION);
-      Text.writeString(out, keyClass.getName());
-      Text.writeString(out, valClass.getName());
-      
-      out.writeBoolean(this.isCompressed());
-      out.writeBoolean(this.isBlockCompressed());
-      
-      if (this.isCompressed()) {
-        Text.writeString(out, (codec.getClass()).getName());
-      }
-      this.metadata.write(out);
-      out.write(sync);                       // write the sync bytes
-      out.flush();                           // flush header
-    }
-    
-    /** Initialize. */
-    @SuppressWarnings("unchecked")
-    void init(Configuration conf, FSDataOutputStream out, boolean ownStream,
-              Class keyClass, Class valClass,
-              CompressionCodec codec, Metadata metadata) 
-      throws IOException {
-      this.conf = conf;
-      this.out = out;
-      this.ownOutputStream = ownStream;
-      this.keyClass = keyClass;
-      this.valClass = valClass;
-      this.codec = codec;
-      this.metadata = metadata;
-      SerializationFactory serializationFactory = new SerializationFactory(conf);
-      this.keySerializer = serializationFactory.getSerializer(keyClass);
-      if (this.keySerializer == null) {
-        throw new IOException(
-            "Could not find a serializer for the Key class: '"
-                + keyClass.getCanonicalName() + "'. "
-                + "Please ensure that the configuration '" +
-                CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
-                + "properly configured, if you're using "
-                + "custom serialization.");
-      }
-      this.keySerializer.open(buffer);
-      this.uncompressedValSerializer = serializationFactory.getSerializer(valClass);
-      if (this.uncompressedValSerializer == null) {
-        throw new IOException(
-            "Could not find a serializer for the Value class: '"
-                + valClass.getCanonicalName() + "'. "
-                + "Please ensure that the configuration '" +
-                CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
-                + "properly configured, if you're using "
-                + "custom serialization.");
-      }
-      this.uncompressedValSerializer.open(buffer);
-      if (this.codec != null) {
-        ReflectionUtils.setConf(this.codec, this.conf);
-        this.compressor = CodecPool.getCompressor(this.codec);
-        this.deflateFilter = this.codec.createOutputStream(buffer, compressor);
-        this.deflateOut = 
-          new DataOutputStream(new BufferedOutputStream(deflateFilter));
-        this.compressedValSerializer = serializationFactory.getSerializer(valClass);
-        if (this.compressedValSerializer == null) {
-          throw new IOException(
-              "Could not find a serializer for the Value class: '"
-                  + valClass.getCanonicalName() + "'. "
-                  + "Please ensure that the configuration '" +
-                  CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
-                  + "properly configured, if you're using "
-                  + "custom serialization.");
-        }
-        this.compressedValSerializer.open(deflateOut);
-      }
-      writeFileHeader();
-    }
-    
-    /** Returns the class of keys in this file. */
-    public Class getKeyClass() { return keyClass; }
-
-    /** Returns the class of values in this file. */
-    public Class getValueClass() { return valClass; }
-
-    /** Returns the compression codec of data in this file. */
-    public CompressionCodec getCompressionCodec() { return codec; }
-    
-    /** create a sync point */
-    public void sync() throws IOException {
-      if (sync != null && lastSyncPos != out.getPos()) {
-        out.writeInt(SYNC_ESCAPE);                // mark the start of the sync
-        out.write(sync);                          // write sync
-        lastSyncPos = out.getPos();               // update lastSyncPos
-      }
-    }
-
-    /**
-     * flush all currently written data to the file system
-     * @deprecated Use {@link #hsync()} or {@link #hflush()} instead
-     */
-    @Deprecated
-    public void syncFs() throws IOException {
-      if (out != null) {
-        out.sync();                               // flush contents to file system
-      }
-    }
-
-    @Override
-    public void hsync() throws IOException {
-      if (out != null) {
-        out.hsync();
-      }
-    }
-    // Pivotal changes begin
-    public void hsyncWithSizeUpdate() throws IOException {
-      if (out != null) {
-        if (out instanceof HdfsDataOutputStream) {
-          try {
-            ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
-          } catch (NoSuchMethodError e){
-            // We are probably working with an older version of hadoop jars which does not have the 
-            // hsync function with SyncFlag. Use the hsync version that does not update the size. 
-            out.hsync();
-          }
-        }
-        else {
-            out.hsync();
-        }
-      }
-    }
-    // Pivotal changes end
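
The hsyncWithSizeUpdate addition exists so that a durable flush also updates the NameNode-visible file length. A standalone sketch of the same idea follows, minus the NoSuchMethodError fallback for older Hadoop jars that the removed method carries; the class and method names here are illustrative.

    import java.io.IOException;
    import java.util.EnumSet;

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

    final class HdfsFlushUtil {
      // Durable flush that also refreshes the length the NameNode reports for the
      // file; non-HDFS streams just get a plain hsync().
      static void hsyncWithSizeUpdate(FSDataOutputStream out) throws IOException {
        if (out instanceof HdfsDataOutputStream) {
          ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
        } else {
          out.hsync();
        }
      }
    }
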
-    @Override
-    public void hflush() throws IOException {
-      if (out != null) {
-        out.hflush();
-      }
-    }
-    
-    /** Returns the configuration of this file. */
-    Configuration getConf() { return conf; }
-    
-    /** Close the file. */
-    @Override
-    public synchronized void close() throws IOException {
-      keySerializer.close();
-      uncompressedValSerializer.close();
-      if (compressedValSerializer != null) {
-        compressedValSerializer.close();
-      }
-
-      CodecPool.returnCompressor(compressor);
-      compressor = null;
-      
-      if (out != null) {
-        
-        // Close the underlying stream iff we own it...
-        if (ownOutputStream) {
-          out.close();
-        } else {
-          out.flush();
-        }
-        out = null;
-      }
-    }
-
-    synchronized void checkAndWriteSync() throws IOException {
-      if (sync != null &&
-          out.getPos() >= lastSyncPos+SYNC_INTERVAL) { // time to emit sync
-        sync();
-      }
-    }
-
-    /** Append a key/value pair. */
-    public void append(Writable key, Writable val)
-      throws IOException {
-      append((Object) key, (Object) val);
-    }
-
-    /** Append a key/value pair. */
-    @SuppressWarnings("unchecked")
-    public synchronized void append(Object key, Object val)
-      throws IOException {
-      if (key.getClass() != keyClass)
-        throw new IOException("wrong key class: "+key.getClass().getName()
-                              +" is not "+keyClass);
-      if (val.getClass() != valClass)
-        throw new IOException("wrong value class: "+val.getClass().getName()
-                              +" is not "+valClass);
-
-      buffer.reset();
-
-      // Append the 'key'
-      keySerializer.serialize(key);
-      int keyLength = buffer.getLength();
-      if (keyLength < 0)
-        throw new IOException("negative length keys not allowed: " + key);
-
-      // Append the 'value'
-      if (compress == CompressionType.RECORD) {
-        deflateFilter.resetState();
-        compressedValSerializer.serialize(val);
-        deflateOut.flush();
-        deflateFilter.finish();
-      } else {
-        uncompressedValSerializer.serialize(val);
-      }
-
-      // Write the record out
-      checkAndWriteSync();                                // sync
-      out.writeInt(buffer.getLength());                   // total record length
-      out.writeInt(keyLength);                            // key portion length
-      out.write(buffer.getData(), 0, buffer.getLength()); // data
-    }
-
-    public synchronized void appendRaw(byte[] keyData, int keyOffset,
-        int keyLength, ValueBytes val) throws IOException {
-      if (keyLength < 0)
-        throw new IOException("negative length keys not allowed: " + keyLength);
-
-      int valLength = val.getSize();
-
-      checkAndWriteSync();
-      
-      out.writeInt(keyLength+valLength);          // total record length
-      out.writeInt(keyLength);                    // key portion length
-      out.write(keyData, keyOffset, keyLength);   // key
-      val.writeUncompressedBytes(out);            // value
-    }
-
-    /** Returns the current length of the output file.
-     *
-     * <p>This always returns a synchronized position.  In other words,
-     * immediately after calling {@link SequenceFile.Reader#seek(long)} with a position
-     * returned by this method, {@link SequenceFile.Reader#next(Writable)} may be called.  However
-     * the key may be earlier in the file than the key last written when this
-     * method was called (e.g., with block-compression, it may be the first key
-     * in the block that was being written when this method was called).
-     */
-    public synchronized long getLength() throws IOException {
-      return out.getPos();
-    }
-
-  } // class Writer
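
To make the write path above concrete, a small sketch that appends a batch of records and flushes them, relying only on the sync markers the Writer emits automatically roughly every SYNC_INTERVAL bytes. Written against the upstream org.apache.hadoop.io API that this copy mirrors; the path and record contents are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Text;

    public class SequenceFileAppendExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path("/tmp/append-example.seq");   // illustrative path

        try (SequenceFile.Writer writer = SequenceFile.createWriter(conf,
            SequenceFile.Writer.file(path),
            SequenceFile.Writer.keyClass(LongWritable.class),
            SequenceFile.Writer.valueClass(Text.class))) {
          for (long i = 0; i < 10000; i++) {
            // Roughly every SYNC_INTERVAL bytes the writer also emits a 16-byte
            // sync marker, which is what lets a reader re-align mid-file.
            writer.append(new LongWritable(i), new Text("value-" + i));
          }
          writer.hflush();   // push buffered bytes to the filesystem before close
          System.out.println("bytes written: " + writer.getLength());
        }
      }
    }
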
-
-  /** Write key/compressed-value pairs to a sequence-format file. */
-  static class RecordCompressWriter extends Writer {
-    
-    RecordCompressWriter(Configuration conf, 
-                         Option... options) throws IOException {
-      super(conf, options);
-    }
-
-    /** Append a key/value pair. */
-    @Override
-    @SuppressWarnings("unchecked")
-    public synchronized void append(Object key, Object val)
-      throws IOException {
-      if (key.getClass() != keyClass)
-        throw new IOException("wrong key class: "+key.getClass().getName()
-                              +" is not "+keyClass);
-      if (val.getClass() != valClass)
-        throw new IOException("wrong value class: "+val.getClass().getName()
-                              +" is not "+valClass);
-
-      buffer.reset();
-
-      // Append the 'key'
-      keySerializer.serialize(key);
-      int keyLength = buffer.getLength();
-      if (keyLength < 0)
-        throw new IOException("negative length keys not allowed: " + key);
-
-      // Compress 'value' and append it
-      deflateFilter.resetState();
-      compressedValSerializer.serialize(val);
-      deflateOut.flush();
-      deflateFilter.finish();
-
-      // Write the record out
-      checkAndWriteSync();                                // sync
-      out.writeInt(buffer.getLength());                   // total record length
-      out.writeInt(keyLength);                            // key portion length
-      out.write(buffer.getData(), 0, buffer.getLength()); // data
-    }
-
-    /** Append a key/value pair. */
-    @Override
-    public synchronized void appendRaw(byte[] keyData, int keyOffset,
-        int keyLength, ValueBytes val) throws IOException {
-
-      if (keyLength < 0)
-        throw new IOException("negative length keys not allowed: " + keyLength);
-
-      int valLength = val.getSize();
-      
-      checkAndWriteSync();                        // sync
-      out.writeInt(keyLength+valLength);          // total record length
-      out.writeInt(keyLength);                    // key portion length
-      out.write(keyData, keyOffset, keyLength);   // 'key' data
-      val.writeCompressedBytes(out);              // 'value' data
-    }
-    
-  } // RecordCompressionWriter
-
-  /** Write compressed key/value blocks to a sequence-format file. */
-  static class BlockCompressWriter extends Writer {
-    
-    private int noBufferedRecords = 0;
-    
-    private DataOutputBuffer keyLenBuffer = new DataOutputBuffer();
-    private DataOutputBuffer keyBuffer = new DataOutputBuffer();
-
-    private DataOutputBuffer valLenBuffer = new DataOutputBuffer();
-    private DataOutputBuffer valBuffer = new DataOutputBuffer();
-
-    private final int compressionBlockSize;
-    
-    BlockCompressWriter(Configuration conf,
-                        Option... options) throws IOException {
-      super(conf, options);
-      compressionBlockSize = 
-        conf.getInt("io.seqfile.compress.blocksize", 1000000);
-      keySerializer.close();
-      keySerializer.open(keyBuffer);
-      uncompressedValSerializer.close();
-      uncompressedValSerializer.open(valBuffer);
-    }
-
-    /** Workhorse to check and write out compressed data/lengths */
-    private synchronized 
-      void writeBuffer(DataOutputBuffer uncompressedDataBuffer) 
-      throws IOException {
-      deflateFilter.resetState();
-      buffer.reset();
-      deflateOut.write(uncompressedDataBuffer.getData(), 0, 
-                       uncompressedDataBuffer.getLength());
-      deflateOut.flush();
-      deflateFilter.finish();
-      
-      WritableUtils.writeVInt(out, buffer.getLength());
-      out.write(buffer.getData(), 0, buffer.getLength());
-    }
-    
-    /** Compress and flush contents to dfs */
-    @Override
-    public synchronized void sync() throws IOException {
-      if (noBufferedRecords > 0) {
-        super.sync();
-        
-        // No. of records
-        WritableUtils.writeVInt(out, noBufferedRecords);
-        
-        // Write 'keys' and lengths
-        writeBuffer(keyLenBuffer);
-        writeBuffer(keyBuffer);
-        
-        // Write 'values' and lengths
-        writeBuffer(valLenBuffer);
-        writeBuffer(valBuffer);
-        
-        // Flush the file-stream
-        out.flush();
-        
-        // Reset internal states
-        keyLenBuffer.reset();
-        keyBuffer.reset();
-        valLenBuffer.reset();
-        valBuffer.reset();
-        noBufferedRecords = 0;
-      }
-      
-    }
-    
-    /** Close the file. */
-    @Override
-    public synchronized void close() throws IOException {
-      if (out != null) {
-        sync();
-      }
-      super.close();
-    }
-
-    /** Append a key/value pair. */
-    @Override
-    @SuppressWarnings("unchecked")
-    public synchronized void append(Object key, Object val)
-      throws IOException {
-      if (key.getClass() != keyClass)
-        throw new IOException("wrong key class: "+key+" is not "+keyClass);
-      if (val.getClass() != valClass)
-        throw new IOException("wrong value class: "+val+" is not "+valClass);
-
-      // Save key/value into respective buffers 
-      int oldKeyLength = keyBuffer.getLength();
-      keySerializer.serialize(key);
-      int keyLength = keyBuffer.getLength() - oldKeyLength;
-      if (keyLength < 0)
-        throw new IOException("negative length keys not allowed: " + key);
-      WritableUtils.writeVInt(keyLenBuffer, keyLength);
-
-      int oldValLength = valBuffer.getLength();
-      uncompressedValSerializer.serialize(val);
-      int valLength = valBuffer.getLength() - oldValLength;
-      WritableUtils.writeVInt(valLenBuffer, valLength);
-      
-      // Added another key/value pair
-      ++noBufferedRecords;
-      
-      // Compress and flush?
-      int currentBlockSize = keyBuffer.getLength() + valBuffer.getLength();
-      if (currentBlockSize >= compressionBlockSize) {
-        sync();
-      }
-    }
-    
-    /** Append a key/value pair. */
-    @Override
-    public synchronized void appendRaw(byte[] keyData, int keyOffset,
-        int keyLength, ValueBytes val) throws IOException {
-      
-      if (keyLength < 0)
-        throw new IOException("negative length keys not allowed");
-
-      int valLength = val.getSize();
-      
-      // Save key/value data in relevant buffers
-      WritableUtils.writeVInt(keyLenBuffer, keyLength);
-      keyBuffer.write(keyData, keyOffset, keyLength);
-      WritableUtils.writeVInt(valLenBuffer, valLength);
-      val.writeUncompressedBytes(valBuffer);
-
-      // Added another key/value pair
-      ++noBufferedRecords;
-
-      // Compress and flush?
-      int currentBlockSize = keyBuffer.getLength() + valBuffer.getLength(); 
-      if (currentBlockSize >= compressionBlockSize) {
-        sync();
-      }
-    }
-  
-  } // BlockCompressionWriter
-
-  /** Get the configured buffer size */
-  private static int getBufferSize(Configuration conf) {
-    return conf.getInt("io.file.buffer.size", 4096);
-  }
-
-  /** Reads key/value pairs from a sequence-format file. */
-  public static class Reader implements java.io.Closeable {
-    private String filename;
-    private FSDataInputStream in;
-    private DataOutputBuffer outBuf = new DataOutputBuffer();
-
-    private byte version;
-
-    private String keyClassName;
-    private String valClassName;
-    private Class keyClass;
-    private Class valClass;
-
-    private CompressionCodec codec = null;
-    private Metadata metadata = null;
-    
-    private byte[] sync = new byte[SYNC_HASH_SIZE];
-    private byte[] syncCheck = new byte[SYNC_HASH_SIZE];
-    private boolean syncSeen;
-
-    private long headerEnd;
-    private long end;
-    private int keyLength;
-    private int recordLength;
-
-    private boolean decompress;
-    private boolean blockCompressed;
-    
-    private Configuration conf;
-
-    private int noBufferedRecords = 0;
-    private boolean lazyDecompress = true;
-    private boolean valuesDecompressed = true;
-    
-    private int noBufferedKeys = 0;
-    private int noBufferedValues = 0;
-    
-    private DataInputBuffer keyLenBuffer = null;
-    private CompressionInputStream keyLenInFilter = null;
-    private DataInputStream keyLenIn = null;
-    private Decompressor keyLenDecompressor = null;
-    private DataInputBuffer keyBuffer = null;
-    private CompressionInputStream keyInFilter = null;
-    private DataInputStream keyIn = null;
-    private Decompressor keyDecompressor = null;
-
-    private DataInputBuffer valLenBuffer = null;
-    private CompressionInputStream valLenInFilter = null;
-    private DataInputStream valLenIn = null;
-    private Decompressor valLenDecompressor = null;
-    private DataInputBuffer valBuffer = null;
-    private CompressionInputStream valInFilter = null;
-    private DataInputStream valIn = null;
-    private Decompressor valDecompressor = null;
-    
-    private Deserializer keyDeserializer;
-    private Deserializer valDeserializer;
-
-    /**
-     * A tag interface for all of the Reader options
-     */
-    public static interface Option {}
-    
-    /**
-     * Create an option to specify the path name of the sequence file.
-     * @param value the path to read
-     * @return a new option
-     */
-    public static Option file(Path value) {
-      return new FileOption(value);
-    }
-    
-    /**
-     * Create an option to specify the stream with the sequence file.
-     * @param value the stream to read.
-     * @return a new option
-     */
-    public static Option stream(FSDataInputStream value) {
-      return new InputStreamOption(value);
-    }
-    
-    /**
-     * Create an option to specify the starting byte to read.
-     * @param value the number of bytes to skip over
-     * @return a new option
-     */
-    public static Option start(long value) {
-      return new StartOption(value);
-    }
-    
-    /**
-     * Create an option to specify the number of bytes to read.
-     * @param value the number of bytes to read
-     * @return a new option
-     */
-    public static Option length(long value) {
-      return new LengthOption(value);
-    }
-    
-    /**
-     * Create an option with the buffer size for reading the given pathname.
-     * @param value the number of bytes to buffer
-     * @return a new option
-     */
-    public static Option bufferSize(int value) {
-      return new BufferSizeOption(value);
-    }
-
-    private static class FileOption extends Options.PathOption 
-                                    implements Option {
-      private FileOption(Path value) {
-        super(value);
-      }
-    }
-    
-    private static class InputStreamOption
-        extends Options.FSDataInputStreamOption 
-        implements Option {
-      private InputStreamOption(FSDataInputStream value) {
-        super(value);
-      }
-    }
-
-    private static class StartOption extends Options.LongOption
-                                     implements Option {
-      private StartOption(long value) {
-        super(value);
-      }
-    }
-
-    private static class LengthOption extends Options.LongOption
-                                      implements Option {
-      private LengthOption(long value) {
-        super(value);
-      }
-    }
-
-    private static class BufferSizeOption extends Options.IntegerOption
-                                      implements Option {
-      private BufferSizeOption(int value) {
-        super(value);
-      }
-    }
-
-    // only used directly
-    private static class OnlyHeaderOption extends Options.BooleanOption 
-                                          implements Option {
-      private OnlyHeaderOption() {
-        super(true);
-      }
-    }
-
-    public Reader(Configuration conf, Option... opts) throws IOException {
-      // Look up the options, these are null if not set
-      FileOption fileOpt = Options.getOption(FileOption.class, opts);
-      InputStreamOption streamOpt = 
-        Options.getOption(InputStreamOption.class, opts);
-      StartOption startOpt = Options.getOption(StartOption.class, opts);
-      LengthOption lenOpt = Options.getOption(LengthOption.class, opts);
-      BufferSizeOption bufOpt = Options.getOption(BufferSizeOption.class,opts);
-      OnlyHeaderOption headerOnly = 
-        Options.getOption(OnlyHeaderOption.class, opts);
-      // check for consistency
-      if ((fileOpt == null) == (streamOpt == null)) {
-        throw new 
-          IllegalArgumentException("File or stream option must be specified");
-      }
-      if (fileOpt == null && bufOpt != null) {
-        throw new IllegalArgumentException("buffer size can only be set when" +
-                                           " a file is specified.");
-      }
-      // figure out the real values
-      Path filename = null;
-      FSDataInputStream file;
-      final long len;
-      if (fileOpt != null) {
-        filename = fileOpt.getValue();
-        FileSystem fs = filename.getFileSystem(conf);
-        int bufSize = bufOpt == null ? getBufferSize(conf): bufOpt.getValue();
-        len = null == lenOpt
-          ? fs.getFileStatus(filename).getLen()
-          : lenOpt.getValue();
-        file = openFile(fs, filename, bufSize, len);
-      } else {
-        len = null == lenOpt ? Long.MAX_VALUE : lenOpt.getValue();
-        file = streamOpt.getValue();
-      }
-      long start = startOpt == null ? 0 : startOpt.getValue();
-      // really set up
-      initialize(filename, file, start, len, conf, headerOnly != null);
-    }
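
And the matching read side: a minimal sketch that iterates every key/value pair using the option-based Reader constructor above. Again written against the upstream org.apache.hadoop.io API, with an illustrative path; the key/value types must match what the file was written with.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Text;

    public class SequenceFileReadExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path("/tmp/append-example.seq");   // illustrative path

        try (SequenceFile.Reader reader =
                 new SequenceFile.Reader(conf, SequenceFile.Reader.file(path))) {
          LongWritable key = new LongWritable();
          Text value = new Text();
          // next(key, value) fills the reusable instances and returns false at EOF.
          while (reader.next(key, value)) {
            System.out.println(key.get() + " -> " + value);
          }
        }
      }
    }
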
-
-    /**
-     * Construct a reader by opening a file from the given file system.
-     * @param fs The file system used to open the file.
-     * @param file The file being read.
-     * @param conf Configuration
-     * @throws IOException
-     * @deprecated Use Reader(Configuration, Option...) instead.
-     */
-    @Deprecated
-    public Reader(FileSystem fs, Path file, 
-                  Configuration conf) throws IOException {
-      this(conf, file(file.makeQualified(fs)));
-    }
-
-    /**
-     * Construct a reader by the given input stream.
-     * @param in An input stream.
-     * @param buffersize unused
-     * @param start The starting position.
-     * @param length The length being read.
-     * @param conf Configuration
-     * @throws IOException
-     * @deprecated Use Reader(Configuration, Reader.Option...) instead.
-     */
-    @Deprecated
-    public Reader(FSDataInputStream in, int buffersize,
-        long start, long length, Configuration conf) throws IOException {
-      this(conf, stream(in), start(start), length(length));
-    }
-
-    /** Common work of the constructors. */
-    private void initialize(Path filename, FSDataInputStream in,
-                            long start, long length, Configuration conf,
-                            boolean tempReader) throws IOException {
-      if (in == null) {
-        throw new IllegalArgumentException("in == null");
-      }
-      this.filename = filename == null ? "<unknown>" : filename.toString();
-      this.in = in;
-      this.conf = conf;
-      boolean succeeded = false;
-      try {
-        seek(start);
-        this.end = this.in.getPos() + length;
-        // if it wrapped around, use the max
-        if (end < length) {
-          end = Long.MAX_VALUE;
-        }
-        init(tempReader);
-        succeeded = true;
-      } finally {
-        if (!succeeded) {
-          IOUtils.cleanup(LOG, this.in);
-        }
-      }
-    }
-
-    /**
-     * Override this method to specialize the type of
-     * {@link FSDataInputStream} returned.
-     * @param fs The file system used to open the file.
-     * @param file The file being read.
-     * @param bufferSize The buffer size used to read the file.
-     * @param length The length being read if it is >= 0.  Otherwise,
-     *               the length is not available.
-     * @return The opened stream.
-     * @throws IOException
-     */
-    protected FSDataInputStream openFile(FileSystem fs, Path file,
-        int bufferSize, long length) throws IOException {
-      return fs.open(file, bufferSize);
-    }
-    
-    /**
-     * Initialize the {@link Reader}
-     * @param tempReader <code>true</code> if we are constructing a temporary
-     *                  reader {@link SequenceFile.Sorter#cloneFileAttributes}, 
-     *                  and hence do not initialize every component; 
-     *                  <code>false</code> otherwise.
-     * @throws IOException
-     */
-    private void init(boolean tempReader) throws IOException {
-      byte[] versionBlock = new byte[VERSION.length];
-      in.readFully(versionBlock);
-
-      if ((versionBlock[0] != VERSION[0]) ||
-          (versionBlock[1] != VERSION[1]) ||
-          (versionBlock[2] != VERSION[2]))
-        throw new IOException(this + " not a SequenceFile");
-
-      // Set 'version'
-      version = versionBlock[3];
-      if (version > VERSION[3])
-        throw new VersionMismatchException(VERSION[3], version);
-
-      if (version < BLOCK_COMPRESS_VERSION) {
-        UTF8 className = new UTF8();
-
-        className.readFields(in);
-        keyClassName = className.toString(); // key class name
-
-        className.readFields(in);
-        valClassName = className.toString(); // val class name
-      } else {
-        keyClassName = Text.readString(in);
-        valClassName = Text.readString(in);
-      }
-
-      if (version > 2) {                          // if version > 2
-        this.decompress = in.readBoolean();       // is compressed?
-      } else {
-        decompress = false;
-      }
-
-      if (version >= BLOCK_COMPRESS_VERSION) {    // if version >= 4
-        this.blockCompressed = in.readBoolean();  // is block-compressed?
-      } else {
-        blockCompressed = false;
-      }
-      
-      // if version >= 5
-      // setup the compression codec
-      if (decompress) {
-        if (version >= CUSTOM_COMPRESS_VERSION) {
-          String codecClassname = Text.readString(in);
-          try {
-            Class<? extends CompressionCodec> codecClass
-              = conf.getClassByName(codecClassname).asSubclass(CompressionCodec.class);
-            this.codec = ReflectionUtils.newInstance(codecClass, conf);
-          } catch (ClassNotFoundException cnfe) {
-            throw new IllegalArgumentException("Unknown codec: " + 
-                                               codecClassname, cnfe);
-          }
-        } else {
-          codec = new DefaultCodec();
-          ((Configurable)codec).setConf(conf);
-        }
-      }
-      
-      this.metadata = new Metadata();
-      if (version >= VERSION_WITH_METADATA) {    // if version >= 6
-        this.metadata.readFields(in);
-      }
-      
-      if (version > 1) {                          // if version > 1
-        in.readFully(sync);                       // read sync bytes
-        headerEnd = in.getPos();                  // record end of header
-      }
-      
-      // Initialize... *not* if this we are constructing a temporary Reader
-      if (!tempReader) {
-        valBuffer = new DataInputBuffer();
-        if (decompress) {
-          valDecompressor = CodecPool.getDecompressor(codec);
-          valInFilter = codec.createInputStream(valBuffer, valDecompressor);
-          valIn = new DataInputStream(valInFilter);
-        } else {
-          valIn = valBuffer;
-        }
-
-        if (blockCompressed) {
-          keyLenBuffer = new DataInputBuffer();
-          keyBuffer = new DataInputBuffer();
-          valLenBuffer = new DataInputBuffer();
-
-          keyLenDecompressor = CodecPool.getDecompressor(codec);
-          keyLenInFilter = codec.createInputStream(keyLenBuffer, 
-                                                   keyLenDecompressor);
-          keyLenIn = new DataInputStream(keyLenInFilter);
-
-          keyDecompressor = CodecPool.getDecompressor(codec);
-          keyInFilter = codec.createInputStream(keyBuffer, keyDecompressor);
-          keyIn = new DataInputStream(keyInFilter);
-
-          valLenDecompressor = CodecPool.getDecompressor(codec);
-          valLenInFilter = codec.createInputStream(valLenBuffer, 
-                                                   valLenDecompressor);
-          valLenIn = new DataInputStream(valLenInFilter);
-        }
-        
-        SerializationFactory serializationFactory =
-          new SerializationFactory(conf);
-        this.keyDeserializer =
-          getDeserializer(serializationFactory, getKeyClass());
-        if (this.keyDeserializer == null) {
-          throw new IOException(
-              "Could not find a deserializer for the Key class: '"
-                  + getKeyClass().getCanonicalName() + "'. "
-                  + "Please ensure that the configuration '" +
-                  CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
-                  + "properly configured, if you're using "
-                  + "custom serialization.");
-        }
-        if (!blockCompressed) {
-          this.keyDeserializer.open(valBuffer);
-        } else {
-          this.keyDeserializer.open(keyIn);
-        }
-        this.valDeserializer =
-          getDeserializer(serializationFactory, getValueClass());
-        if (this.valDeserializer == null) {
-          throw new IOException(
-              "Could not find a deserializer for the Value class: '"
-                  + getValueClass().getCanonicalName() + "'. "
-                  + "Please ensure that the configuration '" +
-                  CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
-                  + "properly configured, if you're using "
-                  + "custom serialization.");
-        }
-        this.valDeserializer.open(valIn);
-      }
-    }
-    
-    @SuppressWarnings("unchecked")
-    private Deserializer getDeserializer(SerializationFactory sf, Class c) {
-      return sf.getDeserializer(c);
-    }
-    
-    /** Close the file. */
-    @Override
-    public synchronized void close() throws IOException {
-      // Return the decompressors to the pool
-      CodecPool.returnDecompressor(keyLenDecompressor);
-      CodecPool.returnDecompressor(keyDecompressor);
-      CodecPool.returnDecompressor(valLenDecompressor);
-      CodecPool.returnDecompressor(valDecompressor);
-      keyLenDecompressor = keyDecompressor = null;
-      valLenDecompressor = valDecompressor = null;
-      
-      if (keyDeserializer != null) {
-    	keyDeserializer.close();
-      }
-      if (valDeserializer != null) {
-        valDeserializer.close();
-      }
-      
-      // Close the input-stream
-      in.close();
-    }
-
-    /** Returns the name of the key class. */
-    public String getKeyClassName() {
-      return keyClassName;
-    }
-
-    /** Returns the class of keys in this file. */
-    public synchronized Class<?> getKeyClass() {
-      if (null == keyClass) {
-        try {
-          keyClass = WritableName.getClass(getKeyClassName(), conf);
-        } catch (IOException e) {
-          throw new RuntimeException(e);
-        }
-      }
-      return keyClass;
-    }
-
-    /** Returns the name of the value class. */
-    public String getValueClassName() {
-      return valClassName;
-    }
-
-    /** Returns the class of values in this file. */
-    public synchronized Class<?> getValueClass() {
-      if (null == valClass) {
-        try {
-          valClass = WritableName.getClass(getValueClassName(), conf);
-        } catch (IOException e) {
-          throw new RuntimeException(e);
-        }
-      }
-      return valClass;
-    }
-
-    /** Returns true if values are compressed. */
-    public boolean isCompressed() { return decompress; }
-    
-    /** Returns true if records are block-compressed. */
-    public boolean isBlockCompressed() { return blockCompressed; }
-    
-    /** Returns the compression codec of data in this file. */
-    public CompressionCodec getCompressionCodec() { return codec; }
-    
-    /**
-     * Get the compression type for this file.
-     * @return the compression type
-     */
-    public CompressionType getCompressionType() {
-      if (decompress) {
-        return blockCompressed ? CompressionType.BLOCK : CompressionType.RECORD;
-      } else {
-        return CompressionType.NONE;
-      }
-    }
-
-    /** Returns the metadata object of the file */
-    public Metadata getMetadata() {
-      return this.metadata;
-    }
-    
-    /** Returns the configuration used for this file. */
-    Configuration getConf() { return conf; }
-    
-    /** Read a compressed buffer */
-    private synchronized void readBuffer(DataInputBuffer buffer, 
-                                         CompressionInputStream filter) throws IOException {
-      // Read data into a temporary buffer
-      DataOutputBuffer dataBuffer = new DataOutputBuffer();
-
-      try {
-        int dataBufferLength = WritableUtils.readVInt(in);
-        dataBuffer.write(in, dataBufferLength);
-      
-        // Set up 'buffer' connected to the input-stream
-        buffer.reset(dataBuffer.getData(), 0, dataBuffer.getLength());
-      } finally {
-        dataBuffer.close();
-      }
-
-      // Reset the codec
-      filter.resetState();
-    }
-    
-    /** Read the next 'compressed' block */
-    private synchronized void readBlock() throws IOException {
-      // Check if we need to throw away a whole block of 
-      // 'values' due to 'lazy decompression' 
-      if (lazyDecompress && !valuesDecompressed) {
-        in.seek(WritableUtils.readVInt(in)+in.getPos());
-        in.seek(WritableUtils.readVInt(in)+in.getPos());
-      }
-      
-      // Reset internal states
-      noBufferedKeys = 0; noBufferedValues = 0; noBufferedRecords = 0;
-      valuesDecompressed = false;
-
-      //Process sync
-      if (sync != null) {
-        in.readInt();
-        in.readFully(syncCheck);                // read syncCheck
-        if (!Arrays.equals(sync, syncCheck))    // check it
-          throw new IOException("File is corrupt!");
-      }
-      syncSeen = true;
-
-      // Read number of records in this block
-      noBufferedRecords = WritableUtils.readVInt(in);
-      
-      // Read key lengths and keys
-      readBuffer(keyLenBuffer, keyLenInFilter);
-      readBuffer(keyBuffer, keyInFilter);
-      noBufferedKeys = noBufferedRecords;
-      
-      // Read value lengths and values
-      if (!lazyDecompress) {
-        readBuffer(valLenBuffer, valLenInFilter);
-        readBuffer(valBuffer, valInFilter);
-        noBufferedValues = noBufferedRecords;
-        valuesDecompressed = true;
-      }
-    }
-
-    /** 
-     * Position valLenIn/valIn to the 'value' 
-     * corresponding to the 'current' key 
-     */
-    private synchronized void seekToCurrentValue() throws IOException {
-      if (!blockCompressed) {
-        if (decompress) {
-          valInFilter.resetState();
-        }
-        valBuffer.reset();
-      } else {
-        // Check if this is the first value in the 'block' to be read
-        if (lazyDecompress && !valuesDecompressed) {
-          // Read the value lengths and values
-          readBuffer(valLenBuffer, valLenInFilter);
-          readBuffer(valBuffer, valInFilter);
-          noBufferedValues = noBufferedRecords;
-          valuesDecompressed = true;
-        }
-        
-        // Calculate the no. of bytes to skip
-        // Note: 'current' key has already been read!
-        int skipValBytes = 0;
-        int currentKey = noBufferedKeys + 1;          
-        for (int i=noBufferedValues; i > currentKey; --i) {
-          skipValBytes += WritableUtils.readVInt(valLenIn);
-          --noBufferedValues;
-        }
-        
-        // Skip to the 'val' corresponding to 'current' key
-        if (skipValBytes > 0) {
-          if (valIn.skipBytes(skipValBytes) != skipValBytes) {
-            throw new IOException("Failed to seek to " + currentKey + 
-                                  "(th) value!");
-          }
-        }
-      }
-    }
-
-    /**
-     * Get the 'value' corresponding to the last read 'key'.
-     * @param val : The 'value' to be read.
-     * @throws IOException
-     */
-    public synchronized void getCurrentValue(Writable val) 
-      throws IOException {
-      if (val instanceof Configurable) {
-        ((Configurable) val).setConf(this.conf);
-      }
-
-      // Position stream to 'current' value
-      seekToCurrentValue();
-
-      if (!blockCompressed) {
-        val.readFields(valIn);
-        
-        if (valIn.read() > 0) {
-          LOG.info("available bytes: " + valIn.available());
-          throw new IOException(val+" read "+(valBuffer.getPosition()-keyLength)
-                                + " bytes, should read " +
-                                (valBuffer.getLength()-keyLength));
-        }
-      } else {
-        // Get the value
-        int valLength = WritableUtils.readVInt(valLenIn);
-        val.readFields(valIn);
-        
-        // Read another compressed 'value'
-        --noBufferedValues;
-        
-        // Sanity check
-        if ((valLength < 0) && LOG.isDebugEnabled()) {
-          LOG.debug(val + " is a zero-length value");
-        }
-      }
-
-    }
-    
-    /**
-     * Get the 'value' corresponding to the last read 'key'.
-     * @param val : The 'value' to be read.
-     * @throws IOException
-     */
-    public synchronized Object getCurrentValue(Object val) 
-      throws IOException {
-      if (val instanceof Configurable) {
-        ((Configurable) val).setConf(this.conf);
-      }
-
-      // Position stream to 'current' value
-      seekToCurrentValue();
-
-      if (!blockCompressed) {
-        val = deserializeValue(val);
-        
-        if (valIn.read() > 0) {
-          LOG.info("available bytes: " + valIn.available());
-          throw new IOException(val+" read "+(valBuffer.getPosition()-keyLength)
-                                + " bytes, should read " +
-                                (valBuffer.getLength()-keyLength));
-        }
-      } else {
-        // Get the value
-        int valLength = WritableUtils.readVInt(valLenIn);
-        val = deserializeValue(val);
-        
-        // Read another compressed 'value'
-        --noBufferedValues;
-        
-        // Sanity check
-        if ((valLength < 0) && LOG.isDebugEnabled()) {
-          LOG.debug(val + " is a zero-length value");
-        }
-      }
-      return val;
-
-    }
-
-    @SuppressWarnings("unchecked")
-    private Object deserializeValue(Object val) throws IOException {
-      return valDeserializer.deserialize(val);
-    }
-    
-    /** Read the next key in the file into <code>key</code>, skipping its
-     * value.  True if another entry exists, and false at end of file. */
-    public synchronized boolean next(Writable key) throws IOException {
-      if (key.getClass() != getKeyClass())
-        throw new IOException("wrong key class: "+key.getClass().getName()
-                              +" is not "+keyClass);
-
-      if (!blockCompressed) {
-        outBuf.reset();
-        
-        keyLength = next(outBuf);
-        if (keyLength < 0)
-          return false;
-        
-        valBuffer.reset(outBuf.getData(), outBuf.getLength());
-        
-        key.readFields(valBuffer);
-        valBuffer.mark(0);
-        if (valBuffer.getPosition() != keyLength)
-          throw new IOException(key + " read " + valBuffer.getPosition()
-                                + " bytes, should read " + keyLength);
-      } else {
-        //Reset syncSeen
-        syncSeen = false;
-        
-        if (noBufferedKeys == 0) {
-          try {
-            readBlock();
-          } catch (EOFException eof) {
-            return false;
-          }
-        }
-        
-        int keyLength = WritableUtils.readVInt(keyLenIn);
-        
-        // Sanity check
-        if (keyLength < 0) {
-          return false;
-        }
-        
-        //Read another compressed 'key'
-        key.readFields(keyIn);
-        --noBufferedKeys;
-      }
-
-      return true;
-    }
-
-    /** Read the next key/value pair in the file into <code>key</code> and
-     * <code>val</code>.  Returns true if such a pair exists and false when at
-     * end of file */
-    public synchronized boolean next(Writable key, Writable val)
-      throws IOException {
-      if (val.getClass() != getValueClass())
-        throw new IOException("wrong value class: "+val+" is not "+valClass);
-
-      boolean more = next(key);
-      
-      if (more) {
-        getCurrentValue(val);
-      }
-
-      return more;
-    }
-    
-    /**
-     * Read and return the next record length, potentially skipping over 
-     * a sync block.
-     * @return the length of the next record or -1 if there is no next record
-     * @throws IOException
-     */
-    private synchronized int readRecordLength() throws IOException {
-      if (in.getPos() >= end) {
-        return -1;
-      }      
-      int length = in.readInt();
-      if (version > 1 && sync != null &&
-          length == SYNC_ESCAPE) {              // process a sync entry
-        in.readFully(syncCheck);                // read syncCheck
-        if (!Arrays.equals(sync, syncCheck))    // check it
-          throw new IOException("File is corrupt!");
-        syncSeen = true;
-        if (in.getPos() >= end) {
-          return -1;
-        }
-        length = in.readInt();                  // re-read length
-      } else {
-        syncSeen = false;
-      }
-      
-      return length;
-    }
-    
-    /** Read the next key/value pair in the file into <code>buffer</code>.
-     * Returns the length of the key read, or -1 if at end of file.  The length
-     * of the value may be computed by calling buffer.getLength() before and
-     * after calls to this method. */
-    /** @deprecated Call {@link #nextRaw(DataOutputBuffer,SequenceFile.ValueBytes)}. */
-    @Deprecated
-    synchronized int next(DataOutputBuffer buffer) throws IOException {
-      // Unsupported for block-compressed sequence files
-      if (blockCompressed) {
-        throw new IOException("Unsupported call for block-compressed" +
-                              " SequenceFiles - use SequenceFile.Reader.next(DataOutputStream, ValueBytes)");
-      }
-      try {
-        int length = readRecordLength();
-        if (length == -1) {
-          return -1;
-        }
-        int keyLength = in.readInt();
-        buffer.write(in, length);
-        return keyLength;
-      } catch (ChecksumException e) {             // checksum failure
-        handleChecksumException(e);
-        return next(buffer);
-      }
-    }
-
-    public ValueBytes createValueBytes() {
-      ValueBytes val = null;
-      if (!decompress || blockCompressed) {
-        val = new UncompressedBytes();
-      } else {
-        val = new CompressedBytes(codec);
-      }
-      return val;
-    }
-
-    /**
-     * Read 'raw' records.
-     * @param key - The buffer into which the key is read
-     * @param val - The 'raw' value
-     * @return Returns the total record length or -1 for end of file
-     * @throws IOException
-     */
-    public synchronized int nextRaw(DataOutputBuffer key, ValueBytes val) 
-      throws IOException {
-      if (!blockCompressed) {
-        int length = readRecordLength();
-        if (length == -1) {
-          return -1;
-        }
-        int keyLength = in.readInt();
-        int valLength = le

<TRUNCATED>


[33/63] [abbrv] incubator-geode git commit: GEODE-17: review changes

Posted by kl...@apache.org.
GEODE-17: review changes


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/3d8f54c9
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/3d8f54c9
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/3d8f54c9

Branch: refs/heads/feature/GEODE-1276
Commit: 3d8f54c9827ed308696ae44481a3705ca6e52c54
Parents: 165c9bc
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Thu Apr 28 09:38:12 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Thu Apr 28 11:21:09 2016 -0700

----------------------------------------------------------------------
 .../internal/security/AuthorizeRequest.java        | 17 -----------------
 .../internal/security/MBeanServerWrapper.java      |  4 ++--
 .../com/gemstone/gemfire/security/ShiroUtil.java   | 10 +++++++---
 3 files changed, 9 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/3d8f54c9/geode-core/src/main/java/com/gemstone/gemfire/internal/security/AuthorizeRequest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/security/AuthorizeRequest.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/security/AuthorizeRequest.java
index 7981ccb..9ec7578 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/security/AuthorizeRequest.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/security/AuthorizeRequest.java
@@ -95,23 +95,6 @@ public class AuthorizeRequest {
         new Object[] {id, authzFactoryName});
     }
   }
- 
-  public AccessControl getAuthzCallback() {
-
-    return this.authzCallback;
-  }
-
-  public Principal getPrincipal() {
-    return principal;
-  }
-
-  public boolean isPrincipalSerializable() {
-    return isPrincipalSerializable;
-  }
-
-  public LogWriterI18n getLogger() {
-    return logger;
-  }
 
   public GetOperationContext getAuthorize(String regionName, Object key,
       Object callbackArg) throws NotAuthorizedException {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/3d8f54c9/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/MBeanServerWrapper.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/MBeanServerWrapper.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/MBeanServerWrapper.java
index c7cb058..bbc0442 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/MBeanServerWrapper.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/MBeanServerWrapper.java
@@ -207,7 +207,7 @@ public class MBeanServerWrapper implements MBeanServerForwarder {
       try {
         setAttribute(name, attribute);
       } catch (Exception e) {
-        throw new GemFireSecurityException("error setting attribute "+attribute+" of "+name);
+        throw new GemFireSecurityException("error setting attribute "+attribute+" of "+name, e);
       }
     }
     return attributes;
@@ -237,7 +237,7 @@ public class MBeanServerWrapper implements MBeanServerForwarder {
     try {
       beanInfo = mbs.getMBeanInfo(objectName);
     } catch (IntrospectionException e) {
-      throw new GemFireSecurityException("error getting beanInfo of "+objectName);
+      throw new GemFireSecurityException("error getting beanInfo of "+objectName, e);
     }
     // If there is no annotation defined either in the class level or method level, we should consider this operation/attribute freely accessible
     ResourceOperationContext result = null;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/3d8f54c9/geode-core/src/main/java/com/gemstone/gemfire/security/ShiroUtil.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/security/ShiroUtil.java b/geode-core/src/main/java/com/gemstone/gemfire/security/ShiroUtil.java
index 5eedaf4..01914e4 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/security/ShiroUtil.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/security/ShiroUtil.java
@@ -22,6 +22,8 @@ import java.util.concurrent.Callable;
 import com.gemstone.gemfire.internal.logging.LogService;
 import com.gemstone.gemfire.management.internal.security.ResourceOperation;
 import com.gemstone.gemfire.management.internal.security.ResourceOperationContext;
+
+import org.apache.logging.log4j.Logger;
 import org.apache.shiro.SecurityUtils;
 import org.apache.shiro.ShiroException;
 import org.apache.shiro.UnavailableSecurityManagerException;
@@ -31,6 +33,8 @@ import org.apache.shiro.util.ThreadContext;
 
 public class ShiroUtil {
 
+  private static Logger logger = LogService.getLogger();
+
   public static void login(String username, String password){
     if(!isShiroConfigured())
       return;
@@ -40,7 +44,7 @@ public class ShiroUtil {
     UsernamePasswordToken token =
         new UsernamePasswordToken(username, password);
     try {
-      LogService.getLogger().info("Logging in "+username+"/"+password);
+      logger.info("Logging in "+username+"/"+password);
       currentUser.login(token);
     } catch (ShiroException e) {
       throw new AuthenticationFailedException(e.getMessage(), e);
@@ -53,7 +57,7 @@ public class ShiroUtil {
 
     Subject currentUser = SecurityUtils.getSubject();
     try {
-      LogService.getLogger().info("Logging out "+currentUser.getPrincipal());
+      logger.info("Logging out "+currentUser.getPrincipal());
       currentUser.logout();
     }
     catch(ShiroException e){
@@ -93,7 +97,7 @@ public class ShiroUtil {
       currentUser.checkPermission(permission);
     }
     catch(ShiroException e){
-      LogService.getLogger().info(currentUser.getPrincipal() + " not authorized for "+resource+":"+operation+":"+regionName);
+      logger.info(currentUser.getPrincipal() + " not authorized for "+resource+":"+operation+":"+regionName);
       throw new GemFireSecurityException(e.getMessage(), e);
     }
   }


[57/63] [abbrv] incubator-geode git commit: GEODE-1183: keep only one proxy if there're 3 cache servers on one JVM

Posted by kl...@apache.org.
GEODE-1183: keep only one proxy if there're 3 cache servers on one JVM

The current API allows us to create 2 cache servers on the same JVM, so the client
will try to create 2 queues to that JVM, one secondary and one primary.
But the proxy is actually the same (since there is only one client), so the
CCN keeps destroying and recreating the proxy.

To fix this, we keep the first proxy and reject the duplicate creation.
The secondary proxy then automatically becomes primary.
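
For illustration only (this sketch is not part of the commit): a minimal example of the
scenario described above, assuming a hypothetical locator at localhost[10334], arbitrary
server ports, and a region named "exampleRegion". One member hosts two CacheServer
instances in the same JVM; a client with subscription redundancy -1 then asks that member
for both a primary and a secondary subscription queue, which is what caused the
CacheClientNotifier to repeatedly destroy and recreate the single proxy before this change.

    import java.util.Properties;
    import com.gemstone.gemfire.cache.Cache;
    import com.gemstone.gemfire.cache.CacheFactory;
    import com.gemstone.gemfire.cache.client.ClientCache;
    import com.gemstone.gemfire.cache.client.ClientCacheFactory;
    import com.gemstone.gemfire.cache.client.ClientRegionShortcut;
    import com.gemstone.gemfire.cache.server.CacheServer;

    public class TwoServersOneJvmSketch {

      // Server member: one JVM hosting two cache servers (locator and ports are assumptions).
      static void startTwoServers() throws Exception {
        Properties props = new Properties();
        props.setProperty("mcast-port", "0");
        props.setProperty("locators", "localhost[10334]"); // assumed locator
        Cache cache = new CacheFactory(props).create();

        CacheServer server1 = cache.addCacheServer();
        server1.setPort(40404); // assumed port
        server1.start();

        CacheServer server2 = cache.addCacheServer(); // second cache server in the same JVM
        server2.setPort(40405); // assumed port
        server2.start();
      }

      // Client (separate JVM): redundancy -1 requests a queue on every server, so it asks
      // the member above for both a primary and a secondary queue even though that member
      // can hold only one CacheClientProxy for this client.
      static void startSubscriptionClient() {
        ClientCache client = new ClientCacheFactory()
            .addPoolLocator("localhost", 10334) // assumed locator
            .setPoolSubscriptionEnabled(true)
            .setPoolSubscriptionRedundancy(-1)
            .create();
        client.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
            .create("exampleRegion") // assumed region name
            .registerInterest("ALL_KEYS");
      }
    }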


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/51e4e71e
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/51e4e71e
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/51e4e71e

Branch: refs/heads/feature/GEODE-1276
Commit: 51e4e71ef1ffb2fddb3ade42e0ad46fe40886239
Parents: 1aa08cd
Author: zhouxh <gz...@pivotal.io>
Authored: Sun Apr 24 22:51:07 2016 -0700
Committer: zhouxh <gz...@pivotal.io>
Committed: Mon May 2 22:08:58 2016 -0700

----------------------------------------------------------------------
 .../cache/tier/sockets/CacheClientNotifier.java |  36 +++--
 .../cache/wan/CacheClientNotifierDUnitTest.java | 106 ++++++++++---
 .../cache/wan/Simple2CacheServerDUnitTest.java  | 157 +++++++++++++++++++
 3 files changed, 260 insertions(+), 39 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/51e4e71e/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/CacheClientNotifier.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/CacheClientNotifier.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/CacheClientNotifier.java
index 1ba2294..80d05ba 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/CacheClientNotifier.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/CacheClientNotifier.java
@@ -505,24 +505,32 @@ public class CacheClientNotifier {
       }
     } else {
       CacheClientProxy staleClientProxy = this.getClientProxy(proxyId);
+      boolean toCreateNewProxy = true;
       if (staleClientProxy != null) {
-        // A proxy exists for this non-durable client. It must be closed.
-        if (logger.isDebugEnabled()) {
-          logger.debug("CacheClientNotifier: A proxy exists for this non-durable client. It must be closed.");
-        }
-        if (staleClientProxy.startRemoval()) {
-          staleClientProxy.waitRemoval();
-        }
-        else {
-          staleClientProxy.close(false, false); // do not check for queue, just close it
-          removeClientProxy(staleClientProxy); // remove old proxy from proxy set
+        if (staleClientProxy.isConnected() && staleClientProxy.getSocket().isConnected()) {
+          successful = false;
+          toCreateNewProxy = false;
+        } else {
+          // A proxy exists for this non-durable client. It must be closed.
+          if (logger.isDebugEnabled()) {
+            logger.debug("CacheClientNotifier: A proxy exists for this non-durable client. It must be closed.");
+          }
+          if (staleClientProxy.startRemoval()) {
+            staleClientProxy.waitRemoval();
+          }
+          else {
+            staleClientProxy.close(false, false); // do not check for queue, just close it
+            removeClientProxy(staleClientProxy); // remove old proxy from proxy set
+          }
         }
       } // non-null stale proxy
 
-      // Create the new proxy for this non-durable client
-      l_proxy = new CacheClientProxy(this, socket, proxyId,
-          isPrimary, clientConflation, clientVersion, acceptorId, notifyBySubscription);
-      successful = this.initializeProxy(l_proxy);
+      if (toCreateNewProxy) {
+        // Create the new proxy for this non-durable client
+        l_proxy = new CacheClientProxy(this, socket, proxyId,
+            isPrimary, clientConflation, clientVersion, acceptorId, notifyBySubscription);
+        successful = this.initializeProxy(l_proxy);
+      }
     }
 
     if (!successful){

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/51e4e71e/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/CacheClientNotifierDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/CacheClientNotifierDUnitTest.java b/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/CacheClientNotifierDUnitTest.java
index 8bf819c..0b1cd11 100755
--- a/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/CacheClientNotifierDUnitTest.java
+++ b/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/CacheClientNotifierDUnitTest.java
@@ -18,25 +18,40 @@ package com.gemstone.gemfire.internal.cache.wan;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Properties;
 
 import org.junit.experimental.categories.Category;
 
+import com.gemstone.gemfire.cache.AttributesFactory;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.DataPolicy;
 import com.gemstone.gemfire.cache.DiskStore;
 import com.gemstone.gemfire.cache.EvictionAction;
 import com.gemstone.gemfire.cache.EvictionAttributes;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.RegionAttributes;
+import com.gemstone.gemfire.cache.client.Pool;
+import com.gemstone.gemfire.cache.client.PoolManager;
+import com.gemstone.gemfire.cache.client.internal.PoolImpl;
 import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.cache.server.ClientSubscriptionConfig;
+import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
+import com.gemstone.gemfire.distributed.internal.ServerLocation;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
+import com.gemstone.gemfire.internal.cache.ClientServerObserverAdapter;
+import com.gemstone.gemfire.internal.cache.ClientServerObserverHolder;
 import com.gemstone.gemfire.internal.cache.UserSpecifiedRegionAttributes;
 import com.gemstone.gemfire.internal.cache.ha.HAContainerRegion;
 import com.gemstone.gemfire.internal.cache.ha.HAContainerWrapper;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientNotifier;
+import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientProxy;
+import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerTestUtil;
 import com.gemstone.gemfire.internal.cache.xmlcache.RegionAttributesCreation;
 import com.gemstone.gemfire.internal.logging.LogService;
 import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.test.dunit.Wait;
@@ -121,24 +136,17 @@ public class CacheClientNotifierDUnitTest extends WANTestBase {
     vm.invoke(checkCacheServer);
   }
 
-  private void closeCacheServer(VM vm, final int serverPort) {
-    SerializableRunnable stopCacheServer = new SerializableRunnable() {
-
-      @Override
-      public void run() throws Exception {
-        List<CacheServer> cacheServers = cache.getCacheServers();
-        CacheServerImpl server = null;
-        for (CacheServer cs:cacheServers) {
-          if (cs.getPort() == serverPort) {
-            server = (CacheServerImpl)cs;
-            break;
-          }
-        }
-        assertNotNull(server);
-        server.stop();
+  public static void closeACacheServer(final int serverPort) {
+    List<CacheServer> cacheServers = cache.getCacheServers();
+    CacheServerImpl server = null;
+    for (CacheServer cs:cacheServers) {
+      if (cs.getPort() == serverPort) {
+        server = (CacheServerImpl)cs;
+        break;
       }
-    };
-    vm.invoke(stopCacheServer);
+    }
+    assertNotNull(server);
+    server.stop();
   }
 
   private void verifyRegionSize(VM vm, final int expect) {
@@ -165,8 +173,12 @@ public class CacheClientNotifierDUnitTest extends WANTestBase {
    * The test will start several cache servers, including gateway receivers.
    * Shut them down and verify the CacheClientNotifier for each server is correct
    */
-  @Category(FlakyTest.class) // GEODE-1183: random ports, failure to start threads, eats exceptions, time sensitive
-  public void testMultipleCacheServer() throws Exception {
+  // GEODE-1183: random ports, failure to start threads, eats exceptions, time sensitive
+  public void testNormalClient2MultipleCacheServer() throws Exception {
+    doMultipleCacheServer(false);
+  }
+
+  public void doMultipleCacheServer(boolean durable) throws Exception {
     /* test scenario: */
     /* create 1 GatewaySender on vm0 */
     /* create 1 GatewayReceiver on vm1 */
@@ -199,8 +211,8 @@ public class CacheClientNotifierDUnitTest extends WANTestBase {
     checkCacheServer(vm1, serverPort2, true, 3);
     LogService.getLogger().info("receiverPort="+receiverPort+",serverPort="+serverPort+",serverPort2="+serverPort2);
     
-    vm2.invoke(() -> WANTestBase.createClientWithLocator(nyPort, "localhost", getTestMethodName() + "_PR" ));
-    vm3.invoke(() -> WANTestBase.createClientWithLocator(nyPort, "localhost", getTestMethodName() + "_PR" ));
+    vm2.invoke(() -> createClientWithLocator(nyPort, "localhost", getTestMethodName() + "_PR", "123", durable));
+    vm3.invoke(() -> createClientWithLocator(nyPort, "localhost", getTestMethodName() + "_PR", "124", durable));
 
     vm0.invoke(() -> WANTestBase.createCache( lnPort ));
     vm0.invoke(() -> WANTestBase.createSender( "ln", 2, false, 100, 400, false, false, null, true ));
@@ -211,19 +223,63 @@ public class CacheClientNotifierDUnitTest extends WANTestBase {
     /* verify */
     verifyRegionSize(vm0, NUM_KEYS);
     verifyRegionSize(vm1, NUM_KEYS);
-    verifyRegionSize(vm2, NUM_KEYS);
     verifyRegionSize(vm3, NUM_KEYS);
+    verifyRegionSize(vm2, NUM_KEYS);
 
     // close a cache server, then re-test
-    closeCacheServer(vm1, serverPort2);
+    vm1.invoke(() -> closeACacheServer(serverPort2));
 
     vm0.invoke(() -> WANTestBase.doPuts( getTestMethodName() + "_PR", NUM_KEYS*2 ));
 
     /* verify */
     verifyRegionSize(vm0, NUM_KEYS*2);
     verifyRegionSize(vm1, NUM_KEYS*2);
-    verifyRegionSize(vm2, NUM_KEYS*2);
     verifyRegionSize(vm3, NUM_KEYS*2);
+    verifyRegionSize(vm2, NUM_KEYS*2);
+    
+    disconnectAllFromDS();
+  }
+
+  public static void createClientWithLocator(int port0,String host,
+      String regionName, String clientId, boolean isDurable) {
+    WANTestBase test = new WANTestBase(getTestMethodName());
+    Properties props = test.getDistributedSystemProperties();
+    props.setProperty("mcast-port", "0");
+    props.setProperty("locators", "");
+    if (isDurable) {
+      props.setProperty("durable-client-id", clientId);
+      props.setProperty("durable-client-timeout", "" + 200);
+    }
+
+    InternalDistributedSystem ds = test.getSystem(props);
+    cache = CacheFactory.create(ds);
+
+    assertNotNull(cache);
+    CacheServerTestUtil.disableShufflingOfEndpoints();
+    Pool p;
+    try {
+      p = PoolManager.createFactory().addLocator(host, port0)
+          .setPingInterval(250).setSubscriptionEnabled(true)
+          .setSubscriptionRedundancy(-1).setReadTimeout(2000)
+          .setSocketBufferSize(1000).setMinConnections(6).setMaxConnections(10)
+          .setRetryAttempts(3).create(regionName);
+    } finally {
+      CacheServerTestUtil.enableShufflingOfEndpoints();
+    }
+
+    AttributesFactory factory = new AttributesFactory();
+    factory.setPoolName(p.getName());
+    factory.setDataPolicy(DataPolicy.NORMAL);
+    RegionAttributes attrs = factory.create();
+    region = cache.createRegion(regionName, attrs);
+    region.registerInterest("ALL_KEYS");
+    assertNotNull(region);
+    if (isDurable) {
+      cache.readyForEvents();
+    }
+    LogWriterUtils.getLogWriter().info(
+        "Distributed Region " + regionName + " created Successfully :"
+            + region.toString() + " in a "+(isDurable?"durable":"")+" client");
   }
 
-}
+ }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/51e4e71e/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/Simple2CacheServerDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/Simple2CacheServerDUnitTest.java b/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/Simple2CacheServerDUnitTest.java
new file mode 100755
index 0000000..684660b
--- /dev/null
+++ b/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/Simple2CacheServerDUnitTest.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.internal.cache.wan;
+
+import java.util.Iterator;
+
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.client.internal.PoolImpl;
+import com.gemstone.gemfire.distributed.internal.ServerLocation;
+import com.gemstone.gemfire.internal.cache.ClientServerObserverAdapter;
+import com.gemstone.gemfire.internal.cache.ClientServerObserverHolder;
+import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientNotifier;
+import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientProxy;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.test.dunit.SerializableCallable;
+import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.test.junit.categories.FlakyTest;
+
+public class Simple2CacheServerDUnitTest extends WANTestBase {
+  private static final int NUM_KEYS = 10;
+  static int afterPrimaryCount = 0;
+  static int afterProxyReinitialized = 0;
+  
+  public Simple2CacheServerDUnitTest(String name) {
+    super(name);
+  }
+  
+  // GEODE-1183: random ports, failure to start threads, eats exceptions, time sensitive
+  public void testDurableClient2MultipleCacheServer() throws Exception {
+    doMultipleCacheServer(true);
+  }
+
+  public void testNormalClient2MultipleCacheServer() throws Exception {
+    doMultipleCacheServer(false);
+  }
+  
+  public void doMultipleCacheServer(boolean durable) throws Exception {
+    Integer lnPort = (Integer)vm1.invoke(() -> WANTestBase.createFirstLocatorWithDSId( 1 ));
+    vm1.invoke(() -> WANTestBase.createCache( lnPort ));
+    vm1.invoke(() -> WANTestBase.createPersistentPartitionedRegion( getTestMethodName() + "_PR", null, 1, 100, isOffHeap() ));
+    int serverPort = vm1.invoke(() -> WANTestBase.createCacheServer());
+    int serverPort2 = vm1.invoke(() -> WANTestBase.createCacheServer());
+
+    if (durable) {
+      vm1.invoke(() -> setCacheClientProxyTestHook());
+    } else {
+      vm2.invoke(() -> setClientServerObserver());
+    }
+    vm2.invoke(() -> CacheClientNotifierDUnitTest.createClientWithLocator(lnPort, "localhost", getTestMethodName() + "_PR" , "123", durable));
+
+    vm0.invoke(() -> WANTestBase.createCache( lnPort ));
+    vm0.invoke(() -> WANTestBase.createPersistentPartitionedRegion( getTestMethodName() + "_PR", null, 1, 100, isOffHeap() ));
+    int serverPort3 = vm0.invoke(() -> WANTestBase.createCacheServer());
+    
+    if (durable) {
+      vm1.invoke(() -> checkResultAndUnsetCacheClientProxyTestHook());
+    } else {
+      vm2.invoke(() -> checkResultAndUnsetClientServerObserver());
+    }
+    
+    boolean vm0_proxy = checkProxyIsPrimary(vm0);
+    boolean vm1_proxy = checkProxyIsPrimary(vm1);
+    assertTrue(vm1_proxy || vm0_proxy);
+    
+    // close the current primary cache server, then re-test
+    vm1.invoke(()-> CacheClientNotifierDUnitTest.closeACacheServer(serverPort2));
+    vm0_proxy = checkProxyIsPrimary(vm0);
+    vm1_proxy = checkProxyIsPrimary(vm1);
+    assertTrue(vm1_proxy || vm0_proxy);
+    
+    disconnectAllFromDS();
+  }
+
+  public static void setClientServerObserver()
+  {
+    PoolImpl.AFTER_PRIMARY_IDENTIFICATION_FROM_BACKUP_CALLBACK_FLAG = true;
+    ClientServerObserverHolder
+    .setInstance(new ClientServerObserverAdapter() {
+      public void afterPrimaryIdentificationFromBackup(ServerLocation primaryEndpoint)
+      {
+        LogService.getLogger().info("After primary is set");
+        afterPrimaryCount++;
+      }
+    });
+  }
+
+  public static void checkResultAndUnsetClientServerObserver()
+  {
+    PoolImpl.AFTER_PRIMARY_IDENTIFICATION_FROM_BACKUP_CALLBACK_FLAG = false;
+    // setPrimary only happened once
+    assertEquals(1, afterPrimaryCount);
+    afterPrimaryCount = 0;
+  }
+
+  public static void setCacheClientProxyTestHook()
+  {
+    CacheClientProxy.testHook = new CacheClientProxy.TestHook() {
+      @Override
+      public void doTestHook(String spot) {
+        if (spot.equals("CLIENT_RECONNECTED")) {
+          afterProxyReinitialized++;
+        }
+      }
+    };
+  }
+
+  public static void checkResultAndUnsetCacheClientProxyTestHook()
+  {
+    // Reinitialize only happened once
+    CacheClientProxy.testHook = null;
+    assertEquals(1, afterProxyReinitialized);
+    afterProxyReinitialized = 0;
+  }
+  
+  private boolean checkProxyIsPrimary(VM vm) {
+    SerializableCallable checkProxyIsPrimary = new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        final CacheClientNotifier ccn = CacheClientNotifier.getInstance();
+        
+        Wait.waitForCriterion(new WaitCriterion() {
+          public boolean done() {
+            return ccn.getClientProxies().size() == 1; 
+          }
+          public String description() {
+            return null;
+          }
+        }, 20000, 100, false);
+        assertEquals(1, ccn.getClientProxies().size());
+
+        Iterator iter_prox = ccn.getClientProxies().iterator();
+        assertEquals(1, ccn.getClientProxies().size());
+        CacheClientProxy proxy = (CacheClientProxy)iter_prox.next();
+        return proxy.isPrimary();
+      }
+    };
+    return (Boolean)vm.invoke(checkProxyIsPrimary);
+  }
+}


[11/63] [abbrv] incubator-geode git commit: GEODE-1072: Removing HDFS related code

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HFileSortedOplog.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HFileSortedOplog.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HFileSortedOplog.java
deleted file mode 100644
index 5ba20d2..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HFileSortedOplog.java
+++ /dev/null
@@ -1,853 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.ByteArrayInputStream;
-import java.io.Closeable;
-import java.io.DataInput;
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import java.util.EnumMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.NoSuchElementException;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import com.gemstone.gemfire.internal.hll.HyperLogLog;
-import com.gemstone.gemfire.internal.hll.ICardinality;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.util.ShutdownHookManager;
-
-import com.gemstone.gemfire.cache.CacheClosedException;
-import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.DelegatingSerializedComparator;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.HFileStoreStatistics;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics.ScanOperation;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedReader.SerializedComparator;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.internal.Version;
-import com.gemstone.gemfire.internal.util.Hex;
-import com.gemstone.gemfire.internal.util.SingletonValue;
-import com.gemstone.gemfire.internal.util.SingletonValue.SingletonBuilder;
-
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
-import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
-import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
-import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader;
-import org.apache.hadoop.hbase.io.hfile.HFileScanner;
-import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
-import org.apache.hadoop.hbase.util.BloomFilterFactory;
-import org.apache.hadoop.hbase.util.BloomFilterWriter;
-
-/**
- * Implements hfile based {@link Hoplog}
- */
-public final class HFileSortedOplog extends AbstractHoplog {
-
-//  private static final boolean CACHE_DATA_BLOCKS_ON_READ = !Boolean.getBoolean("gemfire.HFileSortedOplog.DISABLE_CACHE_ON_READ");
-  private final CacheConfig cacheConf;
-  private ICardinality entryCountEstimate;
-  
-  // a cached reader for the file
-  private final SingletonValue<HFileReader> reader;
-
-  public HFileSortedOplog(HDFSStoreImpl store, Path hfilePath,
-      BlockCache blockCache, SortedOplogStatistics stats,
-      HFileStoreStatistics storeStats) throws IOException {
-    super(store, hfilePath, stats);
-    cacheConf = getCacheConfInstance(blockCache, stats, storeStats);
-    reader = getReaderContainer();
-  }
-
-  /**
-   * THIS METHOD SHOULD BE USED FOR LONER ONLY
-   */
-  public static HFileSortedOplog getHoplogForLoner(FileSystem inputFS,
-      Path hfilePath) throws IOException {
-    return new HFileSortedOplog(inputFS, hfilePath, null, null, null);
-  }
-
-  private HFileSortedOplog(FileSystem inputFS, Path hfilePath,
-      BlockCache blockCache, SortedOplogStatistics stats,
-      HFileStoreStatistics storeStats) throws IOException {
-    super(inputFS, hfilePath, stats);
-    cacheConf = getCacheConfInstance(blockCache, stats, storeStats);
-    reader = getReaderContainer();
-  }
-
-  protected CacheConfig getCacheConfInstance(BlockCache blockCache,
-      SortedOplogStatistics stats, HFileStoreStatistics storeStats) {
-    CacheConfig tmpConfig = null;
-//    if (stats == null) {
-      tmpConfig = new CacheConfig(conf);
-//    } else {
-//      tmpConfig = new CacheConfig(conf, CACHE_DATA_BLOCKS_ON_READ, blockCache,
-//          HFileSortedOplogFactory.convertStatistics(stats, storeStats));
-//    }
-    tmpConfig.shouldCacheBlockOnRead(BlockCategory.ALL_CATEGORIES);
-    return tmpConfig;
-  }  
-
-  private SingletonValue<HFileReader> getReaderContainer() {
-    return new SingletonValue<HFileReader>(new SingletonBuilder<HFileReader>() {
-      @Override
-      public HFileReader create() throws IOException {
-        if (logger.isDebugEnabled())
-          logger.debug("{}Creating hoplog reader", logPrefix);
-        return new HFileReader();
-      }
-
-      @Override
-      public void postCreate() {
-        if (readerListener != null) {
-          readerListener.readerCreated();
-        }
-      }
-      
-      @Override
-      public void createInProgress() {
-      }
-    });
-  }
-  
-  @Override
-  public HoplogReader getReader() throws IOException {
-    return reader.get();
-  }
-  
-  @Override
-  public ICardinality getEntryCountEstimate() throws IOException {
-    ICardinality result = entryCountEstimate;
-    if (result == null) {
-      HoplogReader rdr = getReader(); // keep this out of the critical section
-      synchronized(this) {
-        result = entryCountEstimate;
-          if (result == null) {
-            entryCountEstimate = result = rdr.getCardinalityEstimator();
-          }
-        }
-    }
-    return result;
-  }
-  
-  @Override
-  public HoplogWriter createWriter(int keys) throws IOException {
-    return new HFileSortedOplogWriter(keys);
-  }
-
-  @Override
-  public boolean isClosed() {
-    HFileReader rdr = reader.getCachedValue();
-    return rdr == null || rdr.isClosed();
-  }
-  
-  @Override
-  public void close() throws IOException {
-    close(true);
-  }
-
-  @Override
-  public void close(boolean clearCache) throws IOException {
-    compareAndClose(null, clearCache);
-  }
-  
-  private void compareAndClose(HFileReader hfileReader, boolean clearCache) throws IOException {
-    HFileReader rdr ;
-    if (hfileReader == null) {
-      rdr = reader.clear(true);
-    } else {
-      boolean result = reader.clear(hfileReader, true);
-      if (! result) {
-        if (logger.isDebugEnabled())
-          logger.debug("{}skipping close, provided hfileReader mismatched", logPrefix);
-        return;
-      } 
-      rdr = hfileReader;
-    }
-    
-    if (rdr != null) {
-      try {
-        rdr.close(clearCache);
-      } finally {
-        if (readerListener != null) {
-          readerListener.readerClosed();
-        }
-      }
-    }
-  }
-  
-  @Override
-  public String toString() {
-    return "HFileSortedOplog[" + getFileName() + "]";
-  }
-
-  private class HFileSortedOplogWriter implements HoplogWriter {
-    private final Writer writer;
-    private final BloomFilterWriter bfw;
-    private final AtomicBoolean closed = new AtomicBoolean(false);
-
-    public HFileSortedOplogWriter(int keys) throws IOException {
-      try {
-        int hfileBlockSize = Integer.getInteger(
-            HoplogConfig.HFILE_BLOCK_SIZE_CONF, (1 << 16));
-
-        Algorithm compress = Algorithm.valueOf(System.getProperty(HoplogConfig.COMPRESSION,
-            HoplogConfig.COMPRESSION_DEFAULT));
-
-//        ByteComparator bc = new ByteComparator();
-        writer = HFile.getWriterFactory(conf, cacheConf)
-            .withPath(fsProvider.getFS(), path)
-            .withBlockSize(hfileBlockSize)
-//            .withComparator(bc)
-            .withCompression(compress)
-            .create();
-//        bfw = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, BloomType.ROW, keys,
-//            writer, bc);
-        bfw = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, BloomType.ROW, keys,
-            writer);
-
-        if (logger.isDebugEnabled())
-          logger.debug("{}Created hoplog writer with compression " + compress, logPrefix);
-      } catch (IOException e) {
-        if (logger.isDebugEnabled())
-          logger.debug("{}IO Error while creating writer", logPrefix);
-        throw e;
-      }
-    }
-
-    @Override
-    public void append(byte[] key, byte[] value) throws IOException {
-      writer.append(key, value);
-      bfw.add(key, 0, key.length);
-    }
-
-    @Override
-    public void append(ByteBuffer key, ByteBuffer value) throws IOException {
-      byte[] keyBytes = byteBufferToArray(key);
-      byte[] valueBytes = byteBufferToArray(value);
-      writer.append(keyBytes, valueBytes);
-      bfw.add(keyBytes, 0, keyBytes.length);
-    }
-
-    @Override
-    public void close() throws IOException {
-      close(null);
-    }
-
-    @Override
-    public void close(EnumMap<Meta, byte[]> metadata) throws IOException {
-      if (closed.get()) {
-        if (logger.isDebugEnabled())
-          logger.debug("{}Writer already closed", logPrefix);
-        return;
-      }
-      
-      bfw.compactBloom();
-      writer.addGeneralBloomFilter(bfw);
-
-      // append system metadata
-      writer.appendFileInfo(Meta.GEMFIRE_MAGIC.toBytes(), Hoplog.MAGIC);
-      writer.appendFileInfo(Meta.SORTED_OPLOG_VERSION.toBytes(), HoplogVersion.V1.toBytes());
-      writer.appendFileInfo(Meta.GEMFIRE_VERSION.toBytes(), Version.CURRENT.toBytes());
-      
-      // append comparator info
-//      if (writer.getComparator() instanceof DelegatingSerializedComparator) {
-//        ByteArrayOutputStream bos = new ByteArrayOutputStream();
-//        DataOutput out = new DataOutputStream(bos);
-//        
-//        writeComparatorInfo(out, ((DelegatingSerializedComparator) writer.getComparator()).getComparators());
-//        writer.appendFileInfo(Meta.COMPARATORS.toBytes(), bos.toByteArray());
-//      }
-      
-      // append user metadata
-      HyperLogLog cachedEntryCountEstimate = null;
-      if (metadata != null) {
-        for (Entry<Meta, byte[]> entry : metadata.entrySet()) {
-          writer.appendFileInfo(entry.getKey().toBytes(), entry.getValue());
-          if (Meta.LOCAL_CARDINALITY_ESTIMATE_V2.equals(entry.getKey())) {
-             cachedEntryCountEstimate = HyperLogLog.Builder.build(entry.getValue()); 
-          }
-        }
-      }
-      
-      writer.close();
-      if (logger.isDebugEnabled())
-        logger.debug("{}Completed closing writer", logPrefix);
-      closed.set(true);
-      // cache estimate value to avoid reads later
-      entryCountEstimate = cachedEntryCountEstimate;
-    }
-
-    @Override
-    public void hsync() throws IOException {
-      throw new UnsupportedOperationException("hsync is not supported for HFiles"); 
-    }
-
-    @Override
-    public long getCurrentSize() throws IOException {
-      throw new UnsupportedOperationException("getCurrentSize is not supported for HFiles"); 
-    }
-    
-//    private void writeComparatorInfo(DataOutput out, SerializedComparator[] comparators) throws IOException {
-//      out.writeInt(comparators.length);
-//      for (SerializedComparator sc : comparators) {
-//        out.writeUTF(sc.getClass().getName());
-//        if (sc instanceof DelegatingSerializedComparator) {
-//          writeComparatorInfo(out, ((DelegatingSerializedComparator) sc).getComparators());
-//        }
-//      }
-//    }
-  }
-  
-  private void handleReadIOError(HFileReader hfileReader, IOException e, boolean skipFailIfSafe) {
-    if (logger.isDebugEnabled())
-      logger.debug("Read IO error", e);
-    boolean safeError = ShutdownHookManager.get().isShutdownInProgress();
-    if (safeError) {
-      // IOException because of closed file system. This happens when member is
-      // shutting down
-      if (logger.isDebugEnabled())
-        logger.debug("IO error caused by filesystem shutdown", e);
-      throw new CacheClosedException("IO error caused by filesystem shutdown", e);
-    } 
-    
-    // expose the error wrapped inside remote exception. Remote exceptions are
-    // handled by file system client. So let the caller handle this error
-    if (e instanceof RemoteException) {
-      e = ((RemoteException) e).unwrapRemoteException();
-      throw new HDFSIOException(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(path), e);
-    } 
-    
-    FileSystem currentFs = fsProvider.checkFileSystem();
-    if (hfileReader != null && hfileReader.previousFS != currentFs) {
-      if (logger.isDebugEnabled()) {
-        logger.debug("{}Detected new FS client, closing old reader", logPrefix);
-        if (currentFs != null) {
-          if (logger.isDebugEnabled())
-            logger.debug("CurrentFs:" + currentFs.getUri() + "-"
-                + currentFs.hashCode(), logPrefix);
-        }
-        if (hfileReader.previousFS != null) {
-          if (logger.isDebugEnabled())
-            logger.debug("OldFs:" + hfileReader.previousFS.getUri() + "-"
-                + hfileReader.previousFS.hashCode() + ", closing old reader", logPrefix);
-        }
-      }
-      try {
-        HFileSortedOplog.this.compareAndClose(hfileReader, false);
-      } catch (Exception ex) {
-        if (logger.isDebugEnabled())
-          logger.debug("Failed to close reader", ex);
-      }
-      if (skipFailIfSafe) {
-        if (logger.isDebugEnabled())
-          logger.debug("Not faling after io error since FS client changed");
-        return;
-      }
-    }
-
-    // it is not a safe error. let the caller handle it
-    throw new HDFSIOException(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(path), e);
-  }
-
-  class HFileReader implements HoplogReader, Closeable {
-    private final Reader reader;
-    private volatile BloomFilter hoplogBloom;
-    private final AtomicBoolean closed;
-    private final Map<byte[], byte[]> fileInfo;
-    private final HyperLogLog estimator;
-    private final FileSystem previousFS;
-    
-    public HFileReader() throws IOException {
-      try {
-        FileSystem fs = fsProvider.getFS();
-        reader = HFile.createReader(fs, path, cacheConf);
-        fileInfo = reader.loadFileInfo();
-        closed = new AtomicBoolean(false);
-
-        validate();
-        if (reader.getComparator() instanceof DelegatingSerializedComparator) {
-          loadComparators((DelegatingSerializedComparator) reader.getComparator());
-        }
-
-        // read the old HLL if it exists so that a CardinalityMergeException will trigger a Major Compaction
-        byte[] hll = fileInfo.get(Meta.LOCAL_CARDINALITY_ESTIMATE.toBytes());
-        if (hll != null) {
-          entryCountEstimate = estimator = HyperLogLog.Builder.build(hll);
-        } else if ((hll = fileInfo.get(Meta.LOCAL_CARDINALITY_ESTIMATE_V2.toBytes())) != null) {
-          entryCountEstimate = estimator = HyperLogLog.Builder.build(hll);
-        } else {
-          estimator = new HyperLogLog(HdfsSortedOplogOrganizer.HLL_CONSTANT);
-        }
-        
-        previousFS = fs;
-      } catch (IOException e) {
-        if (logger.isDebugEnabled())
-          logger.debug("IO Error while creating reader", e);
-        throw e;
-      }
-    }
-
-    @Override
-    public byte[] read(byte[] key) throws IOException {
-      IOException err = null;
-      HFileReader delegateReader = this;
-      for (int retry = 1; retry >= 0; retry --) {
-        try {
-          return delegateReader.readDelegate(key);
-        } catch (IOException e) {
-          err = e;
-          handleReadIOError(delegateReader, e, retry > 0);
-          // Current reader may have got closed in error handling. Get the new
-          // one for retry attempt
-          try {
-            delegateReader = (HFileReader) HFileSortedOplog.this.getReader(); 
-          } catch (IOException ex) {
-            handleReadIOError(null, e, false);
-          }
-        }
-      }
-
-      if (logger.isDebugEnabled())
-        logger.debug("Throwing err from read delegate ", err);
-      throw err;
-    }
-
-    private byte[] readDelegate(byte[] key) throws IOException {
-      try {
-        if (!getBloomFilter().mightContain(key)) {
-          // bloom filter check failed, the key is not present in this hoplog
-          return null;
-        }
-      } catch (IllegalArgumentException e) {
-        if (IOException.class.isAssignableFrom(e.getCause().getClass())) {
-          throw (IOException) e.getCause();
-        } else {
-          throw e;
-        }
-      }
-      
-      byte[] valueBytes = null;
-      ByteBuffer bb = get(key);
-      if (bb != null) {
-        valueBytes = new byte[bb.remaining()];
-        bb.get(valueBytes);
-      } else {
-        stats.getBloom().falsePositive();
-      }
-      return valueBytes;
-    }
-
-    @Override
-    public ByteBuffer get(byte[] key) throws IOException {
-      assert key != null;
-      HFileScanner seek = reader.getScanner(false, true);
-      if (seek.seekTo(key) == 0) {
-        return seek.getValue();
-      }
-      return null;
-    }
-
-    @Override
-    public HoplogIterator<byte[], byte[]> scan(byte[] from, boolean fromInclusive, byte[] to,
-        boolean toInclusive) throws IOException {
-      IOException err = null;
-      HFileReader delegateReader = this;
-      for (int retry = 1; retry >= 0; retry --) {
-        try {
-          return delegateReader.scanDelegate(from, fromInclusive, to, toInclusive);
-        } catch (IOException e) {
-          err = e;
-          handleReadIOError(delegateReader, e, retry > 0);
-          // Current reader may have got closed in error handling. Get the new
-          // one for retry attempt
-          try {
-            delegateReader = (HFileReader) HFileSortedOplog.this.getReader(); 
-          } catch (IOException ex) {
-            handleReadIOError(null, e, false);
-          }
-        }
-      }
-      if (logger.isDebugEnabled())
-        logger.debug("Throwing err from scan delegate ", err);
-      throw err;
-    }
-
-    private HoplogIterator<byte[], byte[]> scanDelegate(byte[] from, boolean fromInclusive, byte[] to,
-        boolean toInclusive) throws IOException {
-      return new HFileSortedIterator(reader.getScanner(true, false), from,
-          fromInclusive, to, toInclusive);
-    }
-    
-    @Override
-    public HoplogIterator<byte[], byte[]> scan(long offset, long length)
-        throws IOException {
-      /**
-       * Identifies the first and last key to be scanned based on offset and
-       * length. It loads hfile block index and identifies the first hfile block
-       * starting after offset. The key of that block is the from key for the scanner.
-       * Similarly it locates the first block starting beyond the offset + length range
-       * and uses the key of that block as the to key for the scanner.
-       */
-
-      // load block indexes in memory
-      BlockIndexReader bir = reader.getDataBlockIndexReader();
-      int blockCount = bir.getRootBlockCount();
-      
-      byte[] fromKey = null, toKey = null;
-
-      // find from key
-      int i = 0;
-      for (; i < blockCount; i++) {
-        if (bir.getRootBlockOffset(i) < offset) {
-          // hfile block has offset less than this reader's split offset. check
-          // the next block
-          continue;
-        }
-
-        // found the first hfile block starting after offset
-        fromKey = bir.getRootBlockKey(i);
-        break;
-      }
-
-      if (fromKey == null) {
-        // seems no block starts after the offset. return no-op scanner
-        return new HFileSortedIterator(null, null, false, null, false);
-      }
-      
-      // find to key
-      for (; i < blockCount; i++) {
-        if (bir.getRootBlockOffset(i) < (offset + length)) {
-          // this hfile block lies within the offset+length range. check the
-          // next block for a higher offset
-          continue;
-        }
-
-        // found the first block starting beyond offset+length range.
-        toKey = bir.getRootBlockKey(i);
-        break;
-      }
-
-      // from key is included in scan and to key is excluded
-      HFileScanner scanner = reader.getScanner(true, false);
-      return new HFileSortedIterator(scanner, fromKey, true, toKey, false);
-    }
-    
-    @Override
-    public HoplogIterator<byte[], byte[]> scan() throws IOException {
-      return scan(null, null);
-    }
-
-    public HoplogIterator<byte[], byte[]> scan(byte[] from, byte[] to)
-        throws IOException {
-      return scan(from, true, to, false);
-    }
-
-    @Override
-    public BloomFilter getBloomFilter() throws IOException {
-      BloomFilter result = hoplogBloom;
-      if (result == null) {
-        synchronized (this) {
-          result = hoplogBloom;
-          if (result == null) {
-            hoplogBloom = result = new BloomFilterImpl();
-          }
-        }
-      }
-      return result;
-    }
-
-    @Override
-    public boolean isClosed() {
-      return closed.get();
-    }
-    
-    @Override
-    public void close() throws IOException {
-      close(true);
-    }
-    
-    public void close(boolean clearCache) throws IOException {
-      if (closed.compareAndSet(false, true)) {
-        if (logger.isDebugEnabled())
-          logger.debug("{}Closing reader", logPrefix);
-        reader.close(clearCache);
-      }
-    }
-
-    @Override
-    public long getEntryCount() {
-      return reader.getEntries();
-    }
-
-    public ICardinality getCardinalityEstimator() {
-      return estimator;
-    }
-
-    @Override
-    public long sizeEstimate() {
-      return getCardinalityEstimator().cardinality();
-    }
-
-    private void validate() throws IOException {
-      // check magic
-      byte[] magic = fileInfo.get(Meta.GEMFIRE_MAGIC.toBytes());
-      if (!Arrays.equals(magic, MAGIC)) {
-        throw new IOException(LocalizedStrings.Soplog_INVALID_MAGIC.toLocalizedString(Hex.toHex(magic)));
-      }
-      
-      // check version compatibility
-      byte[] ver = fileInfo.get(Meta.SORTED_OPLOG_VERSION.toBytes());
-      if (logger.isDebugEnabled()) {
-        logger.debug("{}Hoplog version is " + Hex.toHex(ver), logPrefix);
-      }
-      
-      if (!Arrays.equals(ver, HoplogVersion.V1.toBytes())) {
-        throw new IOException(LocalizedStrings.Soplog_UNRECOGNIZED_VERSION.toLocalizedString(Hex.toHex(ver)));
-      }
-    }
-    
-    private void loadComparators(DelegatingSerializedComparator comparator) throws IOException {
-      byte[] raw = fileInfo.get(Meta.COMPARATORS.toBytes());
-      assert raw != null;
-
-      DataInput in = new DataInputStream(new ByteArrayInputStream(raw));
-      comparator.setComparators(readComparators(in));
-    }
-    
-    private SerializedComparator[] readComparators(DataInput in) throws IOException {
-      try {
-        SerializedComparator[] comps = new SerializedComparator[in.readInt()];
-        assert comps.length > 0;
-        
-        for (int i = 0; i < comps.length; i++) {
-          comps[i] = (SerializedComparator) Class.forName(in.readUTF()).newInstance();
-          if (comps[i] instanceof DelegatingSerializedComparator) {
-            ((DelegatingSerializedComparator) comps[i]).setComparators(readComparators(in));
-          }
-        }
-        return comps;
-        
-      } catch (Exception e) {
-        throw new IOException(e);
-      }
-    }
-    
-    class BloomFilterImpl implements BloomFilter {
-      private final org.apache.hadoop.hbase.util.BloomFilter hfileBloom;
-
-      public BloomFilterImpl() throws IOException {
-        DataInput bin = reader.getGeneralBloomFilterMetadata();
-        // instantiate bloom filter if meta present in hfile
-        if (bin != null) {
-          hfileBloom = BloomFilterFactory.createFromMeta(bin, reader);
-          if (reader.getComparator() instanceof DelegatingSerializedComparator) {
-            loadComparators((DelegatingSerializedComparator) hfileBloom.getComparator());
-          }
-        } else {
-          hfileBloom = null;
-        }
-      }
-
-      @Override
-      public boolean mightContain(byte[] key) {
-        assert key != null;
-        return mightContain(key, 0, key.length);
-      }
-
-      @Override
-      public boolean mightContain(byte[] key, int keyOffset, int keyLength) {
-        assert key != null;
-        long start = stats.getBloom().begin();
-        boolean found = hfileBloom == null ? true : hfileBloom.contains(key, keyOffset, keyLength, null);
-        stats.getBloom().end(start);
-        return found;
-      }
-
-      @Override
-      public long getBloomSize() {
-        return hfileBloom == null ? 0 : hfileBloom.getByteSize();
-      }
-    }
-
-    // TODO change the KV types to ByteBuffer instead of byte[]
-    public final class HFileSortedIterator implements HoplogIterator<byte[], byte[]> {
-      private final HFileScanner scan;
-      
-      private final byte[] from;
-      private final boolean fromInclusive;
-      
-      private final byte[] to;
-      private final boolean toInclusive;
-      
-      private ByteBuffer prefetchedKey;
-      private ByteBuffer prefetchedValue;
-      private ByteBuffer currentKey;
-      private ByteBuffer currentValue;
-      
-      // variable linked to scan stats
-      ScanOperation scanStat;
-      private long scanStart;
-      
-      public HFileSortedIterator(HFileScanner scan, byte[] from, boolean fromInclusive, byte[] to, 
-          boolean toInclusive) throws IOException {
-        this.scan = scan;
-        this.from = from;
-        this.fromInclusive = fromInclusive;
-        this.to = to;
-        this.toInclusive = toInclusive;
-
-        scanStat = (stats == null) ? new SortedOplogStatistics("", "").new ScanOperation(
-            0, 0, 0, 0, 0, 0, 0) : stats.getScan();
-        scanStart = scanStat.begin();
-
-        if (scan == null) {
-          return;
-        }
-
-        assert from == null || to == null
-            || scan.getReader().getComparator().compare(from, to) <= 0;
-
-        initIterator();
-      }
-      
-      /*
-       * prefetches the first key and value from the file so that hasNext() works
-       */
-      private void initIterator() throws IOException {
-        long startNext = scanStat.beginIteration();
-        boolean scanSuccessful = true;
-        if (from == null) {
-          scanSuccessful = scan.seekTo();
-        } else {
-          int compare = scan.seekTo(from);
-          if (compare == 0 && !fromInclusive || compare > 0) {
-            // as from is exclusive and the first key is the same as from, skip the first key
-            scanSuccessful = scan.next();
-          }
-        }
-        
-        populateKV(startNext, scanSuccessful);
-      }
-      
-      @Override
-      public boolean hasNext() {
-        return prefetchedKey != null;
-      }
-
-      @Override
-      public byte[] next() throws IOException {
-        return byteBufferToArray(nextBB());
-      }
-
-      public ByteBuffer nextBB() throws IOException {
-        long startNext = scanStat.beginIteration();
-        if (prefetchedKey == null) {
-          throw new NoSuchElementException();
-        }
-
-        currentKey = prefetchedKey;
-        currentValue = prefetchedValue;
-
-        prefetchedKey = null;
-        prefetchedValue = null;
-
-        if (scan.next()) {
-          populateKV(startNext, true);
-        }
-        
-        return currentKey;
-      }
-
-      
-      private void populateKV(long nextStartTime, boolean scanSuccessful) {
-        if (!scanSuccessful) {
-          //end of file reached. collect stats and return
-          scanStat.endIteration(0, nextStartTime);
-          return;
-        }
-        
-        prefetchedKey = scan.getKey();
-        prefetchedValue = scan.getValue();
-        
-        if (to != null) {
-          // TODO Optimization? Perform int comparison instead of byte[]. Identify
-          // offset of key greater than two.
-          int compare = -1;
-          compare = scan.getReader().getComparator().compare
-              (prefetchedKey.array(), prefetchedKey.arrayOffset(), prefetchedKey.remaining(), to, 0, to.length);
-          if (compare > 0 || (compare == 0 && !toInclusive)) {
-            prefetchedKey = null;
-            prefetchedValue = null;
-            return;
-          }
-        }
-        
-        // account for bytes read and time spent
-        int byteCount = prefetchedKey.remaining() + prefetchedValue.remaining();
-        scanStat.endIteration(byteCount, nextStartTime);
-      }
-      
-
-      @Override
-      public byte[] getKey() {
-        return byteBufferToArray(getKeyBB());
-      }
-      public ByteBuffer getKeyBB() {
-        return currentKey;
-      }
-
-      @Override
-      public byte[] getValue() {
-        return byteBufferToArray(getValueBB());
-      }
-      public ByteBuffer getValueBB() {
-        return currentValue;
-      }
-
-      @Override
-      public void remove() {
-        throw new UnsupportedOperationException("Cannot delete a key-value from a hfile sorted oplog");
-      }
-      
-      @Override
-      public void close() {
-        scanStat.end(scanStart);
-      }
-    }
-  }
-  
-  public static byte[] byteBufferToArray(ByteBuffer bb) {
-    if (bb == null) {
-      return null;
-    }
-    
-    byte[] tmp = new byte[bb.remaining()];
-    bb.duplicate().get(tmp);
-    return tmp;
-  }
-}


[31/63] [abbrv] incubator-geode git commit: Merge branch 'develop' into feature/GEODE-17-2

Posted by kl...@apache.org.
Merge branch 'develop' into feature/GEODE-17-2


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/165c9bc9
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/165c9bc9
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/165c9bc9

Branch: refs/heads/feature/GEODE-1276
Commit: 165c9bc9ee262fd1edeaf4038480f042a644a787
Parents: c235ef8 7e2ca6c
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Thu Apr 28 07:51:27 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Thu Apr 28 07:51:27 2016 -0700

----------------------------------------------------------------------
 .../com/gemstone/gemfire/cache/query/Utils.java |   38 +
 .../dunit/QueryDataInconsistencyDUnitTest.java  |    2 -
 .../QueryUsingFunctionContextDUnitTest.java     |    8 +-
 .../QueryREUpdateInProgressJUnitTest.java       |   12 +-
 ...rrentIndexInitOnOverflowRegionDUnitTest.java |   27 +-
 ...ndexOperationsOnOverflowRegionDUnitTest.java |   97 +-
 ...pdateWithInplaceObjectModFalseDUnitTest.java |   46 +-
 ...ConcurrentIndexUpdateWithoutWLDUnitTest.java |   48 +-
 ...itializeIndexEntryDestroyQueryDUnitTest.java |   96 +-
 .../PRBasicIndexCreationDUnitTest.java          |  302 ++---
 .../PRBasicIndexCreationDeadlockDUnitTest.java  |   42 +-
 .../PRBasicMultiIndexCreationDUnitTest.java     |  267 ++---
 .../partitioned/PRBasicQueryDUnitTest.java      |   36 +-
 .../PRBasicRemoveIndexDUnitTest.java            |   37 +-
 .../PRColocatedEquiJoinDUnitTest.java           |  106 +-
 .../partitioned/PRInvalidQueryDUnitTest.java    |   26 +-
 .../partitioned/PRQueryCacheCloseDUnitTest.java |   81 +-
 .../query/partitioned/PRQueryDUnitHelper.java   |  818 ++-----------
 .../query/partitioned/PRQueryDUnitTest.java     |   87 +-
 .../query/partitioned/PRQueryPerfDUnitTest.java |  504 --------
 .../PRQueryRegionCloseDUnitTest.java            |   28 +-
 .../PRQueryRegionDestroyedDUnitTest.java        |   36 +-
 .../PRQueryRemoteNodeExceptionDUnitTest.java    |   48 +-
 .../cache30/ClientMembershipDUnitTest.java      |  827 +++++++------
 ...hreadPoolExecutorWithKeepAliveJUnitTest.java |   10 +-
 .../cache/wan/AsyncEventQueueTestBase.java      |   12 -
 .../asyncqueue/AsyncEventListenerDUnitTest.java | 1112 +++++++++---------
 .../AsyncEventQueueStatsDUnitTest.java          |  186 +--
 .../ConcurrentAsyncEventQueueDUnitTest.java     |  168 +--
 .../CommonParallelAsyncEventQueueDUnitTest.java |    8 +-
 .../gemfire/management/QueryDataDUnitTest.java  |    6 +-
 .../gemfire/internal/cache/wan/WANTestBase.java |   17 +
 .../wan/misc/NewWanAuthenticationDUnitTest.java |  309 +++++
 .../serial/SerialWANPropogationDUnitTest.java   |    4 -
 34 files changed, 2282 insertions(+), 3169 deletions(-)
----------------------------------------------------------------------



[49/63] [abbrv] incubator-geode git commit: GEODE-510 added cache-server port to 0 and added wait for client queue to pause

Posted by kl...@apache.org.
GEODE-510 added cache-server port to 0 and added wait for client queue to pause


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/15b1e70e
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/15b1e70e
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/15b1e70e

Branch: refs/heads/feature/GEODE-1276
Commit: 15b1e70e7de5c8cb27508fc0e7d9973722013170
Parents: 29fde0d
Author: Hitesh Khamesra <hi...@yahoo.com>
Authored: Fri Apr 29 15:57:50 2016 -0700
Committer: Hitesh Khamesra <hi...@yahoo.com>
Committed: Fri Apr 29 15:57:50 2016 -0700

----------------------------------------------------------------------
 .../internal/cache/ha/Bug48571DUnitTest.java    | 34 ++++++++++++++++++--
 1 file changed, 31 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/15b1e70e/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug48571DUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug48571DUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug48571DUnitTest.java
index a81d9c1..02f7014 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug48571DUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug48571DUnitTest.java
@@ -16,6 +16,8 @@
  */
 package com.gemstone.gemfire.internal.cache.ha;
 
+import java.util.Collection;
+import java.util.Iterator;
 import java.util.Properties;
 
 import org.junit.experimental.categories.Category;
@@ -90,7 +92,32 @@ public class Bug48571DUnitTest extends DistributedTestCase {
     }
   }
 
-  @Category(FlakyTest.class) // GEODE-510: random ports, async actions, AsyncInvocation orphan
+  private static void verifyProxyHasBeenPaused() {
+    WaitCriterion criterion = new WaitCriterion() {
+      @Override
+      public boolean done() {
+        CacheClientNotifier ccn = CacheClientNotifier.getInstance();
+        Collection<CacheClientProxy> ccProxies = ccn.getClientProxies();
+
+        Iterator<CacheClientProxy> itr = ccProxies.iterator();
+
+        while (itr.hasNext()) {
+          CacheClientProxy ccp = itr.next();
+          System.out.println("proxy status " + ccp.getState());
+          if (ccp.isPaused())
+            return true;
+        }
+        return false;
+      }
+      @Override
+      public String description() {
+        // TODO Auto-generated method stub
+        return "Proxy has not paused yet";
+      }
+    };
+    Wait.waitForCriterion(criterion, 15 * 1000, 200, true);
+  }
+  
   public void testStatsMatchWithSize() throws Exception {
     IgnoredException.addIgnoredException("Unexpected IOException||Connection reset");
     // start a server
@@ -101,6 +128,8 @@ public class Bug48571DUnitTest extends DistributedTestCase {
     server.invoke(() -> Bug48571DUnitTest.doPuts());
     // close durable client
     client.invoke(() -> Bug48571DUnitTest.closeClientCache());
+    
+    server.invoke("verifyProxyHasBeenPaused", () -> verifyProxyHasBeenPaused() );
     // resume puts on server, add another 100.
     server.invokeAsync(() -> Bug48571DUnitTest.resumePuts()); // TODO: join or await result
     // start durable client
@@ -131,9 +160,8 @@ public class Bug48571DUnitTest extends DistributedTestCase {
     rf.setConcurrencyChecksEnabled(false);
     rf.create(region);
 
-    int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     CacheServer server1 = cache.addCacheServer();
-    server1.setPort(port);
+    server1.setPort(0);
     server1.start();
     return server1.getPort();
   }
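
The change above swaps a pre-picked random port for port 0, so the operating system assigns an ephemeral port when the cache server binds, and the test then reads the real port back with getPort(). A minimal sketch of that pattern, assuming a Cache has already been created (the class and method names below are illustrative, not part of this commit):

    import java.io.IOException;

    import com.gemstone.gemfire.cache.Cache;
    import com.gemstone.gemfire.cache.server.CacheServer;

    public class EphemeralPortExample {
      // Start a cache server on an OS-assigned port and return the port actually bound.
      static int startServerOnEphemeralPort(Cache cache) throws IOException {
        CacheServer server = cache.addCacheServer();
        server.setPort(0);       // 0 = let the OS pick a free port, avoiding collisions between tests
        server.start();
        return server.getPort(); // the real port; hand this to client pools
      }
    }

Binding to port 0 avoids the window between picking a random available port and actually binding it, which is one of the sources of flakiness noted in the removed @Category(FlakyTest.class) annotation.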


[32/63] [abbrv] incubator-geode git commit: GEODE-1176 Added some more ops to trigger client to fetch PR meta data.

Posted by kl...@apache.org.
GEODE-1176 Added some more ops to trigger client to fetch PR meta data.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/a254c428
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/a254c428
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/a254c428

Branch: refs/heads/feature/GEODE-1276
Commit: a254c428cd9a805384bbf39d7bde64649cf56624
Parents: 7e2ca6c
Author: Hitesh Khamesra <hi...@yahoo.com>
Authored: Thu Apr 28 10:43:46 2016 -0700
Committer: Hitesh Khamesra <hi...@yahoo.com>
Committed: Thu Apr 28 10:44:48 2016 -0700

----------------------------------------------------------------------
 .../internal/cache/FixedPRSinglehopDUnitTest.java   | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a254c428/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/FixedPRSinglehopDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/FixedPRSinglehopDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/FixedPRSinglehopDUnitTest.java
index 61ea97a..8f3fc2d 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/FixedPRSinglehopDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/FixedPRSinglehopDUnitTest.java
@@ -635,6 +635,22 @@ public class FixedPRSinglehopDUnitTest extends CacheTestCase {
     region.put(q3dateSep1, "update1010");
     region.put(q4dateDec1, "update1111");
     
+    region.put(q1dateJan1, "update000");
+    region.put(q1dateFeb1, "update444");
+    region.put(q1dateMar1, "update888");
+    region.put(q2dateApr1, "update111");
+    region.put(q2dateMay1, "update555");
+    region.put(q2dateJun1, "update999");
+    region.put(q1dateJan1, "update0000");
+    region.put(q3dateJuly1, "update222");
+    region.put(q3dateAug1, "update666");
+    region.put(q3dateSep1, "update101010");
+    region.put(q1dateJan1, "update00000");
+    region.put(q4dateOct1, "update333");              
+    region.put(q4dateNov1, "update777");          
+    region.put(q4dateDec1, "update111111");
+    region.put(q1dateJan1, "update000000");
+    
   }
 
   public static void putIntoPartitionedRegionsThreeQs() {


[16/63] [abbrv] incubator-geode git commit: GEODE-1072: Removing HDFS related code

Posted by kl...@apache.org.
GEODE-1072: Removing HDFS related code

Removing all HDFS and EvictionCriteria created code. This code will be
reinstated on a branch to be cleaned up and merged as a separate module.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/46535f28
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/46535f28
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/46535f28

Branch: refs/heads/feature/GEODE-1276
Commit: 46535f28e4740ed9b6da87bbb27c39d0c13b3da4
Parents: 32c9df6
Author: Dan Smith <up...@apache.org>
Authored: Fri Apr 15 14:38:06 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Wed Apr 27 11:24:34 2016 -0700

----------------------------------------------------------------------
 geode-core/build.gradle                         |   22 -
 .../gemfire/cache/AttributesFactory.java        |   58 -
 .../gemfire/cache/AttributesMutator.java        |   14 -
 .../gemfire/cache/CustomEvictionAttributes.java |   78 -
 .../com/gemstone/gemfire/cache/DataPolicy.java  |   11 -
 .../gemfire/cache/EvictionCriteria.java         |   57 -
 .../com/gemstone/gemfire/cache/Operation.java   |   13 -
 .../gemfire/cache/RegionAttributes.java         |   23 -
 .../gemstone/gemfire/cache/RegionFactory.java   |   24 -
 .../internal/AsyncEventQueueFactoryImpl.java    |    5 -
 .../gemfire/cache/hdfs/HDFSIOException.java     |   52 -
 .../gemstone/gemfire/cache/hdfs/HDFSStore.java  |  341 --
 .../gemfire/cache/hdfs/HDFSStoreFactory.java    |  203 -
 .../gemfire/cache/hdfs/HDFSStoreMutator.java    |  196 -
 .../cache/hdfs/StoreExistsException.java        |   32 -
 .../cache/hdfs/internal/FailureTracker.java     |   96 -
 .../cache/hdfs/internal/FlushObserver.java      |   53 -
 .../hdfs/internal/HDFSBucketRegionQueue.java    | 1232 ------
 .../cache/hdfs/internal/HDFSEntriesSet.java     |  329 --
 .../cache/hdfs/internal/HDFSEventListener.java  |  179 -
 .../hdfs/internal/HDFSEventQueueFilter.java     |   73 -
 .../hdfs/internal/HDFSGatewayEventImpl.java     |  180 -
 .../hdfs/internal/HDFSIntegrationUtil.java      |  117 -
 .../HDFSParallelGatewaySenderQueue.java         |  471 ---
 .../hdfs/internal/HDFSStoreConfigHolder.java    |  559 ---
 .../cache/hdfs/internal/HDFSStoreCreation.java  |  198 -
 .../hdfs/internal/HDFSStoreFactoryImpl.java     |   77 -
 .../cache/hdfs/internal/HDFSStoreImpl.java      |  638 ---
 .../hdfs/internal/HDFSStoreMutatorImpl.java     |  200 -
 .../HDFSWriteOnlyStoreEventListener.java        |  184 -
 .../hdfs/internal/HoplogListenerForRegion.java  |   72 -
 .../cache/hdfs/internal/PersistedEventImpl.java |  202 -
 .../hdfs/internal/QueuedPersistentEvent.java    |   27 -
 .../hdfs/internal/SignalledFlushObserver.java   |  122 -
 .../internal/SortedHDFSQueuePersistedEvent.java |   86 -
 .../internal/SortedHoplogPersistedEvent.java    |  114 -
 .../UnsortedHDFSQueuePersistedEvent.java        |   76 -
 .../internal/UnsortedHoplogPersistedEvent.java  |   92 -
 .../hdfs/internal/hoplog/AbstractHoplog.java    |  357 --
 .../hoplog/AbstractHoplogOrganizer.java         |  430 --
 .../cache/hdfs/internal/hoplog/BloomFilter.java |   36 -
 .../hoplog/CloseTmpHoplogsTimerTask.java        |  108 -
 .../hdfs/internal/hoplog/CompactionStatus.java  |   72 -
 .../cache/hdfs/internal/hoplog/FlushStatus.java |   72 -
 .../internal/hoplog/HDFSCompactionManager.java  |  330 --
 .../internal/hoplog/HDFSFlushQueueArgs.java     |   93 -
 .../internal/hoplog/HDFSFlushQueueFunction.java |  287 --
 .../hoplog/HDFSForceCompactionArgs.java         |  107 -
 .../hoplog/HDFSForceCompactionFunction.java     |  129 -
 .../HDFSForceCompactionResultCollector.java     |  131 -
 .../hoplog/HDFSLastCompactionTimeFunction.java  |   56 -
 .../internal/hoplog/HDFSRegionDirector.java     |  480 ---
 .../hdfs/internal/hoplog/HDFSStoreDirector.java |   78 -
 .../hoplog/HDFSUnsortedHoplogOrganizer.java     |  447 ---
 .../hdfs/internal/hoplog/HFileSortedOplog.java  |  853 ----
 .../hoplog/HdfsSortedOplogOrganizer.java        | 2004 ----------
 .../cache/hdfs/internal/hoplog/Hoplog.java      |  263 --
 .../hdfs/internal/hoplog/HoplogConfig.java      |   74 -
 .../hdfs/internal/hoplog/HoplogListener.java    |   47 -
 .../hdfs/internal/hoplog/HoplogOrganizer.java   |  123 -
 .../hdfs/internal/hoplog/HoplogSetIterator.java |  166 -
 .../hdfs/internal/hoplog/HoplogSetReader.java   |  114 -
 .../internal/hoplog/SequenceFileHoplog.java     |  395 --
 .../hoplog/mapred/AbstractGFRecordReader.java   |  106 -
 .../internal/hoplog/mapred/GFInputFormat.java   |   95 -
 .../internal/hoplog/mapred/GFOutputFormat.java  |   75 -
 .../mapreduce/AbstractGFRecordReader.java       |  140 -
 .../hoplog/mapreduce/GFInputFormat.java         |  124 -
 .../hdfs/internal/hoplog/mapreduce/GFKey.java   |   72 -
 .../hoplog/mapreduce/GFOutputFormat.java        |  198 -
 .../hoplog/mapreduce/HDFSSplitIterator.java     |  197 -
 .../internal/hoplog/mapreduce/HoplogUtil.java   |  463 ---
 .../hoplog/mapreduce/RWSplitIterator.java       |   48 -
 .../hoplog/mapreduce/StreamSplitIterator.java   |   46 -
 .../org/apache/hadoop/io/SequenceFile.java      | 3726 ------------------
 .../gemfire/cache/wan/GatewaySender.java        |    2 -
 .../gemstone/gemfire/internal/DSFIDFactory.java |    3 -
 .../internal/DataSerializableFixedID.java       |    1 -
 .../admin/remote/RemoteRegionAttributes.java    |   25 -
 .../cache/AbstractBucketRegionQueue.java        |   18 +-
 .../gemfire/internal/cache/AbstractRegion.java  |  147 -
 .../internal/cache/AbstractRegionEntry.java     |   30 +-
 .../internal/cache/AbstractRegionMap.java       |   86 +-
 .../gemfire/internal/cache/BucketAdvisor.java   |    1 -
 .../gemfire/internal/cache/BucketRegion.java    |  209 +-
 .../internal/cache/BucketRegionQueue.java       |    2 +-
 .../cache/CacheDistributionAdvisor.java         |   22 +-
 .../gemfire/internal/cache/CachePerfStats.java  |   75 -
 .../internal/cache/ColocationHelper.java        |    3 -
 .../cache/CustomEvictionAttributesImpl.java     |   35 -
 .../gemfire/internal/cache/DistTXState.java     |    2 +-
 .../cache/DistributedCacheOperation.java        |    3 -
 .../cache/DistributedPutAllOperation.java       |   20 +-
 .../internal/cache/DistributedRegion.java       |   31 +-
 .../gemfire/internal/cache/EntryEventImpl.java  |   31 -
 .../gemfire/internal/cache/EvictorService.java  |  284 --
 .../internal/cache/GemFireCacheImpl.java        |   99 -
 .../gemfire/internal/cache/HARegion.java        |   15 +-
 .../internal/cache/HDFSLRURegionMap.java        |  111 -
 .../gemfire/internal/cache/HDFSRegionMap.java   |   32 -
 .../internal/cache/HDFSRegionMapDelegate.java   |  540 ---
 .../internal/cache/HDFSRegionMapImpl.java       |   74 -
 .../gemfire/internal/cache/InternalCache.java   |    4 -
 .../internal/cache/InternalDataView.java        |   28 +-
 .../internal/cache/InternalRegionArguments.java |   16 -
 .../gemfire/internal/cache/LocalRegion.java     |  226 +-
 .../internal/cache/LocalRegionDataView.java     |   35 +-
 .../internal/cache/NonLocalRegionEntry.java     |   20 -
 .../gemstone/gemfire/internal/cache/Oplog.java  |   13 -
 .../internal/cache/PartitionedRegion.java       |  482 +--
 .../cache/PartitionedRegionDataStore.java       |   49 +-
 .../cache/PartitionedRegionDataView.java        |   27 +-
 .../gemfire/internal/cache/ProxyRegionMap.java  |   21 -
 .../gemfire/internal/cache/RegionEntry.java     |   20 -
 .../internal/cache/RegionMapFactory.java        |    6 -
 .../internal/cache/RemoteGetMessage.java        |    2 +-
 .../gemfire/internal/cache/TXEntry.java         |    3 +-
 .../gemfire/internal/cache/TXState.java         |   38 +-
 .../internal/cache/TXStateInterface.java        |   10 +-
 .../internal/cache/TXStateProxyImpl.java        |   30 +-
 .../gemfire/internal/cache/TXStateStub.java     |   32 +-
 .../cache/UserSpecifiedRegionAttributes.java    |   24 +-
 .../internal/cache/ValidatingDiskRegion.java    |   13 -
 .../partitioned/FetchBulkEntriesMessage.java    |    2 +-
 .../internal/cache/partitioned/GetMessage.java  |   22 +-
 .../cache/partitioned/PutAllPRMessage.java      |   16 +-
 .../internal/cache/partitioned/PutMessage.java  |    9 -
 .../persistence/soplog/ByteComparator.java      |   55 -
 .../persistence/soplog/CursorIterator.java      |   81 -
 .../soplog/DelegatingSerializedComparator.java  |   37 -
 .../soplog/HFileStoreStatistics.java            |  205 -
 .../persistence/soplog/KeyValueIterator.java    |   42 -
 .../soplog/SortedOplogStatistics.java           |  505 ---
 .../cache/persistence/soplog/SortedReader.java  |  255 --
 .../persistence/soplog/TrackedReference.java    |  153 -
 .../cache/tier/sockets/BaseCommand.java         |    8 +-
 .../cache/tier/sockets/command/Get70.java       |    3 +-
 .../cache/tier/sockets/command/Request.java     |    2 +-
 .../internal/cache/tx/ClientTXRegionStub.java   |    4 +-
 .../cache/tx/DistributedTXRegionStub.java       |   14 +-
 .../cache/tx/PartitionedTXRegionStub.java       |    8 +-
 .../gemfire/internal/cache/tx/TXRegionStub.java |    4 +-
 .../cache/wan/AbstractGatewaySender.java        |   22 +-
 .../cache/wan/GatewaySenderAttributes.java      |    5 -
 ...rentParallelGatewaySenderEventProcessor.java |    3 -
 .../ConcurrentParallelGatewaySenderQueue.java   |   12 -
 .../ParallelGatewaySenderEventProcessor.java    |   22 +-
 .../parallel/ParallelGatewaySenderQueue.java    |   20 +-
 .../cache/xmlcache/AsyncEventQueueCreation.java |    9 -
 .../internal/cache/xmlcache/CacheCreation.java  |   39 +-
 .../internal/cache/xmlcache/CacheXml.java       |   31 -
 .../internal/cache/xmlcache/CacheXmlParser.java |  170 -
 .../xmlcache/RegionAttributesCreation.java      |   55 +-
 .../gemfire/internal/i18n/LocalizedStrings.java |   30 -
 .../management/DistributedRegionMXBean.java     |   11 -
 .../management/DistributedSystemMXBean.java     |    8 -
 .../gemfire/management/MemberMXBean.java        |    7 -
 .../gemfire/management/RegionMXBean.java        |   10 -
 .../internal/beans/DistributedRegionBridge.java |    5 -
 .../internal/beans/DistributedRegionMBean.java  |    5 -
 .../internal/beans/DistributedSystemBridge.java |   19 -
 .../internal/beans/DistributedSystemMBean.java  |    7 -
 .../internal/beans/HDFSRegionBridge.java        |  173 -
 .../management/internal/beans/MemberMBean.java  |    5 -
 .../internal/beans/MemberMBeanBridge.java       |   27 -
 .../internal/beans/PartitionedRegionBridge.java |   13 +-
 .../management/internal/beans/RegionMBean.java  |    5 -
 .../internal/beans/RegionMBeanBridge.java       |    5 -
 .../beans/stats/RegionClusterStatsMonitor.java  |    7 -
 .../cli/domain/RegionAttributesInfo.java        |   21 +-
 .../functions/DescribeHDFSStoreFunction.java    |   86 -
 .../cli/util/HDFSStoreNotFoundException.java    |   47 -
 .../cli/util/RegionAttributesNames.java         |    4 +-
 .../support/MemberMXBeanAdapter.java            |    5 -
 .../geode.apache.org/schema/cache/cache-1.0.xsd |   31 -
 .../SignalledFlushObserverJUnitTest.java        |   97 -
 .../SortedListForAsyncQueueJUnitTest.java       |  564 ---
 .../gemfire/cache30/Bug38741DUnitTest.java      |    2 +-
 .../ParallelGatewaySenderQueueJUnitTest.java    |    2 +-
 .../domain/CacheElementJUnitTest.java           |    1 -
 .../internal/JUnit4DistributedTestCase.java     |    3 -
 .../sanctionedDataSerializables.txt             |   92 +-
 .../codeAnalysis/sanctionedSerializables.txt    |   27 +-
 geode-lucene/build.gradle                       |    4 -
 .../tools/pulse/internal/data/Cluster.java      |    9 -
 .../pulse/internal/data/PulseConstants.java     |    1 -
 .../internal/service/ClusterRegionService.java  |   11 -
 .../internal/service/ClusterRegionsService.java |   11 -
 .../service/ClusterSelectedRegionService.java   |    6 -
 .../scripts/pulsescript/PulseCallbacks.js       |    2 -
 .../webapp/scripts/pulsescript/clusterDetail.js |    7 +-
 .../controllers/PulseControllerJUnitTest.java   |    3 -
 .../gemfire/tools/pulse/tests/Region.java       |    9 +-
 geode-pulse/src/test/resources/test.properties  |    6 +-
 geode-rebalancer/build.gradle                   |    7 -
 .../cache/wan/GatewaySenderFactoryImpl.java     |    4 -
 .../internal/cache/UpdateVersionDUnitTest.java  |    6 +-
 197 files changed, 427 insertions(+), 24839 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/build.gradle
----------------------------------------------------------------------
diff --git a/geode-core/build.gradle b/geode-core/build.gradle
index 2206018..fedd63e 100755
--- a/geode-core/build.gradle
+++ b/geode-core/build.gradle
@@ -62,27 +62,6 @@ dependencies {
     ext.optional = true;
   }
   compile ('net.java.dev.jna:jna:' + project.'jna.version')
-  provided ('org.apache.hadoop:hadoop-common:' + project.'hadoop.version') {
-    transitive=false
-  }
-  provided ('org.apache.hadoop:hadoop-annotations:' + project.'hadoop.version') {
-    transitive=false
-  }
-  provided ('org.apache.hadoop:hadoop-hdfs:' + project.'hadoop.version') {
-    transitive=false
-  }
-  provided ('org.apache.hadoop:hadoop-mapreduce-client-core:' + project.'hadoop.version') {
-    transitive=false
-  }
-  provided ('org.apache.hbase:hbase:' + project.'hbase.version') {
-    transitive=false
-  }
-
-  compile ('com.google.guava:guava:' + project.'guava.version') {
-    ext.optional = true
-  }
-  //jsr305 is included only to prevent javadoc warnings about missing annotations in the guava jar
-  provided 'com.google.code.findbugs:jsr305:' + project.'jsr305.version'
 
   compile 'org.apache.logging.log4j:log4j-api:' + project.'log4j.version'
   compile 'org.apache.logging.log4j:log4j-core:' + project.'log4j.version'
@@ -131,7 +110,6 @@ dependencies {
   // External
   testCompile 'org.apache.bcel:bcel:' + project.'bcel.version'
   testRuntime 'org.apache.derby:derby:' + project.'derby.version'
-  testRuntime 'org.apache.hadoop:hadoop-auth:' + project.'hadoop.version'
   testCompile 'org.mockito:mockito-core:' + project.'mockito-core.version'
   testRuntime 'commons-collections:commons-collections:' + project.'commons-collections.version'
   testRuntime 'commons-configuration:commons-configuration:' + project.'commons-configuration.version'

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/AttributesFactory.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/AttributesFactory.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/AttributesFactory.java
index 201c1aa..34eafb9 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/AttributesFactory.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/AttributesFactory.java
@@ -31,7 +31,6 @@ import com.gemstone.gemfire.cache.client.ClientRegionShortcut;
 import com.gemstone.gemfire.cache.client.PoolManager;
 import com.gemstone.gemfire.compression.Compressor;
 import com.gemstone.gemfire.internal.cache.AbstractRegion;
-import com.gemstone.gemfire.internal.cache.CustomEvictionAttributesImpl;
 import com.gemstone.gemfire.internal.cache.DiskStoreFactoryImpl;
 import com.gemstone.gemfire.internal.cache.DiskWriteAttributesImpl;
 import com.gemstone.gemfire.internal.cache.EvictionAttributesImpl;
@@ -448,8 +447,6 @@ public class AttributesFactory<K,V> {
         .getPartitionAttributes();
     this.regionAttributes.evictionAttributes = (EvictionAttributesImpl)regionAttributes
         .getEvictionAttributes();
-    this.regionAttributes.customEvictionAttributes = regionAttributes
-        .getCustomEvictionAttributes();
 
     this.regionAttributes.membershipAttributes = regionAttributes.getMembershipAttributes();
     this.regionAttributes.subscriptionAttributes = regionAttributes.getSubscriptionAttributes();
@@ -723,32 +720,6 @@ public class AttributesFactory<K,V> {
      this.regionAttributes.setHasEvictionAttributes(true);
    }
 
-  /**
-   * Set custom {@link EvictionCriteria} for the region with start time and
-   * frequency of evictor task to be run in milliseconds, or evict incoming rows
-   * in case both start and frequency are specified as zero.
-   * 
-   * @param criteria
-   *          an {@link EvictionCriteria} to be used for eviction for HDFS
-   *          persistent regions
-   * @param start
-   *          the start time at which periodic evictor task should be first
-   *          fired to apply the provided {@link EvictionCriteria}; if this is
-   *          zero then current time is used for the first invocation of evictor
-   * @param interval
-   *          the periodic frequency at which to run the evictor task after the
-   *          initial start; if this is if both start and frequency are zero
-   *          then {@link EvictionCriteria} is applied on incoming insert/update
-   *          to determine whether it is to be retained
-   */
-  public void setCustomEvictionAttributes(EvictionCriteria<K, V> criteria,
-      long start, long interval) {
-    this.regionAttributes.customEvictionAttributes =
-        new CustomEvictionAttributesImpl(criteria, start, interval,
-            start == 0 && interval == 0);
-    this.regionAttributes.setHasCustomEviction(true);
-  }
-
    /** Sets the mirror type for the next <code>RegionAttributes</code> created.
    * @param mirrorType The type of mirroring to use for the region
    * @throws IllegalArgumentException if mirrorType is null
@@ -1465,12 +1436,6 @@ public class AttributesFactory<K,V> {
       }
     }
     
-    if (attrs.getHDFSStoreName() != null) {
-      if (!attrs.getDataPolicy().withHDFS() && (attrs.getPartitionAttributes() == null || attrs.getPartitionAttributes().getLocalMaxMemory() != 0)) {
-        throw new IllegalStateException(LocalizedStrings.HDFSSTORE_IS_USED_IN_NONHDFS_REGION.toLocalizedString());        
-      }
-    }
-
     if (!attrs.getStatisticsEnabled() &&
           (attrs.getRegionTimeToLive().getTimeout() != 0 ||
            attrs.getRegionIdleTimeout().getTimeout() != 0 ||
@@ -1633,11 +1598,8 @@ public class AttributesFactory<K,V> {
     SubscriptionAttributes subscriptionAttributes = new SubscriptionAttributes();
     boolean multicastEnabled = false;
     EvictionAttributesImpl evictionAttributes = new EvictionAttributesImpl();  // TODO need to determine the constructor
-    transient CustomEvictionAttributes customEvictionAttributes;
     String poolName = null;
     String diskStoreName = null;
-    String hdfsStoreName = null;
-    private boolean hdfsWriteOnly = false;
     boolean diskSynchronous = DEFAULT_DISK_SYNCHRONOUS;
     protected boolean isBucketRegion = false;
     private boolean isCloningEnabled = false;
@@ -1696,8 +1658,6 @@ public class AttributesFactory<K,V> {
       } else {
         buf.append("; diskStoreName=").append(diskStoreName);
       }
-      buf.append("; hdfsStoreName=").append(hdfsStoreName);
-      buf.append("; hdfsWriteOnly=").append(hdfsWriteOnly);
       buf.append("; GatewaySenderIds=").append(gatewaySenderIds);
       buf.append("; AsyncEventQueueIds=").append(asyncEventQueueIds);
       buf.append("; compressor=").append(compressor == null ? null : compressor.getClass().getName());
@@ -1972,14 +1932,6 @@ public class AttributesFactory<K,V> {
     }
 
     /**
-     * {@inheritDoc}
-     */
-    @Override
-    public CustomEvictionAttributes getCustomEvictionAttributes() {
-      return this.customEvictionAttributes;
-    }
-
-    /**
      * @deprecated this API is scheduled to be removed
      */
     public MembershipAttributes getMembershipAttributes() {
@@ -2037,16 +1989,6 @@ public class AttributesFactory<K,V> {
     }
 
     @Override
-    public String getHDFSStoreName() {
-      return hdfsStoreName;
-    }
-    
-    @Override
-    public boolean getHDFSWriteOnly() {
-      return hdfsWriteOnly;
-    }
-
-    @Override
     public Compressor getCompressor() {
       return this.compressor;
     }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/AttributesMutator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/AttributesMutator.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/AttributesMutator.java
index eb46433..0a69437 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/AttributesMutator.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/AttributesMutator.java
@@ -166,20 +166,6 @@ public interface AttributesMutator<K,V> {
   public EvictionAttributesMutator getEvictionAttributesMutator();
 
   /**
-   * Changes the evictor frequency for custom eviction attributes.
-   * 
-   * @param newStart
-   *          the new start time in millis since epoch for the evictor task
-   * 
-   * @param newInterval
-   *          the new interval between evictor task invocations in millis
-   * 
-   * @return the updated {@link CustomEvictionAttributes}
-   */
-  public CustomEvictionAttributes setCustomEvictionAttributes(long newStart,
-      long newInterval);
-
-  /**
    * Sets cloning on region
    * @param cloningEnable
    * @since 6.1

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/CustomEvictionAttributes.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/CustomEvictionAttributes.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/CustomEvictionAttributes.java
deleted file mode 100644
index c2bc41b..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/CustomEvictionAttributes.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache;
-
-/**
- * Custom eviction attributes including {@link EvictionCriteria} and evictor
- * start time and frequency, if any.
- * 
- * @since gfxd 1.0
- */
-public abstract class CustomEvictionAttributes {
-
-  private final EvictionCriteria<?, ?> criteria;
-
-  private final long evictorStartTime;
-  private final long evictorInterval;
-
-  private final boolean evictIncoming;
-
-  protected CustomEvictionAttributes(EvictionCriteria<?, ?> criteria,
-      long startTime, long interval, boolean evictIncoming) {
-    this.criteria = criteria;
-    this.evictorStartTime = startTime;
-    this.evictorInterval = interval;
-    this.evictIncoming = evictIncoming;
-  }
-
-  /**
-   * Get the {@link EvictionCriteria} for this custom eviction. The criteria
-   * will be applied to the region entries either periodically as per
-   * {@link #getEvictorStartTime()} and {@link #getEvictorInterval()}, or on
-   * incoming puts if {@link #isEvictIncoming()} is true.
-   */
-  @SuppressWarnings({ "rawtypes", "unchecked" })
-  public <K, V> EvictionCriteria<K, V> getCriteria() {
-    return (EvictionCriteria)this.criteria;
-  }
-
-  /**
-   * The absolute start time in milliseconds (as returned by
-   * {@link System#currentTimeMillis()}) when the evictor will be first fired.
-   * Thereafter the evictor will be fired periodically every
-   * {@link #getEvictorInterval()} milliseconds.
-   */
-  public final long getEvictorStartTime() {
-    return this.evictorStartTime;
-  }
-
-  /**
-   * The intervals at which the periodic evictor task is fired and
-   * {@link EvictionCriteria} evaluated to evict entries.
-   */
-  public final long getEvictorInterval() {
-    return this.evictorInterval;
-  }
-
-  /**
-   * If this returns true, then the criteria should always be applied to
-   * incoming entries and never as a periodic task.
-   */
-  public final boolean isEvictIncoming() {
-    return this.evictIncoming;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/DataPolicy.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/DataPolicy.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/DataPolicy.java
index 1e87c0f..80918d9 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/DataPolicy.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/DataPolicy.java
@@ -31,7 +31,6 @@ import java.io.*;
  * <li><code>PERSISTENT_PARTITION</code> in addition to <code>PARTITION</code> also causes data to be stored to disk. The region initialization uses the data stored on disk.
  * <li><code>REPLICATE</code> causes data that this region is interested in to be stored in local memory. A distributed region will be initialized with the data from other caches. On distributed region operations that would cause the contents to differ with other caches are not allowed. This policy is allowed on local scope region but it behaves the same as <code>NORMAL</code>.
  * <li><code>PERSISTENT_REPLICATE</code> in addition to <code>REPLICATE</code> also causes data to be stored to disk. The region initialization uses the data stored on disk. Note that the persistence applies to both local scope and distributed scope.
- * <li><code>HDFS_PARTITION</code> in addition to <code>PARTITION</code> also causes data to be stored to HDFS. The region initialization may use the data stored on HDFS. 
  * </ol>
  *
  *
@@ -245,16 +244,6 @@ public class DataPolicy implements java.io.Serializable {
     return this == PARTITION;
   }
   
-  /** Return whether this policy does persistence on HDFS.
-   * @return true if this policy does persistence on HDFS.
-   */
-  public boolean withHDFS() {
-//    return this == HDFS_PARTITION || this == HDFS_PERSISTENT_PARTITION;
-	  return false;
-  }
-  
-  
-  
   /** Returns a string representation for this data policy.
      * @return the name of this data policy.
      */

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/EvictionCriteria.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/EvictionCriteria.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/EvictionCriteria.java
deleted file mode 100644
index 8df201c..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/EvictionCriteria.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache;
-
-import java.util.Iterator;
-import java.util.Map;
-
-/**
- * Interface implemented by an EVICTION BY CRITERIA of
- * {@link CustomEvictionAttributes}. This will be invoked by periodic evictor
- * task that will get the keys to be evicted using this and then destroy from
- * the region to which this is attached.
- * 
- * @since gfxd 1.0
- */
-public interface EvictionCriteria<K, V> {
-
-  /**
-   * Get the (key, routing object) of the entries to be evicted from region
-   * satisfying EVICTION BY CRITERIA at this point of time.
-   * <p>
-   * The returned Map.Entry object by the Iterator may be reused internally so
-   * caller must extract the key, routing object from the entry on each
-   * iteration.
-   */
-  Iterator<Map.Entry<K, Object>> getKeysToBeEvicted(long currentMillis,
-      Region<K, V> region);
-
-  /**
-   * Last moment check if an entry should be evicted or not applying the
-   * EVICTION BY CRITERIA again under the region entry lock in case the entry
-   * has changed after the check in {@link #getKeysToBeEvicted}.
-   */
-  boolean doEvict(EntryEvent<K, V> event);
-
-  /**
-   * Return true if this eviction criteria is equivalent to the other one. This
-   * is used to ensure that custom eviction is configured identically on all the
-   * nodes of a cluster hosting the region to which this eviction criteria has
-   * been attached.
-   */
-  boolean isEquivalent(EvictionCriteria<K, V> other);
-}
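
Because the interface above is removed by this commit, the class below is only an illustrative sketch of an implementation written against the pre-removal API. The Timestamped value type, the TtlEvictionCriteria name, and the null routing object are assumptions made for the example; they are not part of Geode.

    import java.util.AbstractMap;
    import java.util.Iterator;
    import java.util.Map;

    import com.gemstone.gemfire.cache.EntryEvent;
    import com.gemstone.gemfire.cache.EvictionCriteria;
    import com.gemstone.gemfire.cache.Region;

    /** Hypothetical value type carrying a last-modified timestamp. */
    interface Timestamped {
      long getTimestamp();
    }

    /** Evicts entries whose value is older than a fixed TTL. */
    class TtlEvictionCriteria<K, V extends Timestamped> implements EvictionCriteria<K, V> {
      private final long ttlMillis;

      TtlEvictionCriteria(long ttlMillis) {
        this.ttlMillis = ttlMillis;
      }

      @Override
      public Iterator<Map.Entry<K, Object>> getKeysToBeEvicted(long currentMillis, Region<K, V> region) {
        // Report every key whose value has exceeded the TTL. The routing object is left
        // null because this sketch ignores partition routing.
        return region.entrySet().stream()
            .filter(e -> currentMillis - e.getValue().getTimestamp() > ttlMillis)
            .<Map.Entry<K, Object>>map(e -> new AbstractMap.SimpleEntry<K, Object>(e.getKey(), null))
            .iterator();
      }

      @Override
      public boolean doEvict(EntryEvent<K, V> event) {
        // Last-moment re-check under the entry lock, in case the value changed after the scan.
        V value = event.getNewValue() != null ? event.getNewValue() : event.getOldValue();
        return value != null && System.currentTimeMillis() - value.getTimestamp() > ttlMillis;
      }

      @Override
      public boolean isEquivalent(EvictionCriteria<K, V> other) {
        // Criteria must be configured identically on every node hosting the region.
        return other instanceof TtlEvictionCriteria
            && ((TtlEvictionCriteria<?, ?>) other).ttlMillis == this.ttlMillis;
      }
    }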

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/Operation.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/Operation.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/Operation.java
index 7a63855..a104751 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/Operation.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/Operation.java
@@ -672,19 +672,6 @@ public final class Operation implements java.io.Serializable {
                     OP_DETAILS_REMOVEALL
                     );
     
-  /**
-   * An entry local destroy caused by an eviction.
-   * @see Region#localDestroy(Object)
-   */
-  public static final Operation CUSTOM_EVICT_DESTROY
-    = new Operation("EVICT_DESTROY",
-                    false, // isLocal
-                    false, // isRegion
-                    OP_TYPE_DESTROY,
-                    OP_DETAILS_EVICT
-                    );
-
-
   /** The name of this mirror type. */
   private final transient String name;
     

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionAttributes.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionAttributes.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionAttributes.java
index dd5c0e0..94cc11a 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionAttributes.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionAttributes.java
@@ -20,7 +20,6 @@ import java.io.File;
 import java.util.Set;
 
 import com.gemstone.gemfire.cache.client.Pool;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
 import com.gemstone.gemfire.compression.Compressor;
 
 /** Defines attributes for configuring a region.
@@ -148,11 +147,6 @@ public interface RegionAttributes<K,V> {
    */
   public EvictionAttributes getEvictionAttributes();
 
-  /**
-   * Return the {@link CustomEvictionAttributes}, if any, set for the region.
-   */
-  public CustomEvictionAttributes getCustomEvictionAttributes();
-
   /** Returns the cache listener for the region.
    * @throws IllegalStateException if more than one cache listener exists on this attributes
    * @return the region's <code>CacheListener</code>
@@ -453,23 +447,6 @@ public interface RegionAttributes<K,V> {
   public boolean getConcurrencyChecksEnabled();
   
   /**
-   * Returns the name of the {@link HDFSStore} that this region belongs
-   * to, if any.
-   * @return the name of the {@link HDFSStore} of this region; 
-   * <code>null</code> is returned if this region has no
-   * {@link HDFSStore}.
-   * @since 9.0
-   */
-  public String getHDFSStoreName();
-  
-  /**
-   * Returns true if this region is configured to
-   * be write-only to HDFS. 
-   * @since 9.0
-   */
-  public boolean getHDFSWriteOnly();
-  
-  /**
    * Returns the compressor used by this region's entry values.
    * @since 8.0
    * @return null if the region does not have compression enabled.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionFactory.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionFactory.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionFactory.java
index 729374e..b919cc0 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionFactory.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionFactory.java
@@ -401,30 +401,6 @@ public class RegionFactory<K,V>
   }
 
   /**
-   * Set custom {@link EvictionCriteria} for the region with start time and
-   * interval of evictor task to be run in milliseconds, or evict incoming rows
-   * in case both start and frequency are specified as zero.
-   * 
-   * @param criteria
-   *          an {@link EvictionCriteria} to be used for eviction for HDFS
-   *          persistent regions
-   * @param start
-   *          the start time at which periodic evictor task should be first
-   *          fired to apply the provided {@link EvictionCriteria}; if this is
-   *          zero then current time is used for the first invocation of evictor
-   * @param interval
-   *          the periodic frequency at which to run the evictor task after the
-   *          initial start; if both start and frequency are zero
-   *          then {@link EvictionCriteria} is applied on incoming insert/update
-   *          to determine whether it is to be retained
-   */
-  public RegionFactory<K, V> setCustomEvictionAttributes(
-      EvictionCriteria<K, V> criteria, long start, long interval) {
-    this.attrsFactory.setCustomEvictionAttributes(criteria, start, interval);
-    return this;
-  }
-
-  /**
    * Sets the scope for the next <code>RegionAttributes</code> created.
    *
    * @param scopeType

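A usage sketch of the removed setter, against the pre-removal API: "cache" is assumed to be an existing Cache, MyValue is a placeholder value class (assumed to implement the Timestamped sketch), and TtlEvictionCriteria is the hypothetical criteria sketched above under EvictionCriteria.java.

    // First evictor run immediately (start = 0), then every 60 seconds;
    // values older than 5 minutes become candidates for eviction.
    Region<String, MyValue> region = cache
        .<String, MyValue>createRegionFactory(RegionShortcut.PARTITION)
        .setCustomEvictionAttributes(new TtlEvictionCriteria<String, MyValue>(300000L), 0L, 60000L)
        .create("expiringData");
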
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/asyncqueue/internal/AsyncEventQueueFactoryImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/asyncqueue/internal/AsyncEventQueueFactoryImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/asyncqueue/internal/AsyncEventQueueFactoryImpl.java
index d15222b..312e880 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/asyncqueue/internal/AsyncEventQueueFactoryImpl.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/asyncqueue/internal/AsyncEventQueueFactoryImpl.java
@@ -272,7 +272,6 @@ public class AsyncEventQueueFactoryImpl implements AsyncEventQueueFactory {
     this.attrs.maximumQueueMemory = asyncQueueCreation.getMaximumQueueMemory();
     this.attrs.isParallel = asyncQueueCreation.isParallel();
     this.attrs.isBucketSorted = ((AsyncEventQueueCreation)asyncQueueCreation).isBucketSorted();
-	this.attrs.isHDFSQueue = ((AsyncEventQueueCreation)asyncQueueCreation).isHDFSQueue();
     this.attrs.dispatcherThreads = asyncQueueCreation.getDispatcherThreads();
     this.attrs.policy = asyncQueueCreation.getOrderPolicy();
     this.attrs.eventFilters = asyncQueueCreation.getGatewayEventFilters();
@@ -289,10 +288,6 @@ public class AsyncEventQueueFactoryImpl implements AsyncEventQueueFactory {
     this.attrs.isBucketSorted = isbucketSorted;
     return this;
   }
-  public AsyncEventQueueFactory setIsHDFSQueue(boolean isHDFSQueue) {
-    this.attrs.isHDFSQueue = isHDFSQueue;
-    return this;
-  }
   public AsyncEventQueueFactory setIsMetaQueue(boolean isMetaQueue) {
     this.attrs.isMetaQueue = isMetaQueue;
     return this;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSIOException.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSIOException.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSIOException.java
deleted file mode 100644
index d9b6179..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSIOException.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * 
- */
-package com.gemstone.gemfire.cache.hdfs;
-
-import com.gemstone.gemfire.GemFireIOException;
-
-/**
- * Thrown when an error has occurred while attempted to use
- * the HDFS file system. This error may indicate a failure of the HDFS
- * system.
- * 
- * 
- * @since 7.5
- * 
- */
-public class HDFSIOException extends GemFireIOException {
-
-  /**
-   * @param message
-   * @param cause
-   */
-  public HDFSIOException(String message, Throwable cause) {
-    super(message, cause);
-    // TODO Auto-generated constructor stub
-  }
-
-  /**
-   * @param message
-   */
-  public HDFSIOException(String message) {
-    super(message);
-    // TODO Auto-generated constructor stub
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStore.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStore.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStore.java
deleted file mode 100644
index 45ba370..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStore.java
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.cache.hdfs;
-
-import com.gemstone.gemfire.cache.wan.GatewaySender;
-
-/**
- * HDFS stores provide a means of persisting data on HDFS. There can be multiple
- * instances of HDFS stores in a cluster. The regions connected using an HDFS
- * store will share the same HDFS persistence attributes. A user will normally
- * perform the following steps to enable HDFS persistence for a region:
- * <ol>
- * <li>[Optional] Creates a DiskStore for HDFS buffer reliability (HDFS buffers
- * will be persisted locally till data lands on HDFS)
- * <li>Creates a HDFS Store (connects to DiskStore created earlier)
- * <li>Creates a Region connected to HDFS Store
- * <li>Uses region API to create and query data
- * </ol>
- * <p>
- * Instances of this interface are created using {@link HDFSStoreFactory#create}
- * 
- */
-
-public interface HDFSStore {
-  public static final String DEFAULT_HOME_DIR = "gemfire";
-  public static final float DEFAULT_BLOCK_CACHE_SIZE = 10f;
-  public static final int DEFAULT_WRITE_ONLY_FILE_SIZE_LIMIT = 256;
-  public static final int DEFAULT_WRITE_ONLY_FILE_ROLLOVER_INTERVAL = 3600;
-
-  public static final int DEFAULT_BATCH_SIZE_MB = 32;
-  public static final int DEFAULT_BATCH_INTERVAL_MILLIS = 60000;
-  public static final boolean DEFAULT_WRITEONLY_HDFSSTORE = false;
-  public static final boolean DEFAULT_BUFFER_PERSISTANCE = GatewaySender.DEFAULT_PERSISTENCE_ENABLED;
-  public static final boolean DEFAULT_DISK_SYNCHRONOUS = GatewaySender.DEFAULT_DISK_SYNCHRONOUS;
-  public static final int DEFAULT_MAX_BUFFER_MEMORY = GatewaySender.DEFAULT_MAXIMUM_QUEUE_MEMORY;
-  public static final int DEFAULT_DISPATCHER_THREADS = GatewaySender.DEFAULT_HDFS_DISPATCHER_THREADS;
-
-  public static final boolean DEFAULT_MINOR_COMPACTION = true;
-  public static final int DEFAULT_MINOR_COMPACTION_THREADS = 10;
-  public static final boolean DEFAULT_MAJOR_COMPACTION = true;
-  public static final int DEFAULT_MAJOR_COMPACTION_THREADS = 2;
-  public static final int DEFAULT_INPUT_FILE_SIZE_MAX_MB = 512;
-  public static final int DEFAULT_INPUT_FILE_COUNT_MAX = 10;
-  public static final int DEFAULT_INPUT_FILE_COUNT_MIN = 4;
-
-  public static final int DEFAULT_MAJOR_COMPACTION_INTERVAL_MINS = 720;
-  public static final int DEFAULT_OLD_FILE_CLEANUP_INTERVAL_MINS = 30;
-
-  /**
-   * @return A unique identifier for the HDFSStore
-   */
-  public String getName();
-
-  /**
-   * HDFSStore persists data on a HDFS cluster identified by cluster's NameNode
-   * URL or NameNode Service URL. NameNode URL can also be provided via
-   * hdfs-site.xml (see HDFSClientConfigFile). If the NameNode url is missing
-   * HDFSStore creation will fail. HDFS client can also load hdfs configuration
-   * files in the classpath. The following precedence order is applied
-   * <ol>
-   * <li>URL explicitly configured in the HdfsStore
-   * <li>URL provided in client configuration file:
-   * {@link #getHDFSClientConfigFile()}
-   * <li>URL provided in default configuration files loaded by hdfs-client
-   * </ol>
-   * 
-   * HDFSStore will use the selected URL only. It will fail if the selected URL
-   * is not reachable.
-   * 
-   * @return Namenode url explicitly configured by user
-   */
-  public String getNameNodeURL();
-
-  /**
-   * HomeDir is the HDFS directory path in which HDFSStore stores files. The
-   * value must not contain the NameNode URL. The owner of this node's JVM
-   * process must have read and write access to this directory. The path could
-   * be absolute or relative. If a relative path for HomeDir is provided, then
-   * the HomeDir is created relative to /user/JVM_owner_name or, if specified,
-   * relative to directory specified by the hdfs-root-dir property. As a best
-   * practice, HDFS store directories should be created relative to a single
-   * HDFS root directory. As an alternative, an absolute path beginning with the
-   * "/" character to override the default root location can be provided.
-   * 
-   * @return path
-   */
-  public String getHomeDir();
-
-  /**
-   * The full path to the HDFS client configuration file, for e.g. hdfs-site.xml
-   * or core-site.xml. This file must be accessible to any node where an
-   * instance of this HDFSStore will be created. If each node has a local copy
-   * of this configuration file, it is important for all the copies to be
-   * "identical". Alternatively, by default HDFS client can also load some HDFS
-   * configuration files if added in the classpath.
-   * 
-   * @return path
-   */
-  public String getHDFSClientConfigFile();
-
-  /**
-   * The maximum amount of memory in megabytes to be used by HDFSStore.
-   * HDFSStore buffers data in memory to optimize HDFS IO operations. Once the
-   * configured memory is utilized, data may overflow to disk.
-   * 
-   * @return max memory in MB
-   */
-  public int getMaxMemory();
-
-  /**
-   * @return the percentage of the heap to use for the block cache in the range
-   *         0 ... 100
-   */
-  public float getBlockCacheSize();
-
-  /**
-   * HDFSStore buffer data is persisted on HDFS in batches. The BatchSize
-   * defines the maximum size (in megabytes) of each batch that is written to
-   * HDFS. This parameter, along with BatchInterval determines the rate at which
-   * data is persisted on HDFS. A higher value causes fewer and bigger batches
-   * to be persisted to HDFS and hence big files are created on HDFS. But,
-   * bigger batches consume more memory.
-   * 
-   * @return batch size in MB
-   */
-  public int getBatchSize();
-
-  /**
-   * HDFSStore buffer data is persisted on HDFS in batches, and the
-   * BatchInterval defines the number of milliseconds that can elapse between
-   * writing batches to HDFS. This parameter, along with BatchSize determines
-   * the rate at which data is persisted on HDFS.
-   * 
-   * @return batch interval in milliseconds
-   */
-  public int getBatchInterval();
-
-  /**
-   * The maximum number of threads (per region) used to write batches to HDFS.
-   * If you have a large number of clients that add or update data in a region,
-   * then you may need to increase the number of dispatcher threads to avoid
-   * bottlenecks when writing data to HDFS.
-   * 
-   * @return The maximum number of threads
-   */
-  public int getDispatcherThreads();
-
-  /**
-   * Configure if HDFSStore in-memory buffer data, that has not been persisted
-   * on HDFS yet, should be persisted to a local disk to prevent buffer data
-   * loss. Persisting buffer data may impact write performance. If performance
-   * is critical and buffer data loss is acceptable, disable persistence.
-   * 
-   * @return true if buffer is persisted locally
-   */
-  public boolean getBufferPersistent();
-
-  /**
-   * The named DiskStore to use for any local disk persistence needs of
-   * HDFSStore, for e.g. store's buffer persistence and buffer overflow. If you
-   * specify a value, the named DiskStore must exist. If you specify a null
-   * value or you omit this option, default DiskStore is used.
-   * 
-   * @return disk store name
-   */
-  public String getDiskStoreName();
-
-  /**
-   * HDFS buffers can be persisted on local disk. Each region update record is
-   * written to the disk synchronously if synchronous disk write is enabled.
-   * Enable this option if the data being persisted is critical and no record
-   * should be lost in case of a crash. This high reliability mode may increase
-   * write latency. If synchronous mode is disabled, data is persisted in
-   * batches which usually results in better performance.
-   * 
-   * @return true if enabled
-   */
-  public boolean getSynchronousDiskWrite();
-
-  /**
-   * For HDFS write-only regions, this defines the maximum size (in megabytes)
-   * that an HDFS log file can reach before HDFSStore closes the file and begins
-   * writing to a new file. This option is ignored for HDFS read/write regions.
-   * Keep in mind that the files are not available for MapReduce processing
-   * until the file is closed; you can also set WriteOnlyFileRolloverInterval to
-   * specify the maximum amount of time an HDFS log file remains open.
-   * 
-   * @return max file size in MB.
-   */
-  public int getWriteOnlyFileRolloverSize();
-
-  /**
-   * For HDFS write-only regions, this defines the number of seconds that can
-   * elapse before HDFSStore closes an HDFS file and begins writing to a new
-   * file. This configuration is ignored for HDFS read/write regions.
-   * 
-   * @return interval in seconds
-   */
-  public int getWriteOnlyFileRolloverInterval();
-
-  /**
-   * Minor compaction reorganizes data in files to optimize read performance and
-   * reduce number of files created on HDFS. Minor compaction process can be
-   * I/O-intensive, tune the performance of minor compaction using
-   * MinorCompactionThreads. Minor compaction is not applicable to write-only
-   * regions.
-   * 
-   * @return true if auto minor compaction is enabled
-   */
-  public boolean getMinorCompaction();
-
-  /**
-   * The maximum number of threads that HDFSStore uses to perform minor
-   * compaction. You can increase the number of threads used for compaction as
-   * necessary in order to fully utilize the performance of your HDFS cluster.
-   * Minor compaction is not applicable to write-only regions.
-   * 
-   * @return maximum number of threads executing minor compaction
-   */
-  public int getMinorCompactionThreads();
-
-  /**
-   * Major compaction removes old values of a key and deleted records from the
-   * HDFS files, which can save space in HDFS and improve performance when
-   * reading from HDFS. As major compaction process can be long-running and
-   * I/O-intensive, tune the performance of major compaction using
-   * MajorCompactionInterval and MajorCompactionThreads. Major compaction is not
-   * applicable to write-only regions.
-   * 
-   * @return true if auto major compaction is enabled
-   */
-  public boolean getMajorCompaction();
-
-  /**
-   * The number of minutes after which HDFSStore performs the next major
-   * compaction cycle. Major compaction is not applicable to write-only regions.
-   * 
-   * @return interval in minutes
-   */
-  public int getMajorCompactionInterval();
-
-  /**
-   * The maximum number of threads that HDFSStore uses to perform major
-   * compaction. You can increase the number of threads used for compaction as
-   * necessary in order to fully utilize the performance of your HDFS cluster.
-   * Major compaction is not applicable to write-only regions.
-   * 
-   * @return maximum number of threads executing major compaction
-   */
-  public int getMajorCompactionThreads();
-
-  /**
-   * HDFSStore may create new files as part of periodic maintenance activity. It
-   * deletes old files asynchronously. PurgeInterval defines the number of
-   * minutes for which old files will remain available to be consumed
-   * externally, e.g. read by MR jobs. After this interval, old files are
-   * deleted. This configuration is not applicable to write-only regions
-   * 
-   * @return old file purge interval in minutes
-   */
-  public int getPurgeInterval();
-
-  /**
-   * Permanently deletes all HDFS files associated with this {@link HDFSStore}.
-   * This operation will fail if any region is still using this store for
-   * persistence.
-   * 
-   * @exception IllegalStateException
-   *              if any region using this hdfsStore still exists
-   */
-  public void destroy();
-
-  /**
-   * @return new instance of mutator object that can be used to alter properties
-   *         of this store
-   */
-  public HDFSStoreMutator createHdfsStoreMutator();
-
-  /**
-   * Identifies attributes configured in {@link HDFSStoreMutator} and applies
-   * the new attribute values to this instance of {@link HDFSStore} dynamically.
-   * Any property which is not set in {@link HDFSStoreMutator} remains
-   * unaltered. In most cases altering the attributes does not cause existing
-   * operations to terminate. The altered attributes are used in the next cycle
-   * of the operation they impact.
-   * 
-   * @return hdfsStore reference representing the old {@link HDFSStore}
-   */
-  public HDFSStore alter(HDFSStoreMutator mutator);
-
-  /**
-   * A file larger than this size, in megabytes, will not be compacted by minor
-   * compactor. Increasing this value will result in compaction of bigger files.
-   * This will lower the number of files on HDFS at the cost of increased IO.
-   * This option is for advanced users and will need tuning in special cases
-   * only. This option is not applicable to write-only regions.
-   * 
-   * @return size threshold (in MB)
-   */
-  public int getInputFileSizeMax();
-
-  /**
-   * A minimum number of files must exist in a bucket directory on HDFS before
-   * minor compaction will start compaction. Keeping a higher value for this
-   * option will reduce the frequency of minor compaction, which in turn may
-   * result in reduced IO overhead. However it may result in increased pressure
-   * on HDFS NameNode. This option is for advanced users and will need tuning in
-   * special cases only. This option is not applicable to write-only regions.
-   * 
-   * @return minimum number of files for minor compaction to get triggered
-   */
-  public int getInputFileCountMin();
-
-  /**
-   * The maximum number of files compacted by Minor compactor in a cycle.
-   * Keeping a higher value for this option will reduce the frequency of minor
-   * compaction, which in turn may result in reduced IO overhead. However it may
-   * result in large number of concurrent IO operations which in-turn may
-   * degrade the performance. This option is for advanced users and will need
-   * tuning in special cases only. This option is not applicable to write-only
-   * regions.
-   * 
-   * @return maximum number of files minor compacted in one cycle
-   */
-  public int getInputFileCountMax();
-}
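
The workflow the removed javadoc outlines, in sketch form against the pre-removal API: "cache" is assumed to be an existing Cache, the NameNode URL and the store, disk store, and region names are placeholders, and the setHDFSStoreName(...) call on the region factory is an assumption; the region-side setter does not appear in this diff.

    // 1. Optional DiskStore for buffer reliability while data is in flight to HDFS.
    cache.createDiskStoreFactory().create("hdfsBufferDisk");

    // 2. HDFS store pointing at the cluster's NameNode.
    HDFSStore store = cache.createHDFSStoreFactory()
        .setNameNodeURL("hdfs://namenode.example.com:8020")
        .setHomeDir("gemfire/demo")
        .setDiskStoreName("hdfsBufferDisk")
        .create("demoHdfsStore");

    // 3. Region connected to the HDFS store.
    Region<String, String> region = cache
        .<String, String>createRegionFactory(RegionShortcut.PARTITION)
        .setHDFSStoreName(store.getName())
        .create("hdfsBackedRegion");

    // 4. Ordinary region API; writes are buffered and flushed to HDFS in batches.
    region.put("k1", "v1");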

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreFactory.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreFactory.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreFactory.java
deleted file mode 100644
index 0d80a67..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreFactory.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.cache.hdfs;
-
-import com.gemstone.gemfire.GemFireConfigException;
-import com.gemstone.gemfire.cache.Cache;
-
-/**
- * Factory for creating instances of {@link HDFSStore}. To get an instance of
- * this factory call Cache#createHDFSStoreFactory.
- * <P>
- * Usage
- * <ol>
- * <li> configure factory using <code>set</code> methods
- * <li> call {@link #create} to produce a HDFSStore instance.
- * </ol>
- * 
- */
-public interface HDFSStoreFactory {
-
-  /**
-   * @see HDFSStore#getName()
-   */
-  public HDFSStoreFactory setName(String name);
-
-  /**
-   * @see HDFSStore#getNameNodeURL()
-   */
-  public HDFSStoreFactory setNameNodeURL(String url);
-
-  /**
-   * @see HDFSStore#getHomeDir()
-   */
-  public HDFSStoreFactory setHomeDir(String dir);
-
-  /**
-   * @see HDFSStore#getHDFSClientConfigFile()
-   */
-  public HDFSStoreFactory setHDFSClientConfigFile(String filePath);
-
-  /**
-   * @see HDFSStore#getHDFSClientConfigFile()
-   * @exception IllegalArgumentException
-   *              if the {@code value} is less than 0 or more than 100
-   */
-  public HDFSStoreFactory setBlockCacheSize(float value);
-
-  /**
-   * Default value {@link HDFSStore#DEFAULT_WRITE_ONLY_FILE_SIZE_LIMIT}
-   * @see HDFSStore#getWriteOnlyFileRolloverSize()
-   * @exception IllegalArgumentException
-   *              if the {@code value} is less than 0 
-   */
-  public HDFSStoreFactory setWriteOnlyFileRolloverSize(int maxFileSize);
-
-  /**
-   * Default value {@link HDFSStore#DEFAULT_WRITE_ONLY_FILE_ROLLOVER_INTERVAL}
-   * @see HDFSStore#getWriteOnlyFileRolloverInterval()
-   * @exception IllegalArgumentException
-   *              if the {@code value} is less than 0 
-   */
-  public HDFSStoreFactory setWriteOnlyFileRolloverInterval(int interval);
-
-  /**
-   * Default value {@link HDFSStore#DEFAULT_MINOR_COMPACTION}
-   * @see HDFSStore#getMinorCompaction()
-   */
-  public HDFSStoreFactory setMinorCompaction(boolean auto);
-
-  /**
-   * Default value {@link HDFSStore#DEFAULT_MINOR_COMPACTION_THREADS}
-   * @see HDFSStore#getMinorCompactionThreads()
-   * @exception IllegalArgumentException
-   *              if the {@code value} is less than 0 
-   */
-  public HDFSStoreFactory setMinorCompactionThreads(int count);
-
-  /**
-   * Default value {@link HDFSStore#DEFAULT_MAJOR_COMPACTION}
-   * @see HDFSStore#getMajorCompaction()
-   */
-  public HDFSStoreFactory setMajorCompaction(boolean auto);
-
-  /**
-   * Default value {@link HDFSStore#DEFAULT_MAJOR_COMPACTION_INTERVAL_MINS}
-   * @see HDFSStore#getMajorCompactionInterval()
-   * @exception IllegalArgumentException
-   *              if the {@code value} is less than 0 
-   */
-  public HDFSStoreFactory setMajorCompactionInterval(int interval);
-
-  /**
-   * Default value {@link HDFSStore#DEFAULT_MAJOR_COMPACTION_THREADS}
-   * @see HDFSStore#getMajorCompactionThreads()
-   * @exception IllegalArgumentException
-   *              if the {@code value} is less than 0 
-   */
-  public HDFSStoreFactory setMajorCompactionThreads(int count);
-
-  /**
-   * Default value {@link HDFSStore#DEFAULT_INPUT_FILE_SIZE_MAX_MB}
-   * @see HDFSStore#getInputFileSizeMax()
-   * @exception IllegalArgumentException
-   *              if the {@code value} is less than 0 
-   */
-  public HDFSStoreFactory setInputFileSizeMax(int size);
-
-  /**
-   * Default value {@link HDFSStore#DEFAULT_INPUT_FILE_COUNT_MIN}
-   * @see HDFSStore#getInputFileCountMin()
-   * @exception IllegalArgumentException
-   *              if the {@code value} is less than 0 
-   */
-  public HDFSStoreFactory setInputFileCountMin(int count);
-
-  /**
-   * Default value {@link HDFSStore#DEFAULT_INPUT_FILE_COUNT_MAX}
-   * @see HDFSStore#getInputFileCountMax()
-   * @exception IllegalArgumentException
-   *              if the {@code value} is less than 0 
-   */
-  public HDFSStoreFactory setInputFileCountMax(int count);
-
-  /**
-   * @see HDFSStore#getPurgeInterval()
-   * @exception IllegalArgumentException
-   *              if the {@code value} is less than 0 
-   */
-  public HDFSStoreFactory setPurgeInterval(int interval);
-
-  /**
-   * @see HDFSStore#getDiskStoreName()
-   */
-  public HDFSStoreFactory setDiskStoreName(String name);
-
-  /**
-   * @see HDFSStore#getMaxMemory()
-   * @exception IllegalArgumentException
-   *              if the {@code value} is less than 0 
-   */
-  public HDFSStoreFactory setMaxMemory(int memory);
-
-  /**
-   * @see HDFSStore#getBatchInterval()
-   * @exception IllegalArgumentException
-   *              if the {@code value} is less than 0 
-   */
-  public HDFSStoreFactory setBatchInterval(int interval);
-
-  /**
-   * @see HDFSStore#getBatchSize()
-   * @exception IllegalArgumentException
-   *              if the {@code value} is less than 0 
-   */
-  public HDFSStoreFactory setBatchSize(int size);
-
-  /**
-   * @see HDFSStore#getBufferPersistent()
-   */
-  public HDFSStoreFactory setBufferPersistent(boolean isPersistent);
-
-  /**
-   * @see HDFSStore#getSynchronousDiskWrite()
-   */
-  public HDFSStoreFactory setSynchronousDiskWrite(boolean isSynchronous);
-
-  /**
-   * @see HDFSStore#getDispatcherThreads()
-   * @exception IllegalArgumentException
-   *              if the {@code value} is less than 0 
-   */
-  public HDFSStoreFactory setDispatcherThreads(int dispatcherThreads);
-
-  /**
-   * Validates all attribute values and assigns defaults where applicable.
-   * Creates a new instance of {@link HDFSStore} based on the current attribute
-   * values configured in this factory.
-   * 
-   * @param name
-   *          the name of the HDFSStore
-   * @return the newly created HDFSStore.
-   * @throws GemFireConfigException
-   *           if the configuration is invalid
-   * @throws StoreExistsException
-   *           if a {@link HDFSStore} with the same name exists
-   */
-  public HDFSStore create(String name) throws GemFireConfigException, StoreExistsException;
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreMutator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreMutator.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreMutator.java
deleted file mode 100644
index d98c9cd..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreMutator.java
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.cache.hdfs;
-
-/**
- * HDFSStoreMutator provides a means to dynamically alter {@link HDFSStore}'s
- * behavior. Instances of this interface are created using
- * {@link HDFSStore#createHdfsStoreMutator} and applied using
- * {@link HDFSStore#alter}
- * 
- */
-public interface HDFSStoreMutator {
-  /**
-   * {@link HDFSStoreFactory#setWriteOnlyFileRolloverSize(int)}
-   */
-  public HDFSStoreMutator setWriteOnlyFileRolloverSize(int maxFileSize);
-
-  /**
-   * {@link HDFSStore#getWriteOnlyFileRolloverSize()}
-   * 
-   * @return value to be used when mutator is executed on hdfsStore. -1 if not
-   *         set
-   */
-  public int getWriteOnlyFileRolloverSize();
-
-  /**
-   * {@link HDFSStoreFactory#setWriteOnlyFileRolloverInterval(int)}
-   */
-  public HDFSStoreMutator setWriteOnlyFileRolloverInterval(int interval);
-
-  /**
-   * {@link HDFSStore#getWriteOnlyFileRolloverInterval()}
-   * 
-   * @return value to be used when mutator is executed on hdfsStore. -1 if not
-   *         set
-   */
-  public int getWriteOnlyFileRolloverInterval();
-
-  /**
-   * {@link HDFSStore#getMinorCompaction()}
-   * 
-   * @return value to be used when mutator is executed on hdfsStore. null if not
-   *         set
-   */
-  public Boolean getMinorCompaction();
-
-  /**
-   * {@link HDFSStoreFactory#setMinorCompaction(boolean)}
-   */
-  public HDFSStoreMutator setMinorCompaction(boolean auto);
-
-  /**
-   * {@link HDFSStoreFactory#setMinorCompactionThreads(int)}
-   */
-  public HDFSStoreMutator setMinorCompactionThreads(int count);
-
-  /**
-   * {@link HDFSStore#getMinorCompactionThreads()}
-   * 
-   * @return value to be used when mutator is executed on hdfsStore. -1 if not
-   *         set
-   */
-  public int getMinorCompactionThreads();
-
-  /**
-   * {@link HDFSStoreFactory#setMajorCompaction(boolean)}
-   */
-  public HDFSStoreMutator setMajorCompaction(boolean auto);
-
-  /**
-   * {@link HDFSStore#getMajorCompaction()}
-   * 
-   * @return value to be used when mutator is executed on hdfsStore. null if not
-   *         set
-   */
-  public Boolean getMajorCompaction();
-
-  /**
-   * {@link HDFSStoreFactory#setMajorCompactionInterval(int)}
-   */
-  public HDFSStoreMutator setMajorCompactionInterval(int interval);
-
-  /**
-   * {@link HDFSStore#getMajorCompactionInterval()}
-   * 
-   * @return value to be used when mutator is executed on hdfsStore. -1 if not
-   *         set
-   */
-  public int getMajorCompactionInterval();
-
-  /**
-   * {@link HDFSStoreFactory#setMajorCompactionThreads(int)}
-   */
-  public HDFSStoreMutator setMajorCompactionThreads(int count);
-
-  /**
-   * {@link HDFSStore#getMajorCompactionThreads()}
-   * 
-   * @return value to be used when mutator is executed on hdfsStore. -1 if not
-   *         set
-   */
-  public int getMajorCompactionThreads();
-
-  /**
-   * {@link HDFSStoreFactory#setInputFileSizeMax(int)}
-   */
-  public HDFSStoreMutator setInputFileSizeMax(int size);
-
-  /**
-   * {@link HDFSStore#getInputFileSizeMax()}
-   * 
-   * @return value to be used when mutator is executed on hdfsStore. -1 if not
-   *         set
-   */
-  public int getInputFileSizeMax();
-
-  /**
-   * {@link HDFSStoreFactory#setInputFileCountMin(int)}
-   */
-  public HDFSStoreMutator setInputFileCountMin(int count);
-
-  /**
-   * {@link HDFSStore#getInputFileCountMin()}
-   * 
-   * @return value to be used when mutator is executed on hdfsStore. -1 if not
-   *         set
-   */
-  public int getInputFileCountMin();
-
-  /**
-   * {@link HDFSStoreFactory#setInputFileCountMax(int)}
-   */
-  public HDFSStoreMutator setInputFileCountMax(int count);
-
-  /**
-   * {@link HDFSStore#getInputFileCountMax()}
-   * 
-   * @return value to be used when mutator is executed on hdfsStore. -1 if not
-   *         set
-   */
-  public int getInputFileCountMax();
-
-  /**
-   * {@link HDFSStoreFactory#setPurgeInterval(int)}
-   */
-  public HDFSStoreMutator setPurgeInterval(int interval);
-
-  /**
-   * {@link HDFSStore#getPurgeInterval()}
-   * 
-   * @return value to be used when mutator is executed on hdfsStore. -1 if not
-   *         set
-   */
-  public int getPurgeInterval();
-
-  /**
-   * {@link HDFSStore#getBatchSize()}
-   * 
-   * @return value to be used when mutator is executed on hdfsStore. -1 if not
-   *         set
-   */
-  public int getBatchSize();
-
-  /**
-   * {@link HDFSStoreFactory#setBatchSize(int)}
-   */
-  public HDFSStoreMutator setBatchSize(int size);
-
-  /**
-   * {@link HDFSStore#getBatchInterval()}
-   * 
-   * @return value to be used when mutator is executed on hdfsStore. -1 if not
-   *         set
-   */
-  public int getBatchInterval();
-
-  /**
-   * {@link HDFSStoreFactory#setBatchInterval(int)}
-   */
-  public HDFSStoreMutator setBatchInterval(int interval);
-}
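
A minimal sketch of how the mutator was meant to be applied, assuming "store" is an existing HDFSStore from the pre-removal API:

    // Alter two attributes of a live store; anything not set on the mutator
    // is left unchanged, per the removed javadoc.
    HDFSStoreMutator mutator = store.createHdfsStoreMutator();
    mutator.setBatchSize(64)                  // megabytes per batch written to HDFS
           .setMajorCompactionInterval(1440); // minutes between major compaction cycles
    store.alter(mutator);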

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/StoreExistsException.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/StoreExistsException.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/StoreExistsException.java
deleted file mode 100644
index de21b23..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/StoreExistsException.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.cache.hdfs;
-
-import com.gemstone.gemfire.cache.CacheException;
-
-/**
- * Thrown when attempting to create a {@link HDFSStore} if one already exists.
- * 
- */
-public class StoreExistsException extends CacheException {
-  private static final long serialVersionUID = 1L;
-
-  public StoreExistsException(String storeName) {
-    super(storeName);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/FailureTracker.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/FailureTracker.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/FailureTracker.java
deleted file mode 100644
index 789d497..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/FailureTracker.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * 
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import org.apache.commons.lang.mutable.MutableInt;
-import org.apache.commons.lang.mutable.MutableLong;
-
-/**
- * Class for tracking failures and backing off if necessary.
- *
- */
-public class FailureTracker  extends ThreadLocal<MutableInt> {
-  private final long minTime;
-  private final long maxTime;
-  private final float rate;
-  private final FailureCount waitTime = new FailureCount();
-  
-  
-  /**
-   * @param minTime the minimum wait time after a failure in ms.
-   * @param maxTime the maximum wait time after a failure, in ms.
-   * @param rate the rate of growth of the failures
-   */
-  public FailureTracker(long minTime, long maxTime, float rate) {
-    this.minTime = minTime;
-    this.maxTime = maxTime;
-    this.rate = rate;
-  }
-  
-  /**
-   * Wait for the current wait time.
-   */
-  public void sleepIfRetry() throws InterruptedException {
-      Thread.sleep(waitTime());
-  }
-
-  /**
-   * @return the wait time = rate^(num_failures) * minTime
-   */
-  public long waitTime() {
-    return waitTime.get().longValue();
-  }
-  
-  public void record(boolean success) {
-    if(success) {
-      success();
-    } else {
-      failure();
-    }
-    
-  }
-  
-  public void success() {
-    waitTime.get().setValue(0);
-    
-  }
-  public void failure() {
-    long current = waitTime.get().intValue();
-    if(current == 0) {
-      current=minTime;
-    }
-    else if(current < maxTime) {
-      current = (long) (current * rate);
-    }
-    waitTime.get().setValue(Math.min(current, maxTime));
-  }
-
-
-  private static class FailureCount extends ThreadLocal<MutableLong> {
-
-    @Override
-    protected MutableLong initialValue() {
-      return new MutableLong();
-    }
-  }
-
-
-  
-}
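
The class above implements a simple geometric back-off; a sketch of the retry loop it supports is below, assuming the enclosing method declares InterruptedException and doWrite() is a placeholder for the real HDFS operation being retried.

    // Back off between 100 ms and 10 s, doubling the wait after each consecutive failure.
    FailureTracker tracker = new FailureTracker(100, 10000, 2.0f);
    boolean done = false;
    while (!done) {
      tracker.sleepIfRetry();        // sleeps 0 ms until the first failure is recorded
      boolean success = doWrite();   // placeholder for the guarded operation
      tracker.record(success);       // success resets the wait; failure grows it toward maxTime
      done = success;
    }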

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/FlushObserver.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/FlushObserver.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/FlushObserver.java
deleted file mode 100644
index f69b3dc..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/FlushObserver.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.util.concurrent.TimeUnit;
-
-/**
- * Observes and reacts to flush events.
- * 
- */
-public interface FlushObserver {
-  public interface AsyncFlushResult {
-    /**
-     * Waits for the most recently enqueued batch to completely flush.
-     * 
-     * @param time the time to wait
-     * @param unit the time unit
-     * @return true if flushed before the timeout
-     * @throws InterruptedException interrupted while waiting
-     */
-    public boolean waitForFlush(long time, TimeUnit unit) throws InterruptedException;
-  }
-
-  /**
-   * Returns true when the queued events should be drained from the queue
-   * immediately.
-   * 
-   * @return true if draining
-   */
-  boolean shouldDrainImmediately();
-  
-  /**
-   * Begins the flushing the queued events.
-   * 
-   * @return the async result
-   */
-  public AsyncFlushResult flush();
-}
-
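
A sketch of a caller driving the removed interface, assuming "observer" is an existing FlushObserver, TimeUnit is java.util.concurrent.TimeUnit, and the enclosing method declares InterruptedException:

    // Start flushing the queued events and wait up to 30 seconds for the most
    // recently enqueued batch to land.
    FlushObserver.AsyncFlushResult result = observer.flush();
    if (!result.waitForFlush(30, TimeUnit.SECONDS)) {
      System.out.println("flush did not complete within 30 seconds");
    }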


[35/63] [abbrv] incubator-geode git commit: Merge branch 'develop' into feature/GEODE-17-2

Posted by kl...@apache.org.
Merge branch 'develop' into feature/GEODE-17-2


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/0c0825af
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/0c0825af
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/0c0825af

Branch: refs/heads/feature/GEODE-1276
Commit: 0c0825afc2ca492d17b6b1332ee7e5278282bb7f
Parents: f446bbe a254c42
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Fri Apr 29 08:22:01 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Fri Apr 29 08:22:01 2016 -0700

----------------------------------------------------------------------
 .../internal/cache/FixedPRSinglehopDUnitTest.java   | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)
----------------------------------------------------------------------



[29/63] [abbrv] incubator-geode git commit: GEODE-1240: Changed the test to use Awaitility with a maximum timeout period. This might work better than the time sensitive conditionals that this test uses.

Posted by kl...@apache.org.
GEODE-1240: Changed the test to use Awaitility with a maximum timeout period. This might work better than the time sensitive conditionals that this test uses.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/9fbf219c
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/9fbf219c
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/9fbf219c

Branch: refs/heads/feature/GEODE-1276
Commit: 9fbf219c383ff499fa34c7cf9ad846ecdffcf3d4
Parents: ecbbf76
Author: Udo Kohlmeyer <uk...@pivotal.io>
Authored: Thu Apr 28 04:14:00 2016 +1000
Committer: Udo Kohlmeyer <uk...@pivotal.io>
Committed: Thu Apr 28 11:32:48 2016 +1000

----------------------------------------------------------------------
 .../cache30/ClientMembershipDUnitTest.java      | 799 +++++++++----------
 1 file changed, 398 insertions(+), 401 deletions(-)
----------------------------------------------------------------------
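
For readers unfamiliar with the library, the Awaitility idiom the diff below adopts is, in sketch form (condition() stands in for whatever boolean check the test needs):

    import com.jayway.awaitility.Awaitility;
    import java.util.concurrent.TimeUnit;

    // Poll every 200 ms, failing the test if the condition is not met within 60 seconds.
    Awaitility.await()
        .pollInterval(200, TimeUnit.MILLISECONDS)
        .atMost(60, TimeUnit.SECONDS)
        .until(() -> condition());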


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9fbf219c/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java
index 46e91b0..9036e5e 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java
@@ -16,24 +16,7 @@
  */
 package com.gemstone.gemfire.cache30;
 
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-
-import org.junit.experimental.categories.Category;
-
 import com.gemstone.gemfire.InternalGemFireException;
-import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.Statistics;
 import com.gemstone.gemfire.StatisticsType;
 import com.gemstone.gemfire.cache.AttributesFactory;
@@ -48,27 +31,23 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.DurableClientAttributes;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
-import com.gemstone.gemfire.internal.SocketCreator;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.tier.InternalClientMembership;
 import com.gemstone.gemfire.internal.cache.tier.sockets.AcceptorImpl;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ServerConnection;
-import com.gemstone.gemfire.internal.logging.LocalLogWriter;
-import com.gemstone.gemfire.internal.logging.InternalLogWriter;
 import com.gemstone.gemfire.management.membership.ClientMembership;
 import com.gemstone.gemfire.management.membership.ClientMembershipEvent;
 import com.gemstone.gemfire.management.membership.ClientMembershipListener;
-import com.gemstone.gemfire.test.dunit.Assert;
-import com.gemstone.gemfire.test.dunit.Host;
-import com.gemstone.gemfire.test.dunit.IgnoredException;
-import com.gemstone.gemfire.test.dunit.Invoke;
-import com.gemstone.gemfire.test.dunit.NetworkUtils;
-import com.gemstone.gemfire.test.dunit.SerializableCallable;
-import com.gemstone.gemfire.test.dunit.SerializableRunnable;
-import com.gemstone.gemfire.test.dunit.VM;
-import com.gemstone.gemfire.test.dunit.Wait;
-import com.gemstone.gemfire.test.dunit.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.*;
 import com.gemstone.gemfire.test.junit.categories.FlakyTest;
+import com.jayway.awaitility.Awaitility;
+import org.junit.experimental.categories.Category;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.util.*;
+import java.util.concurrent.TimeUnit;
 
 /**
  * Tests the ClientMembership API including ClientMembershipListener.
@@ -79,13 +58,13 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
 
   protected static final boolean CLIENT = true;
   protected static final boolean SERVER = false;
-  
+
   protected static final int JOINED = 0;
   protected static final int LEFT = 1;
   protected static final int CRASHED = 2;
-  
+
   private static Properties properties;
-    
+
   public ClientMembershipDUnitTest(String name) {
     super(name);
   }
@@ -94,16 +73,17 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
   public final void postTearDownCacheTestCase() throws Exception {
     Invoke.invokeInEveryVM((() -> cleanup()));
   }
-  
+
   public static void cleanup() {
     properties = null;
     InternalClientMembership.unregisterAllListeners();
   }
-  
+
   private void waitForAcceptsInProgressToBe(final int target)
-    throws Exception {
+      throws Exception {
     WaitCriterion ev = new WaitCriterion() {
       String excuse;
+
       public boolean done() {
         int actual = getAcceptsInProgress();
         if (actual == getAcceptsInProgress()) {
@@ -112,13 +92,22 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
         excuse = "accepts in progress (" + actual + ") never became " + target;
         return false;
       }
+
       public String description() {
         return excuse;
       }
     };
-    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
+    Awaitility.await().pollInterval(200, TimeUnit.MILLISECONDS).atMost(60, TimeUnit.SECONDS)
+        .until(() -> {
+          int actual = getAcceptsInProgress();
+          if (actual == getAcceptsInProgress()) {
+            return true;
+          }
+          return false;
+        });
+//    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
   }
-  
+
   protected int getAcceptsInProgress() {
     StatisticsType st = InternalDistributedSystem.getAnyInstance().findType("CacheServerStats");
     Statistics[] s = InternalDistributedSystem.getAnyInstance().findStatisticsByType(st);
@@ -127,8 +116,9 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
 
   protected static Socket meanSocket;
 
-  /** test that a server times out waiting for a handshake that
-      never arrives. 
+  /**
+   * test that a server times out waiting for a handshake that
+   * never arrives.
    */
   public void testConnectionTimeout() throws Exception {
     IgnoredException.addIgnoredException("failed accepting client connection");
@@ -137,101 +127,97 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     final VM vm0 = host.getVM(0);
     System.setProperty(AcceptorImpl.ACCEPT_TIMEOUT_PROPERTY_NAME, "1000");
     try {
-    final int port = startBridgeServer(0);
-//    AsyncInvocation ai = null;
-    try {
-      assertTrue(port != 0);
-      SerializableRunnable createMeanSocket = new CacheSerializableRunnable("Connect to server with socket") {
-        public void run2() throws CacheException {
-          getCache(); // create a cache so we have stats
-          System.out.println("connecting to cache server with socket");
-          try {
-            InetAddress addr = InetAddress.getByName(hostName);
-            meanSocket = new Socket(addr, port);
+      final int port = startBridgeServer(0);
+      //    AsyncInvocation ai = null;
+      try {
+        assertTrue(port != 0);
+        SerializableRunnable createMeanSocket = new CacheSerializableRunnable("Connect to server with socket") {
+          public void run2() throws CacheException {
+            getCache(); // create a cache so we have stats
+            System.out.println("connecting to cache server with socket");
+            try {
+              InetAddress addr = InetAddress.getByName(hostName);
+              meanSocket = new Socket(addr, port);
+            } catch (Exception e) {
+              throw new RuntimeException("Test failed to connect or was interrupted", e);
+            }
           }
-          catch (Exception e) {
-            throw new RuntimeException("Test failed to connect or was interrupted", e);
+        };
+        SerializableRunnable closeMeanSocket = new CacheSerializableRunnable("close mean socket") {
+          public void run2() throws CacheException {
+            System.out.println("closing mean socket");
+            try {
+              meanSocket.close();
+            } catch (IOException ignore) {
+            }
           }
-        }
-      };
-      SerializableRunnable closeMeanSocket = new CacheSerializableRunnable("close mean socket") {
-        public void run2() throws CacheException {
+        };
+
+        assertEquals(0, getAcceptsInProgress());
+
+        System.out.println("creating mean socket");
+        vm0.invoke(createMeanSocket);
+        try {
+          System.out.println("waiting to see it connect on server");
+          waitForAcceptsInProgressToBe(1);
+        } finally {
           System.out.println("closing mean socket");
-          try {
-            meanSocket.close();
-          }
-          catch (IOException ignore) {
-          }
+          vm0.invoke(closeMeanSocket);
         }
-      };
+        System.out.println("waiting to see accept to go away on server");
+        waitForAcceptsInProgressToBe(0);
 
-      assertEquals(0, getAcceptsInProgress());
-      
-      System.out.println("creating mean socket");
-      vm0.invoke(createMeanSocket);
-      try {
-        System.out.println("waiting to see it connect on server");
-        waitForAcceptsInProgressToBe(1);
-      } finally {
-        System.out.println("closing mean socket");
-        vm0.invoke(closeMeanSocket);
-      }
-      System.out.println("waiting to see accept to go away on server");
-      waitForAcceptsInProgressToBe(0);
+        // now try it without a close. Server should timeout the mean connect
+        System.out.println("creating mean socket 2");
+        vm0.invoke(createMeanSocket);
+        try {
+          System.out.println("waiting to see it connect on server 2");
+          waitForAcceptsInProgressToBe(1);
+          System.out.println("waiting to see accept to go away on server without us closing");
+          waitForAcceptsInProgressToBe(0);
+        } finally {
+          System.out.println("closing mean socket 2");
+          vm0.invoke(closeMeanSocket);
+        }
 
-      // now try it without a close. Server should timeout the mean connect
-      System.out.println("creating mean socket 2");
-      vm0.invoke(createMeanSocket);
-      try {
-        System.out.println("waiting to see it connect on server 2");
-        waitForAcceptsInProgressToBe(1);
-        System.out.println("waiting to see accept to go away on server without us closing");
-        waitForAcceptsInProgressToBe(0);
+        //       SerializableRunnable denialOfService = new CacheSerializableRunnable("Do lots of connects") {
+        //         public void run2() throws CacheException {
+        //           int connectionCount = 0;
+        //           ArrayList al = new ArrayList(60000);
+        //           try {
+        //             InetAddress addr = InetAddress.getLocalHost();
+        //             for (;;) {
+        //               Socket s = new Socket(addr, port);
+        //               al.add(s);
+        //               connectionCount++;
+        //               getLogWriter().info("connected # " + connectionCount + " s=" + s);
+        // //               try {
+        // //                 s.close();
+        // //               } catch (IOException ignore) {}
+        //             }
+        //           }
+        //           catch (Exception e) {
+        //             getLogWriter().info("connected # " + connectionCount
+        //                                 + " stopped because of exception " + e);
+        //             Iterator it = al.iterator();
+        //             while (it.hasNext()) {
+        //               Socket s = (Socket)it.next();
+        //               try {
+        //                 s.close();
+        //               } catch (IOException ignore) {}
+        //             }
+        //           }
+        //         }
+        //       };
+        //       // now pretend to do a denial of service attack by doing a bunch of connects
+        //       // really fast and see what that does to the server's fds.
+        //       getLogWriter().info("doing denial of service attack");
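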
+        //       vm0.invoke(denialOfService);
+        //       // @todo darrel: check fd limit?
       } finally {
-        System.out.println("closing mean socket 2");
-        vm0.invoke(closeMeanSocket);
+        stopBridgeServers(getCache());
       }
-
-//       SerializableRunnable denialOfService = new CacheSerializableRunnable("Do lots of connects") {
-//         public void run2() throws CacheException {
-//           int connectionCount = 0;
-//           ArrayList al = new ArrayList(60000);
-//           try {
-//             InetAddress addr = InetAddress.getLocalHost();
-//             for (;;) {
-//               Socket s = new Socket(addr, port);
-//               al.add(s);
-//               connectionCount++;
-//               getLogWriter().info("connected # " + connectionCount + " s=" + s);
-// //               try {
-// //                 s.close();
-// //               } catch (IOException ignore) {}
-//             }
-//           }
-//           catch (Exception e) {
-//             getLogWriter().info("connected # " + connectionCount
-//                                 + " stopped because of exception " + e);
-//             Iterator it = al.iterator();
-//             while (it.hasNext()) {
-//               Socket s = (Socket)it.next();
-//               try {
-//                 s.close();
-//               } catch (IOException ignore) {}
-//             }
-//           }
-//         }
-//       };
-//       // now pretend to do a denial of service attack by doing a bunch of connects
-//       // really fast and see what that does to the server's fds.
-//       getLogWriter().info("doing denial of service attack");
-//       vm0.invoke(denialOfService);
-//       // @todo darrel: check fd limit?
-    }
-    finally {
-      stopBridgeServers(getCache());
-    }
-    }
-    finally {
+    } finally {
       System.getProperties().remove(AcceptorImpl.ACCEPT_TIMEOUT_PROPERTY_NAME);
     }
   }
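
The reworked testConnectionTimeout drives the server's handshake timeout with a
"mean" socket: a raw TCP connection that never sends the cache-client handshake, so
the acceptor (with ACCEPT_TIMEOUT_PROPERTY_NAME set to 1000 ms) has to time it out
on its own. A hedged sketch of that idea in isolation; hostName and port stand in
for the test's bridge-server address:

    import java.net.InetAddress;
    import java.net.Socket;

    // Connect, but never complete the handshake; the server-side accept is
    // expected to time out and acceptsInProgress to drop back to 0.
    Socket openMeanSocket(String hostName, int port) throws Exception {
      InetAddress addr = InetAddress.getByName(hostName);
      return new Socket(addr, port);
    }
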
@@ -241,12 +227,11 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     InternalClientMembership.setForceSynchronous(true);
     try {
       doTestBasicEvents();
-    }
-    finally {
+    } finally {
       InternalClientMembership.setForceSynchronous(false);
     }
   }
-  
+
   /**
    * Tests event notification methods on ClientMembership.
    */
@@ -254,13 +239,13 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     getSystem();
     doTestBasicEvents();
   }
-  
+
   public void doTestBasicEvents() throws Exception {
     final boolean[] fired = new boolean[3];
     final DistributedMember[] member = new DistributedMember[3];
     final String[] memberId = new String[3];
     final boolean[] isClient = new boolean[3];
-    
+
     ClientMembershipListener listener = new ClientMembershipListener() {
       public synchronized void memberJoined(ClientMembershipEvent event) {
         fired[JOINED] = true;
@@ -269,6 +254,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
         isClient[JOINED] = event.isClient();
         notify();
       }
+
       public synchronized void memberLeft(ClientMembershipEvent event) {
         fired[LEFT] = true;
         member[LEFT] = event.getMember();
@@ -276,6 +262,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
         isClient[LEFT] = event.isClient();
         notify();
       }
+
       public synchronized void memberCrashed(ClientMembershipEvent event) {
         fired[CRASHED] = true;
         member[CRASHED] = event.getMember();
@@ -285,11 +272,11 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       }
     };
     ClientMembership.registerClientMembershipListener(listener);
-    
+
     // test JOIN for server
     DistributedMember serverJoined = new TestDistributedMember("serverJoined");
     InternalClientMembership.notifyJoined(serverJoined, SERVER);
-    synchronized(listener) {
+    synchronized (listener) {
       if (!fired[JOINED]) {
         listener.wait(2000);
       }
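
The basic-events test relies on the classic monitor handshake between the listener
callback and the test thread: the callback records the event and calls notify()
while holding the listener's monitor, and the test thread waits on the same monitor
with a bounded timeout before asserting. The pattern in isolation (names below are
illustrative, not additional test code):

    final boolean[] fired = new boolean[1];
    ClientMembershipListener listener = new ClientMembershipListener() {
      public synchronized void memberJoined(ClientMembershipEvent event) {
        fired[0] = true;
        notify(); // wakes the test thread waiting on this listener
      }
      public synchronized void memberLeft(ClientMembershipEvent event) { }
      public synchronized void memberCrashed(ClientMembershipEvent event) { }
    };
    ClientMembership.registerClientMembershipListener(listener);

    InternalClientMembership.notifyJoined(new TestDistributedMember("test"), SERVER);
    synchronized (listener) {
      if (!fired[0]) {
        listener.wait(2000); // bounded, so a missed notification fails fast
      }
    }
    assertTrue(fired[0]);
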
@@ -309,7 +296,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     // test JOIN for client
     DistributedMember clientJoined = new TestDistributedMember("clientJoined");
     InternalClientMembership.notifyJoined(clientJoined, CLIENT);
-    synchronized(listener) {
+    synchronized (listener) {
       if (!fired[JOINED]) {
         listener.wait(2000);
       }
@@ -329,7 +316,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     // test LEFT for server
     DistributedMember serverLeft = new TestDistributedMember("serverLeft");
     InternalClientMembership.notifyLeft(serverLeft, SERVER);
-    synchronized(listener) {
+    synchronized (listener) {
       if (!fired[LEFT]) {
         listener.wait(2000);
       }
@@ -349,7 +336,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     // test LEFT for client
     DistributedMember clientLeft = new TestDistributedMember("clientLeft");
     InternalClientMembership.notifyLeft(clientLeft, CLIENT);
-    synchronized(listener) {
+    synchronized (listener) {
       if (!fired[LEFT]) {
         listener.wait(2000);
       }
@@ -369,7 +356,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     // test CRASHED for server
     DistributedMember serverCrashed = new TestDistributedMember("serverCrashed");
     InternalClientMembership.notifyCrashed(serverCrashed, SERVER);
-    synchronized(listener) {
+    synchronized (listener) {
       if (!fired[CRASHED]) {
         listener.wait(2000);
       }
@@ -389,7 +376,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     // test CRASHED for client
     DistributedMember clientCrashed = new TestDistributedMember("clientCrashed");
     InternalClientMembership.notifyCrashed(clientCrashed, CLIENT);
-    synchronized(listener) {
+    synchronized (listener) {
       if (!fired[CRASHED]) {
         listener.wait(2000);
       }
@@ -406,15 +393,15 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertTrue(isClient[CRASHED]);
     resetArraysForTesting(fired, member, memberId, isClient);
   }
-  
+
   /**
    * Resets all elements of arrays used for listener testing. Boolean values
    * are reset to false. String values are reset to null.
    */
-  private void resetArraysForTesting(boolean[] fired, 
-                                     DistributedMember[] member,
-                                     String[] memberId, 
-                                     boolean[] isClient) {
+  private void resetArraysForTesting(boolean[] fired,
+      DistributedMember[] member,
+      String[] memberId,
+      boolean[] isClient) {
     for (int i = 0; i < fired.length; i++) {
       fired[i] = false;
       member[i] = null;
@@ -422,7 +409,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       isClient[i] = false;
     }
   }
-  
+
   /**
    * Tests unregisterClientMembershipListener to ensure that no further events
    * are delivered to unregistered listeners.
@@ -432,7 +419,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     final DistributedMember[] member = new DistributedMember[1];
     final String[] memberId = new String[1];
     final boolean[] isClient = new boolean[1];
-    
+
     getSystem();
 
     ClientMembershipListener listener = new ClientMembershipListener() {
@@ -443,17 +430,19 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
         isClient[0] = event.isClient();
         notify();
       }
+
       public void memberLeft(ClientMembershipEvent event) {
       }
+
       public void memberCrashed(ClientMembershipEvent event) {
       }
     };
     ClientMembership.registerClientMembershipListener(listener);
-    
+
     // fire event to make sure listener is registered
     DistributedMember clientJoined = new TestDistributedMember("clientJoined");
     InternalClientMembership.notifyJoined(clientJoined, true);
-    synchronized(listener) {
+    synchronized (listener) {
       if (!fired[0]) {
         listener.wait(2000);
       }
@@ -471,7 +460,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     // unregister and verify listener is not notified
     ClientMembership.unregisterClientMembershipListener(listener);
     InternalClientMembership.notifyJoined(clientJoined, true);
-    synchronized(listener) {
+    synchronized (listener) {
       listener.wait(20);
     }
     assertFalse(fired[0]);
@@ -479,14 +468,14 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertNull(memberId[0]);
     assertFalse(isClient[0]);
   }
-  
+
   public void testMultipleListeners() throws Exception {
     final int NUM_LISTENERS = 4;
     final boolean[] fired = new boolean[NUM_LISTENERS];
     final DistributedMember[] member = new DistributedMember[NUM_LISTENERS];
     final String[] memberId = new String[NUM_LISTENERS];
     final boolean[] isClient = new boolean[NUM_LISTENERS];
-    
+
     getSystem();
 
     final ClientMembershipListener[] listeners = new ClientMembershipListener[NUM_LISTENERS];
@@ -504,17 +493,19 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
           isClient[whichListener] = event.isClient();
           notify();
         }
+
         public void memberLeft(ClientMembershipEvent event) {
         }
+
         public void memberCrashed(ClientMembershipEvent event) {
         }
       };
     }
-    
+
     final DistributedMember clientJoined = new TestDistributedMember("clientJoined");
     InternalClientMembership.notifyJoined(clientJoined, true);
     for (int i = 0; i < NUM_LISTENERS; i++) {
-      synchronized(listeners[i]) {
+      synchronized (listeners[i]) {
         listeners[i].wait(20);
       }
       assertFalse(fired[i]);
@@ -522,17 +513,17 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       assertNull(memberId[i]);
       assertFalse(isClient[i]);
     }
-    
+
     // attempt to register same listener twice... 2nd reg should be ignored
     // failure would cause an assertion failure in memberJoined impl
     ClientMembership.registerClientMembershipListener(listeners[0]);
     ClientMembership.registerClientMembershipListener(listeners[0]);
-    
-    ClientMembershipListener[] registeredListeners = 
-      ClientMembership.getClientMembershipListeners();
+
+    ClientMembershipListener[] registeredListeners =
+        ClientMembership.getClientMembershipListeners();
     assertEquals(1, registeredListeners.length);
     assertEquals(listeners[0], registeredListeners[0]);
-    
+
     ClientMembership.registerClientMembershipListener(listeners[1]);
     registeredListeners = ClientMembership.getClientMembershipListeners();
     assertEquals(2, registeredListeners.length);
@@ -540,7 +531,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertEquals(listeners[1], registeredListeners[1]);
 
     InternalClientMembership.notifyJoined(clientJoined, true);
-    synchronized(listeners[1]) {
+    synchronized (listeners[1]) {
       if (!fired[1]) {
         listeners[1].wait(2000);
       }
@@ -559,14 +550,14 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       }
     }
     resetArraysForTesting(fired, member, memberId, isClient);
-        
+
     ClientMembership.unregisterClientMembershipListener(listeners[0]);
     registeredListeners = ClientMembership.getClientMembershipListeners();
     assertEquals(1, registeredListeners.length);
     assertEquals(listeners[1], registeredListeners[0]);
-    
+
     InternalClientMembership.notifyJoined(clientJoined, true);
-    synchronized(listeners[1]) {
+    synchronized (listeners[1]) {
       if (!fired[1]) {
         listeners[1].wait(2000);
       }
@@ -595,7 +586,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertEquals(listeners[3], registeredListeners[2]);
 
     InternalClientMembership.notifyJoined(clientJoined, true);
-    synchronized(listeners[3]) {
+    synchronized (listeners[3]) {
       if (!fired[3]) {
         listeners[3].wait(2000);
       }
@@ -614,7 +605,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       }
     }
     resetArraysForTesting(fired, member, memberId, isClient);
-    
+
     ClientMembership.registerClientMembershipListener(listeners[0]);
     registeredListeners = ClientMembership.getClientMembershipListeners();
     assertEquals(4, registeredListeners.length);
@@ -624,7 +615,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertEquals(listeners[0], registeredListeners[3]);
 
     InternalClientMembership.notifyJoined(clientJoined, true);
-    synchronized(listeners[0]) {
+    synchronized (listeners[0]) {
       if (!fired[0]) {
         listeners[0].wait(2000);
       }
@@ -636,16 +627,16 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       assertTrue(isClient[i]);
     }
     resetArraysForTesting(fired, member, memberId, isClient);
-    
+
     ClientMembership.unregisterClientMembershipListener(listeners[3]);
     registeredListeners = ClientMembership.getClientMembershipListeners();
     assertEquals(3, registeredListeners.length);
     assertEquals(listeners[1], registeredListeners[0]);
     assertEquals(listeners[2], registeredListeners[1]);
     assertEquals(listeners[0], registeredListeners[2]);
-    
+
     InternalClientMembership.notifyJoined(clientJoined, true);
-    synchronized(listeners[0]) {
+    synchronized (listeners[0]) {
       if (!fired[0]) {
         listeners[0].wait(2000);
       }
@@ -670,9 +661,9 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertEquals(2, registeredListeners.length);
     assertEquals(listeners[1], registeredListeners[0]);
     assertEquals(listeners[0], registeredListeners[1]);
-    
+
     InternalClientMembership.notifyJoined(clientJoined, true);
-    synchronized(listeners[0]) {
+    synchronized (listeners[0]) {
       if (!fired[0]) {
         listeners[0].wait(2000);
       }
@@ -696,10 +687,10 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     ClientMembership.unregisterClientMembershipListener(listeners[0]);
     registeredListeners = ClientMembership.getClientMembershipListeners();
     assertEquals(0, registeredListeners.length);
-    
+
     InternalClientMembership.notifyJoined(clientJoined, true);
     for (int i = 0; i < NUM_LISTENERS; i++) {
-      synchronized(listeners[i]) {
+      synchronized (listeners[i]) {
         listeners[i].wait(20);
       }
       assertFalse(fired[i]);
@@ -708,14 +699,14 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       assertFalse(isClient[i]);
     }
     resetArraysForTesting(fired, member, memberId, isClient);
-    
+
     ClientMembership.registerClientMembershipListener(listeners[1]);
     registeredListeners = ClientMembership.getClientMembershipListeners();
     assertEquals(1, registeredListeners.length);
     assertEquals(listeners[1], registeredListeners[0]);
-    
+
     InternalClientMembership.notifyJoined(clientJoined, true);
-    synchronized(listeners[1]) {
+    synchronized (listeners[1]) {
       if (!fired[1]) {
         listeners[1].wait(2000);
       }
@@ -734,11 +725,13 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       }
     }
   }
- 
+
   protected static int testClientMembershipEventsInClient_port;
+
   private static int getTestClientMembershipEventsInClient_port() {
     return testClientMembershipEventsInClient_port;
   }
+
   /**
    * Tests notification of events in client process. Bridge clients detect
    * server joins when the client connects to the server. If the server
@@ -752,28 +745,27 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     final DistributedMember[] member = new DistributedMember[3];
     final String[] memberId = new String[3];
     final boolean[] isClient = new boolean[3];
-    
+
     // create and register ClientMembershipListener in controller vm...
     ClientMembershipListener listener = new ClientMembershipListener() {
-      public synchronized void memberJoined(ClientMembershipEvent event) {
+      public void memberJoined(ClientMembershipEvent event) {
         System.out.println("[testClientMembershipEventsInClient] memberJoined: " + event);
         fired[JOINED] = true;
         member[JOINED] = event.getMember();
         memberId[JOINED] = event.getMemberId();
         isClient[JOINED] = event.isClient();
-        notifyAll();
       }
-      public synchronized void memberLeft(ClientMembershipEvent event) {
+
+      public void memberLeft(ClientMembershipEvent event) {
         System.out.println("[testClientMembershipEventsInClient] memberLeft: " + event);
-//        fail("Please update testClientMembershipEventsInClient to handle memberLeft for BridgeServer.");
       }
-      public synchronized void memberCrashed(ClientMembershipEvent event) {
+
+      public void memberCrashed(ClientMembershipEvent event) {
         System.out.println("[testClientMembershipEventsInClient] memberCrashed: " + event);
         fired[CRASHED] = true;
         member[CRASHED] = event.getMember();
         memberId[CRASHED] = event.getMemberId();
         isClient[CRASHED] = event.isClient();
-        notifyAll();
       }
     };
     ClientMembership.registerClientMembershipListener(listener);
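
Because the client-side waits in this test are being switched to Awaitility polling
(see the hunks below), the listener above no longer needs synchronized callbacks or
notifyAll(); the shared flags only need to stay visible to the polling thread. The
commit keeps plain boolean[] flags; the AtomicBoolean variant below is an
illustrative alternative that also guarantees that visibility, not what the commit
itself does:

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicBoolean;
    import com.jayway.awaitility.Awaitility; // assumption: pre-3.x Awaitility

    final AtomicBoolean joined = new AtomicBoolean(false);
    ClientMembershipListener listener = new ClientMembershipListener() {
      public void memberJoined(ClientMembershipEvent event) {
        joined.set(true); // no monitor, no notifyAll()
      }
      public void memberLeft(ClientMembershipEvent event) { }
      public void memberCrashed(ClientMembershipEvent event) { }
    };

    // ...trigger the join, then poll instead of wait():
    Awaitility.await().pollInterval(50, TimeUnit.MILLISECONDS)
        .atMost(60, TimeUnit.SECONDS)
        .until(() -> joined.get());
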
@@ -783,31 +775,29 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     final int[] ports = new int[1];
 
     // create BridgeServer in vm0...
-    vm0.invoke(new CacheSerializableRunnable("Create BridgeServer") {
-      public void run2() throws CacheException {
-        try {
-          System.out.println("[testClientMembershipEventsInClient] Create BridgeServer");
-          getSystem();
-          AttributesFactory factory = new AttributesFactory();
-          factory.setScope(Scope.LOCAL);
-          Region region = createRegion(name, factory.create());
-          assertNotNull(region);
-          assertNotNull(getRootRegion().getSubregion(name));
-          testClientMembershipEventsInClient_port = startBridgeServer(0);
-        }
-        catch(IOException e) {
-          getSystem().getLogWriter().fine(new Exception(e));
-          fail("Failed to start CacheServer on VM1: " + e.getMessage());
-        }
+    vm0.invoke("create Bridge Server", () -> {
+      try {
+        System.out.println("[testClientMembershipEventsInClient] Create BridgeServer");
+        getSystem();
+        AttributesFactory factory = new AttributesFactory();
+        factory.setScope(Scope.LOCAL);
+        Region region = createRegion(name, factory.create());
+        assertNotNull(region);
+        assertNotNull(getRootRegion().getSubregion(name));
+        testClientMembershipEventsInClient_port = startBridgeServer(0);
+      } catch (IOException e) {
+        getSystem().getLogWriter().fine(new Exception(e));
+        fail("Failed to start CacheServer: " + e.getMessage());
       }
     });
-    
+
     // gather details for later creation of ConnectionPool...
     ports[0] = vm0.invoke("getTestClientMembershipEventsInClient_port",
         () -> ClientMembershipDUnitTest.getTestClientMembershipEventsInClient_port());
     assertTrue(ports[0] != 0);
 
-    DistributedMember serverMember = (DistributedMember) vm0.invoke("get distributed member", () -> ClientMembershipDUnitTest.getDistributedMember());
+    DistributedMember serverMember = (DistributedMember) vm0.invoke("get distributed member", () ->
+        ClientMembershipDUnitTest.getDistributedMember());
 
     String serverMemberId = serverMember.toString();
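
The anonymous CacheSerializableRunnable above is replaced with the overload of
VM.invoke that takes a description string and a lambda; the same overload is also
used in value-returning form to read the server port back. The shape, abbreviated
(the lambda body may only capture effectively final, serializable state):

    // fire-and-forget flavour
    vm0.invoke("create Bridge Server", () -> {
      getSystem();
      AttributesFactory factory = new AttributesFactory();
      factory.setScope(Scope.LOCAL);
      createRegion(name, factory.create());
      try {
        testClientMembershipEventsInClient_port = startBridgeServer(0);
      } catch (IOException e) {
        fail("Failed to start CacheServer: " + e.getMessage());
      }
    });

    // value-returning flavour
    int port = vm0.invoke("read port back",
        () -> ClientMembershipDUnitTest.getTestClientMembershipEventsInClient_port());
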
 
@@ -827,17 +817,20 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertNull(member[CRASHED]);
     assertNull(memberId[CRASHED]);
     assertFalse(isClient[CRASHED]);
-    
+
     // sanity check...
     System.out.println("[testClientMembershipEventsInClient] sanity check");
     DistributedMember test = new TestDistributedMember("test");
     InternalClientMembership.notifyJoined(test, SERVER);
-    synchronized(listener) {
-      if (!fired[JOINED] && !fired[CRASHED]) {
-        listener.wait(2000);
-      }
-    }
-    
+
+    Awaitility.await().pollInterval(50,TimeUnit.MILLISECONDS).timeout(300,TimeUnit.SECONDS)
+        .pollDelay(50,TimeUnit.MILLISECONDS).until(()->fired[JOINED] || fired[CRASHED]);
+//    synchronized (listener) {
+//      if (!fired[JOINED] && !fired[CRASHED]) {
+//        listener.wait(2000);
+//      }
+//    }
+
     assertTrue(fired[JOINED]);
     assertEquals(test, member[JOINED]);
     assertEquals(test.getId(), memberId[JOINED]);
@@ -851,14 +844,14 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertNull(memberId[CRASHED]);
     assertFalse(isClient[CRASHED]);
     resetArraysForTesting(fired, member, memberId, isClient);
-    
+
     // create bridge client in controller vm...
     System.out.println("[testClientMembershipEventsInClient] create bridge client");
     Properties config = new Properties();
     config.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     config.setProperty(DistributionConfig.LOCATORS_NAME, "");
     getSystem(config);
-    
+
     try {
       getCache();
       AttributesFactory factory = new AttributesFactory();
@@ -866,26 +859,28 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       ClientServerTestCase.configureConnectionPool(factory, NetworkUtils.getServerHostName(Host.getHost(0)), ports, true, -1, -1, null);
       createRegion(name, factory.create());
       assertNotNull(getRootRegion().getSubregion(name));
-    }
-    catch (CacheException ex) {
+    } catch (CacheException ex) {
       Assert.fail("While creating Region on Edge", ex);
     }
-    synchronized(listener) {
-      if (!fired[JOINED] && !fired[CRASHED]) {
-        listener.wait(60 * 1000);
-      }
-    }
-    
+
+    Awaitility.await().pollInterval(50,TimeUnit.MILLISECONDS).timeout(300,TimeUnit.SECONDS)
+        .pollDelay(50,TimeUnit.MILLISECONDS).until(()->fired[JOINED] || fired[CRASHED]);
+//    synchronized(listener) {
+//      if (!fired[JOINED] && !fired[CRASHED]) {
+//        listener.wait(60 * 1000);
+//      }
+//    }
+
     System.out.println("[testClientMembershipEventsInClient] assert client detected server join");
-    
+
     // first check the getCurrentServers() result
-    ClientCache clientCache = (ClientCache)getCache();
+    ClientCache clientCache = (ClientCache) getCache();
     Set<InetSocketAddress> servers = clientCache.getCurrentServers();
     assertTrue(!servers.isEmpty());
     InetSocketAddress serverAddr = servers.iterator().next();
     InetSocketAddress expectedAddr = new InetSocketAddress(serverMember.getHost(), ports[0]);
     assertEquals(expectedAddr, serverAddr);
-    
+
     // now check listener results
     assertTrue(fired[JOINED]);
     assertNotNull(member[JOINED]);
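
The reworked client-side check first consults ClientCache.getCurrentServers(),
which reports the server endpoints the client cache is currently connected to, and
only then inspects the listener flags. A condensed sketch of that check; serverHost
and port are placeholders for the values the test computes:

    ClientCache clientCache = (ClientCache) getCache();
    Set<InetSocketAddress> servers = clientCache.getCurrentServers();
    assertFalse(servers.isEmpty());
    // the single bridge server started by the test should be the one reported
    assertEquals(new InetSocketAddress(serverHost, port), servers.iterator().next());
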
@@ -903,18 +898,17 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertFalse(isClient[CRASHED]);
     resetArraysForTesting(fired, member, memberId, isClient);
 
-    vm0.invoke(new SerializableRunnable("Stop BridgeServer") {
-      public void run() {
-        System.out.println("[testClientMembershipEventsInClient] Stop BridgeServer");
-        stopBridgeServers(getCache());
-      }
-    });
-    synchronized(listener) {
-      if (!fired[JOINED] && !fired[CRASHED]) {
-        listener.wait(60 * 1000);
-      }
-    }
-    
+    vm0.invoke("Stop BridgeServer", () -> stopBridgeServers(getCache()));
+
+//    synchronized (listener) {
+//      if (!fired[JOINED] && !fired[CRASHED]) {
+//        listener.wait(60 * 1000);
+//      }
+//    }
+
+    Awaitility.await().pollInterval(50,TimeUnit.MILLISECONDS).timeout(300,TimeUnit.SECONDS)
+        .pollDelay(50,TimeUnit.MILLISECONDS).until(()->fired[JOINED] || fired[CRASHED]);
+
     System.out.println("[testClientMembershipEventsInClient] assert client detected server departure");
     assertFalse(fired[JOINED]);
     assertNull(member[JOINED]);
@@ -931,26 +925,26 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertEquals(serverMemberId, memberId[CRASHED]);
     assertFalse(isClient[CRASHED]);
     resetArraysForTesting(fired, member, memberId, isClient);
-    
+
     //now test that we rediscover the bridge server
-    vm0.invoke(new CacheSerializableRunnable("Recreate BridgeServer") {
-      public void run2() throws CacheException {
-        try {
-          System.out.println("[testClientMembershipEventsInClient] restarting BridgeServer");
-          startBridgeServer(ports[0]);
-        }
-        catch(IOException e) {
-          getSystem().getLogWriter().fine(new Exception(e));
-          fail("Failed to start CacheServer on VM1: " + e.getMessage());
-        }
+    vm0.invoke("Recreate BridgeServer", () -> {
+      try {
+        System.out.println("[testClientMembershipEventsInClient] restarting BridgeServer");
+        startBridgeServer(ports[0]);
+      } catch (IOException e) {
+        getSystem().getLogWriter().fine(new Exception(e));
+        fail("Failed to start CacheServer on VM1: " + e.getMessage());
       }
     });
-    synchronized(listener) {
-      if (!fired[JOINED] && !fired[CRASHED]) {
-        listener.wait(60 * 1000);
-      }
-    }
-    
+//    synchronized (listener) {
+//      if (!fired[JOINED] && !fired[CRASHED]) {
+//        listener.wait(60 * 1000);
+//      }
+//    }
+
+    Awaitility.await().pollInterval(50,TimeUnit.MILLISECONDS).timeout(300,TimeUnit.SECONDS)
+        .pollDelay(50,TimeUnit.MILLISECONDS).until(()->fired[JOINED] || fired[CRASHED]);
+
     System.out.println("[testClientMembershipEventsInClient] assert client detected server recovery");
     assertTrue(fired[JOINED]);
     assertNotNull(member[JOINED]);
@@ -966,7 +960,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertNull(member[CRASHED]);
     assertNull(memberId[CRASHED]);
   }
-  
+
   /**
    * Tests notification of events in server process. Bridge servers detect
    * client joins when the client connects to the server.
@@ -976,34 +970,33 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     final DistributedMember[] member = new DistributedMember[3];
     final String[] memberId = new String[3];
     final boolean[] isClient = new boolean[3];
-    
+
     // create and register ClientMembershipListener in controller vm...
     ClientMembershipListener listener = new ClientMembershipListener() {
-      public synchronized void memberJoined(ClientMembershipEvent event) {
+      public void memberJoined(ClientMembershipEvent event) {
         System.out.println("[testClientMembershipEventsInServer] memberJoined: " + event);
         fired[JOINED] = true;
         member[JOINED] = event.getMember();
         memberId[JOINED] = event.getMemberId();
         isClient[JOINED] = event.isClient();
-        notifyAll();
         assertFalse(fired[LEFT] || fired[CRASHED]);
       }
-      public synchronized void memberLeft(ClientMembershipEvent event) {
+
+      public void memberLeft(ClientMembershipEvent event) {
         System.out.println("[testClientMembershipEventsInServer] memberLeft: " + event);
         fired[LEFT] = true;
         member[LEFT] = event.getMember();
         memberId[LEFT] = event.getMemberId();
         isClient[LEFT] = event.isClient();
-        notifyAll();
         assertFalse(fired[JOINED] || fired[CRASHED]);
       }
-      public synchronized void memberCrashed(ClientMembershipEvent event) {
+
+      public void memberCrashed(ClientMembershipEvent event) {
         System.out.println("[testClientMembershipEventsInServer] memberCrashed: " + event);
         fired[CRASHED] = true;
         member[CRASHED] = event.getMember();
         memberId[CRASHED] = event.getMemberId();
         isClient[CRASHED] = event.isClient();
-        notifyAll();
         assertFalse(fired[JOINED] || fired[LEFT]);
       }
     };
@@ -1021,12 +1014,12 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     Region region = createRegion(name, factory.create());
     assertNotNull(region);
     assertNotNull(getRootRegion().getSubregion(name));
-    
+
     ports[0] = startBridgeServer(0);
     assertTrue(ports[0] != 0);
     DistributedMember serverMember = getMemberId();
     String serverMemberId = serverMember.toString();
-    
+
     System.out.println("[testClientMembershipEventsInServer] ports[0]=" + ports[0]);
     System.out.println("[testClientMembershipEventsInServer] serverMemberId=" + serverMemberId);
     System.out.println("[testClientMembershipEventsInServer] serverMember=" + serverMember);
@@ -1043,12 +1036,12 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertNull(member[CRASHED]);
     assertNull(memberId[CRASHED]);
     assertFalse(isClient[CRASHED]);
-    
+
     // sanity check...
     System.out.println("[testClientMembershipEventsInServer] sanity check");
     DistributedMember test = new TestDistributedMember("test");
     InternalClientMembership.notifyJoined(test, CLIENT);
-    synchronized(listener) {
+    synchronized (listener) {
       if (!fired[JOINED] && !fired[LEFT] && !fired[CRASHED]) {
         listener.wait(2000);
       }
@@ -1066,42 +1059,42 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertNull(memberId[CRASHED]);
     assertFalse(isClient[CRASHED]);
     resetArraysForTesting(fired, member, memberId, isClient);
-    
+
     final Host host = Host.getHost(0);
     SerializableCallable createConnectionPool =
-    new SerializableCallable("Create connectionPool") {
-      public Object call() {
-        System.out.println("[testClientMembershipEventsInServer] create bridge client");
-        Properties config = new Properties();
-        config.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
-        config.setProperty(DistributionConfig.LOCATORS_NAME, "");
-        properties = config;
-        DistributedSystem s = getSystem(config);
-        AttributesFactory factory = new AttributesFactory();
-        Pool pool = ClientServerTestCase.configureConnectionPool(factory, NetworkUtils.getServerHostName(host), ports, true, -1, 2, null);
-        createRegion(name, factory.create());
-        assertNotNull(getRootRegion().getSubregion(name));
-        assertTrue(s == basicGetSystem()); // see geode-1078
-        return getMemberId();
-      }
-    };
+        new SerializableCallable("Create connectionPool") {
+          public Object call() {
+            System.out.println("[testClientMembershipEventsInServer] create bridge client");
+            Properties config = new Properties();
+            config.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+            config.setProperty(DistributionConfig.LOCATORS_NAME, "");
+            properties = config;
+            DistributedSystem s = getSystem(config);
+            AttributesFactory factory = new AttributesFactory();
+            Pool pool = ClientServerTestCase.configureConnectionPool(factory, NetworkUtils.getServerHostName(host), ports, true, -1, 2, null);
+            createRegion(name, factory.create());
+            assertNotNull(getRootRegion().getSubregion(name));
+            assertTrue(s == basicGetSystem()); // see geode-1078
+            return getMemberId();
+          }
+        };
 
     // create bridge client in vm0...
-    DistributedMember clientMember = (DistributedMember)vm0.invoke(createConnectionPool);
+    DistributedMember clientMember = (DistributedMember) vm0.invoke(createConnectionPool);
     String clientMemberId = clientMember.toString();
-                                                
-    synchronized(listener) {
+
+    synchronized (listener) {
       if (!fired[JOINED] && !fired[LEFT] && !fired[CRASHED]) {
         listener.wait(60000);
       }
     }
-    
+
     System.out.println("[testClientMembershipEventsInServer] assert server detected client join");
     assertTrue(fired[JOINED]);
     assertEquals(member[JOINED] + " should equal " + clientMember,
-      clientMember, member[JOINED]);
+        clientMember, member[JOINED]);
     assertEquals(memberId[JOINED] + " should equal " + clientMemberId,
-      clientMemberId, memberId[JOINED]);
+        clientMemberId, memberId[JOINED]);
     assertTrue(isClient[JOINED]);
     assertFalse(fired[LEFT]);
     assertNull(member[LEFT]);
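
The createConnectionPool callable above wires the client region to the server
through the ClientServerTestCase.configureConnectionPool helper. A hedged
approximation of what that presumably sets up, written directly against the
PoolManager API (the helper's exact defaults for timeouts, retries and redundancy
are not reproduced, and the pool name is hypothetical):

    import com.gemstone.gemfire.cache.client.Pool;
    import com.gemstone.gemfire.cache.client.PoolFactory;
    import com.gemstone.gemfire.cache.client.PoolManager;

    PoolFactory pf = PoolManager.createFactory();
    pf.addServer(NetworkUtils.getServerHostName(host), ports[0]);
    pf.setSubscriptionEnabled(true); // assumption: maps to establishCallbackConnection=true
    Pool pool = pf.create("client-membership-pool"); // hypothetical name

    AttributesFactory factory = new AttributesFactory();
    factory.setPoolName(pool.getName());
    createRegion(name, factory.create());
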
@@ -1114,26 +1107,26 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     resetArraysForTesting(fired, member, memberId, isClient);
 
     pauseForClientToJoin();
-    
+
     vm0.invoke(new SerializableRunnable("Stop bridge client") {
       public void run() {
         System.out.println("[testClientMembershipEventsInServer] Stop bridge client");
         getRootRegion().getSubregion(name).close();
         Map m = PoolManager.getAll();
         Iterator mit = m.values().iterator();
-        while(mit.hasNext()) {
-          Pool p = (Pool)mit.next();
+        while (mit.hasNext()) {
+          Pool p = (Pool) mit.next();
           p.destroy();
         }
       }
     });
 
-    synchronized(listener) {
+    synchronized (listener) {
       if (!fired[JOINED] && !fired[LEFT] && !fired[CRASHED]) {
         listener.wait(60000);
       }
     }
-    
+
     System.out.println("[testClientMembershipEventsInServer] assert server detected client left");
     assertFalse(fired[JOINED]);
     assertNull(member[JOINED]);
@@ -1151,13 +1144,13 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
 
     // reconnect bridge client to test for crashed event
     clientMemberId = vm0.invoke(createConnectionPool).toString();
-                                                
-    synchronized(listener) {
+
+    synchronized (listener) {
       if (!fired[JOINED] && !fired[LEFT] && !fired[CRASHED]) {
         listener.wait(60000);
       }
     }
-    
+
     System.out.println("[testClientMembershipEventsInServer] assert server detected client re-join");
     assertTrue(fired[JOINED]);
     assertEquals(clientMember, member[JOINED]);
@@ -1172,7 +1165,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertNull(memberId[CRASHED]);
     assertFalse(isClient[CRASHED]);
     resetArraysForTesting(fired, member, memberId, isClient);
-    
+
     pauseForClientToJoin();
 
     ServerConnection.setForceClientCrashEvent(true);
@@ -1183,19 +1176,19 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
           getRootRegion().getSubregion(name).close();
           Map m = PoolManager.getAll();
           Iterator mit = m.values().iterator();
-          while(mit.hasNext()) {
-            Pool p = (Pool)mit.next();
+          while (mit.hasNext()) {
+            Pool p = (Pool) mit.next();
             p.destroy();
           }
         }
       });
-  
-      synchronized(listener) {
+
+      synchronized (listener) {
         if (!fired[JOINED] && !fired[LEFT] && !fired[CRASHED]) {
           listener.wait(60000);
         }
       }
-      
+
       System.out.println("[testClientMembershipEventsInServer] assert server detected client crashed");
       assertFalse(fired[JOINED]);
       assertNull(member[JOINED]);
@@ -1209,12 +1202,11 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       assertEquals(clientMember, member[CRASHED]);
       assertEquals(clientMemberId, memberId[CRASHED]);
       assertTrue(isClient[CRASHED]);
-    }
-    finally {
+    } finally {
       ServerConnection.setForceClientCrashEvent(false);
     }
   }
-  
+
   /**
    * The joined event fires when the first client handshake is processed.
    * This pauses long enough to allow the rest of the client sockets to
@@ -1227,17 +1219,17 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
   private void pauseForClientToJoin() {
     Wait.pause(2000);
   }
-  
-  /** 
-   * Tests registration and event notification in conjunction with 
-   * disconnecting and reconnecting to DistributedSystem. 
+
+  /**
+   * Tests registration and event notification in conjunction with
+   * disconnecting and reconnecting to DistributedSystem.
    */
   public void testLifecycle() throws Exception {
     final boolean[] fired = new boolean[3];
     final DistributedMember[] member = new DistributedMember[3];
     final String[] memberId = new String[3];
     final boolean[] isClient = new boolean[3];
-    
+
     // create and register ClientMembershipListener in controller vm...
     ClientMembershipListener listener = new ClientMembershipListener() {
       public synchronized void memberJoined(ClientMembershipEvent event) {
@@ -1251,24 +1243,26 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
         isClient[JOINED] = event.isClient();
         notifyAll();
       }
+
       public synchronized void memberLeft(ClientMembershipEvent event) {
       }
+
       public synchronized void memberCrashed(ClientMembershipEvent event) {
       }
     };
     ClientMembership.registerClientMembershipListener(listener);
-    
+
     // create loner in controller vm...
     Properties config = new Properties();
     config.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     config.setProperty(DistributionConfig.LOCATORS_NAME, "");
     properties = config;
     getSystem(config);
-    
+
     // assert that event is fired while connected
     DistributedMember serverJoined = new TestDistributedMember("serverJoined");
     InternalClientMembership.notifyJoined(serverJoined, SERVER);
-    synchronized(listener) {
+    synchronized (listener) {
       if (!fired[JOINED]) {
         listener.wait(2000);
       }
@@ -1278,13 +1272,12 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertEquals(serverJoined.getId(), memberId[JOINED]);
     assertFalse(isClient[JOINED]);
     resetArraysForTesting(fired, member, memberId, isClient);
-    
+
     // assert that event is NOT fired while disconnected
     disconnectFromDS();
-    
 
     InternalClientMembership.notifyJoined(serverJoined, SERVER);
-    synchronized(listener) {
+    synchronized (listener) {
       listener.wait(20);
     }
     assertFalse(fired[JOINED]);
@@ -1292,14 +1285,14 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertNull(memberId[JOINED]);
     assertFalse(isClient[JOINED]);
     resetArraysForTesting(fired, member, memberId, isClient);
-    
+
     // assert that event is fired again after reconnecting
     properties = config;
     InternalDistributedSystem sys = getSystem(config);
     assertTrue(sys.isConnected());
 
     InternalClientMembership.notifyJoined(serverJoined, SERVER);
-    synchronized(listener) {
+    synchronized (listener) {
       if (!fired[JOINED]) {
         listener.wait(2000);
       }
@@ -1309,15 +1302,15 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertEquals(serverJoined.getId(), memberId[JOINED]);
     assertFalse(isClient[JOINED]);
   }
-  
+
   /**
    * Starts up a server in the controller vm and 4 clients, then calls and tests
-   * ClientMembership.getConnectedClients(). 
+   * ClientMembership.getConnectedClients().
    */
   public void testGetConnectedClients() throws Exception {
     final String name = this.getUniqueName();
     final int[] ports = new int[1];
-    
+
     IgnoredException.addIgnoredException("ConnectException");
 
     // create BridgeServer in controller vm...
@@ -1328,7 +1321,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     Region region = createRegion(name, factory.create());
     assertNotNull(region);
     assertNotNull(getRootRegion().getSubregion(name));
-    
+
     ports[0] = startBridgeServer(0);
     assertTrue(ports[0] != 0);
     String serverMemberId = getSystem().getDistributedMember().toString();
@@ -1338,39 +1331,40 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
 
     final Host host = Host.getHost(0);
     SerializableCallable createPool =
-    new SerializableCallable("Create connection pool") {
-      public Object call() {
-        System.out.println("[testGetConnectedClients] create bridge client");
-        properties = new Properties();
-        properties.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
-        properties.setProperty(DistributionConfig.LOCATORS_NAME, "");
-        getSystem(properties);
-        AttributesFactory factory = new AttributesFactory();
-        factory.setScope(Scope.LOCAL);
-        Pool p = ClientServerTestCase.configureConnectionPool(factory, NetworkUtils.getServerHostName(host), ports, true, -1, -1, null);
-        createRegion(name, factory.create());
-        assertNotNull(getRootRegion().getSubregion(name));
-        assertTrue(p.getServers().size() > 0);
-        return getMemberId();
-      }
-    };
+        new SerializableCallable("Create connection pool") {
+          public Object call() {
+            System.out.println("[testGetConnectedClients] create bridge client");
+            properties = new Properties();
+            properties.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+            properties.setProperty(DistributionConfig.LOCATORS_NAME, "");
+            getSystem(properties);
+            AttributesFactory factory = new AttributesFactory();
+            factory.setScope(Scope.LOCAL);
+            Pool p = ClientServerTestCase.configureConnectionPool(factory, NetworkUtils.getServerHostName(host), ports, true, -1, -1, null);
+            createRegion(name, factory.create());
+            assertNotNull(getRootRegion().getSubregion(name));
+            assertTrue(p.getServers().size() > 0);
+            return getMemberId();
+          }
+        };
 
     // create bridge client in vm0...
     final String[] clientMemberIdArray = new String[host.getVMCount()];
-    
-    for (int i = 0; i < host.getVMCount(); i++) { 
+
+    for (int i = 0; i < host.getVMCount(); i++) {
       final VM vm = Host.getHost(0).getVM(i);
-      System.out.println("creating pool in vm_"+i);
+      System.out.println("creating pool in vm_" + i);
       clientMemberIdArray[i] = vm.invoke(createPool).toString();
     }
     Collection clientMemberIds = Arrays.asList(clientMemberIdArray);
-                                                
+
     {
       final int expectedClientCount = clientMemberIds.size();
       WaitCriterion wc = new WaitCriterion() {
         public String description() {
           return "wait for clients";
         }
+
         public boolean done() {
           Map connectedClients = InternalClientMembership.getConnectedClients(false);
           if (connectedClients == null) {
@@ -1384,32 +1378,32 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       };
       Wait.waitForCriterion(wc, 30000, 100, false);
     }
-    
+
     Map connectedClients = InternalClientMembership.getConnectedClients(false);
     assertNotNull(connectedClients);
     assertEquals(clientMemberIds.size(), connectedClients.size());
     System.out.println("connectedClients: " + connectedClients + "; clientMemberIds: " + clientMemberIds);
-    for (Iterator iter = connectedClients.keySet().iterator(); iter.hasNext();) {
-      String connectedClient = (String)iter.next();
+    for (Iterator iter = connectedClients.keySet().iterator(); iter.hasNext(); ) {
+      String connectedClient = (String) iter.next();
       System.out.println("[testGetConnectedClients] checking for client " + connectedClient);
       assertTrue(clientMemberIds.contains(connectedClient));
-      Object[] result = (Object[])connectedClients.get(connectedClient);
-      System.out.println("[testGetConnectedClients] result: " + 
-                          (result==null? "none"
-                              : String.valueOf(result[0])+"; connections="+result[1]));
+      Object[] result = (Object[]) connectedClients.get(connectedClient);
+      System.out.println("[testGetConnectedClients] result: " +
+          (result == null ? "none"
+              : String.valueOf(result[0]) + "; connections=" + result[1]));
     }
   }
 
   /**
    * Starts up 4 server and the controller vm as a client, then calls and tests
-   * ClientMembership.getConnectedServers(). 
+   * ClientMembership.getConnectedServers().
    */
   public void testGetConnectedServers() throws Exception {
     final Host host = Host.getHost(0);
     final String name = this.getUniqueName();
     final int[] ports = new int[host.getVMCount()];
-    
-    for (int i = 0; i < host.getVMCount(); i++) { 
+
+    for (int i = 0; i < host.getVMCount(); i++) {
       final int whichVM = i;
       final VM vm = Host.getHost(0).getVM(i);
       vm.invoke(new CacheSerializableRunnable("Create bridge server") {
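
The connected-clients loop above still iterates the raw Map with casts; a possible
further cleanup (not part of this commit) is the entrySet/generics form below. It
assumes, as the existing logging suggests, that getConnectedClients(false) maps a
client member id to an Object[] of {proxy id, connection count}:

    @SuppressWarnings("unchecked")
    Map<String, Object[]> connectedClients = InternalClientMembership.getConnectedClients(false);
    assertEquals(clientMemberIds.size(), connectedClients.size());
    for (Map.Entry<String, Object[]> entry : connectedClients.entrySet()) {
      assertTrue(clientMemberIds.contains(entry.getKey()));
      Object[] result = entry.getValue();
      System.out.println("[testGetConnectedClients] result: "
          + (result == null ? "none" : result[0] + "; connections=" + result[1]));
    }
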
@@ -1419,32 +1413,31 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
           getSystem();
           AttributesFactory factory = new AttributesFactory();
           factory.setScope(Scope.LOCAL);
-          Region region = createRegion(name+"_"+whichVM, factory.create());
+          Region region = createRegion(name + "_" + whichVM, factory.create());
           assertNotNull(region);
-          assertNotNull(getRootRegion().getSubregion(name+"_"+whichVM));
+          assertNotNull(getRootRegion().getSubregion(name + "_" + whichVM));
           region.put("KEY-1", "VAL-1");
-          
+
           try {
             testGetConnectedServers_port = startBridgeServer(0);
-          }
-          catch (IOException e) {
+          } catch (IOException e) {
             com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().error("startBridgeServer threw IOException", e);
             fail("startBridgeServer threw IOException " + e.getMessage());
           }
-          
+
           assertTrue(testGetConnectedServers_port != 0);
-      
-          System.out.println("[testGetConnectedServers] port=" + 
-            ports[whichVM]);
-          System.out.println("[testGetConnectedServers] serverMemberId=" + 
-            getDistributedMember());
+
+          System.out.println("[testGetConnectedServers] port=" +
+              ports[whichVM]);
+          System.out.println("[testGetConnectedServers] serverMemberId=" +
+              getDistributedMember());
         }
       });
       ports[whichVM] = vm.invoke("getTestGetConnectedServers_port",
           () -> ClientMembershipDUnitTest.getTestGetConnectedServers_port());
       assertTrue(ports[whichVM] != 0);
     }
-    
+
     System.out.println("[testGetConnectedServers] create bridge client");
     Properties config = new Properties();
     config.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
@@ -1452,17 +1445,17 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     properties = config;
     getSystem(config);
     getCache();
-    
+
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.LOCAL);
 
     for (int i = 0; i < ports.length; i++) {
-      System.out.println("[testGetConnectedServers] creating connectionpool for " + 
-        NetworkUtils.getServerHostName(host) + " " + ports[i]);
+      System.out.println("[testGetConnectedServers] creating connectionpool for " +
+          NetworkUtils.getServerHostName(host) + " " + ports[i]);
       int[] thisServerPorts = new int[] { ports[i] };
-      ClientServerTestCase.configureConnectionPoolWithName(factory, NetworkUtils.getServerHostName(host), thisServerPorts, false, -1, -1, null,"pooly"+i);
-      Region region = createRegion(name+"_"+i, factory.create());
-      assertNotNull(getRootRegion().getSubregion(name+"_"+i));
+      ClientServerTestCase.configureConnectionPoolWithName(factory, NetworkUtils.getServerHostName(host), thisServerPorts, false, -1, -1, null, "pooly" + i);
+      Region region = createRegion(name + "_" + i, factory.create());
+      assertNotNull(getRootRegion().getSubregion(name + "_" + i));
       region.get("KEY-1");
     }
 
@@ -1472,6 +1465,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
         public String description() {
           return "wait for pools and servers";
         }
+
         public boolean done() {
           if (PoolManager.getAll().size() != expectedVMCount) {
             return false;
@@ -1491,25 +1485,25 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
 
     {
       assertEquals(host.getVMCount(), PoolManager.getAll().size());
-      
+
     }
-    
+
     Map connectedServers = InternalClientMembership.getConnectedServers();
     assertNotNull(connectedServers);
     assertEquals(host.getVMCount(), connectedServers.size());
-    for (Iterator iter = connectedServers.keySet().iterator(); iter.hasNext();) {
+    for (Iterator iter = connectedServers.keySet().iterator(); iter.hasNext(); ) {
       String connectedServer = (String) iter.next();
-      System.out.println("[testGetConnectedServers]  value for connectedServer: " + 
-                          connectedServers.get(connectedServer));
+      System.out.println("[testGetConnectedServers]  value for connectedServer: " +
+          connectedServers.get(connectedServer));
     }
   }
 
   protected static int testGetConnectedServers_port;
+
   private static int getTestGetConnectedServers_port() {
     return testGetConnectedServers_port;
   }
 
-  
   public Properties getDistributedSystemProperties() {
     if (properties == null) {
       properties = new Properties();
@@ -1525,8 +1519,8 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     final Host host = Host.getHost(0);
     final String name = this.getUniqueName();
     final int[] ports = new int[host.getVMCount()];
-    
-    for (int i = 0; i < host.getVMCount(); i++) { 
+
+    for (int i = 0; i < host.getVMCount(); i++) {
       final int whichVM = i;
       final VM vm = Host.getHost(0).getVM(i);
       vm.invoke(new CacheSerializableRunnable("Create bridge server") {
@@ -1539,28 +1533,27 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
           assertNotNull(region);
           assertNotNull(getRootRegion().getSubregion(name));
           region.put("KEY-1", "VAL-1");
-          
+
           try {
             testGetNotifiedClients_port = startBridgeServer(0);
-          }
-          catch (IOException e) {
+          } catch (IOException e) {
             com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().error("startBridgeServer threw IOException", e);
             fail("startBridgeServer threw IOException " + e.getMessage());
           }
-          
+
           assertTrue(testGetNotifiedClients_port != 0);
-      
-          System.out.println("[testGetNotifiedClients] port=" + 
-            ports[whichVM]);
-          System.out.println("[testGetNotifiedClients] serverMemberId=" + 
-            getMemberId());
+
+          System.out.println("[testGetNotifiedClients] port=" +
+              ports[whichVM]);
+          System.out.println("[testGetNotifiedClients] serverMemberId=" +
+              getMemberId());
         }
       });
       ports[whichVM] = vm.invoke("getTestGetNotifiedClients_port",
           () -> ClientMembershipDUnitTest.getTestGetNotifiedClients_port());
       assertTrue(ports[whichVM] != 0);
     }
-    
+
     System.out.println("[testGetNotifiedClients] create bridge client");
     Properties config = new Properties();
     config.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
@@ -1568,7 +1561,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     properties = config;
     getSystem();
     getCache();
-    
+
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.LOCAL);
 
@@ -1580,14 +1573,14 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     region.get("KEY-1");
 
     final String clientMemberId = getMemberId().toString();
-    
+
     pauseForClientToJoin();
-    
+
     // assertions go here
     int[] clientCounts = new int[host.getVMCount()];
-    
+
     // only one server vm will have that client for updating
-    for (int i = 0; i < host.getVMCount(); i++) { 
+    for (int i = 0; i < host.getVMCount(); i++) {
       final int whichVM = i;
       final VM vm = Host.getHost(0).getVM(i);
       vm.invoke(new CacheSerializableRunnable("Create bridge server") {
@@ -1604,7 +1597,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       clientCounts[whichVM] = vm.invoke("getTestGetNotifiedClients_clientCount",
           () -> ClientMembershipDUnitTest.getTestGetNotifiedClients_clientCount());
     }
-    
+
     // only one server should have a notifier for this client...
     int totalClientCounts = 0;
     for (int i = 0; i < clientCounts.length; i++) {
@@ -1613,20 +1606,24 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     // this assertion fails because the count is 4
     //assertEquals(1, totalClientCounts);
   }
+
   protected static int testGetNotifiedClients_port;
+
   private static int getTestGetNotifiedClients_port() {
     return testGetNotifiedClients_port;
   }
+
   protected static int testGetNotifiedClients_clientCount;
+
   private static int getTestGetNotifiedClients_clientCount() {
     return testGetNotifiedClients_clientCount;
   }
 
   // Simple DistributedMember implementation
   static final class TestDistributedMember implements DistributedMember {
-    
+
     private String host;
-    
+
     public TestDistributedMember(String host) {
       this.host = host;
     }
@@ -1650,29 +1647,29 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     public String getId() {
       return this.host;
     }
-    
+
     public int compareTo(DistributedMember o) {
       if ((o == null) || !(o instanceof TestDistributedMember)) {
         throw new InternalGemFireException("Invalidly comparing TestDistributedMember to " + o);
       }
-      
+
       TestDistributedMember tds = (TestDistributedMember) o;
       return getHost().compareTo(tds.getHost());
     }
-    
+
     public boolean equals(Object obj) {
       if ((obj == null) || !(obj instanceof TestDistributedMember)) {
         return false;
       }
-      return compareTo((TestDistributedMember)obj) == 0;
+      return compareTo((TestDistributedMember) obj) == 0;
     }
-    
+
     public int hashCode() {
       return getHost().hashCode();
     }
-    
+
     public DurableClientAttributes getDurableClientAttributes() {
-      
+
       return null;
     }
 


[44/63] [abbrv] incubator-geode git commit: Merge branch 'feature/GEODE-17-2' into develop

Posted by kl...@apache.org.
Merge branch 'feature/GEODE-17-2' into develop


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/72be65ff
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/72be65ff
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/72be65ff

Branch: refs/heads/feature/GEODE-1276
Commit: 72be65fffe0378ce203ba8d45ed63f17610c2403
Parents: b2f8e59 53760ec
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Fri Apr 29 12:21:45 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Fri Apr 29 12:21:45 2016 -0700

----------------------------------------------------------------------
 geode-assembly/build.gradle                     |    1 +
 .../LauncherLifecycleCommandsDUnitTest.java     |    3 +
 .../SharedConfigurationEndToEndDUnitTest.java   |    1 -
 .../src/test/resources/expected_jars.txt        |    1 +
 geode-core/build.gradle                         |    4 +-
 .../cache/operations/OperationContext.java      |  556 ++++------
 .../internal/AbstractDistributionConfig.java    |   48 +-
 .../distributed/internal/ConfigAttribute.java   |    1 -
 .../internal/ConfigAttributeChecker.java        |    1 -
 .../internal/ConfigAttributeDesc.java           |    3 -
 .../internal/ConfigAttributeGetter.java         |    3 -
 .../internal/ConfigAttributeSetter.java         |    3 -
 .../internal/DistributionConfig.java            |    7 +
 .../internal/DistributionConfigImpl.java        |   38 +-
 .../gemfire/internal/AbstractConfig.java        |   28 +-
 .../internal/security/AuthorizeRequest.java     |    5 -
 .../internal/security/GeodeSecurityUtil.java    |  167 +++
 .../security/shiro/CustomAuthRealm.java         |  176 +++
 .../security/shiro/JMXShiroAuthenticator.java   |   69 ++
 .../management/AsyncEventQueueMXBean.java       |    5 +
 .../gemfire/management/CacheServerMXBean.java   |   12 +-
 .../gemfire/management/DiskStoreMXBean.java     |    9 +
 .../DistributedLockServiceMXBean.java           |    8 +-
 .../management/DistributedRegionMXBean.java     |    5 +
 .../management/DistributedSystemMXBean.java     |   30 +-
 .../management/GatewayReceiverMXBean.java       |    8 +-
 .../gemfire/management/GatewaySenderMXBean.java |   12 +-
 .../gemfire/management/LocatorMXBean.java       |    5 +
 .../gemfire/management/LockServiceMXBean.java   |   10 +-
 .../gemfire/management/ManagerMXBean.java       |   12 +-
 .../gemfire/management/MemberMXBean.java        |   15 +-
 .../gemfire/management/RegionMXBean.java        |    4 +
 .../management/internal/ManagementAgent.java    |  126 ++-
 .../internal/SystemManagementService.java       |   36 +-
 .../internal/beans/GatewaySenderMBean.java      |    8 +-
 .../internal/beans/MemberMBeanBridge.java       |   17 +-
 .../management/internal/cli/CommandManager.java |    3 -
 .../internal/cli/commands/ClientCommands.java   |   50 +-
 .../internal/cli/commands/ConfigCommands.java   |   44 +-
 .../CreateAlterDestroyRegionCommands.java       |   42 +-
 .../internal/cli/commands/DataCommands.java     |   36 +-
 .../internal/cli/commands/DeployCommands.java   |   21 +-
 .../cli/commands/DiskStoreCommands.java         |   64 +-
 .../cli/commands/DurableClientCommands.java     |   45 +-
 ...ExportImportSharedConfigurationCommands.java |   23 +-
 .../internal/cli/commands/FunctionCommands.java |   33 +-
 .../internal/cli/commands/GfshHelpCommands.java |   13 +-
 .../internal/cli/commands/IndexCommands.java    |   39 +-
 .../cli/commands/LauncherLifecycleCommands.java |   81 +-
 .../internal/cli/commands/MemberCommands.java   |   27 +-
 .../cli/commands/MiscellaneousCommands.java     |   99 +-
 .../internal/cli/commands/PDXCommands.java      |   33 +-
 .../internal/cli/commands/QueueCommands.java    |   25 +-
 .../internal/cli/commands/RegionCommands.java   |   17 +-
 .../internal/cli/commands/ShellCommands.java    |  106 +-
 .../internal/cli/commands/StatusCommands.java   |   24 +-
 .../internal/cli/commands/WanCommands.java      |  336 +++---
 .../internal/cli/remote/CommandProcessor.java   |   24 +-
 .../internal/cli/result/ErrorResultData.java    |   10 +-
 .../internal/cli/result/ResultBuilder.java      |    7 +-
 .../internal/cli/shell/JmxOperationInvoker.java |   70 +-
 .../internal/security/AccessControl.java        |   51 -
 .../internal/security/AccessControlContext.java |   37 -
 .../internal/security/AccessControlMBean.java   |   41 +
 .../internal/security/AccessControlMXBean.java  |    6 +-
 .../internal/security/CLIOperationContext.java  |  138 ---
 .../internal/security/JMXOperationContext.java  |  177 ---
 .../internal/security/JSONAuthorization.java    |  308 ------
 .../internal/security/MBeanServerWrapper.java   |  168 ++-
 .../security/ManagementInterceptor.java         |  271 -----
 .../management/internal/security/Resource.java  |   26 -
 .../internal/security/ResourceConstants.java    |   91 +-
 .../internal/security/ResourceOperation.java    |   13 +-
 .../security/ResourceOperationContext.java      |  252 ++---
 .../controllers/AbstractCommandsController.java |   82 +-
 .../controllers/ConfigCommandsController.java   |   20 +-
 .../web/controllers/DataCommandsController.java |   26 +-
 .../DiskStoreCommandsController.java            |   14 +-
 .../controllers/FunctionCommandsController.java |    9 +-
 .../MiscellaneousCommandsController.java        |    8 +-
 .../web/controllers/WanCommandsController.java  |    2 +-
 .../EnvironmentVariablesHandlerInterceptor.java |   92 --
 .../support/LoginHandlerInterceptor.java        |  122 ++
 .../web/http/support/SimpleHttpRequester.java   |  105 +-
 .../web/shell/AbstractHttpOperationInvoker.java |   28 +-
 .../web/shell/RestHttpOperationInvoker.java     |   26 +-
 .../web/shell/SimpleHttpOperationInvoker.java   |   11 +-
 .../gemfire/security/AccessControl.java         |   16 +-
 .../gemfire/security/Authenticator.java         |   18 +-
 .../internal/DistributionConfigJUnitTest.java   |   74 +-
 .../gemfire/internal/ConfigSourceJUnitTest.java |    8 +-
 .../extension/mock/MockExtensionCommands.java   |   23 +-
 .../internal/cli/CommandManagerJUnitTest.java   |   45 +-
 .../internal/cli/GfshParserJUnitTest.java       |   45 +-
 .../management/internal/cli/HeadlessGfsh.java   |    2 +-
 .../cli/commands/CliCommandTestBase.java        |  134 ++-
 .../cli/commands/ConfigCommandsDUnitTest.java   |   26 +-
 ...eateAlterDestroyRegionCommandsDUnitTest.java |   34 +-
 .../cli/commands/DeployCommandsDUnitTest.java   |   14 +-
 .../commands/DiskStoreCommandsDUnitTest.java    |   30 +-
 .../commands/DiskStoreCommandsJUnitTest.java    |    1 +
 .../cli/commands/FunctionCommandsDUnitTest.java |   40 +-
 .../commands/GemfireDataCommandsDUnitTest.java  |   28 +-
 ...WithCacheLoaderDuringCacheMissDUnitTest.java |   15 +-
 .../cli/commands/IndexCommandsDUnitTest.java    |   21 +-
 ...stAndDescribeDiskStoreCommandsDUnitTest.java |   14 +-
 .../ListAndDescribeRegionDUnitTest.java         |   13 +-
 .../cli/commands/ListIndexCommandDUnitTest.java |   14 +-
 .../MiscellaneousCommandsDUnitTest.java         |   40 +-
 ...laneousCommandsExportLogsPart1DUnitTest.java |   15 +-
 ...laneousCommandsExportLogsPart2DUnitTest.java |   16 +-
 ...laneousCommandsExportLogsPart3DUnitTest.java |   24 +-
 ...laneousCommandsExportLogsPart4DUnitTest.java |   15 +-
 .../cli/commands/QueueCommandsDUnitTest.java    |   16 +-
 .../SharedConfigurationCommandsDUnitTest.java   |   16 +-
 .../cli/commands/ShellCommandsDUnitTest.java    |   12 +-
 .../cli/commands/ShowMetricsDUnitTest.java      |   16 +-
 .../cli/commands/ShowStackTraceDUnitTest.java   |   14 +-
 .../cli/commands/UserCommandsDUnitTest.java     |   16 +-
 .../shell/GfshExecutionStrategyJUnitTest.java   |   36 +-
 .../cli/shell/GfshHistoryJUnitTest.java         |    2 -
 .../security/AccessControlMBeanJUnitTest.java   |   58 +
 ...rDistributedSystemMXBeanIntegrationTest.java |   50 -
 ...horizeOperationForMBeansIntegrationTest.java |  323 ------
 ...erationForRegionCommandsIntegrationTest.java |  136 ---
 ...CacheServerMBeanAuthenticationJUnitTest.java |   58 +
 .../CacheServerMBeanAuthorizationJUnitTest.java |   90 ++
 .../CacheServerMBeanShiroJUnitTest.java         |   93 ++
 .../security/CliCommandsSecurityTest.java       |   83 ++
 .../security/DataCommandsSecurityTest.java      |   83 ++
 .../DiskStoreMXBeanSecurityJUnitTest.java       |   83 ++
 .../GatewayReceiverMBeanSecurityTest.java       |   90 ++
 .../GatewaySenderMBeanSecurityTest.java         |  105 ++
 .../GeodeSecurityUtilCustomRealmJUnitTest.java  |   52 +
 .../GeodeSecurityUtilWithIniFileJUnitTest.java  |  147 +++
 .../security/GfshCommandsSecurityTest.java      |  165 +++
 .../security/GfshShellConnectionRule.java       |  109 ++
 .../security/JMXConnectionConfiguration.java    |   33 +
 .../internal/security/JSONAuthorization.java    |  204 ++++
 ...JSONAuthorizationDetailsIntegrationTest.java |  163 ---
 .../JsonAuthorizationCacheStartRule.java        |   83 ++
 .../LockServiceMBeanAuthorizationJUnitTest.java |   90 ++
 .../security/MBeanSecurityJUnitTest.java        |  117 ++
 .../security/MBeanServerConnectionRule.java     |  130 +++
 .../ManagerMBeanAuthorizationJUnitTest.java     |   78 ++
 .../security/MemberMBeanSecurityJUnitTest.java  |  110 ++
 ...tionCodesForDataCommandsIntegrationTest.java |  101 --
 ...tionCodesForDistributedSystemMXBeanTest.java |   76 --
 .../ResourceOperationContextJUnitTest.java      |   88 ++
 .../internal/security/ShiroCacheStartRule.java  |   63 ++
 .../internal/security/TestCommand.java          |  227 ++++
 .../ReadOpFileAccessControllerJUnitTest.java    |   19 +-
 .../security/ClientAuthorizationDUnitTest.java  |    7 +-
 .../security/ClientAuthorizationTestCase.java   |   20 +-
 .../DeltaClientPostAuthorizationDUnitTest.java  |   21 +-
 .../security/templates/XmlAuthorization.java    |    2 +-
 .../com/gemstone/gemfire/test/dunit/VM.java     |   62 +-
 .../gemstone/gemfire/util/test/TestUtil.java    |    8 +-
 .../gemfire/codeAnalysis/excludedClasses.txt    |   28 +-
 .../codeAnalysis/sanctionedSerializables.txt    |    2 +
 .../management/internal/security/auth1.json     |   28 +-
 .../management/internal/security/auth3.json     |   55 +-
 .../internal/security/cacheServer.json          |  188 ++++
 .../management/internal/security/shiro-ini.json |   87 ++
 .../internal/security/testInheritRole.json      |   40 -
 .../security/testSimpleUserAndRole.json         |   14 -
 .../testUserAndRoleRegionServerGroup.json       |   16 -
 .../internal/security/testUserMultipleRole.json |   20 -
 geode-core/src/test/resources/shiro.ini         |   40 +
 .../cli/commands/ClientCommandsDUnitTest.java   |   53 +-
 .../DurableClientCommandsDUnitTest.java         |    2 +-
 .../junit/rules/DescribedExternalResource.java  |   63 ++
 .../LuceneFunctionReadPathDUnitTest.java        |   17 +-
 geode-pulse/build.gradle                        |    1 +
 .../tools/pulse/internal/PulseAppListener.java  |   68 +-
 .../internal/controllers/PulseController.java   |   28 +-
 .../tools/pulse/internal/data/Cluster.java      |   14 +-
 .../pulse/internal/data/JMXDataUpdater.java     |  217 ++--
 .../pulse/internal/data/PulseConstants.java     |   14 +
 .../tools/pulse/internal/data/Repository.java   |   54 +-
 .../pulse/internal/log/PulseLogWriter.java      |    4 -
 .../security/GemFireAuthentication.java         |   91 ++
 .../security/GemFireAuthenticationProvider.java |   80 ++
 .../pulse/internal/security/LogoutHandler.java  |   55 +
 geode-pulse/src/main/resources/pulse.properties |   12 +-
 .../src/main/webapp/WEB-INF/spring-security.xml |   59 +-
 .../controllers/PulseControllerJUnitTest.java   |   56 +-
 .../tools/pulse/testbed/driver/PulseUITest.java |    2 +-
 .../pulse/tests/DataBrowserResultLoader.java    |   14 +-
 .../tools/pulse/tests/PulseAbstractTest.java    | 1047 ++++++++++++++++++
 .../tools/pulse/tests/PulseAuthTest.java        |   33 +
 .../tools/pulse/tests/PulseAutomatedTest.java   |   17 +-
 .../tools/pulse/tests/PulseBaseTest.java        |    4 +-
 .../tools/pulse/tests/PulseNoAuthTest.java      |   33 +
 .../gemfire/tools/pulse/tests/PulseTest.java    | 1039 -----------------
 .../gemfire/tools/pulse/tests/Region.java       |    2 +-
 .../gemfire/tools/pulse/tests/Server.java       |  156 ++-
 geode-pulse/src/test/resources/pulse-auth.json  |   21 +
 .../wan/wancommand/WANCommandTestBase.java      |  104 +-
 ...anCommandCreateGatewayReceiverDUnitTest.java |  174 +--
 .../WanCommandCreateGatewaySenderDUnitTest.java |  192 ++--
 ...WanCommandGatewayReceiverStartDUnitTest.java |  120 +-
 .../WanCommandGatewayReceiverStopDUnitTest.java |  120 +-
 .../WanCommandGatewaySenderStartDUnitTest.java  |  177 ++-
 .../WanCommandGatewaySenderStopDUnitTest.java   |  158 +--
 .../wan/wancommand/WanCommandListDUnitTest.java |  135 ++-
 .../WanCommandPauseResumeDUnitTest.java         |  370 +++----
 .../wancommand/WanCommandStatusDUnitTest.java   |  174 +--
 .../src/main/webapp/WEB-INF/gemfire-servlet.xml |    2 +-
 ...entVariablesHandlerInterceptorJUnitTest.java |  267 -----
 .../LoginHandlerInterceptorJUnitTest.java       |  274 +++++
 gradle/dependency-versions.properties           |    1 +
 212 files changed, 8790 insertions(+), 6365 deletions(-)
----------------------------------------------------------------------



[03/63] [abbrv] incubator-geode git commit: GEODE-1072: Removing HDFS related code

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/DelegatingSerializedComparator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/DelegatingSerializedComparator.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/DelegatingSerializedComparator.java
deleted file mode 100644
index 52470d0..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/DelegatingSerializedComparator.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.internal.cache.persistence.soplog;
-
-import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedReader.SerializedComparator;
-
-/**
- * Delegates object comparisons to one or more embedded comparators.
- *  
- */
-public interface DelegatingSerializedComparator extends SerializedComparator {
-  /**
-   * Injects the embedded comparators.
-   * @param comparators the comparators for delegation
-   */
-  void setComparators(SerializedComparator[] comparators);
-  
-  /**
-   * Returns the embedded comparators.
-   * @return the comparators
-   */
-  SerializedComparator[] getComparators();
-}
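
The removed DelegatingSerializedComparator describes a comparator that routes key comparisons to a set of embedded comparators. The following is a minimal standalone sketch of that delegation pattern; the names (ByteArrayComparator, CompositeByteArrayComparator) are hypothetical stand-ins rather than the removed Geode/Hadoop types, and the first non-zero result from the embedded comparators decides the ordering.

    import java.util.Comparator;

    // Hypothetical stand-in for the removed SerializedComparator contract.
    interface ByteArrayComparator extends Comparator<byte[]> {
    }

    // Routes comparisons to embedded comparators, mirroring the
    // setComparators/getComparators shape of the removed interface.
    final class CompositeByteArrayComparator implements ByteArrayComparator {
      private ByteArrayComparator[] comparators = new ByteArrayComparator[0];

      public void setComparators(ByteArrayComparator[] comparators) {
        this.comparators = comparators.clone();
      }

      public ByteArrayComparator[] getComparators() {
        return comparators.clone();
      }

      @Override
      public int compare(byte[] k1, byte[] k2) {
        // The first embedded comparator that sees a difference decides the order.
        for (ByteArrayComparator c : comparators) {
          int result = c.compare(k1, k2);
          if (result != 0) {
            return result;
          }
        }
        return 0;
      }
    }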

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/HFileStoreStatistics.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/HFileStoreStatistics.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/HFileStoreStatistics.java
deleted file mode 100644
index fdf3852..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/HFileStoreStatistics.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.internal.cache.persistence.soplog;
-
-import static com.gemstone.gemfire.distributed.internal.DistributionStats.getStatTime;
-
-import com.gemstone.gemfire.StatisticDescriptor;
-import com.gemstone.gemfire.Statistics;
-import com.gemstone.gemfire.StatisticsFactory;
-import com.gemstone.gemfire.StatisticsType;
-import com.gemstone.gemfire.StatisticsTypeFactory;
-import com.gemstone.gemfire.internal.DummyStatisticsFactory;
-import com.gemstone.gemfire.internal.StatisticsTypeFactoryImpl;
-
-public class HFileStoreStatistics {
-  private final Statistics stats;
-  
-  private final CacheOperation blockCache;
-  
-  public HFileStoreStatistics(String typeName, String name) {
-    this(new DummyStatisticsFactory(), typeName, name);
-  }
-  
-  public HFileStoreStatistics(StatisticsFactory factory, String typeName, String name) {
-    StatisticsTypeFactory tf = StatisticsTypeFactoryImpl.singleton();
-    
-    StatisticDescriptor bcMisses = tf.createLongCounter("blockCacheMisses", "The total number of block cache misses", "misses");
-    StatisticDescriptor bcHits = tf.createLongCounter("blockCacheHits", "The total number of block cache hits", "hits");
-    StatisticDescriptor bcCached = tf.createLongGauge("blocksCached", "The current number of cached blocks", "blocks");
-    StatisticDescriptor bcBytesCached = tf.createLongGauge("blockBytesCached", "The current number of bytes cached", "bytes");
-    StatisticDescriptor bcBytesEvicted = tf.createLongCounter("blockBytesEvicted", "The total number of bytes cached", "bytes");
-
-    
-    StatisticsType type = tf.createType(typeName, 
-        "Statistics about structured I/O operations for a region", new StatisticDescriptor[] {
-        bcMisses, bcHits, bcCached, bcBytesCached, bcBytesEvicted
-    });
-
-    blockCache = new CacheOperation(bcMisses.getId(), bcHits.getId(), bcCached.getId(), bcBytesCached.getId(), bcBytesEvicted.getId());
-
-
-    stats = factory.createAtomicStatistics(type, name);
-  }
-
-  public void close() {
-    stats.close();
-  }
-  
-  public Statistics getStats() {
-    return stats;
-  }
-  
-  public CacheOperation getBlockCache() {
-    return blockCache;
-  }
-  
-  
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("blockCache = {").append(blockCache).append("}\n");
-    
-    return sb.toString();
-  }
-  
-  public class TimedOperation {
-    protected final int countId;
-    protected final int inProgressId;
-    protected final int timeId;
-    private final int errorsId;
-    
-    public TimedOperation(int count, int inProgress, int time, int errors) {
-      this.countId = count;
-      this.inProgressId = inProgress;
-      this.timeId = time;
-      this.errorsId = errors;
-    }
-    
-    public long begin() {
-      stats.incLong(inProgressId, 1);
-      return getStatTime();
-    }
-    
-    public long end(long start) {
-      stats.incLong(inProgressId, -1);
-      stats.incLong(countId, 1);
-      stats.incLong(timeId, getStatTime() - start);
-      return getStatTime();
-    }
-    
-    public void error(long start) {
-      end(start);
-      stats.incLong(errorsId, 1);
-    }
-    
-    public long getCount() {
-      return stats.getLong(countId);
-    }
-    
-    public long getInProgress() {
-      return stats.getLong(inProgressId);
-    }
-    
-    public long getTime() {
-      return stats.getLong(timeId);
-    }
-    
-    public long getErrors() {
-      return stats.getLong(errorsId);
-    }
-    
-    @Override
-    public String toString() {
-      StringBuilder sb = new StringBuilder();
-      sb.append("count=").append(getCount());
-      sb.append(";inProgress=").append(getInProgress());
-      sb.append(";errors=").append(getErrors());
-      sb.append(";time=").append(getTime());
-      
-      return sb.toString();
-    }
-  }
-  
-  public class CacheOperation {
-    private final int missesId;
-    private final int hitsId;
-    private final int cachedId;
-    private final int bytesCachedId;
-    private final int bytesEvictedId;
-    
-    public CacheOperation(int missesId, int hitsId, int cachedId, 
-        int bytesCachedId, int bytesEvictedId) {
-      this.missesId = missesId;
-      this.hitsId = hitsId;
-      this.cachedId = cachedId;
-      this.bytesCachedId = bytesCachedId;
-      this.bytesEvictedId = bytesEvictedId;
-    }
-    
-    public void store(long bytes) {
-      stats.incLong(cachedId, 1);
-      stats.incLong(bytesCachedId, bytes);
-    }
-    
-    public void evict(long bytes) {
-      stats.incLong(cachedId, -1);
-      stats.incLong(bytesCachedId, -bytes);
-      stats.incLong(bytesEvictedId, bytes);
-    }
-    
-    public void hit() {
-      stats.incLong(hitsId, 1);
-    }
-    
-    public void miss() {
-      stats.incLong(missesId, 1);
-    }
-    
-    public long getMisses() {
-      return stats.getLong(missesId);
-    }
-    
-    public long getHits() {
-      return stats.getLong(hitsId);
-    }
-    
-    public long getCached() {
-      return stats.getLong(cachedId);
-    }
-    
-    public long getBytesCached() {
-      return stats.getLong(bytesCachedId);
-    }
-    
-    public long getBytesEvicted() {
-      return stats.getLong(bytesEvictedId);
-    }
-    
-    @Override
-    public String toString() {
-      StringBuilder sb = new StringBuilder();
-      sb.append("misses=").append(getMisses());
-      sb.append(";hits=").append(getHits());
-      sb.append(";cached=").append(getCached());
-      sb.append(";bytesCached=").append(getBytesCached());
-      sb.append(";bytesEvicted=").append(getBytesEvicted());
-      
-      return sb.toString();
-    }
-  }
-}
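
HFileStoreStatistics tracked block-cache activity through a small accounting helper (store/evict/hit/miss). The sketch below reproduces that accounting pattern with plain AtomicLong counters under an assumed name (BlockCacheStats); it is illustrative only and does not use the Geode Statistics API that the removed class was built on.

    import java.util.concurrent.atomic.AtomicLong;

    // Minimal block-cache accounting in the style of the removed CacheOperation helper.
    final class BlockCacheStats {
      private final AtomicLong misses = new AtomicLong();
      private final AtomicLong hits = new AtomicLong();
      private final AtomicLong cachedBlocks = new AtomicLong();
      private final AtomicLong cachedBytes = new AtomicLong();
      private final AtomicLong evictedBytes = new AtomicLong();

      void store(long bytes) {       // a block was added to the cache
        cachedBlocks.incrementAndGet();
        cachedBytes.addAndGet(bytes);
      }

      void evict(long bytes) {       // a block was removed from the cache
        cachedBlocks.decrementAndGet();
        cachedBytes.addAndGet(-bytes);
        evictedBytes.addAndGet(bytes);
      }

      void hit() { hits.incrementAndGet(); }

      void miss() { misses.incrementAndGet(); }

      @Override
      public String toString() {
        return "misses=" + misses + ";hits=" + hits + ";cached=" + cachedBlocks
            + ";bytesCached=" + cachedBytes + ";bytesEvicted=" + evictedBytes;
      }
    }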

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/KeyValueIterator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/KeyValueIterator.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/KeyValueIterator.java
deleted file mode 100644
index df7e1ac..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/KeyValueIterator.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.internal.cache.persistence.soplog;
-
-import java.util.Iterator;
-
-/**
- * Provides an {@link Iterator} view over a collection of keys and values.  The
- * implementor must provide access to the current key/value as well as a means
- * to move to the next pair.
- * 
- *
- * @param <K> the key type
- * @param <V> the value type
- */
-public interface KeyValueIterator<K, V> extends Iterator<K> {
-  /**
-   * Returns the key at the current position.
-   * @return the key
-   */
-  public K key();
-  
-  /**
-   * Returns the value at the current position.
-   * @return the value
-   */
-  public abstract V value();
-}
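
KeyValueIterator extends Iterator<K> with key() and value() accessors for the current position. A minimal sketch of the same contract over an in-memory NavigableMap follows; the class name (MapKeyValueIterator) is hypothetical and the code only illustrates the shape of the interface, not the removed soplog implementation.

    import java.util.Iterator;
    import java.util.Map;
    import java.util.NavigableMap;
    import java.util.TreeMap;

    // Iterator over a sorted map that also exposes the current key and value,
    // in the style of the removed KeyValueIterator<K, V> contract.
    final class MapKeyValueIterator<K, V> implements Iterator<K> {
      private final Iterator<Map.Entry<K, V>> entries;
      private Map.Entry<K, V> current;

      MapKeyValueIterator(NavigableMap<K, V> map) {
        this.entries = map.entrySet().iterator();
      }

      @Override
      public boolean hasNext() {
        return entries.hasNext();
      }

      @Override
      public K next() {
        current = entries.next();
        return current.getKey();
      }

      public K key() {
        return current.getKey();
      }

      public V value() {
        return current.getValue();
      }

      public static void main(String[] args) {
        NavigableMap<String, Integer> data = new TreeMap<>();
        data.put("a", 1);
        data.put("b", 2);
        MapKeyValueIterator<String, Integer> it = new MapKeyValueIterator<>(data);
        while (it.hasNext()) {
          it.next();
          System.out.println(it.key() + " -> " + it.value());
        }
      }
    }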

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/SortedOplogStatistics.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/SortedOplogStatistics.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/SortedOplogStatistics.java
deleted file mode 100644
index 35baafb..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/SortedOplogStatistics.java
+++ /dev/null
@@ -1,505 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.internal.cache.persistence.soplog;
-
-import static com.gemstone.gemfire.distributed.internal.DistributionStats.getStatTime;
-
-import com.gemstone.gemfire.StatisticDescriptor;
-import com.gemstone.gemfire.Statistics;
-import com.gemstone.gemfire.StatisticsFactory;
-import com.gemstone.gemfire.StatisticsType;
-import com.gemstone.gemfire.StatisticsTypeFactory;
-import com.gemstone.gemfire.internal.DummyStatisticsFactory;
-import com.gemstone.gemfire.internal.StatisticsTypeFactoryImpl;
-
-public class SortedOplogStatistics {
-  private final Statistics stats;
-  
-  private final IOOperation read;
-  private final ScanOperation scan;
-  private final IOOperation write;
-  private final IOOperation put;
-  private final IOOperation flush;
-  private final IOOperation minorCompaction;
-  private final IOOperation majorCompaction;
-  private final BloomOperation bloom;
-  private final TimedOperation clear;
-  private final TimedOperation destroy;
-  
-  private final IOOperation blockRead;
-  private final CacheOperation blockCache;
-  
-  private final int activeFilesId;
-  private final int inactiveFilesId;
-  private final int activeReadersId;
-  
-  private final int storeUsageBytesId;
-
-  public SortedOplogStatistics(String typeName, String name) {
-    this(new DummyStatisticsFactory(), typeName, name);
-  }
-  
-  public SortedOplogStatistics(StatisticsFactory factory, String typeName, String name) {
-    StatisticsTypeFactory tf = StatisticsTypeFactoryImpl.singleton();
-    
-    StatisticDescriptor readCount = tf.createLongCounter("reads", "The total number of read operations", "ops");
-    StatisticDescriptor readInProgress = tf.createLongGauge("readsInProgress", "The number of read operations in progress", "ops");
-    StatisticDescriptor readTime = tf.createLongCounter("readTime", "The total time spent reading from disk", "nanoseconds");
-    StatisticDescriptor readBytes = tf.createLongCounter("readBytes", "The total number of bytes read from disk", "bytes");
-    StatisticDescriptor readErrors = tf.createLongCounter("readErrors", "The total number of read errors", "errors");
-
-    StatisticDescriptor scanCount = tf.createLongCounter("scans", "The total number of scan operations", "ops");
-    StatisticDescriptor scanInProgress = tf.createLongGauge("scansInProgress", "The number of scan operations in progress", "ops");
-    StatisticDescriptor scanTime = tf.createLongCounter("scanTime", "The total time scanner was operational", "nanoseconds");
-    StatisticDescriptor scanBytes = tf.createLongCounter("scanBytes", "The total number of bytes scanned from disk", "bytes");
-    StatisticDescriptor scanErrors = tf.createLongCounter("scanErrors", "The total number of scan errors", "errors");
-    StatisticDescriptor scanIterations = tf.createLongCounter("scanIterations", "The total number of scan iterations", "ops");
-    StatisticDescriptor scanIterationTime = tf.createLongCounter("scanIterationTime", "The total time spent scanning from persistence layer", "nanoseconds");
-
-    StatisticDescriptor writeCount = tf.createLongCounter("writes", "The total number of write operations", "ops");
-    StatisticDescriptor writeInProgress = tf.createLongGauge("writesInProgress", "The number of write operations in progress", "ops");
-    StatisticDescriptor writeTime = tf.createLongCounter("writeTime", "The total time spent writing to disk", "nanoseconds");
-    StatisticDescriptor writeBytes = tf.createLongCounter("writeBytes", "The total number of bytes written to disk", "bytes");
-    StatisticDescriptor writeErrors = tf.createLongCounter("writeErrors", "The total number of write errors", "errors");
-
-    StatisticDescriptor putCount = tf.createLongCounter("puts", "The total number of put operations", "ops");
-    StatisticDescriptor putInProgress = tf.createLongGauge("putsInProgress", "The number of put operations in progress", "ops");
-    StatisticDescriptor putTime = tf.createLongCounter("putTime", "The total time spent in put calls", "nanoseconds");
-    StatisticDescriptor putBytes = tf.createLongCounter("putBytes", "The total number of bytes put", "bytes");
-    StatisticDescriptor putErrors = tf.createLongCounter("putErrors", "The total number of put errors", "errors");
-
-    StatisticDescriptor flushCount = tf.createLongCounter("flushes", "The total number of flush operations", "ops");
-    StatisticDescriptor flushInProgress = tf.createLongGauge("flushesInProgress", "The number of flush operations in progress", "ops");
-    StatisticDescriptor flushTime = tf.createLongCounter("flushTime", "The total time spent flushing to disk", "nanoseconds");
-    StatisticDescriptor flushBytes = tf.createLongCounter("flushBytes", "The total number of bytes flushed to disk", "bytes");
-    StatisticDescriptor flushErrors = tf.createLongCounter("flushErrors", "The total number of flush errors", "errors");
-
-    StatisticDescriptor minorCompactionCount = tf.createLongCounter("minorCompactions", "The total number of minor compaction operations", "ops");
-    StatisticDescriptor minorCompactionInProgress = tf.createLongGauge("minorCompactionsInProgress", "The number of minor compaction operations in progress", "ops");
-    StatisticDescriptor minorCompactionTime = tf.createLongCounter("minorCompactionTime", "The total time spent in minor compactions", "nanoseconds");
-    StatisticDescriptor minorCompactionBytes = tf.createLongCounter("minorCompactionBytes", "The total number of bytes collected during minor compactions", "bytes");
-    StatisticDescriptor minorCompactionErrors = tf.createLongCounter("minorCompactionErrors", "The total number of minor compaction errors", "errors");
-
-    StatisticDescriptor majorCompactionCount = tf.createLongCounter("majorCompactions", "The total number of major compaction operations", "ops");
-    StatisticDescriptor majorCompactionInProgress = tf.createLongGauge("majorCompactionsInProgress", "The number of major compaction operations in progress", "ops");
-    StatisticDescriptor majorCompactionTime = tf.createLongCounter("majorCompactionTime", "The total time spent in major compactions", "nanoseconds");
-    StatisticDescriptor majorCompactionBytes = tf.createLongCounter("majorCompactionBytes", "The total number of bytes collected during major compactions", "bytes");
-    StatisticDescriptor majorCompactionErrors = tf.createLongCounter("majorCompactionErrors", "The total number of major compaction errors", "errors");
-
-    StatisticDescriptor bloomCount = tf.createLongCounter("bloomFilterCheck", "The total number of Bloom Filter checks", "ops");
-    StatisticDescriptor bloomInProgress = tf.createLongGauge("bloomFilterChecksInProgress", "The number of Bloom Filter checks in progress", "ops");
-    StatisticDescriptor bloomTime = tf.createLongCounter("bloomFilterCheckTime", "The total time spent checking the Bloom Filter", "nanoseconds");
-    StatisticDescriptor bloomErrors = tf.createLongCounter("bloomFilterErrors", "The total number of Bloom Filter errors", "errors");
-    StatisticDescriptor bloomFalsePositive = tf.createLongCounter("bloomFilterFalsePositives", "The total number of Bloom Filter false positives", "false positives");
-
-    StatisticDescriptor clearCount = tf.createLongCounter("clears", "The total number of clear operations", "ops");
-    StatisticDescriptor clearInProgress = tf.createLongGauge("clearsInProgress", "The number of clear operations in progress", "ops");
-    StatisticDescriptor clearTime = tf.createLongCounter("clearTime", "The total time spent in clear operations", "nanoseconds");
-    StatisticDescriptor clearErrors = tf.createLongGauge("clearErrors", "The total number of clear errors", "errors");
-
-    StatisticDescriptor destroyCount = tf.createLongCounter("destroys", "The total number of destroy operations", "ops");
-    StatisticDescriptor destroyInProgress = tf.createLongGauge("destroysInProgress", "The number of destroy operations in progress", "ops");
-    StatisticDescriptor destroyTime = tf.createLongCounter("destroyTime", "The total time spent in destroy operations", "nanoseconds");
-    StatisticDescriptor destroyErrors = tf.createLongGauge("destroyErrors", "The total number of destroy errors", "errors");
-
-    StatisticDescriptor brCount = tf.createLongCounter("blockReads", "The total number of block read operations", "ops");
-    StatisticDescriptor brInProgress = tf.createLongGauge("blockReadsInProgress", "The number of block read operations in progress", "ops");
-    StatisticDescriptor brTime = tf.createLongCounter("blockReadTime", "The total time spent reading blocks from disk", "nanoseconds");
-    StatisticDescriptor brBytes = tf.createLongCounter("blockReadBytes", "The total number of block bytes read from disk", "bytes");
-    StatisticDescriptor brErrors = tf.createLongCounter("blockReadErrors", "The total number of block read errors", "errors");
-
-    StatisticDescriptor bcMisses = tf.createLongCounter("blockCacheMisses", "The total number of block cache misses", "misses");
-    StatisticDescriptor bcHits = tf.createLongCounter("blockCacheHits", "The total number of block cache hits", "hits");
-    StatisticDescriptor bcCached = tf.createLongGauge("blocksCached", "The current number of cached blocks", "blocks");
-    StatisticDescriptor bcBytesCached = tf.createLongGauge("blockBytesCached", "The current number of bytes cached", "bytes");
-    StatisticDescriptor bcBytesEvicted = tf.createLongCounter("blockBytesEvicted", "The total number of bytes cached", "bytes");
-
-    StatisticDescriptor activeFileCount = tf.createLongGauge("activeFileCount", "The total number of active files", "files");
-    StatisticDescriptor inactiveFileCount = tf.createLongGauge("inactiveFileCount", "The total number of inactive files", "files");
-    StatisticDescriptor activeReaderCount = tf.createLongGauge("activeReaderCount", "The total number of active file readers", "files");
-    
-    StatisticDescriptor storeUsageBytes = tf.createLongGauge("storeUsageBytes", "The total volume occupied on persistent store", "bytes");
-    
-    StatisticsType type = tf.createType(typeName, 
-        "Statistics about structured I/O operations for a region", new StatisticDescriptor[] {
-        readCount, readInProgress, readTime, readBytes, readErrors,
-        scanCount, scanInProgress, scanTime, scanBytes, scanErrors, scanIterations, scanIterationTime,
-        writeCount, writeInProgress, writeTime, writeBytes, writeErrors,
-        putCount, putInProgress, putTime, putBytes, putErrors,
-        flushCount, flushInProgress, flushTime, flushBytes, flushErrors,
-        minorCompactionCount, minorCompactionInProgress, minorCompactionTime, minorCompactionBytes, minorCompactionErrors,
-        majorCompactionCount, majorCompactionInProgress, majorCompactionTime, majorCompactionBytes, majorCompactionErrors,
-        bloomCount, bloomInProgress, bloomTime, bloomErrors, bloomFalsePositive,
-        clearCount, clearInProgress, clearTime, clearErrors,
-        destroyCount, destroyInProgress, destroyTime, destroyErrors,
-        brCount, brInProgress, brTime, brBytes, brErrors,
-        bcMisses, bcHits, bcCached, bcBytesCached, bcBytesEvicted,
-        activeFileCount, inactiveFileCount, activeReaderCount, storeUsageBytes
-    });
-
-    read = new IOOperation(readCount.getId(), readInProgress.getId(), readTime.getId(), readBytes.getId(), readErrors.getId());
-    scan = new ScanOperation(scanCount.getId(), scanInProgress.getId(), scanTime.getId(), scanBytes.getId(), scanErrors.getId(), scanIterations.getId(), scanIterationTime.getId());    
-    write = new IOOperation(writeCount.getId(), writeInProgress.getId(), writeTime.getId(), writeBytes.getId(), writeErrors.getId());
-    put = new IOOperation(putCount.getId(), putInProgress.getId(), putTime.getId(), putBytes.getId(), putErrors.getId());
-    flush = new IOOperation(flushCount.getId(), flushInProgress.getId(), flushTime.getId(), flushBytes.getId(), flushErrors.getId());
-    minorCompaction = new IOOperation(minorCompactionCount.getId(), minorCompactionInProgress.getId(), minorCompactionTime.getId(), minorCompactionBytes.getId(), minorCompactionErrors.getId());
-    majorCompaction = new IOOperation(majorCompactionCount.getId(), majorCompactionInProgress.getId(), majorCompactionTime.getId(), majorCompactionBytes.getId(), majorCompactionErrors.getId());
-    bloom = new BloomOperation(bloomCount.getId(), bloomInProgress.getId(), bloomTime.getId(), bloomErrors.getId(), bloomFalsePositive.getId());
-    clear = new TimedOperation(clearCount.getId(), clearInProgress.getId(), clearTime.getId(), clearErrors.getId());
-    destroy = new TimedOperation(destroyCount.getId(), destroyInProgress.getId(), destroyTime.getId(), destroyErrors.getId());
-    
-    blockRead = new IOOperation(brCount.getId(), brInProgress.getId(), brTime.getId(), brBytes.getId(), brErrors.getId());
-    blockCache = new CacheOperation(bcMisses.getId(), bcHits.getId(), bcCached.getId(), bcBytesCached.getId(), bcBytesEvicted.getId());
-
-    activeFilesId = activeFileCount.getId();
-    inactiveFilesId = inactiveFileCount.getId();
-    activeReadersId = activeReaderCount.getId();
-    storeUsageBytesId = storeUsageBytes.getId();
-
-    stats = factory.createAtomicStatistics(type, name);
-  }
-
-  public void close() {
-    stats.close();
-  }
-  
-  public Statistics getStats() {
-    return stats;
-  }
-  
-  public IOOperation getRead() {
-    return read;
-  }
-  
-  public ScanOperation getScan() {
-    return scan;
-  }
-  
-  public IOOperation getWrite() {
-    return write;
-  }
-  
-  public IOOperation getPut() {
-    return put;
-  }
-  
-  public IOOperation getFlush() {
-    return flush;
-  }
-  
-  public IOOperation getMinorCompaction() {
-    return minorCompaction;
-  }
-  
-  public IOOperation getMajorCompaction() {
-    return majorCompaction;
-  }
-  
-  public BloomOperation getBloom() {
-    return bloom;
-  }
-  
-  public TimedOperation getClear() {
-    return clear;
-  }
-  
-  public TimedOperation getDestroy() {
-    return destroy;
-  }
-
-  public IOOperation getBlockRead() {
-    return blockRead;
-  }
-  
-  public CacheOperation getBlockCache() {
-    return blockCache;
-  }
-  
-  public long getActiveFileCount() {
-    return stats.getLong(activeFilesId);
-  }
-  
-  public long getInactiveFileCount() {
-    return stats.getLong(inactiveFilesId);
-  }
-  
-  public long getActiveReaderCount() {
-    return stats.getLong(activeReadersId);
-  }
-  
-  public void incActiveFiles(int amt) {
-    stats.incLong(activeFilesId, amt);
-    assert stats.getLong(activeFilesId) >= 0;
-  }
-  
-  public void incInactiveFiles(int amt) {
-    stats.incLong(inactiveFilesId, amt);
-    assert stats.getLong(inactiveFilesId) >= 0;
-  }
-  
-  public void incActiveReaders(int amt) {
-    stats.incLong(activeReadersId, amt);
-    assert stats.getLong(activeReadersId) >= 0;
-  }
-  
-  public long getStoreUsageBytes() {
-    return stats.getLong(storeUsageBytesId);
-  }
-  
-  public void incStoreUsageBytes(long amt) {
-    stats.incLong(storeUsageBytesId, amt);
-    assert stats.getLong(storeUsageBytesId) >= 0;
-  }
-  
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("read = {").append(read).append("}\n");
-    sb.append("scan = {").append(scan).append("}\n");
-    sb.append("write = {").append(write).append("}\n");
-    sb.append("put = {").append(put).append("}\n");
-    sb.append("flush = {").append(flush).append("}\n");
-    sb.append("minorCompaction = {").append(minorCompaction).append("}\n");
-    sb.append("majorCompaction = {").append(majorCompaction).append("}\n");
-    sb.append("bloom = {").append(bloom).append("}\n");
-    sb.append("clear = {").append(clear).append("}\n");
-    sb.append("destroy = {").append(destroy).append("}\n");
-    sb.append("blockRead = {").append(blockRead).append("}\n");
-    sb.append("blockCache = {").append(blockCache).append("}\n");
-    sb.append("activeFiles = ").append(stats.getLong(activeFilesId)).append("\n");
-    sb.append("inactiveFiles = ").append(stats.getLong(inactiveFilesId)).append("\n");
-    sb.append("activeReaders = ").append(stats.getLong(activeReadersId)).append("\n");
-    sb.append("storeUsageBytes = ").append(stats.getLong(storeUsageBytesId)).append("\n");
-    
-    return sb.toString();
-  }
-  
-  public class TimedOperation {
-    protected final int countId;
-    protected final int inProgressId;
-    protected final int timeId;
-    private final int errorsId;
-    
-    public TimedOperation(int count, int inProgress, int time, int errors) {
-      this.countId = count;
-      this.inProgressId = inProgress;
-      this.timeId = time;
-      this.errorsId = errors;
-    }
-    
-    public long begin() {
-      stats.incLong(inProgressId, 1);
-      return getStatTime();
-    }
-    
-    public long end(long start) {
-      stats.incLong(inProgressId, -1);
-      stats.incLong(countId, 1);
-      stats.incLong(timeId, getStatTime() - start);
-      return getStatTime();
-    }
-    
-    public void error(long start) {
-      end(start);
-      stats.incLong(errorsId, 1);
-    }
-    
-    public long getCount() {
-      return stats.getLong(countId);
-    }
-    
-    public long getInProgress() {
-      return stats.getLong(inProgressId);
-    }
-    
-    public long getTime() {
-      return stats.getLong(timeId);
-    }
-    
-    public long getErrors() {
-      return stats.getLong(errorsId);
-    }
-    
-    @Override
-    public String toString() {
-      StringBuilder sb = new StringBuilder();
-      sb.append("count=").append(getCount());
-      sb.append(";inProgress=").append(getInProgress());
-      sb.append(";errors=").append(getErrors());
-      sb.append(";time=").append(getTime());
-      
-      return sb.toString();
-    }
-  }
-  
-  public class IOOperation extends TimedOperation {
-    protected final int bytesId;
-    
-    public IOOperation(int count, int inProgress, int time, int bytes, int errors) {
-      super(count, inProgress, time, errors);
-      this.bytesId = bytes;
-    }
-    
-    public long end(long bytes, long start) {
-      stats.incLong(bytesId, bytes);
-      return super.end(start);
-    }
-    
-    public long getBytes() {
-      return stats.getLong(bytesId);
-    }
-    
-    @Override
-    public String toString() {
-      StringBuilder sb = new StringBuilder(super.toString());
-      sb.append(";bytes=").append(getBytes());
-      
-      return sb.toString();
-    }
-  }
-
-  public class ScanOperation extends IOOperation {
-    private final int iterationsId;
-    private final int iterationTimeId;
-
-    public ScanOperation(int count, int inProgress, int time, int bytes, int errors, int iterCount, int iterTime) {
-      super(count, inProgress, time, bytes, errors);
-      iterationsId = iterCount;
-      iterationTimeId = iterTime;
-    }
-    
-    public long beginIteration() {
-      return getStatTime();
-    }
-    
-    public void endIteration(long bytes, long start){
-      stats.incLong(iterationsId, 1);
-      stats.incLong(bytesId, bytes);
-      stats.incLong(iterationTimeId, getStatTime() - start);
-    }
-    
-    public long getIterations() {
-      return stats.getLong(iterationsId);
-    }
-    
-    public long getIterationTime() {
-      return stats.getLong(iterationTimeId);
-    }
-    
-    @Override
-    public String toString() {
-      StringBuilder sb = new StringBuilder(super.toString());
-      sb.append(";iterations=").append(getIterations());
-      sb.append(";iterationTime=").append(getIterationTime());
-      
-      return sb.toString();
-    }
-  }
-
-  public class BloomOperation extends TimedOperation {
-    private final int falsePositiveId;
-    
-    public BloomOperation(int count, int inProgress, int time, int errors, int falsePositive) {
-      super(count, inProgress, time, errors);
-      this.falsePositiveId = falsePositive;
-    }
-    
-    public void falsePositive() {
-      stats.incLong(falsePositiveId, 1);
-    }
-    
-    public long getFalsePositives() {
-      return stats.getLong(falsePositiveId);
-    }
-    
-    @Override
-    public String toString() {
-      StringBuilder sb = new StringBuilder(super.toString());
-      sb.append(";falsePositives=").append(getFalsePositives());
-      
-      return sb.toString();
-    }
-  }
-  
-  public class CacheOperation {
-    private final int missesId;
-    private final int hitsId;
-    private final int cachedId;
-    private final int bytesCachedId;
-    private final int bytesEvictedId;
-    
-    public CacheOperation(int missesId, int hitsId, int cachedId, 
-        int bytesCachedId, int bytesEvictedId) {
-      this.missesId = missesId;
-      this.hitsId = hitsId;
-      this.cachedId = cachedId;
-      this.bytesCachedId = bytesCachedId;
-      this.bytesEvictedId = bytesEvictedId;
-    }
-    
-    public void store(long bytes) {
-      stats.incLong(cachedId, 1);
-      stats.incLong(bytesCachedId, bytes);
-    }
-    
-    public void evict(long bytes) {
-      stats.incLong(cachedId, -1);
-      stats.incLong(bytesCachedId, -bytes);
-      stats.incLong(bytesEvictedId, bytes);
-    }
-    
-    public void hit() {
-      stats.incLong(hitsId, 1);
-    }
-    
-    public void miss() {
-      stats.incLong(missesId, 1);
-    }
-    
-    public long getMisses() {
-      return stats.getLong(missesId);
-    }
-    
-    public long getHits() {
-      return stats.getLong(hitsId);
-    }
-    
-    public long getCached() {
-      return stats.getLong(cachedId);
-    }
-    
-    public long getBytesCached() {
-      return stats.getLong(bytesCachedId);
-    }
-    
-    public long getBytesEvicted() {
-      return stats.getLong(bytesEvictedId);
-    }
-    
-    @Override
-    public String toString() {
-      StringBuilder sb = new StringBuilder();
-      sb.append("misses=").append(getMisses());
-      sb.append(";hits=").append(getHits());
-      sb.append(";cached=").append(getCached());
-      sb.append(";bytesCached=").append(getBytesCached());
-      sb.append(";bytesEvicted=").append(getBytesEvicted());
-      
-      return sb.toString();
-    }
-  }
-}
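
SortedOplogStatistics wraps each I/O category in a TimedOperation that follows a begin/end (or begin/error) protocol around the instrumented call. The sketch below shows that usage pattern with plain atomics under an assumed name (SimpleTimedOperation); the removed class recorded the same count, in-progress, time, and error figures through the Geode Statistics API instead.

    import java.util.concurrent.atomic.AtomicLong;

    // Begin/end/error timing in the style of the removed TimedOperation helper.
    final class SimpleTimedOperation {
      private final AtomicLong count = new AtomicLong();
      private final AtomicLong inProgress = new AtomicLong();
      private final AtomicLong totalNanos = new AtomicLong();
      private final AtomicLong errors = new AtomicLong();

      long begin() {
        inProgress.incrementAndGet();
        return System.nanoTime();
      }

      long end(long start) {
        inProgress.decrementAndGet();
        count.incrementAndGet();
        totalNanos.addAndGet(System.nanoTime() - start);
        return System.nanoTime();
      }

      void error(long start) {
        end(start);
        errors.incrementAndGet();
      }

      // Typical call site: time an operation and record failures.
      static void example(SimpleTimedOperation flush) {
        long start = flush.begin();
        try {
          // ... perform the flush ...
          flush.end(start);
        } catch (RuntimeException e) {
          flush.error(start);
          throw e;
        }
      }
    }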

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/SortedReader.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/SortedReader.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/SortedReader.java
deleted file mode 100644
index 1042e22..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/SortedReader.java
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.internal.cache.persistence.soplog;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.Comparator;
-import org.apache.hadoop.io.RawComparator;
-
-/**
- * Defines a means to read sorted data including performing range scans.
- * 
- * @param <V> type of value returned by the sorted reader
- * 
- */
-public interface SortedReader<V> extends Closeable {
-  /**
-   * Defines the names of additional data that may be associated with a sorted
-   * reader.
-   */
-  public enum Metadata {
-    /** identifies the disk store associated with the soplog, optional */
-    DISK_STORE,
-    
-    /** identifies the RVV data, optional */
-    RVV;
-
-    /**
-     * Converts the metadata name to bytes.
-     * @return the bytes
-     */
-    public byte[] bytes() {
-      return ("gemfire." + name()).getBytes();
-    }
-  }
-  
-  /**
-   * Filters data based on metadata values.
-   */
-  public interface MetadataFilter {
-    /**
-     * Returns the name this filter acts upon.
-     * @return the name
-     */
-    Metadata getName();
-    
-    /**
-     * Returns true if the metadata value passes the filter.
-     * @param value the value to check; may be null if the metadata value does
-     *              not exist or has not been assigned yet
-     * @return true if accepted
-     */
-    boolean accept(byte[] value);
-  }
-  
-  /**
-   * Allows comparisons between serialized objects.
-   */
-  public interface SerializedComparator extends RawComparator<byte[]> {
-  }
-  
-  /**
-   * Allows sorted iteration through a set of keys and values.
-   */
-  public interface SortedIterator<V> extends KeyValueIterator<ByteBuffer, V> {
-    /**
-     * Closes the iterator and frees any retained resources.
-     */
-    public abstract void close();
-  }
-
-  /**
-   * Defines the statistics available on a sorted file.
-   */
-  public interface SortedStatistics {
-    /**
-     * Returns the number of keys in the file.
-     * @return the key count
-     */
-    long keyCount();
-    
-    /**
-     * Returns the first key in the file.
-     * @return the first key
-     */
-    byte[] firstKey();
-    
-    /**
-     * Returns the last key in the file.
-     * @return the last key
-     */
-    byte[] lastKey();
-    
-    /**
-     * Returns the average key size in bytes.
-     * @return the average key size
-     */
-    double avgKeySize();
-    
-    /**
-     * Returns the average value size in bytes.
-     * @return the average value size
-     */
-    double avgValueSize();
-    
-    /**
-     * Frees any resources held by for statistics generation.
-     */
-    void close();
-  }
-  
-  /**
-   * Returns true if the bloom filter might contain the supplied key.  The 
-   * nature of the bloom filter is such that false positives are allowed, but
-   * false negatives cannot occur.
-   * 
-   * @param key the key to test
-   * @return true if the key might be present
-   * @throws IOException read error
-   */
-  boolean mightContain(byte[] key) throws IOException;
-
-  /**
-   * Returns the value associated with the given key.
-   * 
-   * @param key the key
-   * @return the value, or null if the key is not found
-   * @throws IOException read error
-   */
-  V read(byte[] key) throws IOException;
-
-  /**
-   * Iterates from the first key in the file to the requested key.
-   * @param to the ending key
-   * @param inclusive true if the ending key is included in the iteration
-   * @return the sorted iterator
-   * @throws IOException scan error
-   */
-  SortedIterator<V> head(byte[] to, boolean inclusive) throws IOException;
-  
-  /**
-   * Iterates from the requested key to the last key in the file.
-   * @param from the starting key
-   * @param inclusive true if the starting key should be included in the iteration
-   * @return the sorted iterator
-   * @throws IOException scan error
-   */
-  SortedIterator<V> tail(byte[] from, boolean inclusive) throws IOException;
-
-  /**
-   * Iterates over the entire contents of the sorted file.
-   * 
-   * @return the sorted iterator
-   * @throws IOException scan error
-   */
-  SortedIterator<V> scan() throws IOException;
-  
-  /**
-   * Scans the available keys and allows iteration over the interval [from, to) 
-   * where the starting key is included and the ending key is excluded from 
-   * the results.
-   * 
-   * @param from the start key
-   * @param to the end key
-   * @return the sorted iterator
-   * @throws IOException scan error
-   */
-  SortedIterator<V> scan(byte[] from, byte[] to) throws IOException;
-
-  /**
-   * Scans the keys and returns an iterator over the interval [equalTo, equalTo].
-   * 
-   * @param equalTo the key to match
-   * @return the sorted iterator
-   * @throws IOException scan error
-   */
-  SortedIterator<V> scan(byte[] equalTo) throws IOException;
-  
-  /**
-   * Scans the keys and allows iteration between the given keys.
-   * 
-   * @param from the start key
-   * @param fromInclusive true if the start key is included in the scan
-   * @param to the end key
-   * @param toInclusive true if the end key is included in the scan
-   * @return the sorted iterator
-   * @throws IOException scan error
-   */
-  SortedIterator<V> scan(byte[] from, boolean fromInclusive, 
-      byte[] to, boolean toInclusive) throws IOException;
-
-  /**
-   * Scans the keys and allows iteration between the given keys after applying
-   * the metadata filter and the order flag.  These parameters override values
-   * configured using <code>withAscending</code> or <code>withFilter</code>.
-   * 
-   * @param from the start key
-   * @param fromInclusive true if the start key is included in the scan
-   * @param to the end key
-   * @param toInclusive true if the end key is included in the scan
-   * @param ascending true if ascending
-   * @param filter filters data based on metadata values
-   * @return the sorted iterator
-   * @throws IOException scan error
-   */
-  SortedIterator<V> scan(
-      byte[] from, boolean fromInclusive, 
-      byte[] to, boolean toInclusive,
-      boolean ascending,
-      MetadataFilter filter) throws IOException;
-
-  /**
-   * Changes the iteration order of subsequent operations.
-   * 
-   * @param ascending true if ascending order (default)
-   * @return the reader
-   */
-  SortedReader<V> withAscending(boolean ascending);
-  
-  /**
-   * Applies a metadata filter to subsequent operations.
-   * 
-   * @param filter the filter to apply
-   * @return the reader
-   */
-  SortedReader<V> withFilter(MetadataFilter filter);
-  
-  /**
-   * Returns the comparator used for sorting keys.
-   * @return the comparator
-   */
-  SerializedComparator getComparator();
-  
-  /**
-   * Returns the statistics regarding the keys present in the sorted file.
-   * @return the statistics
-   * @throws IOException unable to retrieve statistics
-   */
-  SortedStatistics getStatistics() throws IOException;
-}
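
For reviewers skimming this removal: the javadoc above fully specifies the range-scan contract, so here is a compact, hypothetical sketch of how a caller used it. Only the SortedReader, SortedIterator and SortedStatistics calls come from the deleted interface; the method wrapper, the key literals and the stepping logic are illustrative assumptions.

    // Sketch only; assumes java.nio.ByteBuffer and java.io.IOException imports
    // plus visibility of the nested SortedIterator/SortedStatistics types.
    static void summarize(SortedReader<ByteBuffer> reader) throws IOException {
      byte[] start = "a".getBytes();
      byte[] end = "m".getBytes();

      if (reader.mightContain(start)) {        // bloom check: false positives possible, never false negatives
        ByteBuffer value = reader.read(start); // null when the key is absent
      }

      // Half-open range [start, end): start included, end excluded.
      SortedIterator<ByteBuffer> range = reader.scan(start, end);
      try {
        // Keys come back in the order defined by reader.getComparator(); the
        // actual stepping methods live on the inherited KeyValueIterator and
        // are not reproduced here.
      } finally {
        range.close();                         // SortedIterator.close() frees retained resources
      }

      SortedStatistics stats = reader.getStatistics();
      try {
        long keys = stats.keyCount();          // also firstKey()/lastKey()/avgKeySize()/avgValueSize()
      } finally {
        stats.close();
      }
    }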

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/TrackedReference.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/TrackedReference.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/TrackedReference.java
deleted file mode 100644
index 2934f07..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/TrackedReference.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.internal.cache.persistence.soplog;
-
-import java.util.Map.Entry;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * Tracks the usage of a reference.
- * 
- *
- * @param <T> the reference type
- */
-public final class TrackedReference<T> {
-  /** the referent */
-  private final T ref;
-  
-  /** the number of uses */
-  private final AtomicInteger uses;
-  
-  /** list of users using this reference. Mainly for debugging */
-  final ConcurrentHashMap<String, AtomicInteger> users;
-
-  /**
-   * Decrements the use count of each reference.
-   * @param refs the references to decrement
-   */
-  public static <T> void decrementAll(Iterable<TrackedReference<T>> refs) {
-    for (TrackedReference<?> tr : refs) {
-      tr.decrement();
-    }
-  }
-  
-  public TrackedReference(T ref) {
-    this.ref = ref;
-    uses = new AtomicInteger(0);
-    users = new ConcurrentHashMap<String, AtomicInteger>();
-  }
-  
-  /**
-   * Returns the referent.
-   * @return the referent
-   */
-  public final T get() {
-    return ref;
-  }
-  
-  /**
-   * Returns the current count.
-   * @return the current uses
-   */
-  public int uses() {
-    return uses.get();
-  }
-  
-  /**
-   * Returns true if the reference is in use.
-   * @return true if used
-   */
-  public boolean inUse() {
-    return uses() > 0;
-  }
-  
-  /**
-   * Increments the use count and returns the reference.
-   * @return the reference
-   */
-  public T getAndIncrement() {
-    increment();
-    return ref;
-  }
-  
-  /**
-   * Increments the use counter and returns the current count.
-   * @return the current uses
-   */
-  public int increment() {
-    return increment(null);
-  }
-  
-  /**
-   * Increments the use counter and returns the current count.
-   * @return the current uses
-   */
-  public int increment(String user) {
-    int val = uses.incrementAndGet();
-    if (user != null) {
-      AtomicInteger counter = users.get(user);
-      if (counter == null) {
-        counter = new AtomicInteger();
-        users.putIfAbsent(user, counter);
-        counter = users.get(user);
-      }
-      counter.incrementAndGet();
-    }
-    assert val >= 1;
-    
-    return val;
-  }
-  
-  /**
-   * Decrements the use counter and returns the current count.
-   * @return the current uses
-   */
-  public int decrement() {
-    return decrement(null);
-  }
-  
-  /**
-   * Decrements the use counter and returns the current count.
-   * @return the current uses
-   */
-  public int decrement(String user) {
-    int val = uses.decrementAndGet();
-    assert val >= 0;
-    if (user != null) {
-      AtomicInteger counter = users.get(user);
-      if (counter != null) {
-        counter.decrementAndGet();
-      }
-    }
-    
-    return val;
-  }
-  
-  @Override
-  public String toString() {
-    if (users != null) {
-      StringBuffer sb = new StringBuffer();
-      sb.append(ref.toString()).append(": ").append(uses());
-      for (Entry<String, AtomicInteger> user : users.entrySet()) {
-        sb.append(" ").append(user.getKey()).append(":").append(user.getValue().intValue());
-      }
-      return sb.toString();
-    }
-    return uses() + ": " + ref.toString();
-  }
-}
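
A brief, hypothetical illustration of the reference-counting pattern the class above provided; the Object referent and the "compaction" user tag are made up, and only the TrackedReference calls come from the deleted class.

    // Acquire: bump the use count, work with the referent, release in finally.
    TrackedReference<Object> tracked = new TrackedReference<Object>(new Object());
    Object referent = tracked.getAndIncrement();
    try {
      // ... work with the referent ...
    } finally {
      tracked.decrement();        // inUse() reports false once every user has released
    }

    // Tagged acquisition: the per-user counters recorded by increment(String)
    // and decrement(String) are included in toString(), which is what made
    // leaked references easier to spot while debugging.
    tracked.increment("compaction");
    try {
      // ...
    } finally {
      tracked.decrement("compaction");
    }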

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/BaseCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/BaseCommand.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/BaseCommand.java
index e6c07d9..ca7818a 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/BaseCommand.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/BaseCommand.java
@@ -1145,7 +1145,7 @@ public abstract class BaseCommand implements Command {
         VersionTagHolder versionHolder = new VersionTagHolder();
         ClientProxyMembershipID id = servConn == null ? null : servConn.getProxyID();
         // From Get70.getValueAndIsObject()
-        Object data = region.get(entryKey, null, true, true, true, id, versionHolder, true, false);
+        Object data = region.get(entryKey, null, true, true, true, id, versionHolder, true);
         VersionTag vt = versionHolder.getVersionTag();
 
         updateValues(values, entryKey, data, vt);
@@ -1252,7 +1252,7 @@ public abstract class BaseCommand implements Command {
         }
 
         ClientProxyMembershipID id = servConn == null ? null : servConn.getProxyID();
-        data = region.get(key, null, true, true, true, id, versionHolder, true, false);
+        data = region.get(key, null, true, true, true, id, versionHolder, true);
         versionTag = versionHolder.getVersionTag();
         updateValues(values, key, data, versionTag);
 
@@ -1345,7 +1345,7 @@ public abstract class BaseCommand implements Command {
       key = it.next();
       versionHolder = new VersionTagHolder();
 
-      Object value = region.get(key, null, true, true, true, requestingClient, versionHolder, true, false);
+      Object value = region.get(key, null, true, true, true, requestingClient, versionHolder, true);
       
       updateValues(values, key, value, versionHolder.getVersionTag());
 
@@ -1548,7 +1548,7 @@ public abstract class BaseCommand implements Command {
           ClientProxyMembershipID id = servConn == null ? null : servConn
               .getProxyID();
           data = region.get(key, null, true, true, true, id, versionHolder,
-              true, false);
+              true);
           versionTag = versionHolder.getVersionTag();
           updateValues(values, key, data, versionTag);
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/command/Get70.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/command/Get70.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/command/Get70.java
index 7898b3c..55047c7 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/command/Get70.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/command/Get70.java
@@ -24,7 +24,6 @@ import com.gemstone.gemfire.cache.client.internal.GetOp;
 import com.gemstone.gemfire.cache.operations.GetOperationContext;
 import com.gemstone.gemfire.cache.operations.internal.GetOperationContextImpl;
 import com.gemstone.gemfire.distributed.internal.DistributionStats;
-import com.gemstone.gemfire.internal.Assert;
 import com.gemstone.gemfire.internal.cache.CachedDeserializable;
 import com.gemstone.gemfire.internal.cache.EntryEventImpl;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
@@ -305,7 +304,7 @@ public class Get70 extends BaseCommand {
 //    } else {
       ClientProxyMembershipID id = servConn == null ? null : servConn.getProxyID();
       VersionTagHolder versionHolder = new VersionTagHolder();
-      data  = ((LocalRegion) region).get(key, callbackArg, true, true, true, id, versionHolder, true, true /*allowReadFromHDFS*/);
+      data  = ((LocalRegion) region).get(key, callbackArg, true, true, true, id, versionHolder, true);
 //    }
     versionTag = versionHolder.getVersionTag();
     

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/command/Request.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/command/Request.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/command/Request.java
index 2a617a8..69d54a1 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/command/Request.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/command/Request.java
@@ -242,7 +242,7 @@ public class Request extends BaseCommand {
 
     boolean isObject = true;
     ClientProxyMembershipID id = servConn == null ? null : servConn.getProxyID();
-    Object data  = ((LocalRegion) region).get(key, callbackArg, true, true, true, id, null, false, true/*allowReadFromHDFS*/);
+    Object data  = ((LocalRegion) region).get(key, callbackArg, true, true, true, id, null, false);
     
     // If the value in the VM is a CachedDeserializable,
     // get its value. If it is Token.REMOVED, Token.DESTROYED,

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/ClientTXRegionStub.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/ClientTXRegionStub.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/ClientTXRegionStub.java
index e896649..90522b2 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/ClientTXRegionStub.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/ClientTXRegionStub.java
@@ -67,8 +67,8 @@ public class ClientTXRegionStub implements TXRegionStub {
 
   
   public Object findObject(KeyInfo keyInfo, boolean isCreate,
-      boolean generateCallbacks, Object value, boolean preferCD,
-      ClientProxyMembershipID requestingClient, EntryEventImpl event, boolean allowReadFromHDFS) {
+                           boolean generateCallbacks, Object value, boolean preferCD,
+                           ClientProxyMembershipID requestingClient, EntryEventImpl event) {
     return proxy.get(keyInfo.getKey(), keyInfo.getCallbackArg(), event);
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/DistributedTXRegionStub.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/DistributedTXRegionStub.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/DistributedTXRegionStub.java
index 7c7df53..1637c4a 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/DistributedTXRegionStub.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/DistributedTXRegionStub.java
@@ -17,12 +17,10 @@
 package com.gemstone.gemfire.internal.cache.tx;
 
 import java.util.Collections;
-import java.util.Map;
 
 import com.gemstone.gemfire.cache.CacheException;
 import com.gemstone.gemfire.cache.EntryNotFoundException;
 import com.gemstone.gemfire.cache.RegionDestroyedException;
-import com.gemstone.gemfire.cache.RemoteTransactionException;
 import com.gemstone.gemfire.cache.TransactionDataNodeHasDepartedException;
 import com.gemstone.gemfire.cache.TransactionDataNotColocatedException;
 import com.gemstone.gemfire.cache.TransactionException;
@@ -32,7 +30,6 @@ import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedM
 import com.gemstone.gemfire.internal.cache.DistributedPutAllOperation;
 import com.gemstone.gemfire.internal.cache.DistributedRemoveAllOperation;
 import com.gemstone.gemfire.internal.cache.EntryEventImpl;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.KeyInfo;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionException;
@@ -54,7 +51,6 @@ import com.gemstone.gemfire.internal.cache.partitioned.RemoteSizeMessage;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ClientProxyMembershipID;
 import com.gemstone.gemfire.internal.cache.tier.sockets.VersionedObjectList;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.internal.util.concurrent.StoppableReentrantReadWriteLock;
 
 public class DistributedTXRegionStub extends AbstractPeerTXRegionStub {
   
@@ -159,9 +155,13 @@ public class DistributedTXRegionStub extends AbstractPeerTXRegionStub {
   }
 
   
-  public Object findObject(KeyInfo keyInfo, boolean isCreate,
-      boolean generateCallbacks, Object value, boolean preferCD,
-      ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean allowReadFromHDFS) {
+  public Object findObject(KeyInfo keyInfo,
+                           boolean isCreate,
+                           boolean generateCallbacks,
+                           Object value,
+                           boolean preferCD,
+                           ClientProxyMembershipID requestingClient,
+                           EntryEventImpl clientEvent) {
     Object retVal = null;
     final Object key = keyInfo.getKey();
     final Object callbackArgument = keyInfo.getCallbackArg();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/PartitionedTXRegionStub.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/PartitionedTXRegionStub.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/PartitionedTXRegionStub.java
index 6723646..01b1ed8 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/PartitionedTXRegionStub.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/PartitionedTXRegionStub.java
@@ -275,15 +275,15 @@ public class PartitionedTXRegionStub extends AbstractPeerTXRegionStub {
 
   
   public Object findObject(KeyInfo keyInfo, boolean isCreate,
-      boolean generateCallbacks, Object value, boolean peferCD,
-      ClientProxyMembershipID requestingClient,
-      EntryEventImpl clientEvent, boolean allowReadFromHDFS) {
+                           boolean generateCallbacks, Object value, boolean peferCD,
+                           ClientProxyMembershipID requestingClient,
+                           EntryEventImpl clientEvent) {
     Object retVal = null;
     final Object key = keyInfo.getKey();
     final Object callbackArgument = keyInfo.getCallbackArg();
     PartitionedRegion pr = (PartitionedRegion)region;
     try {
-      retVal = pr.getRemotely((InternalDistributedMember)state.getTarget(), keyInfo.getBucketId(), key, callbackArgument, peferCD, requestingClient, clientEvent, false, allowReadFromHDFS);
+      retVal = pr.getRemotely((InternalDistributedMember)state.getTarget(), keyInfo.getBucketId(), key, callbackArgument, peferCD, requestingClient, clientEvent, false);
     } catch (TransactionException e) {
       RuntimeException re = getTransactionException(keyInfo, e);
       re.initCause(e.getCause());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/TXRegionStub.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/TXRegionStub.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/TXRegionStub.java
index 482882f..f2859f1 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/TXRegionStub.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/TXRegionStub.java
@@ -42,8 +42,8 @@ public interface TXRegionStub {
   boolean containsValueForKey(KeyInfo keyInfo);
 
   Object findObject(KeyInfo keyInfo, boolean isCreate,
-      boolean generateCallbacks, Object value, boolean preferCD,
-      ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean allowReadFromHDFS);
+                    boolean generateCallbacks, Object value, boolean preferCD,
+                    ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent);
 
   Object getEntryForIterator(KeyInfo keyInfo, boolean allowTombstone);
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/AbstractGatewaySender.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/AbstractGatewaySender.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/AbstractGatewaySender.java
index 94524bd..fe09d03 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/AbstractGatewaySender.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/AbstractGatewaySender.java
@@ -157,8 +157,6 @@ public abstract class AbstractGatewaySender implements GatewaySender,
   
   protected boolean isBucketSorted;
   
-  protected boolean isHDFSQueue;
-  
   protected boolean isMetaQueue;
   
   private int parallelismForReplicatedRegion;
@@ -260,7 +258,6 @@ public abstract class AbstractGatewaySender implements GatewaySender,
     this.maxMemoryPerDispatcherQueue = this.queueMemory / this.dispatcherThreads;
     this.myDSId = InternalDistributedSystem.getAnyInstance().getDistributionManager().getDistributedSystemId();
     this.serialNumber = DistributionAdvisor.createSerialNumber();
-    this.isHDFSQueue = attrs.isHDFSQueue();
     this.isMetaQueue = attrs.isMetaQueue();
     if (!(this.cache instanceof CacheCreation)) {
       this.stopper = new Stopper(cache.getCancelCriterion());
@@ -269,8 +266,7 @@ public abstract class AbstractGatewaySender implements GatewaySender,
         this.statistics = new GatewaySenderStats(cache.getDistributedSystem(),
             id);
       }
-      if (!attrs.isHDFSQueue())
-        initializeEventIdIndex();
+      initializeEventIdIndex();
     }
     this.isBucketSorted = attrs.isBucketSorted();
   }
@@ -318,12 +314,10 @@ public abstract class AbstractGatewaySender implements GatewaySender,
             cache.getDistributedSystem(), AsyncEventQueueImpl
                 .getAsyncEventQueueIdFromSenderId(id));
       }
-      if (!attrs.isHDFSQueue())
-        initializeEventIdIndex();
+      initializeEventIdIndex();
     }
     this.isBucketSorted = attrs.isBucketSorted();
-    this.isHDFSQueue = attrs.isHDFSQueue();
-   
+
   }
   
   public GatewaySenderAdvisor getSenderAdvisor() {
@@ -482,10 +476,6 @@ public abstract class AbstractGatewaySender implements GatewaySender,
     return this.isBucketSorted;
   }
 
-  public boolean getIsHDFSQueue() {
-    return this.isHDFSQueue;
-  }
-  
   public boolean getIsMetaQueue() {
     return this.isMetaQueue;
   }
@@ -863,12 +853,6 @@ public abstract class AbstractGatewaySender implements GatewaySender,
       return;
     }
     
-    if (getIsHDFSQueue() && event.getOperation().isEviction()) {
-      if (logger.isDebugEnabled())
-        logger.debug("Eviction event not queued: " + event);
-      stats.incEventsNotQueued();
-      return;
-    }
     // this filter is defined by Asif and exists in the old wan too. The new wan
     // has another GatewayEventFilter. Do we need to get rid of this filter?
     // Cheetah is not considering this filter.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderAttributes.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderAttributes.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderAttributes.java
index 025616d..1cef940 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderAttributes.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderAttributes.java
@@ -30,7 +30,6 @@ import com.gemstone.gemfire.cache.wan.GatewayTransportFilter;
 public class GatewaySenderAttributes {
 
   public static final boolean DEFAULT_IS_BUCKETSORTED = true;
-  public static final boolean DEFAULT_IS_HDFSQUEUE = false;
   public static final boolean DEFAULT_IS_META_QUEUE = false;
 
 
@@ -82,7 +81,6 @@ public class GatewaySenderAttributes {
   
   public boolean isBucketSorted = GatewaySenderAttributes.DEFAULT_IS_BUCKETSORTED;
   
-  public boolean isHDFSQueue = GatewaySenderAttributes.DEFAULT_IS_HDFSQUEUE;
   public boolean isMetaQueue = GatewaySenderAttributes.DEFAULT_IS_META_QUEUE;
   
   public int getSocketBufferSize() {
@@ -191,9 +189,6 @@ public class GatewaySenderAttributes {
   public GatewayEventSubstitutionFilter getGatewayEventSubstitutionFilter() {
     return this.eventSubstitutionFilter;
   }
-  public boolean isHDFSQueue() {
-    return this.isHDFSQueue;
-  }
   public boolean isMetaQueue() {
     return this.isMetaQueue;
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderEventProcessor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderEventProcessor.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderEventProcessor.java
index b63c7cb..07a3be5 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderEventProcessor.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderEventProcessor.java
@@ -36,9 +36,6 @@ import com.gemstone.gemfire.InternalGemFireException;
 import com.gemstone.gemfire.cache.CacheException;
 import com.gemstone.gemfire.cache.EntryEvent;
 import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSGatewayEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSParallelGatewaySenderQueue;
 import com.gemstone.gemfire.cache.wan.GatewayQueueEvent;
 import com.gemstone.gemfire.internal.cache.EntryEventImpl;
 import com.gemstone.gemfire.internal.cache.EnumListenerEvent;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderQueue.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderQueue.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderQueue.java
index 8524ccf..f995ba4 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderQueue.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderQueue.java
@@ -22,8 +22,6 @@ package com.gemstone.gemfire.internal.cache.wan.parallel;
 import com.gemstone.gemfire.cache.CacheException;
 import com.gemstone.gemfire.cache.CacheListener;
 import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSGatewayEventImpl;
 import com.gemstone.gemfire.internal.cache.Conflatable;
 import com.gemstone.gemfire.internal.cache.DistributedRegion;
 import com.gemstone.gemfire.internal.cache.ForceReattemptException;
@@ -188,11 +186,6 @@ public class ConcurrentParallelGatewaySenderQueue implements RegionQueue {
    getPGSProcessor( bucketId).notifyEventProcessorIfRequired(bucketId);
   }
   
-  public HDFSBucketRegionQueue getBucketRegionQueue(PartitionedRegion region,
-    int bucketId) throws ForceReattemptException {
-	return getPGSProcessor(bucketId).getBucketRegionQueue(region, bucketId);
-  }
-  
   public void clear(PartitionedRegion pr, int bucketId) {
   	getPGSProcessor(bucketId).clear(pr, bucketId);
   }
@@ -207,11 +200,6 @@ public class ConcurrentParallelGatewaySenderQueue implements RegionQueue {
   	getPGSProcessor(bucketId).conflateEvent(conflatableObject, bucketId, tailKey);
   }
   
-  public HDFSGatewayEventImpl get(PartitionedRegion region, byte[] regionKey,
-      int bucketId) throws ForceReattemptException {
-    return getPGSProcessor(bucketId).get(region, regionKey, bucketId);
-  }
-  
   public void addShadowPartitionedRegionForUserRR(DistributedRegion userRegion) {
 	for(int i =0; i< processors.length; i++){
   	 processors[i].addShadowPartitionedRegionForUserRR(userRegion);;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderEventProcessor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderEventProcessor.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderEventProcessor.java
index 417ba13..11502af 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderEventProcessor.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderEventProcessor.java
@@ -28,9 +28,6 @@ import org.apache.logging.log4j.Logger;
 import com.gemstone.gemfire.cache.CacheException;
 import com.gemstone.gemfire.cache.EntryEvent;
 import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSGatewayEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSParallelGatewaySenderQueue;
 import com.gemstone.gemfire.cache.wan.GatewayQueueEvent;
 import com.gemstone.gemfire.internal.cache.Conflatable;
 import com.gemstone.gemfire.internal.cache.DistributedRegion;
@@ -104,10 +101,7 @@ public class ParallelGatewaySenderEventProcessor extends
     }
     
     ParallelGatewaySenderQueue queue;
-    if (sender.getIsHDFSQueue())
-      queue = new HDFSParallelGatewaySenderQueue(this.sender, targetRs, this.index, this.nDispatcher);
-    else
-      queue = new ParallelGatewaySenderQueue(this.sender, targetRs, this.index, this.nDispatcher);
+    queue = new ParallelGatewaySenderQueue(this.sender, targetRs, this.index, this.nDispatcher);
     
     queue.start();
     this.queue = queue;
@@ -145,12 +139,8 @@ public class ParallelGatewaySenderEventProcessor extends
 
       // while merging 42004, kept substituteValue as it is(it is barry's
       // change 42466). bucketID is merged with eventID.getBucketID
-	 if (!sender.getIsHDFSQueue())
       gatewayQueueEvent = new GatewaySenderEventImpl(operation, event,
           substituteValue, true, eventID.getBucketID());
-    else
-      gatewayQueueEvent = new HDFSGatewayEventImpl(operation,
-          event, substituteValue, true, eventID.getBucketID());
 
       if (getSender().beforeEnqueue(gatewayQueueEvent)) {
         long start = getSender().getStatistics().startTime();
@@ -208,16 +198,6 @@ public class ParallelGatewaySenderEventProcessor extends
   	((ParallelGatewaySenderQueue)this.queue).conflateEvent(conflatableObject, bucketId, tailKey);
   }
   
-  public HDFSGatewayEventImpl get(PartitionedRegion region, byte[] regionKey,
-    int bucketId) throws ForceReattemptException {
-    return ((HDFSParallelGatewaySenderQueue)this.queue).get(region, regionKey, bucketId);
-  }
-  
-  public HDFSBucketRegionQueue getBucketRegionQueue(PartitionedRegion region,
-    int bucketId) throws ForceReattemptException {
-  	return ((HDFSParallelGatewaySenderQueue)this.queue).getBucketRegionQueue(region, bucketId);
-  }
-  
   public void addShadowPartitionedRegionForUserPR(PartitionedRegion pr) {
 	// TODO Auto-generated method stub
 	((ParallelGatewaySenderQueue)this.queue).addShadowPartitionedRegionForUserPR(pr);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderQueue.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderQueue.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderQueue.java
index b0b1a32..46ff263 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderQueue.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderQueue.java
@@ -492,7 +492,7 @@ public class ParallelGatewaySenderQueue implements RegionQueue {
       if (this.userRegionNameToshadowPRMap.containsKey(regionName))
         return;
       
-      if(!isUsedForHDFS() && userPR.getDataPolicy().withPersistence() && !sender.isPersistenceEnabled()){
+      if(userPR.getDataPolicy().withPersistence() && !sender.isPersistenceEnabled()){
         throw new GatewaySenderException(
             LocalizedStrings.ParallelGatewaySenderQueue_NON_PERSISTENT_GATEWAY_SENDER_0_CAN_NOT_BE_ATTACHED_TO_PERSISTENT_REGION_1
                 .toLocalizedString(new Object[] { this.sender.getId(),
@@ -552,7 +552,7 @@ public class ParallelGatewaySenderQueue implements RegionQueue {
         }
 
         ParallelGatewaySenderQueueMetaRegion meta = metaRegionFactory.newMetataRegion(cache,
-            prQName, ra, sender, isUsedForHDFS());
+            prQName, ra, sender);
 
         try {
           prQ = (PartitionedRegion)cache
@@ -630,10 +630,6 @@ public class ParallelGatewaySenderQueue implements RegionQueue {
       bucketRegion.clear();
     }
   }
-  protected boolean isUsedForHDFS()
-  {
-    return false;
-  }
   protected void afterRegionAdd (PartitionedRegion userPR) {
 
   }
@@ -1857,18 +1853,12 @@ public class ParallelGatewaySenderQueue implements RegionQueue {
     public ParallelGatewaySenderQueueMetaRegion(String regionName,
         RegionAttributes attrs, LocalRegion parentRegion,
         GemFireCacheImpl cache, AbstractGatewaySender pgSender) {
-      this( regionName, attrs, parentRegion, cache, pgSender, false);
-    }
-    public ParallelGatewaySenderQueueMetaRegion(String regionName,
-        RegionAttributes attrs, LocalRegion parentRegion,
-        GemFireCacheImpl cache, AbstractGatewaySender pgSender, boolean isUsedForHDFS) {
       super(regionName, attrs, parentRegion, cache,
           new InternalRegionArguments().setDestroyLockFlag(true)
               .setRecreateFlag(false).setSnapshotInputStream(null)
               .setImageTarget(null)
               .setIsUsedForParallelGatewaySenderQueue(true)
-              .setParallelGatewaySender((AbstractGatewaySender)pgSender)
-              .setIsUsedForHDFSParallelGatewaySenderQueue(isUsedForHDFS));
+              .setParallelGatewaySender((AbstractGatewaySender)pgSender));
       this.sender = (AbstractGatewaySender)pgSender;
       
     }
@@ -1925,9 +1915,9 @@ public class ParallelGatewaySenderQueue implements RegionQueue {
   
   static class MetaRegionFactory {
     ParallelGatewaySenderQueueMetaRegion newMetataRegion(
-        GemFireCacheImpl cache, final String prQName, final RegionAttributes ra, AbstractGatewaySender sender, boolean isUsedForHDFS) {
+        GemFireCacheImpl cache, final String prQName, final RegionAttributes ra, AbstractGatewaySender sender) {
       ParallelGatewaySenderQueueMetaRegion meta = new ParallelGatewaySenderQueueMetaRegion(
-          prQName, ra, null, cache, sender, isUsedForHDFS);
+          prQName, ra, null, cache, sender);
       return meta;
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/AsyncEventQueueCreation.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/AsyncEventQueueCreation.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/AsyncEventQueueCreation.java
index 77f9596..0015665 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/AsyncEventQueueCreation.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/AsyncEventQueueCreation.java
@@ -41,7 +41,6 @@ public class AsyncEventQueueCreation implements AsyncEventQueue {
   private int maxQueueMemory = 0;
   private boolean isParallel = false;
   private boolean isBucketSorted = false;
-  private boolean isHDFSQueue = false;
   private int dispatcherThreads = 1;
   private OrderPolicy orderPolicy = OrderPolicy.KEY;
   
@@ -62,7 +61,6 @@ public class AsyncEventQueueCreation implements AsyncEventQueue {
     this.orderPolicy = senderAttrs.policy;
     this.asyncEventListener = eventListener;
     this.isBucketSorted = senderAttrs.isBucketSorted; 
-    this.isHDFSQueue = senderAttrs.isHDFSQueue;
     this.gatewayEventSubstitutionFilter = senderAttrs.eventSubstitutionFilter;
   }
   
@@ -213,11 +211,4 @@ public class AsyncEventQueueCreation implements AsyncEventQueue {
   public void setBucketSorted(boolean isBucketSorted) {
     this.isBucketSorted = isBucketSorted;
   }
-  public boolean isHDFSQueue() {
-    return this.isHDFSQueue;
-  }
-  
-  public void setIsHDFSQueue(boolean isHDFSQueue) {
-    this.isHDFSQueue = isHDFSQueue;
-  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
index 915bde9..d52d05e 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
@@ -91,11 +91,6 @@ import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.i18n.LogWriterI18n;
 import com.gemstone.gemfire.internal.Assert;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSIntegrationUtil;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreCreation;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
 import com.gemstone.gemfire.internal.cache.CacheConfig;
 import com.gemstone.gemfire.internal.cache.CacheServerLauncher;
@@ -198,8 +193,7 @@ public class CacheCreation implements InternalCache {
    * This is important for unit testing 44914.
    */
   protected final Map diskStores = new LinkedHashMap();
-  protected final Map hdfsStores = new LinkedHashMap();
-  
+
   private final List<File> backups = new ArrayList<File>();
 
   private CacheConfig cacheConfig = new CacheConfig();
@@ -513,13 +507,6 @@ public class CacheCreation implements InternalCache {
       }
     }
 
-    for(Iterator iter = this.hdfsStores.entrySet().iterator(); iter.hasNext(); ) {
-      Entry entry = (Entry) iter.next();
-      HDFSStoreCreation hdfsStoreCreation = (HDFSStoreCreation) entry.getValue();
-      HDFSStoreFactory storefactory = cache.createHDFSStoreFactory(hdfsStoreCreation);
-      storefactory.create((String) entry.getKey());
-    }
-
     cache.initializePdxRegistry();
 
     
@@ -530,19 +517,6 @@ public class CacheCreation implements InternalCache {
         (RegionAttributesCreation) getRegionAttributes(id);
       creation.inheritAttributes(cache, false);
 
-      // TODO: HDFS: HDFS store/queue will be mapped against region path and not
-      // the attribute id; don't really understand what this is trying to do
-      if (creation.getHDFSStoreName() != null)
-      {
-        HDFSStoreImpl store = cache.findHDFSStore(creation.getHDFSStoreName());
-        if(store == null) {
-          HDFSIntegrationUtil.createDefaultAsyncQueueForHDFS((Cache)cache, creation.getHDFSWriteOnly(), id);
-        }
-      }
-      if (creation.getHDFSStoreName() != null && creation.getPartitionAttributes().getColocatedWith() == null) {
-        creation.addAsyncEventQueueId(HDFSStoreFactoryImpl.getEventQueueName(id));
-      }
-      
       RegionAttributes attrs;
       // Don't let the RegionAttributesCreation escape to the user
       AttributesFactory factory = new AttributesFactory(creation);
@@ -1421,17 +1395,6 @@ public class CacheCreation implements InternalCache {
   }
   
   @Override
-  public Collection<HDFSStoreImpl> getHDFSStores() {
-    return this.hdfsStores.values();
-  }
-
-  public void addHDFSStore(String name, HDFSStoreCreation hs) {
-    this.hdfsStores.put(name, hs);
-  }
-
-  
-
-  @Override
   public DistributedMember getMyId() {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXml.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXml.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXml.java
index c6b0509..aa7d49a 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXml.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXml.java
@@ -487,8 +487,6 @@ public abstract class CacheXml implements EntityResolver2, ErrorHandler {
   protected static final String PERSISTENT_REPLICATE_DP = "persistent-replicate";
   protected static final String PARTITION_DP = "partition";
   protected static final String PERSISTENT_PARTITION_DP = "persistent-partition";
-  protected static final String HDFS_PARTITION_DP = "hdfs-partition";
-  protected static final String HDFS_PERSISTENT_PARTITION_DP = "hdfs-persistent-partition";
 
   /** The name of the <code>keep-alive-timeout</code> attribute */
   protected static final String KEEP_ALIVE_TIMEOUT = "keep-alive-timeout";
@@ -765,35 +763,6 @@ public abstract class CacheXml implements EntityResolver2, ErrorHandler {
   public static final String ASYNC_EVENT_QUEUE = "async-event-queue";
   protected static final String ASYNC_EVENT_QUEUE_IDS = "async-event-queue-ids";
   
-  protected static final String HDFS_EVENT_QUEUE = "hdfs-event-queue";
-  protected static final String HDFS_STORE_NAME = "hdfs-store-name";
-  public static final String HDFS_STORE = "hdfs-store";
-  protected static final String HDFS_HOME_DIR = "home-dir";
-  protected static final String HDFS_READ_CACHE_SIZE = "read-cache-size";
-  protected static final String HDFS_MAX_MEMORY = "max-memory";
-  protected static final String HDFS_BATCH_SIZE = "batch-size";
-  protected static final String HDFS_BATCH_INTERVAL = "batch-interval";
-  protected static final String HDFS_DISPATCHER_THREADS = "dispatcher-threads";
-  protected static final String HDFS_BUFFER_PERSISTENT = "buffer-persistent";
-  protected static final String HDFS_SYNCHRONOUS_DISK_WRITE = "synchronous-disk-write";
-  protected static final String HDFS_DISK_STORE = "disk-store";
-  protected static final String HDFS_MAX_WRITE_ONLY_FILE_SIZE = "max-write-only-file-size";
-  public static final String HDFS_WRITE_ONLY_FILE_ROLLOVER_INTERVAL = "write-only-file-rollover-interval";
-  
-  protected static final String HDFS_NAMENODE_URL = "namenode-url";
-  protected static final String HDFS_CLIENT_CONFIG_FILE = "hdfs-client-config-file";
-  public static final String HDFS_PURGE_INTERVAL = "purge-interval";
-  public static final String HDFS_MAJOR_COMPACTION = "major-compaction";
-  public static final String HDFS_MAJOR_COMPACTION_INTERVAL = "major-compaction-interval";
-  public static final String HDFS_MAJOR_COMPACTION_THREADS = "major-compaction-threads";
-  public static final String HDFS_MINOR_COMPACTION = "minor-compaction";
-  public static final String HDFS_MINOR_COMPACTION_THREADS = "minor-compaction-threads";   
-  
-  public static final String HDFS_TIME_FOR_FILE_ROLLOVER = "file-rollover-time-secs";
-  
-  protected static final String HDFS_WRITE_ONLY = "hdfs-write-only";
-  protected static final String HDFS_QUEUE_BATCH_SIZE = "batch-size-mb";
-  
   /** The name of the <code>compressor</code> attribute */
   protected static final String COMPRESSOR = "compressor";
   /** The name of the <code>off-heap</code> attribute


[30/63] [abbrv] incubator-geode git commit: GEODE-1240: Changed the test to use Awaitility with a maximum timeout period. This might work better than the time-sensitive conditionals that this test uses.

Posted by kl...@apache.org.
GEODE-1240: Changed the test to use Awaitility with a maximum timeout period. This might work better than the time-sensitive conditionals that this test uses.
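
A minimal sketch of the wait pattern this commit adopts, outside the test itself; the class, field and method names here are illustrative, and the Awaitility import path is an assumption (older releases shipped it as com.jayway.awaitility, later ones as org.awaitility).

    import java.util.concurrent.TimeUnit;
    import com.jayway.awaitility.Awaitility;  // assumed package; use org.awaitility on newer versions

    public class AwaitilitySketch {
      static volatile int observedValue;      // hypothetical state the condition watches

      static void waitForValueToBe(final int target) {
        // Poll every 100 ms after an initial 100 ms delay, and fail if the
        // condition has not held within the 300 second ceiling, matching the
        // parameters the diff below passes to Awaitility.
        Awaitility.await()
            .pollInterval(100, TimeUnit.MILLISECONDS)
            .pollDelay(100, TimeUnit.MILLISECONDS)
            .timeout(300, TimeUnit.SECONDS)
            .until(() -> observedValue == target);
      }
    }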


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/7e2ca6ca
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/7e2ca6ca
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/7e2ca6ca

Branch: refs/heads/feature/GEODE-1276
Commit: 7e2ca6ca8fb5d9768503b30bf71362513c6a5212
Parents: 9fbf219
Author: Udo Kohlmeyer <uk...@pivotal.io>
Authored: Thu Apr 28 11:37:15 2016 +1000
Committer: Udo Kohlmeyer <uk...@pivotal.io>
Committed: Thu Apr 28 11:37:15 2016 +1000

----------------------------------------------------------------------
 .../cache30/ClientMembershipDUnitTest.java      | 34 ++++++++++----------
 1 file changed, 17 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7e2ca6ca/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java
index 9036e5e..f8e036b 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java
@@ -81,23 +81,23 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
 
   private void waitForAcceptsInProgressToBe(final int target)
       throws Exception {
-    WaitCriterion ev = new WaitCriterion() {
-      String excuse;
-
-      public boolean done() {
-        int actual = getAcceptsInProgress();
-        if (actual == getAcceptsInProgress()) {
-          return true;
-        }
-        excuse = "accepts in progress (" + actual + ") never became " + target;
-        return false;
-      }
-
-      public String description() {
-        return excuse;
-      }
-    };
-    Awaitility.await().pollInterval(200, TimeUnit.MILLISECONDS).atMost(60, TimeUnit.SECONDS)
+//    WaitCriterion ev = new WaitCriterion() {
+//      String excuse;
+//
+//      public boolean done() {
+//        int actual = getAcceptsInProgress();
+//        if (actual == getAcceptsInProgress()) {
+//          return true;
+//        }
+//        excuse = "accepts in progress (" + actual + ") never became " + target;
+//        return false;
+//      }
+//
+//      public String description() {
+//        return excuse;
+//      }
+//    };
+    Awaitility.await().pollInterval(100, TimeUnit.MILLISECONDS).pollDelay(100,TimeUnit.MILLISECONDS).timeout(300, TimeUnit.SECONDS)
         .until(() -> {
           int actual = getAcceptsInProgress();
           if (actual == getAcceptsInProgress()) {


[38/63] [abbrv] incubator-geode git commit: GEODE-17: enhance the GeodeSecurityUtil and review changes

Posted by kl...@apache.org.
GEODE-17: enhance the GeodeSecurityUtil and review changes

* allow operations that do not require any authorization (see the sketch after this list)
* put/get, import/export and locate entry will check region access
* rename EnvironmentVariablesHandlerInterceptor to LoginHandlerInterceptor
* rename ShiroUtil to GeodeSecurityUtil
* reformat code and review changes
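
As a sketch of how the per-method annotations in the diffs below are meant to read, an empty @ResourceOperation() exempts a single operation from the requirement declared at the interface level. The MBean below is made up purely for illustration; only the annotation usage mirrors the commit.

    import static com.gemstone.gemfire.cache.operations.OperationContext.*;

    import com.gemstone.gemfire.management.internal.security.ResourceOperation;

    // Hypothetical MBean: the type-level annotation demands CLUSTER:READ for
    // every operation unless a method declares its own requirement.
    @ResourceOperation(resource = Resource.CLUSTER, operation = OperationCode.READ)
    public interface ExampleMXBean {

      /** The empty annotation marks this method as needing no authorization at all. */
      @ResourceOperation()
      int getDistributedSystemId();

      /** No method-level annotation, so the interface-level CLUSTER:READ check applies. */
      String fetchClusterDetails();
    }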


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/7c38f0d8
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/7c38f0d8
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/7c38f0d8

Branch: refs/heads/feature/GEODE-1276
Commit: 7c38f0d8811874509ae93dbd9a4a9f7b05ce0d01
Parents: 0c0825a
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Tue Apr 26 07:30:27 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Fri Apr 29 08:29:28 2016 -0700

----------------------------------------------------------------------
 .../cache/operations/OperationContext.java      |  13 +-
 .../management/DistributedSystemMXBean.java     |   6 +-
 .../gemfire/management/MemberMXBean.java        |  10 +-
 .../CreateAlterDestroyRegionCommands.java       |  12 +-
 .../internal/cli/commands/DataCommands.java     |  22 +-
 .../internal/cli/commands/RegionCommands.java   |   3 +-
 .../internal/cli/remote/CommandProcessor.java   |   7 +-
 .../internal/security/AccessControlMBean.java   |   4 +-
 .../internal/security/MBeanServerWrapper.java   |  23 +-
 .../internal/security/ResourceOperation.java    |  13 +-
 .../security/ResourceOperationContext.java      |  45 +-
 .../controllers/AbstractCommandsController.java |  17 +-
 .../EnvironmentVariablesHandlerInterceptor.java | 121 ---
 .../support/LoginHandlerInterceptor.java        | 122 +++
 .../web/shell/RestHttpOperationInvoker.java     |   4 -
 .../gemfire/security/CustomAuthRealm.java       |   7 +-
 .../gemfire/security/GeodeSecurityUtil.java     | 163 ++++
 .../gemfire/security/JMXShiroAuthenticator.java |   4 +-
 .../gemstone/gemfire/security/ShiroUtil.java    | 116 ---
 .../CacheServerMBeanAuthorizationJUnitTest.java |  26 +-
 .../CacheServerMBeanShiroJUnitTest.java         |  36 +-
 .../security/CliCommandsSecurityTest.java       |  17 +-
 .../security/DataCommandsSecurityTest.java      |   7 +-
 .../DiskStoreMXBeanSecurityJUnitTest.java       |  20 +-
 .../GatewayReceiverMBeanSecurityTest.java       |  17 +-
 .../GatewaySenderMBeanSecurityTest.java         |  24 +-
 .../GeodeSecurityUtilCustomRealmJUnitTest.java  |  52 ++
 .../GeodeSecurityUtilWithIniFileJUnitTest.java  | 147 +++
 .../security/GfshCommandsSecurityTest.java      |   6 +-
 .../internal/security/JSONAuthorization.java    |  77 +-
 .../LockServiceMBeanAuthorizationJUnitTest.java |  10 +-
 .../ManagerMBeanAuthorizationJUnitTest.java     |   4 +-
 .../security/MemberMBeanSecurityJUnitTest.java  |  30 +-
 .../ResourceOperationContextJUnitTest.java      |  88 ++
 .../internal/security/TestCommand.java          | 178 ++--
 .../management/internal/security/auth3.json     |   2 +-
 .../internal/security/cacheServer.json          |  10 +-
 .../management/internal/security/shiro-ini.json |  87 ++
 .../internal/security/testInheritRole.json      |  40 -
 .../security/testSimpleUserAndRole.json         |  18 -
 .../testUserAndRoleRegionServerGroup.json       |  20 -
 .../internal/security/testUserMultipleRole.json |  26 -
 geode-core/src/test/resources/shiro.ini         |  13 +-
 .../junit/rules/DescribedExternalResource.java  |  11 +-
 .../security/GemFireAuthentication.java         | 114 +--
 .../security/GemFireAuthenticationProvider.java |   9 +-
 .../pulse/internal/security/LogoutHandler.java  |  12 +-
 .../tools/pulse/tests/PulseAbstractTest.java    | 904 ++++++++++---------
 .../gemfire/tools/pulse/tests/Region.java       |   2 +-
 .../src/main/webapp/WEB-INF/gemfire-servlet.xml |   2 +-
 ...entVariablesHandlerInterceptorJUnitTest.java | 272 ------
 .../LoginHandlerInterceptorJUnitTest.java       | 274 ++++++
 52 files changed, 1758 insertions(+), 1509 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/main/java/com/gemstone/gemfire/cache/operations/OperationContext.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/operations/OperationContext.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/operations/OperationContext.java
index dd290c5..b632edb 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/operations/OperationContext.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/operations/OperationContext.java
@@ -33,6 +33,7 @@ import org.apache.shiro.authz.permission.WildcardPermission;
 public abstract class OperationContext extends WildcardPermission{
 
   public enum Resource {
+    NULL,
     CLUSTER,
     DATA
   };
@@ -76,6 +77,7 @@ public abstract class OperationContext extends WildcardPermission{
     EXECUTE_FUNCTION,
     @Deprecated
     GET_DURABLE_CQS,
+    NULL,
     MANAGE,
     WRITE,
     READ;
@@ -298,11 +300,15 @@ public abstract class OperationContext extends WildcardPermission{
   public abstract OperationCode getOperationCode();
 
   public Resource getResource(){
-    return Resource.DATA;
+    return Resource.NULL;
   }
 
+  /**
+   * @return the region name this operation applies to; the default implementation returns "NULL"
+   */
   public String getRegionName(){
-    return null;
+    return "NULL";
   }
 
   /**
@@ -356,7 +362,4 @@ public abstract class OperationContext extends WildcardPermission{
         || opCode.isRegionDestroy() || opCode.isRegionClear());
   }
 
-  public String toString(){
-    return getResource() + ":"+ getOperationCode();
-  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedSystemMXBean.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedSystemMXBean.java b/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedSystemMXBean.java
index a27d92f..6dac6af 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedSystemMXBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedSystemMXBean.java
@@ -72,14 +72,16 @@ import com.gemstone.gemfire.management.internal.security.ResourceOperation;
  * @since 7.0
  *
  */
-//@ResourceOperation(resource = Resource.CLUSTER, operation = OperationCode.READ)
+@ResourceOperation(resource = Resource.CLUSTER, operation = OperationCode.READ)
 public interface DistributedSystemMXBean {
 
   /**
    * Returns the ID of this DistributedSystem.
+   * allow anyone to access this method
    *
    * @return The DistributedSystem ID or -1 if not set.
    */
+  @ResourceOperation()
   public int getDistributedSystemId();
 
   /**
@@ -351,7 +353,9 @@ public interface DistributedSystemMXBean {
   /**
    * Returns the object name for a {@link MemberMXBean} used to access
    * this distributed member.
+   * allow anyone to access this method
    */
+  @ResourceOperation()
   public ObjectName getMemberObjectName();
 
   /**

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/main/java/com/gemstone/gemfire/management/MemberMXBean.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/MemberMXBean.java b/geode-core/src/main/java/com/gemstone/gemfire/management/MemberMXBean.java
index 5f656a9..c5d9933 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/MemberMXBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/MemberMXBean.java
@@ -16,13 +16,12 @@
  */
 package com.gemstone.gemfire.management;
 
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.management.internal.security.ResourceOperation;
+import static com.gemstone.gemfire.cache.operations.OperationContext.*;
 
 import java.util.Map;
 
-import static com.gemstone.gemfire.cache.operations.OperationContext.OperationCode;
-import static com.gemstone.gemfire.cache.operations.OperationContext.Resource;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.management.internal.security.ResourceOperation;
 
 /**
  * MBean that provides access to information and management functionality for a
@@ -199,6 +198,7 @@ public interface MemberMXBean {
    * 
    * @return Result of the execution in JSON format.
    */
+  @ResourceOperation()
   String processCommand(String commandString);
   
   /**
@@ -210,6 +210,7 @@ public interface MemberMXBean {
    *          Environmental properties to use during command execution.
    * @return Result of the execution in JSON format.
    */
+  @ResourceOperation()
   String processCommand(String commandString, Map<String, String> env);
   
   /**
@@ -223,6 +224,7 @@ public interface MemberMXBean {
    *          Binary data specific to the command being executed.
    * @return Result of the execution in JSON format.
    */
+  @ResourceOperation()
   String processCommand(String commandString, Map<String, String> env, Byte[][] binaryData);
 
   /**

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
index cdbd3db..06c096f 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
@@ -79,7 +79,7 @@ import com.gemstone.gemfire.management.internal.cli.util.RegionPath;
 import com.gemstone.gemfire.management.internal.configuration.SharedConfigurationWriter;
 import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
 import com.gemstone.gemfire.management.internal.security.ResourceOperation;
-import com.gemstone.gemfire.security.ShiroUtil;
+
 import org.springframework.shell.core.annotation.CliAvailabilityIndicator;
 import org.springframework.shell.core.annotation.CliCommand;
 import org.springframework.shell.core.annotation.CliOption;
@@ -437,7 +437,7 @@ public class CreateAlterDestroyRegionCommands extends AbstractCommandsSupport {
   
   @CliCommand (value = CliStrings.ALTER_REGION, help = CliStrings.ALTER_REGION__HELP)
   @CliMetaData (relatedTopic = CliStrings.TOPIC_GEMFIRE_REGION, writesToSharedConfiguration = true)
-  @ResourceOperation(resource = Resource.DATA, operation = OperationCode.MANAGE)
+  @ResourceOperation(resource=Resource.DATA, operation = OperationCode.MANAGE)
   public Result alterRegion(
       @CliOption (key = CliStrings.ALTER_REGION__REGION,
                   mandatory = true,
@@ -528,9 +528,6 @@ public class CreateAlterDestroyRegionCommands extends AbstractCommandsSupport {
                   specifiedDefaultValue = "0",
                   help = CliStrings.ALTER_REGION__EVICTIONMAX__HELP)
       Integer evictionMax) {
-
-    ShiroUtil.authorize("DATA", "MANAGE", regionPath);
-
     Result result = null;
     XmlEntity xmlEntity = null;
 
@@ -999,16 +996,13 @@ public class CreateAlterDestroyRegionCommands extends AbstractCommandsSupport {
 
   @CliCommand(value = { CliStrings.DESTROY_REGION }, help = CliStrings.DESTROY_REGION__HELP)
   @CliMetaData(shellOnly = false, relatedTopic = CliStrings.TOPIC_GEMFIRE_REGION, writesToSharedConfiguration = true)
-  @ResourceOperation(resource = Resource.DATA, operation = OperationCode.MANAGE)
+  @ResourceOperation(resource=Resource.DATA, operation = OperationCode.MANAGE)
   public Result destroyRegion(
       @CliOption(key = CliStrings.DESTROY_REGION__REGION,
           optionContext = ConverterHint.REGIONPATH,
           mandatory = true,
           help = CliStrings.DESTROY_REGION__REGION__HELP)
       String regionPath) {
-
-    ShiroUtil.authorize("DATA", "MANAGE", regionPath);
-
     if (regionPath == null) {
       return ResultBuilder.createInfoResult(CliStrings.DESTROY_REGION__MSG__SPECIFY_REGIONPATH_TO_DESTROY);
     }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/DataCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/DataCommands.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/DataCommands.java
index c1c04a3..61803fe 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/DataCommands.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/DataCommands.java
@@ -73,7 +73,8 @@ import com.gemstone.gemfire.management.internal.cli.result.ResultBuilder;
 import com.gemstone.gemfire.management.internal.cli.result.TabularResultData;
 import com.gemstone.gemfire.management.internal.cli.shell.Gfsh;
 import com.gemstone.gemfire.management.internal.security.ResourceOperation;
-import com.gemstone.gemfire.security.ShiroUtil;
+import com.gemstone.gemfire.security.GeodeSecurityUtil;
+
 import org.springframework.shell.core.CommandMarker;
 import org.springframework.shell.core.annotation.CliAvailabilityIndicator;
 import org.springframework.shell.core.annotation.CliCommand;
@@ -834,13 +835,12 @@ public class DataCommands implements CommandMarker {
   @CliCommand(value = CliStrings.EXPORT_DATA, help = CliStrings.EXPORT_DATA__HELP)
   @CliMetaData(relatedTopic = { CliStrings.TOPIC_GEMFIRE_DATA,
       CliStrings.TOPIC_GEMFIRE_REGION })
-  @ResourceOperation(resource = Resource.DATA, operation = OperationCode.READ)
   public Result exportData(
       @CliOption(key = CliStrings.EXPORT_DATA__REGION, mandatory = true, optionContext = ConverterHint.REGIONPATH, help = CliStrings.EXPORT_DATA__REGION__HELP) String regionName,
       @CliOption(key = CliStrings.EXPORT_DATA__FILE, unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE, mandatory = true, help = CliStrings.EXPORT_DATA__FILE__HELP) String filePath,
       @CliOption(key = CliStrings.EXPORT_DATA__MEMBER, unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE, optionContext = ConverterHint.MEMBERIDNAME, mandatory = true, help = CliStrings.EXPORT_DATA__MEMBER__HELP) String memberNameOrId) {
 
-    ShiroUtil.authorize("DATA", "READ", regionName);
+    GeodeSecurityUtil.authorizeRegionRead(regionName);
     final Cache cache = CacheFactory.getAnyInstance();
     final DistributedMember targetMember = CliUtil
         .getDistributedMemberByNameOrId(memberNameOrId);
@@ -891,13 +891,12 @@ public class DataCommands implements CommandMarker {
   @CliCommand(value = CliStrings.IMPORT_DATA, help = CliStrings.IMPORT_DATA__HELP)
   @CliMetaData(relatedTopic = { CliStrings.TOPIC_GEMFIRE_DATA,
       CliStrings.TOPIC_GEMFIRE_REGION })
-  @ResourceOperation(resource = Resource.DATA, operation = OperationCode.WRITE)
   public Result importData(
       @CliOption(key = CliStrings.IMPORT_DATA__REGION, optionContext = ConverterHint.REGIONPATH, mandatory = true, help = CliStrings.IMPORT_DATA__REGION__HELP) String regionName,
       @CliOption(key = CliStrings.IMPORT_DATA__FILE, mandatory = true, unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE, help = CliStrings.IMPORT_DATA__FILE__HELP) String filePath,
       @CliOption(key = CliStrings.IMPORT_DATA__MEMBER, mandatory = true, unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE, optionContext = ConverterHint.MEMBERIDNAME, help = CliStrings.IMPORT_DATA__MEMBER__HELP) String memberNameOrId) {
 
-    ShiroUtil.authorize("DATA", "WRITE", regionName);
+    GeodeSecurityUtil.authorizeRegionWrite(regionName);
 
     Result result = null;
 
@@ -949,7 +948,6 @@ public class DataCommands implements CommandMarker {
   @CliMetaData(shellOnly = false, relatedTopic = {
       CliStrings.TOPIC_GEMFIRE_DATA, CliStrings.TOPIC_GEMFIRE_REGION })
   @CliCommand(value = { CliStrings.PUT }, help = CliStrings.PUT__HELP)
-  @ResourceOperation(resource = Resource.DATA, operation = OperationCode.WRITE)
   public Result put(
       @CliOption(key = { CliStrings.PUT__KEY }, mandatory = true, help = CliStrings.PUT__KEY__HELP) String key,
       @CliOption(key = { CliStrings.PUT__VALUE }, mandatory = true, help = CliStrings.PUT__VALUE__HELP) String value,
@@ -958,7 +956,7 @@ public class DataCommands implements CommandMarker {
       @CliOption(key = { CliStrings.PUT__VALUEKLASS }, help = CliStrings.PUT__VALUEKLASS__HELP) String valueClass,
       @CliOption(key = { CliStrings.PUT__PUTIFABSENT }, help = CliStrings.PUT__PUTIFABSENT__HELP, unspecifiedDefaultValue = "false") boolean putIfAbsent) {
 
-    ShiroUtil.authorize("DATA", "WRITE", regionPath);
+    GeodeSecurityUtil.authorizeRegionWrite(regionPath);
     Cache cache = CacheFactory.getAnyInstance();
     DataCommandResult dataResult = null;
     if (regionPath == null || regionPath.isEmpty()) {
@@ -1018,7 +1016,6 @@ public class DataCommands implements CommandMarker {
   @CliMetaData(shellOnly = false, relatedTopic = {
       CliStrings.TOPIC_GEMFIRE_DATA, CliStrings.TOPIC_GEMFIRE_REGION })
   @CliCommand(value = { CliStrings.GET }, help = CliStrings.GET__HELP)
-  @ResourceOperation(resource = Resource.DATA, operation= OperationCode.READ)
   public Result get(
       @CliOption(key = { CliStrings.GET__KEY }, mandatory = true, help = CliStrings.GET__KEY__HELP) String key,
       @CliOption(key = { CliStrings.GET__REGIONNAME }, mandatory = true, help = CliStrings.GET__REGIONNAME__HELP, optionContext = ConverterHint.REGIONPATH) String regionPath,
@@ -1026,7 +1023,7 @@ public class DataCommands implements CommandMarker {
       @CliOption(key = { CliStrings.GET__VALUEKLASS }, help = CliStrings.GET__VALUEKLASS__HELP) String valueClass,
       @CliOption(key = CliStrings.GET__LOAD, unspecifiedDefaultValue = "true", specifiedDefaultValue = "true", help = CliStrings.GET__LOAD__HELP) Boolean loadOnCacheMiss)
   {
-    ShiroUtil.authorize("DATA", "READ", regionPath);
+    GeodeSecurityUtil.authorizeRegionRead(regionPath);
 
     Cache cache = CacheFactory.getAnyInstance();
     DataCommandResult dataResult = null;
@@ -1074,14 +1071,14 @@ public class DataCommands implements CommandMarker {
   @CliMetaData(shellOnly = false, relatedTopic = {
       CliStrings.TOPIC_GEMFIRE_DATA, CliStrings.TOPIC_GEMFIRE_REGION })
   @CliCommand(value = { CliStrings.LOCATE_ENTRY }, help = CliStrings.LOCATE_ENTRY__HELP)
-  @ResourceOperation(resource = Resource.DATA, operation = OperationCode.READ)
   public Result locateEntry(
       @CliOption(key = { CliStrings.LOCATE_ENTRY__KEY }, mandatory = true, help = CliStrings.LOCATE_ENTRY__KEY__HELP) String key,
       @CliOption(key = { CliStrings.LOCATE_ENTRY__REGIONNAME }, mandatory = true, help = CliStrings.LOCATE_ENTRY__REGIONNAME__HELP, optionContext = ConverterHint.REGIONPATH) String regionPath,
       @CliOption(key = { CliStrings.LOCATE_ENTRY__KEYCLASS }, help = CliStrings.LOCATE_ENTRY__KEYCLASS__HELP) String keyClass,
       @CliOption(key = { CliStrings.LOCATE_ENTRY__VALUEKLASS }, help = CliStrings.LOCATE_ENTRY__VALUEKLASS__HELP) String valueClass,
       @CliOption(key = { CliStrings.LOCATE_ENTRY__RECURSIVE }, help = CliStrings.LOCATE_ENTRY__RECURSIVE__HELP, unspecifiedDefaultValue = "false") boolean recursive) {
-    ShiroUtil.authorize("DATA", "READ", regionPath);
+
+    GeodeSecurityUtil.authorizeRegionRead(regionPath);
     // Cache cache = CacheFactory.getAnyInstance();
     DataCommandResult dataResult = null;
 
@@ -1122,13 +1119,12 @@ public class DataCommands implements CommandMarker {
   @CliMetaData(shellOnly = false, relatedTopic = {
       CliStrings.TOPIC_GEMFIRE_DATA, CliStrings.TOPIC_GEMFIRE_REGION })
   @CliCommand(value = { CliStrings.REMOVE }, help = CliStrings.REMOVE__HELP)
-  @ResourceOperation(resource = Resource.DATA, operation = OperationCode.MANAGE)
+  @ResourceOperation(resource=Resource.DATA, operation = OperationCode.MANAGE)
   public Result remove(
       @CliOption(key = { CliStrings.REMOVE__KEY }, help = CliStrings.REMOVE__KEY__HELP) String key,
       @CliOption(key = { CliStrings.REMOVE__REGION }, mandatory = true, help = CliStrings.REMOVE__REGION__HELP, optionContext = ConverterHint.REGIONPATH) String regionPath,
       @CliOption(key = CliStrings.REMOVE__ALL, help = CliStrings.REMOVE__ALL__HELP, specifiedDefaultValue = "true", unspecifiedDefaultValue = "false") boolean removeAllKeys,
       @CliOption(key = { CliStrings.REMOVE__KEYCLASS }, help = CliStrings.REMOVE__KEYCLASS__HELP) String keyClass) {
-
     Cache cache = CacheFactory.getAnyInstance();
     DataCommandResult dataResult = null;
 

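The commands above that lost their @ResourceOperation annotation now perform their own region-aware check instead, so the permission string can carry the region name. A minimal sketch (not part of this patch) of that pattern, with a hypothetical command body:

    import com.gemstone.gemfire.security.GeodeSecurityUtil;

    public class ExampleRegionCommand {

      // Hypothetical gfsh-style command method, for illustration only.
      public String readFromRegion(String regionPath, String key) {
        // Region-scoped check: throws a GemFireSecurityException if the current
        // Shiro subject lacks DATA:READ on this particular region.
        GeodeSecurityUtil.authorizeRegionRead(regionPath);

        // ... perform the actual read once authorization succeeds ...
        return "value-for-" + key;
      }
    }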
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/RegionCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/RegionCommands.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/RegionCommands.java
index ac69d32..0408675 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/RegionCommands.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/RegionCommands.java
@@ -52,7 +52,7 @@ import com.gemstone.gemfire.management.internal.cli.result.TabularResultData;
 import com.gemstone.gemfire.management.internal.cli.shell.Gfsh;
 import com.gemstone.gemfire.management.internal.cli.util.RegionAttributesNames;
 import com.gemstone.gemfire.management.internal.security.ResourceOperation;
-import com.gemstone.gemfire.security.ShiroUtil;
+
 import org.springframework.shell.core.CommandMarker;
 import org.springframework.shell.core.annotation.CliAvailabilityIndicator;
 import org.springframework.shell.core.annotation.CliCommand;
@@ -163,7 +163,6 @@ public class RegionCommands implements CommandMarker {
       mandatory = true)
       String regionName) {
 
-    ShiroUtil.authorize("CLUSTER", "READ", regionName);
     Result result = null;
     try {
       

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/remote/CommandProcessor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/remote/CommandProcessor.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/remote/CommandProcessor.java
index 87053cc..c3b0b7f 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/remote/CommandProcessor.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/remote/CommandProcessor.java
@@ -31,7 +31,8 @@ import com.gemstone.gemfire.management.internal.cli.result.ResultBuilder;
 import com.gemstone.gemfire.management.internal.cli.util.CommentSkipHelper;
 import com.gemstone.gemfire.management.internal.security.ResourceOperation;
 import com.gemstone.gemfire.security.GemFireSecurityException;
-import com.gemstone.gemfire.security.ShiroUtil;
+import com.gemstone.gemfire.security.GeodeSecurityUtil;
+
 import org.springframework.shell.core.Parser;
 import org.springframework.shell.event.ParseResult;
 
@@ -108,9 +109,7 @@ public class CommandProcessor {
         //do general authorization check here
         Method method = parseResult.getMethod();
         ResourceOperation resourceOperation = method.getAnnotation(ResourceOperation.class);
-        if(resourceOperation!=null){
-          ShiroUtil.authorize(resourceOperation);
-        }
+        GeodeSecurityUtil.authorize(resourceOperation);
 
         result = executionStrategy.execute(parseResult);
         if (result instanceof Result) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/AccessControlMBean.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/AccessControlMBean.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/AccessControlMBean.java
index 9921538..33b80e2 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/AccessControlMBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/AccessControlMBean.java
@@ -17,7 +17,7 @@
 package com.gemstone.gemfire.management.internal.security;
 
 import com.gemstone.gemfire.security.GemFireSecurityException;
-import com.gemstone.gemfire.security.ShiroUtil;
+import com.gemstone.gemfire.security.GeodeSecurityUtil;
 
 /**
  * AccessControlMBean Implementation. This retrieves JMXPrincipal from AccessController
@@ -30,7 +30,7 @@ public class AccessControlMBean implements AccessControlMXBean {
   @Override
   public boolean authorize(String resource, String permission) {
     try {
-      ShiroUtil.authorize(resource, permission);
+      GeodeSecurityUtil.authorize(resource, permission);
       return true;
     }
     catch (GemFireSecurityException e){

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/MBeanServerWrapper.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/MBeanServerWrapper.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/MBeanServerWrapper.java
index bbc0442..8d1031a 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/MBeanServerWrapper.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/MBeanServerWrapper.java
@@ -46,7 +46,7 @@ import javax.management.remote.MBeanServerForwarder;
 
 import com.gemstone.gemfire.management.internal.ManagementConstants;
 import com.gemstone.gemfire.security.GemFireSecurityException;
-import com.gemstone.gemfire.security.ShiroUtil;
+import com.gemstone.gemfire.security.GeodeSecurityUtil;
 
 /**
  * This class intercepts all MBean requests for GemFire MBeans and passed it to
@@ -60,14 +60,6 @@ public class MBeanServerWrapper implements MBeanServerForwarder {
   public MBeanServerWrapper(){
   }
 
-  private void doAuthorization(ResourceOperationContext context){
-    // allow operations which requires no permissions
-    if(context == null)
-      return;
-
-    ShiroUtil.authorize(context);
-  }
-
   private void doAuthorizationPost(ResourceOperationContext context){
     if(context == null)
       return;
@@ -161,7 +153,7 @@ public class MBeanServerWrapper implements MBeanServerForwarder {
   public Object getAttribute(ObjectName name, String attribute) throws MBeanException, InstanceNotFoundException,
       ReflectionException {
     ResourceOperationContext ctx = getOperationContext(name, attribute, false);
-    doAuthorization(ctx);
+    GeodeSecurityUtil.authorize(ctx);
     Object result;
     try {
       result = mbs.getAttribute(name, attribute);
@@ -195,7 +187,7 @@ public class MBeanServerWrapper implements MBeanServerForwarder {
   public void setAttribute(ObjectName name, Attribute attribute) throws InstanceNotFoundException,
       AttributeNotFoundException, InvalidAttributeValueException, MBeanException, ReflectionException {
     ResourceOperationContext ctx = getOperationContext(name, attribute.getName(), false);
-    doAuthorization(ctx);
+    GeodeSecurityUtil.authorize(ctx);
     mbs.setAttribute(name, attribute);
   }
 
@@ -216,12 +208,9 @@ public class MBeanServerWrapper implements MBeanServerForwarder {
   @Override
   public Object invoke(ObjectName name, String operationName, Object[] params, String[] signature)
       throws InstanceNotFoundException, MBeanException, ReflectionException {
-    // skip authorization check if operation is "processCommand" since we will check authorization in the command itself
-    ResourceOperationContext ctx = null;
-    if(!"processCommand".equals(operationName)) {
-      ctx = getOperationContext(name, operationName, true);
-      doAuthorization(ctx);
-    }
+
+    ResourceOperationContext ctx = getOperationContext(name, operationName, true);
+    GeodeSecurityUtil.authorize(ctx);
 
     Object result = mbs.invoke(name, operationName, params, signature);
     if(ctx!=null)

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/ResourceOperation.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/ResourceOperation.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/ResourceOperation.java
index f72a835..8b50183 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/ResourceOperation.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/ResourceOperation.java
@@ -16,25 +16,22 @@
  */
 package com.gemstone.gemfire.management.internal.security;
 
-import javax.management.DescriptorKey;
+import static com.gemstone.gemfire.cache.operations.OperationContext.*;
+
 import java.lang.annotation.ElementType;
 import java.lang.annotation.Inherited;
 import java.lang.annotation.Retention;
 import java.lang.annotation.RetentionPolicy;
 import java.lang.annotation.Target;
-
-import static com.gemstone.gemfire.cache.operations.OperationContext.OperationCode;
-import static com.gemstone.gemfire.cache.operations.OperationContext.Resource;
+import javax.management.DescriptorKey;
 
 @Target({ElementType.METHOD, ElementType.TYPE})
 @Retention(RetentionPolicy.RUNTIME)
 @Inherited
 public @interface ResourceOperation {
   @DescriptorKey("resource")
-  Resource resource();
-
-  String label() default ResourceConstants.DEFAULT_LABEL;
+  Resource resource() default Resource.NULL;
 
   @DescriptorKey("operation")
-  OperationCode operation();
+  OperationCode operation() default OperationCode.NULL;
 }

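With both annotation attributes defaulted, a dispatcher can hand whatever it finds on a command method straight to GeodeSecurityUtil without null checks of its own. A minimal sketch (not part of this patch), with a hypothetical helper name:

    import java.lang.reflect.Method;

    import com.gemstone.gemfire.management.internal.security.ResourceOperation;
    import com.gemstone.gemfire.security.GeodeSecurityUtil;

    public class AnnotationDispatchSketch {

      // Hypothetical dispatch step, for illustration only.
      static void authorizeInvocation(Method commandMethod) {
        // Null when the command declares no @ResourceOperation at all;
        // GeodeSecurityUtil.authorize(null) is a no-op.
        ResourceOperation resourceOperation = commandMethod.getAnnotation(ResourceOperation.class);

        // An empty @ResourceOperation() resolves to Resource.NULL / OperationCode.NULL,
        // which the authorize call also skips.
        GeodeSecurityUtil.authorize(resourceOperation);
      }
    }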
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContext.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContext.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContext.java
index 396cdac..50f9b78 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContext.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/security/ResourceOperationContext.java
@@ -18,6 +18,8 @@ package com.gemstone.gemfire.management.internal.security;
 
 import com.gemstone.gemfire.cache.operations.OperationContext;
 
+import org.apache.shiro.authz.Permission;
+
 /**
  * This is base class for OperationContext for resource (JMX and CLI) operations
  */
@@ -25,32 +27,25 @@ public class ResourceOperationContext extends OperationContext {
 
   private boolean isPostOperation = false;
   private Object opResult = null;
-  private Resource resource = null;
-  private OperationCode operation = null;
-  private String regionName = null;
+  private Resource resource = Resource.NULL;
+  private OperationCode operation = OperationCode.NULL;
+
+  private String regionName = "NULL";
 
   public ResourceOperationContext() {
+    this(null, null, null);
   }
 
-  public ResourceOperationContext(Resource resource, OperationCode operation) {
-    setParts(resource.name()+":"+operation.name(), false);
-    this.resource = resource;
-    this.operation = operation;
+  public ResourceOperationContext(String resource, String operation) {
+    this(resource, operation, null);
   }
 
   public ResourceOperationContext(String resource, String operation, String regionName) {
-    setParts(resource+":"+operation+":"+regionName, false);
     if (resource != null) this.resource = Resource.valueOf(resource);
     if (operation != null) this.operation = OperationCode.valueOf(operation);
-    this.regionName = regionName;
-  }
+    if (regionName !=null ) this.regionName = regionName;
 
-  public void setResourceOperation(ResourceOperation op) {
-    if (op != null) {
-      resource = op.resource();
-      operation = op.operation();
-      setParts(resource.name()+":"+operation.name(), false);
-    }
+    setParts(this.resource.name()+":"+this.operation.name()+":"+regionName);
   }
 
   @Override
@@ -87,20 +82,8 @@ public class ResourceOperationContext extends OperationContext {
     return this.opResult;
   }
 
-  public String toString(){
-    if(this.regionName==null)
-      return getResource() + ":"+ getOperationCode();
-    else
-      return getResource() + ":"+ getOperationCode()+ ":" +this.regionName;
-  }
-
-  public boolean equals(Object o){
-    if(! (o instanceof ResourceOperationContext))
-      return false;
-
-    ResourceOperationContext other = (ResourceOperationContext)o;
-    return (this.resource==other.getResource() && this.operation==other.getOperationCode());
+  @Override
+  public boolean implies(Permission p){
+    return super.implies(p);
   }
-
-
 }
\ No newline at end of file

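Since ResourceOperationContext now relies on Shiro's WildcardPermission for implies(), the usual part-matching rules decide whether a granted permission covers a requested one: a grant with fewer parts implies more specific requests, never the reverse. A minimal, self-contained sketch using plain permission strings (the region name is hypothetical):

    import org.apache.shiro.authz.permission.WildcardPermission;

    public class PermissionImplicationSketch {
      public static void main(String[] args) {
        WildcardPermission granted = new WildcardPermission("DATA:READ");
        WildcardPermission regionRead = new WildcardPermission("DATA:READ:RegionA");

        // A grant without a region part covers every region...
        System.out.println(granted.implies(regionRead));    // true

        // ...but a region-scoped grant does not widen to all regions.
        System.out.println(regionRead.implies(granted));    // false

        // A different operation is never implied.
        System.out.println(granted.implies(new WildcardPermission("DATA:MANAGE"))); // false
      }
    }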
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/AbstractCommandsController.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/AbstractCommandsController.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/AbstractCommandsController.java
index 211d0b1..08865b4 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/AbstractCommandsController.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/AbstractCommandsController.java
@@ -47,10 +47,11 @@ import com.gemstone.gemfire.management.internal.ManagementConstants;
 import com.gemstone.gemfire.management.internal.SystemManagementService;
 import com.gemstone.gemfire.management.internal.cli.shell.Gfsh;
 import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
-import com.gemstone.gemfire.management.internal.web.controllers.support.EnvironmentVariablesHandlerInterceptor;
+import com.gemstone.gemfire.management.internal.web.controllers.support.LoginHandlerInterceptor;
 import com.gemstone.gemfire.management.internal.web.controllers.support.MemberMXBeanAdapter;
 import com.gemstone.gemfire.management.internal.web.util.UriUtils;
-import com.gemstone.gemfire.security.ShiroUtil;
+import com.gemstone.gemfire.security.GeodeSecurityUtil;
+
 import org.apache.logging.log4j.Logger;
 import org.springframework.beans.propertyeditors.StringArrayPropertyEditor;
 import org.springframework.http.HttpStatus;
@@ -488,12 +489,12 @@ public abstract class AbstractCommandsController {
    * Gets the environment setup during this HTTP/command request for the current command process execution.
    * 
    * @return a mapping of environment variables to values.
-   * @see com.gemstone.gemfire.management.internal.web.controllers.support.EnvironmentVariablesHandlerInterceptor#getEnvironment()
+   * @see LoginHandlerInterceptor#getEnvironment()
    */
   protected Map<String, String> getEnvironment() {
     final Map<String, String> environment = new HashMap<String, String>();
 
-    environment.putAll(EnvironmentVariablesHandlerInterceptor.getEnvironment());
+    environment.putAll(LoginHandlerInterceptor.getEnvironment());
     environment.put(Gfsh.ENV_APP_NAME, Gfsh.GFSH_APP_NAME);
 
     return environment;
@@ -541,7 +542,7 @@ public abstract class AbstractCommandsController {
    * @param command a String value containing a valid command String as would be entered by the user in Gfsh.
    * @return a result of the command execution as a String, typically marshalled in JSON to be serialized back to Gfsh.
    * @see com.gemstone.gemfire.management.internal.cli.shell.Gfsh
-   * @see com.gemstone.gemfire.management.internal.web.controllers.support.EnvironmentVariablesHandlerInterceptor#getEnvironment()
+   * @see LoginHandlerInterceptor#getEnvironment()
    * @see #getEnvironment()
    * @see #processCommand(String, java.util.Map, byte[][])
    */
@@ -558,7 +559,7 @@ public abstract class AbstractCommandsController {
         return new ResponseEntity<String>(processCommand(command, fileData), HttpStatus.OK);
       }
     };
-    return ShiroUtil.associateWith(callable);
+    return GeodeSecurityUtil.associateWith(callable);
   }
 
 
@@ -571,7 +572,7 @@ public abstract class AbstractCommandsController {
    * the Manager, usually for the 'deploy' Gfsh command.
    * @return a result of the command execution as a String, typically marshalled in JSON to be serialized back to Gfsh.
    * @see com.gemstone.gemfire.management.internal.cli.shell.Gfsh
-   * @see com.gemstone.gemfire.management.internal.web.controllers.support.EnvironmentVariablesHandlerInterceptor#getEnvironment()
+   * @see LoginHandlerInterceptor#getEnvironment()
    * @see #getEnvironment()
    * @see #processCommand(String, java.util.Map, byte[][])
    */
@@ -590,7 +591,7 @@ public abstract class AbstractCommandsController {
    * between Gfsh and the Manager, and thus need to specify this key/value pair mapping.
    * @return a result of the command execution as a String, typically marshalled in JSON to be serialized back to Gfsh.
    * @see com.gemstone.gemfire.management.internal.cli.shell.Gfsh
-   * @see com.gemstone.gemfire.management.internal.web.controllers.support.EnvironmentVariablesHandlerInterceptor#getEnvironment()
+   * @see LoginHandlerInterceptor#getEnvironment()
    * @see #processCommand(String, java.util.Map, byte[][])
    */
   protected String processCommand(final String command, final Map<String, String> environment) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/EnvironmentVariablesHandlerInterceptor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/EnvironmentVariablesHandlerInterceptor.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/EnvironmentVariablesHandlerInterceptor.java
deleted file mode 100644
index bb7a27d..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/EnvironmentVariablesHandlerInterceptor.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.management.internal.web.controllers.support;
-
-import java.util.Collections;
-import java.util.Enumeration;
-import java.util.HashMap;
-import java.util.Map;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.management.internal.security.ResourceConstants;
-import com.gemstone.gemfire.security.Authenticator;
-import com.gemstone.gemfire.security.ShiroUtil;
-import org.apache.logging.log4j.Logger;
-import org.springframework.web.servlet.handler.HandlerInterceptorAdapter;
-
-/**
- * The GetEnvironmentHandlerInterceptor class handles extracting Gfsh environment variables encoded in the HTTP request
- * message as request parameters.
- * <p/>
- * @see javax.servlet.http.HttpServletRequest
- * @see javax.servlet.http.HttpServletResponse
- * @see org.springframework.web.servlet.handler.HandlerInterceptorAdapter
- * @since 8.0
- */
-@SuppressWarnings("unused")
-public class EnvironmentVariablesHandlerInterceptor extends HandlerInterceptorAdapter {
-
-  private static final Logger logger = LogService.getLogger();
-
-  private Cache cache;
-
-  private Authenticator auth = null;
-
-  private static final ThreadLocal<Map<String, String>> ENV = new ThreadLocal<Map<String, String>>() {
-    @Override
-    protected Map<String, String> initialValue() {
-      return Collections.emptyMap();
-    }
-  };
-
-  protected static final String ENVIRONMENT_VARIABLE_REQUEST_PARAMETER_PREFIX = "vf.gf.env.";
-
-  protected static final String SECURITY_VARIABLE_REQUEST_HEADER_PREFIX = "security-";
-
-  public static Map<String, String> getEnvironment() {
-    return ENV.get();
-  }
-
-  @Override
-  public boolean preHandle(final HttpServletRequest request, final HttpServletResponse response, final Object handler)
-    throws Exception
-  {
-    final Map<String, String> requestParameterValues = new HashMap<String, String>();
-
-    for (Enumeration<String> requestParameters = request.getParameterNames(); requestParameters.hasMoreElements(); ) {
-      final String requestParameter = requestParameters.nextElement();
-
-      if (requestParameter.startsWith(ENVIRONMENT_VARIABLE_REQUEST_PARAMETER_PREFIX)) {
-        requestParameterValues.put(requestParameter.substring(ENVIRONMENT_VARIABLE_REQUEST_PARAMETER_PREFIX.length()),
-          request.getParameter(requestParameter));
-      }
-    }
-
-
-
-    for (Enumeration<String> requestHeaders = request.getHeaderNames(); requestHeaders.hasMoreElements();) {
-
-      final String requestHeader = requestHeaders.nextElement();
-
-      if (requestHeader.startsWith(SECURITY_VARIABLE_REQUEST_HEADER_PREFIX)) {
-        requestParameterValues.put(requestHeader, request.getHeader(requestHeader));
-      }
-
-    }
-
-    String username = requestParameterValues.get(ResourceConstants.USER_NAME);
-    String password = requestParameterValues.get(ResourceConstants.PASSWORD);
-    ShiroUtil.login(username, password);
-
-    ENV.set(requestParameterValues);
-
-    return true;
-  }
-
-
-  @Override
-  public void afterCompletion(final HttpServletRequest request,
-                              final HttpServletResponse response,
-                              final Object handler,
-                              final Exception ex)
-    throws Exception
-  {
-    afterConcurrentHandlingStarted(request, response, handler);
-    ShiroUtil.logout();
-  }
-
-  @Override
-  public void afterConcurrentHandlingStarted(
-    HttpServletRequest request, HttpServletResponse response, Object handler)
-    throws Exception {
-    ENV.remove();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/LoginHandlerInterceptor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/LoginHandlerInterceptor.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/LoginHandlerInterceptor.java
new file mode 100644
index 0000000..5465ea3
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/LoginHandlerInterceptor.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.web.controllers.support;
+
+import java.util.Collections;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Map;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.management.internal.security.ResourceConstants;
+import com.gemstone.gemfire.security.Authenticator;
+import com.gemstone.gemfire.security.GeodeSecurityUtil;
+
+import org.apache.logging.log4j.Logger;
+import org.springframework.web.servlet.handler.HandlerInterceptorAdapter;
+
+/**
+ * The LoginHandlerInterceptor class handles extracting Gfsh environment variables encoded in the HTTP request
+ * message as request parameters.
+ * <p/>
+ * @see javax.servlet.http.HttpServletRequest
+ * @see javax.servlet.http.HttpServletResponse
+ * @see org.springframework.web.servlet.handler.HandlerInterceptorAdapter
+ * @since 8.0
+ */
+@SuppressWarnings("unused")
+public class LoginHandlerInterceptor extends HandlerInterceptorAdapter {
+
+  private static final Logger logger = LogService.getLogger();
+
+  private Cache cache;
+
+  private Authenticator auth = null;
+
+  private static final ThreadLocal<Map<String, String>> ENV = new ThreadLocal<Map<String, String>>() {
+    @Override
+    protected Map<String, String> initialValue() {
+      return Collections.emptyMap();
+    }
+  };
+
+  protected static final String ENVIRONMENT_VARIABLE_REQUEST_PARAMETER_PREFIX = "vf.gf.env.";
+
+  protected static final String SECURITY_VARIABLE_REQUEST_HEADER_PREFIX = "security-";
+
+  public static Map<String, String> getEnvironment() {
+    return ENV.get();
+  }
+
+  @Override
+  public boolean preHandle(final HttpServletRequest request, final HttpServletResponse response, final Object handler)
+    throws Exception
+  {
+    final Map<String, String> requestParameterValues = new HashMap<String, String>();
+
+    for (Enumeration<String> requestParameters = request.getParameterNames(); requestParameters.hasMoreElements(); ) {
+      final String requestParameter = requestParameters.nextElement();
+
+      if (requestParameter.startsWith(ENVIRONMENT_VARIABLE_REQUEST_PARAMETER_PREFIX)) {
+        requestParameterValues.put(requestParameter.substring(ENVIRONMENT_VARIABLE_REQUEST_PARAMETER_PREFIX.length()),
+          request.getParameter(requestParameter));
+      }
+    }
+
+
+
+    for (Enumeration<String> requestHeaders = request.getHeaderNames(); requestHeaders.hasMoreElements();) {
+
+      final String requestHeader = requestHeaders.nextElement();
+
+      if (requestHeader.startsWith(SECURITY_VARIABLE_REQUEST_HEADER_PREFIX)) {
+        requestParameterValues.put(requestHeader, request.getHeader(requestHeader));
+      }
+
+    }
+
+    String username = requestParameterValues.get(ResourceConstants.USER_NAME);
+    String password = requestParameterValues.get(ResourceConstants.PASSWORD);
+    GeodeSecurityUtil.login(username, password);
+
+    ENV.set(requestParameterValues);
+
+    return true;
+  }
+
+
+  @Override
+  public void afterCompletion(final HttpServletRequest request,
+                              final HttpServletResponse response,
+                              final Object handler,
+                              final Exception ex)
+    throws Exception
+  {
+    afterConcurrentHandlingStarted(request, response, handler);
+    GeodeSecurityUtil.logout();
+  }
+
+  @Override
+  public void afterConcurrentHandlingStarted(
+    HttpServletRequest request, HttpServletResponse response, Object handler)
+    throws Exception {
+    ENV.remove();
+  }
+}

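A minimal sketch (not part of this patch) of what the interceptor does with an incoming request: parameters prefixed with "vf.gf.env." land in the per-thread environment map with the prefix stripped, headers prefixed with "security-" are copied verbatim, and the credentials are handed to GeodeSecurityUtil.login(). The Spring mock classes and the exact "security-username"/"security-password" header names (whatever ResourceConstants.USER_NAME and PASSWORD resolve to) are assumptions for illustration.

    import org.springframework.mock.web.MockHttpServletRequest;
    import org.springframework.mock.web.MockHttpServletResponse;

    import com.gemstone.gemfire.management.internal.web.controllers.support.LoginHandlerInterceptor;

    public class LoginInterceptorSketch {
      public static void main(String[] args) throws Exception {
        MockHttpServletRequest request = new MockHttpServletRequest();
        // Gfsh environment variable; stored under "APP_NAME" once the prefix is stripped.
        request.addParameter("vf.gf.env.APP_NAME", "gfsh");
        // Assumed credential header names.
        request.addHeader("security-username", "admin");
        request.addHeader("security-password", "secret");

        LoginHandlerInterceptor interceptor = new LoginHandlerInterceptor();
        // Populates the thread-local environment and attempts a Shiro login
        // (a no-op when no Shiro SecurityManager is configured).
        interceptor.preHandle(request, new MockHttpServletResponse(), null);

        System.out.println(LoginHandlerInterceptor.getEnvironment());
      }
    }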
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/shell/RestHttpOperationInvoker.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/shell/RestHttpOperationInvoker.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/shell/RestHttpOperationInvoker.java
index 439e2b4..0ead2d7 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/shell/RestHttpOperationInvoker.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/shell/RestHttpOperationInvoker.java
@@ -33,7 +33,6 @@ import com.gemstone.gemfire.internal.util.CollectionUtils;
 import com.gemstone.gemfire.management.internal.cli.CommandRequest;
 import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
 import com.gemstone.gemfire.management.internal.cli.shell.Gfsh;
-import com.gemstone.gemfire.management.internal.security.ResourceConstants;
 import com.gemstone.gemfire.management.internal.web.domain.Link;
 import com.gemstone.gemfire.management.internal.web.domain.LinkIndex;
 import com.gemstone.gemfire.management.internal.web.http.ClientHttpRequest;
@@ -230,9 +229,6 @@ public class RestHttpOperationInvoker extends AbstractHttpOperationInvoker imple
   protected ClientHttpRequest createHttpRequest(final CommandRequest command) {
     ClientHttpRequest request = createHttpRequest(findLink(command));
 
-    //request.getParameters().setAll(new HashMap<String, Object>(CollectionUtils.removeKeys(
-    //  new HashMap<String, String>(command.getParameters()), ExcludeNoValueFilter.INSTANCE)));
-
     Map<String, String> commandParameters = command.getParameters();
 
     for (Map.Entry<String, String> entry : commandParameters.entrySet()) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/main/java/com/gemstone/gemfire/security/CustomAuthRealm.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/security/CustomAuthRealm.java b/geode-core/src/main/java/com/gemstone/gemfire/security/CustomAuthRealm.java
index 76fc852..706a7cc 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/security/CustomAuthRealm.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/security/CustomAuthRealm.java
@@ -35,7 +35,7 @@ import com.gemstone.gemfire.internal.ClassLoadUtil;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
 import com.gemstone.gemfire.internal.lang.StringUtils;
 import com.gemstone.gemfire.management.internal.security.ResourceConstants;
-import com.gemstone.gemfire.management.internal.security.ResourceOperationContext;
+
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.shiro.authc.AuthenticationException;
@@ -66,7 +66,6 @@ public class CustomAuthRealm extends AuthorizingRealm{
     this.authenticatorFactoryName = securityProps.getProperty(DistributionConfig.SECURITY_CLIENT_AUTHENTICATOR_NAME);
     this.cachedAuthZCallback = new ConcurrentHashMap<>();
     this.cachedPostAuthZCallback = new ConcurrentHashMap<>();
-    logger.info("Started Management interceptor on JMX connector");
   }
 
   @Override
@@ -93,13 +92,13 @@ public class CustomAuthRealm extends AuthorizingRealm{
 
   @Override
   public boolean isPermitted(PrincipalCollection principals, Permission permission) {
-    ResourceOperationContext context =(ResourceOperationContext)permission;
+    OperationContext context =(OperationContext)permission;
     Principal principal = (Principal)principals.getPrimaryPrincipal();
     // if no access control is specified, then we allow all
     if(StringUtils.isBlank(authzFactoryName))
       return true;
     AccessControl accessControl = getAccessControl(principal, false);
-    return accessControl.authorizeOperation(null, context);
+    return accessControl.authorizeOperation(context.getRegionName(), context);
   }
 
   public AccessControl getAccessControl(Principal principal, boolean isPost) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/main/java/com/gemstone/gemfire/security/GeodeSecurityUtil.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/security/GeodeSecurityUtil.java b/geode-core/src/main/java/com/gemstone/gemfire/security/GeodeSecurityUtil.java
new file mode 100644
index 0000000..148a963
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/security/GeodeSecurityUtil.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.security;
+
+import java.util.concurrent.Callable;
+
+import com.gemstone.gemfire.cache.operations.OperationContext;
+import com.gemstone.gemfire.cache.operations.OperationContext.OperationCode;
+import com.gemstone.gemfire.cache.operations.OperationContext.Resource;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.management.internal.security.ResourceOperation;
+import com.gemstone.gemfire.management.internal.security.ResourceOperationContext;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.logging.log4j.Logger;
+import org.apache.shiro.SecurityUtils;
+import org.apache.shiro.ShiroException;
+import org.apache.shiro.UnavailableSecurityManagerException;
+import org.apache.shiro.authc.UsernamePasswordToken;
+import org.apache.shiro.subject.Subject;
+import org.apache.shiro.util.ThreadContext;
+
+public class GeodeSecurityUtil {
+
+  private static Logger logger = LogService.getLogger();
+
+  public static void login(String username, String password){
+    if(!isShiroConfigured())
+      return;
+
+    Subject currentUser = SecurityUtils.getSubject();
+
+    UsernamePasswordToken token =
+        new UsernamePasswordToken(username, password);
+    try {
+      logger.info("Logging in "+username+"/"+password);
+      currentUser.login(token);
+    } catch (ShiroException e) {
+      throw new AuthenticationFailedException(e.getMessage(), e);
+    }
+  }
+
+  public static void logout(){
+    if(!isShiroConfigured())
+      return;
+
+    Subject currentUser = SecurityUtils.getSubject();
+    try {
+      logger.info("Logging out "+currentUser.getPrincipal());
+      currentUser.logout();
+    }
+    catch(ShiroException e){
+      throw new AuthenticationFailedException(e.getMessage(), e);
+    }
+    // clean out Shiro's thread local content
+    ThreadContext.remove();
+  }
+
+  public static Callable associateWith(Callable callable){
+    if(!isShiroConfigured())
+      return callable;
+
+    Subject currentUser = SecurityUtils.getSubject();
+    return currentUser.associateWith(callable);
+  }
+
+  public static void authorize(ResourceOperation resourceOperation) {
+    if(resourceOperation==null)
+      return;
+
+    authorize(resourceOperation.resource().name(),
+      resourceOperation.operation().name(),
+      null);
+  }
+
+  public static void authorizeClusterManage(){
+    authorize("CLUSTER", "MANAGE");
+  }
+
+  public static void authorizeClusterWrite(){
+    authorize("CLUSTER", "WRITE");
+  }
+
+  public static void authorizeClusterRead(){
+    authorize("CLUSTER", "READ");
+  }
+
+  public static void authorizeDataManage(){
+    authorize("DATA", "MANAGE");
+  }
+
+  public static void authorizeDataWrite(){
+    authorize("DATA", "WRITE");
+  }
+
+  public static void authorizeDataRead(){
+    authorize("DATA", "READ");
+  }
+
+  public static void authorizeRegionWrite(String regionName){
+    authorize("DATA", "WRITE", regionName);
+  }
+
+  public static void authorizeRegionRead(String regionName){
+    authorize("DATA", "READ", regionName);
+  }
+
+  public static void authorize(String resource, String operation){
+    authorize(resource, operation, null);
+  }
+
+  private static void authorize(String resource, String operation, String regionName){
+    regionName = StringUtils.stripStart(regionName, "/");
+    authorize(new ResourceOperationContext(resource, operation, regionName));
+  }
+
+  public static void authorize(OperationContext context) {
+    if(context==null)
+      return;
+
+    if(context.getResource()== Resource.NULL && context.getOperationCode()== OperationCode.NULL)
+      return;
+
+    if(!isShiroConfigured())
+      return;
+
+
+    Subject currentUser = SecurityUtils.getSubject();
+    try {
+      currentUser.checkPermission(context);
+    }
+    catch(ShiroException e){
+      logger.info(currentUser.getPrincipal() + " not authorized for " + context);
+      throw new GemFireSecurityException(e.getMessage(), e);
+    }
+  }
+
+  private static boolean isShiroConfigured(){
+    try{
+      SecurityUtils.getSecurityManager();
+    }
+    catch(UnavailableSecurityManagerException e){
+      return false;
+    }
+    return true;
+  }
+
+}

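Taken together, the new utility supports a simple lifecycle for non-JMX entry points: log the caller in, authorize individual operations (optionally wrapping work that runs on another thread), and log out. A minimal sketch (not part of this patch); the credentials and region name are placeholders:

    import java.util.concurrent.Callable;

    import com.gemstone.gemfire.security.GeodeSecurityUtil;

    public class SecurityLifecycleSketch {

      static void handleRequest() throws Exception {
        // Bind a Shiro subject to this thread (a no-op if Shiro is not configured).
        GeodeSecurityUtil.login("admin", "secret");
        try {
          // Coarse-grained check against the CLUSTER resource.
          GeodeSecurityUtil.authorizeClusterRead();

          // Region-scoped check for a data operation.
          GeodeSecurityUtil.authorizeRegionWrite("RegionA");

          // Work handed to another thread keeps the same subject when wrapped.
          Callable<String> task = GeodeSecurityUtil.associateWith(() -> "done");
          task.call();
        } finally {
          GeodeSecurityUtil.logout();
        }
      }
    }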
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/main/java/com/gemstone/gemfire/security/JMXShiroAuthenticator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/security/JMXShiroAuthenticator.java b/geode-core/src/main/java/com/gemstone/gemfire/security/JMXShiroAuthenticator.java
index 8f86c38..c55e700 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/security/JMXShiroAuthenticator.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/security/JMXShiroAuthenticator.java
@@ -49,7 +49,7 @@ public class JMXShiroAuthenticator implements JMXAuthenticator, NotificationList
       throw new SecurityException(WRONGE_CREDENTIALS_MESSAGE);
     }
 
-    ShiroUtil.login(username, password);
+    GeodeSecurityUtil.login(username, password);
 
     return new Subject(true, Collections.singleton(new JMXPrincipal(username)), Collections.EMPTY_SET,
       Collections.EMPTY_SET);
@@ -61,7 +61,7 @@ public class JMXShiroAuthenticator implements JMXAuthenticator, NotificationList
       JMXConnectionNotification cxNotification = (JMXConnectionNotification) notification;
       String type = cxNotification.getType();
       if (JMXConnectionNotification.CLOSED.equals(type)) {
-        ShiroUtil.logout();
+        GeodeSecurityUtil.logout();
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/main/java/com/gemstone/gemfire/security/ShiroUtil.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/security/ShiroUtil.java b/geode-core/src/main/java/com/gemstone/gemfire/security/ShiroUtil.java
deleted file mode 100644
index 01914e4..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/security/ShiroUtil.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.security;
-
-import java.util.concurrent.Callable;
-
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.management.internal.security.ResourceOperation;
-import com.gemstone.gemfire.management.internal.security.ResourceOperationContext;
-
-import org.apache.logging.log4j.Logger;
-import org.apache.shiro.SecurityUtils;
-import org.apache.shiro.ShiroException;
-import org.apache.shiro.UnavailableSecurityManagerException;
-import org.apache.shiro.authc.UsernamePasswordToken;
-import org.apache.shiro.subject.Subject;
-import org.apache.shiro.util.ThreadContext;
-
-public class ShiroUtil {
-
-  private static Logger logger = LogService.getLogger();
-
-  public static void login(String username, String password){
-    if(!isShiroConfigured())
-      return;
-
-    Subject currentUser = SecurityUtils.getSubject();
-
-    UsernamePasswordToken token =
-        new UsernamePasswordToken(username, password);
-    try {
-      logger.info("Logging in "+username+"/"+password);
-      currentUser.login(token);
-    } catch (ShiroException e) {
-      throw new AuthenticationFailedException(e.getMessage(), e);
-    }
-  }
-
-  public static void logout(){
-    if(!isShiroConfigured())
-      return;
-
-    Subject currentUser = SecurityUtils.getSubject();
-    try {
-      logger.info("Logging out "+currentUser.getPrincipal());
-      currentUser.logout();
-    }
-    catch(ShiroException e){
-      throw new AuthenticationFailedException(e.getMessage(), e);
-    }
-    // clean out Shiro's thread local content
-    ThreadContext.remove();
-  }
-
-  public static Callable associateWith(Callable callable){
-    if(!isShiroConfigured())
-      return callable;
-
-    Subject currentUser = SecurityUtils.getSubject();
-    return currentUser.associateWith(callable);
-  }
-
-  public static void authorize(ResourceOperationContext context) {
-    authorize(context.getResource().name(), context.getOperationCode().name(), context.getRegionName());
-  }
-
-  public static void authorize(ResourceOperation resourceOperation) {
-    authorize(resourceOperation.resource().name(), resourceOperation.operation().name());
-  }
-
-  public static void authorize(String resource, String operation){
-    authorize(resource, operation, null);
-  }
-
-  public static void authorize(String resource, String operation, String regionName){
-    if(!isShiroConfigured())
-      return;
-
-    ResourceOperationContext permission = new ResourceOperationContext(resource, operation, regionName);
-    Subject currentUser = SecurityUtils.getSubject();
-    try {
-      currentUser.checkPermission(permission);
-    }
-    catch(ShiroException e){
-      logger.info(currentUser.getPrincipal() + " not authorized for "+resource+":"+operation+":"+regionName);
-      throw new GemFireSecurityException(e.getMessage(), e);
-    }
-  }
-
-  private static boolean isShiroConfigured(){
-    try{
-      SecurityUtils.getSecurityManager();
-    }
-    catch(UnavailableSecurityManagerException e){
-      return false;
-    }
-    return true;
-  }
-
-
-}
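For context, the ShiroUtil class deleted above centralized Shiro session handling (login, logout, authorization checks, and binding a Subject to a Callable) for Geode's management security. The sketch below is a hypothetical caller, not code from this commit, showing how that static API was meant to be used; the user name, credentials, and region are made up.

    import java.util.concurrent.Callable;

    import com.gemstone.gemfire.security.ShiroUtil;

    public class ShiroUtilUsageSketch {
      // Hypothetical caller: log in, verify a DATA:READ permission on "regionA",
      // run the work bound to the current Shiro Subject, then log out.
      public static Object runAsDataReader(Callable<Object> work) throws Exception {
        ShiroUtil.login("data-user", "1234567");
        try {
          ShiroUtil.authorize("DATA", "READ", "regionA"); // throws GemFireSecurityException when denied
          return ShiroUtil.associateWith(work).call();    // executes with the logged-in Subject
        } finally {
          ShiroUtil.logout(); // also clears Shiro's ThreadContext
        }
      }
    }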

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/CacheServerMBeanAuthorizationJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/CacheServerMBeanAuthorizationJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/CacheServerMBeanAuthorizationJUnitTest.java
index baa8393..3ded1dc 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/CacheServerMBeanAuthorizationJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/CacheServerMBeanAuthorizationJUnitTest.java
@@ -49,7 +49,7 @@ public class CacheServerMBeanAuthorizationJUnitTest {
   @JMXConnectionConfiguration(user = "data-admin", password = "1234567")
   public void testDataAdmin() throws Exception {
     bean.removeIndex("foo");
-    assertThatThrownBy(() -> bean.executeContinuousQuery("bar")).hasMessageContaining("DATA:READ");
+    assertThatThrownBy(() -> bean.executeContinuousQuery("bar")).hasMessageContaining(TestCommand.dataRead.toString());
     bean.fetchLoadProbe();
     bean.getActiveCQCount();
     bean.stopContinuousQuery("bar");
@@ -61,8 +61,8 @@ public class CacheServerMBeanAuthorizationJUnitTest {
   @Test
   @JMXConnectionConfiguration(user = "cluster-admin", password = "1234567")
   public void testClusterAdmin() throws Exception {
-    assertThatThrownBy(() -> bean.removeIndex("foo")).hasMessageContaining("DATA:MANAGE");
-    assertThatThrownBy(() -> bean.executeContinuousQuery("bar")).hasMessageContaining("DATA:READ");
+    assertThatThrownBy(() -> bean.removeIndex("foo")).hasMessageContaining(TestCommand.dataManage.toString());
+    assertThatThrownBy(() -> bean.executeContinuousQuery("bar")).hasMessageContaining(TestCommand.dataRead.toString());
     bean.fetchLoadProbe();
   }
 
@@ -70,21 +70,21 @@ public class CacheServerMBeanAuthorizationJUnitTest {
   @Test
   @JMXConnectionConfiguration(user = "data-user", password = "1234567")
   public void testDataUser() throws Exception {
-    assertThatThrownBy(() -> bean.removeIndex("foo")).hasMessageContaining("DATA:MANAGE");
+    assertThatThrownBy(() -> bean.removeIndex("foo")).hasMessageContaining(TestCommand.dataManage.toString());
     bean.executeContinuousQuery("bar");
-    assertThatThrownBy(() -> bean.fetchLoadProbe()).hasMessageContaining("CLUSTER:READ");
+    assertThatThrownBy(() -> bean.fetchLoadProbe()).hasMessageContaining(TestCommand.clusterRead.toString());
   }
 
   @Test
   @JMXConnectionConfiguration(user = "stranger", password = "1234567")
   public void testNoAccess() throws Exception {
-    assertThatThrownBy(() -> bean.removeIndex("foo")).hasMessageContaining("DATA:MANAGE");
-    assertThatThrownBy(() -> bean.executeContinuousQuery("bar")).hasMessageContaining("DATA:READ");
-    assertThatThrownBy(() -> bean.fetchLoadProbe()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.getActiveCQCount()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.stopContinuousQuery("bar")).hasMessageContaining("DATA:MANAGE");
-    assertThatThrownBy(() -> bean.closeAllContinuousQuery("bar")).hasMessageContaining("DATA:MANAGE");
-    assertThatThrownBy(() -> bean.isRunning()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.showClientQueueDetails("bar")).hasMessageContaining("CLUSTER:READ");
+    assertThatThrownBy(() -> bean.removeIndex("foo")).hasMessageContaining(TestCommand.dataManage.toString());
+    assertThatThrownBy(() -> bean.executeContinuousQuery("bar")).hasMessageContaining(TestCommand.dataRead.toString());
+    assertThatThrownBy(() -> bean.fetchLoadProbe()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.getActiveCQCount()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.stopContinuousQuery("bar")).hasMessageContaining(TestCommand.dataManage.toString());
+    assertThatThrownBy(() -> bean.closeAllContinuousQuery("bar")).hasMessageContaining(TestCommand.dataManage.toString());
+    assertThatThrownBy(() -> bean.isRunning()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.showClientQueueDetails("bar")).hasMessageContaining(TestCommand.clusterRead.toString());
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/CacheServerMBeanShiroJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/CacheServerMBeanShiroJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/CacheServerMBeanShiroJUnitTest.java
index e55623d..85a55a7 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/CacheServerMBeanShiroJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/CacheServerMBeanShiroJUnitTest.java
@@ -61,13 +61,33 @@ public class CacheServerMBeanShiroJUnitTest {
   @Test
   @JMXConnectionConfiguration(user = "guest", password = "guest")
   public void testNoAccess() throws Exception {
-    assertThatThrownBy(() -> bean.removeIndex("foo")).hasMessageContaining("DATA:MANAGE");
-    assertThatThrownBy(() -> bean.executeContinuousQuery("bar")).hasMessageContaining("DATA:READ");
-    assertThatThrownBy(() -> bean.fetchLoadProbe()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.getActiveCQCount()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.stopContinuousQuery("bar")).hasMessageContaining("DATA:MANAGE");
-    assertThatThrownBy(() -> bean.closeAllContinuousQuery("bar")).hasMessageContaining("DATA:MANAGE");
-    assertThatThrownBy(() -> bean.isRunning()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.showClientQueueDetails("bar")).hasMessageContaining("CLUSTER:READ");
+    assertThatThrownBy(() -> bean.removeIndex("foo")).hasMessageContaining(TestCommand.dataManage.toString());
+    assertThatThrownBy(() -> bean.executeContinuousQuery("bar")).hasMessageContaining(TestCommand.dataRead.toString());
+    assertThatThrownBy(() -> bean.fetchLoadProbe()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.getActiveCQCount()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.stopContinuousQuery("bar")).hasMessageContaining(TestCommand.dataManage.toString());
+    assertThatThrownBy(() -> bean.closeAllContinuousQuery("bar")).hasMessageContaining(TestCommand.dataManage.toString());
+    assertThatThrownBy(() -> bean.isRunning()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.showClientQueueDetails("bar")).hasMessageContaining(TestCommand.clusterRead.toString());
+  }
+
+  @Test
+  @JMXConnectionConfiguration(user = "regionAReader", password = "password")
+  public void testRegionAccess() throws Exception{
+    assertThatThrownBy(() -> bean.removeIndex("foo")).hasMessageContaining(TestCommand.dataManage.toString());
+    assertThatThrownBy(() -> bean.fetchLoadProbe()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.getActiveCQCount()).hasMessageContaining(TestCommand.clusterRead.toString());
+
+    assertThatThrownBy(() -> bean.executeContinuousQuery("bar")).hasMessageContaining(TestCommand.dataRead.toString());
+  }
+
+  @Test
+  @JMXConnectionConfiguration(user = "dataReader", password = "12345")
+  public void testDataRead() throws Exception{
+    assertThatThrownBy(() -> bean.removeIndex("foo")).hasMessageContaining(TestCommand.dataManage.toString());
+    assertThatThrownBy(() -> bean.fetchLoadProbe()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.getActiveCQCount()).hasMessageContaining(TestCommand.clusterRead.toString());
+
+    bean.executeContinuousQuery("bar");
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/CliCommandsSecurityTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/CliCommandsSecurityTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/CliCommandsSecurityTest.java
index 5e49f92..0864e52 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/CliCommandsSecurityTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/CliCommandsSecurityTest.java
@@ -24,21 +24,17 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.logging.LogService;
 import com.gemstone.gemfire.management.MemberMXBean;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
 import org.junit.Before;
 import org.junit.ClassRule;
-import org.junit.FixMethodOrder;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.junit.runners.MethodSorters;
 
 /**
- * tests will be run alphabetically, in this test class, we run non-admin test first,
- * since we don't want to have the server stopped for the rest of the tests.
  */
 
 @Category(IntegrationTest.class)
-@FixMethodOrder(MethodSorters.NAME_ASCENDING)
 public class CliCommandsSecurityTest {
   private static int jmxManagerPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
 
@@ -60,11 +56,8 @@ public class CliCommandsSecurityTest {
 
   @Test
   @JMXConnectionConfiguration(user = "stranger", password = "1234567")
-  // the tests are run in alphabetical order, so the naming of the tests do matter
-  public void a_testNoAccess(){
-//    List<TestCommand> clusterReads = new ArrayList<>();
-//    clusterReads.add(new TestCommand("deploy --jar=group1_functions.jar --group=Group1", "CLUSTER:MANAGE"));
-    for (TestCommand command:commands) {
+  public void testNoAccess(){
+   for (TestCommand command:commands) {
       LogService.getLogger().info("processing: "+command.getCommand());
       // for those commands that don't require any permission, any user can execute them
       if(command.getPermission()==null){
@@ -72,14 +65,14 @@ public class CliCommandsSecurityTest {
       }
       else {
         assertThatThrownBy(() -> bean.processCommand(command.getCommand()))
-            .hasMessageContaining(command.getPermission());
+            .hasMessageContaining(command.getPermission().toString());
       }
     }
   }
 
   @Test
   @JMXConnectionConfiguration(user = "super-user", password = "1234567")
-  public void b_testAdminUser() throws Exception {
+  public void testAdminUser() throws Exception {
     for (TestCommand command:commands) {
       LogService.getLogger().info("processing: "+command.getCommand());
       bean.processCommand(command.getCommand());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/DataCommandsSecurityTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/DataCommandsSecurityTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/DataCommandsSecurityTest.java
index 7517f49..97260d8 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/DataCommandsSecurityTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/DataCommandsSecurityTest.java
@@ -68,15 +68,16 @@ public class DataCommandsSecurityTest {
   @Test
   public void testRegionAcess(){
     assertThatThrownBy(() -> bean.processCommand("rebalance --include-region=region2")).isInstanceOf(GemFireSecurityException.class)
-        .hasMessageContaining("DATA:MANAGE");
+        .hasMessageContaining(TestCommand.dataManage.toString());
 
     assertThatThrownBy(() -> bean.processCommand("export data --region=region2 --file=foo.txt --member=value")).isInstanceOf(GemFireSecurityException.class);
     assertThatThrownBy(() -> bean.processCommand("import data --region=region2 --file=foo.txt --member=value")).isInstanceOf(GemFireSecurityException.class);
 
     assertThatThrownBy(() -> bean.processCommand("put --key=key1 --value=value1 --region=region2")).isInstanceOf(GemFireSecurityException.class)
-        .hasMessageContaining("DATA:WRITE");
+        .hasMessageContaining("[data]:[write]:[region2]");
 
-    assertThatThrownBy(() -> bean.processCommand("get --key=key1 --region=region2")).isInstanceOf(GemFireSecurityException.class);
+    assertThatThrownBy(() -> bean.processCommand("get --key=key1 --region=region2")).isInstanceOf(GemFireSecurityException.class)
+        .hasMessageContaining("[data]:[read]:[region2]");
     }
 
 }
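The bracketed strings expected above, such as [data]:[write]:[region2], are what Shiro-style wildcard permissions look like when printed: each colon-separated part is rendered as a java.util.Set. A small hypothetical sketch, assuming the TestCommand permissions behave like Shiro's WildcardPermission (which the expected messages suggest):

    import org.apache.shiro.authz.permission.WildcardPermission;

    public class PermissionStringSketch {
      public static void main(String[] args) {
        // "data:write:region2" is split on ':' into parts; toString() prints each
        // part as a Set, producing the bracketed form seen in the assertions.
        WildcardPermission permission = new WildcardPermission("data:write:region2");
        System.out.println(permission); // prints [data]:[write]:[region2]
      }
    }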

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/DiskStoreMXBeanSecurityJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/DiskStoreMXBeanSecurityJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/DiskStoreMXBeanSecurityJUnitTest.java
index f248736..05d3e3d 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/DiskStoreMXBeanSecurityJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/DiskStoreMXBeanSecurityJUnitTest.java
@@ -69,15 +69,15 @@ public class DiskStoreMXBeanSecurityJUnitTest {
   @Test
   @JMXConnectionConfiguration(user = "data-user", password = "1234567")
   public void testNoAccess() throws Exception {
-    assertThatThrownBy(() -> bean.flush()).hasMessageContaining("DATA:MANAGE");
-    assertThatThrownBy(() -> bean.forceCompaction()).hasMessageContaining("DATA:MANAGE");
-    assertThatThrownBy(() -> bean.forceRoll()).hasMessageContaining("DATA:MANAGE");
-    assertThatThrownBy(() -> bean.getCompactionThreshold()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.getDiskDirectories()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.getDiskReadsRate()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.isAutoCompact()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.isForceCompactionAllowed()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.setDiskUsageCriticalPercentage(0.5f)).hasMessageContaining("DATA:MANAGE");
-    assertThatThrownBy(() -> bean.setDiskUsageWarningPercentage(0.5f)).hasMessageContaining("DATA:MANAGE");
+    assertThatThrownBy(() -> bean.flush()).hasMessageContaining(TestCommand.dataManage.toString());
+    assertThatThrownBy(() -> bean.forceCompaction()).hasMessageContaining(TestCommand.dataManage.toString());
+    assertThatThrownBy(() -> bean.forceRoll()).hasMessageContaining(TestCommand.dataManage.toString());
+    assertThatThrownBy(() -> bean.getCompactionThreshold()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.getDiskDirectories()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.getDiskReadsRate()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.isAutoCompact()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.isForceCompactionAllowed()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.setDiskUsageCriticalPercentage(0.5f)).hasMessageContaining(TestCommand.dataManage.toString());
+    assertThatThrownBy(() -> bean.setDiskUsageWarningPercentage(0.5f)).hasMessageContaining(TestCommand.dataManage.toString());
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GatewayReceiverMBeanSecurityTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GatewayReceiverMBeanSecurityTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GatewayReceiverMBeanSecurityTest.java
index b28069f..6c97694 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GatewayReceiverMBeanSecurityTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GatewayReceiverMBeanSecurityTest.java
@@ -16,10 +16,16 @@
  */
 package com.gemstone.gemfire.management.internal.security;
 
+import static org.assertj.core.api.Assertions.*;
+import static org.mockito.Mockito.*;
+
+import javax.management.ObjectName;
+
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.management.GatewayReceiverMXBean;
 import com.gemstone.gemfire.management.ManagementService;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -28,11 +34,6 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import javax.management.ObjectName;
-
-import static org.assertj.core.api.Assertions.assertThatThrownBy;
-import static org.mockito.Mockito.mock;
-
 @Category(IntegrationTest.class)
 public class GatewayReceiverMBeanSecurityTest {
   private static int jmxManagerPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
@@ -81,9 +82,9 @@ public class GatewayReceiverMBeanSecurityTest {
   @Test
   @JMXConnectionConfiguration(user = "data-user", password = "1234567")
   public void testNoAccess() throws Exception {
-    assertThatThrownBy(() -> bean.getTotalConnectionsTimedOut()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.start()).hasMessageContaining("DATA:MANAGE");
-    assertThatThrownBy(() -> bean.stop()).hasMessageContaining("DATA:MANAGE");
+    assertThatThrownBy(() -> bean.getTotalConnectionsTimedOut()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.start()).hasMessageContaining(TestCommand.dataManage.toString());
+    assertThatThrownBy(() -> bean.stop()).hasMessageContaining(TestCommand.dataManage.toString());
   }
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GatewaySenderMBeanSecurityTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GatewaySenderMBeanSecurityTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GatewaySenderMBeanSecurityTest.java
index 3a9412d..4806464 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GatewaySenderMBeanSecurityTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/security/GatewaySenderMBeanSecurityTest.java
@@ -88,18 +88,18 @@ public class GatewaySenderMBeanSecurityTest {
   @Test
   @JMXConnectionConfiguration(user = "stranger", password = "1234567")
   public void testNoAccess() throws Exception {
-    assertThatThrownBy(() -> bean.getAlertThreshold()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.getAverageDistributionTimePerBatch()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.getBatchSize()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.getMaximumQueueMemory()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.getOrderPolicy()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.isBatchConflationEnabled()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.isManualStart()).hasMessageContaining("CLUSTER:READ");
-    assertThatThrownBy(() -> bean.pause()).hasMessageContaining("DATA:MANAGE");
-    assertThatThrownBy(() -> bean.rebalance()).hasMessageContaining("DATA:MANAGE");
-    assertThatThrownBy(() -> bean.resume()).hasMessageContaining("DATA:MANAGE");
-    assertThatThrownBy(() -> bean.start()).hasMessageContaining("DATA:MANAGE");
-    assertThatThrownBy(() -> bean.stop()).hasMessageContaining("DATA:MANAGE");
+    assertThatThrownBy(() -> bean.getAlertThreshold()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.getAverageDistributionTimePerBatch()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.getBatchSize()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.getMaximumQueueMemory()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.getOrderPolicy()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.isBatchConflationEnabled()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.isManualStart()).hasMessageContaining(TestCommand.clusterRead.toString());
+    assertThatThrownBy(() -> bean.pause()).hasMessageContaining(TestCommand.dataManage.toString());
+    assertThatThrownBy(() -> bean.rebalance()).hasMessageContaining(TestCommand.dataManage.toString());
+    assertThatThrownBy(() -> bean.resume()).hasMessageContaining(TestCommand.dataManage.toString());
+    assertThatThrownBy(() -> bean.start()).hasMessageContaining(TestCommand.dataManage.toString());
+    assertThatThrownBy(() -> bean.stop()).hasMessageContaining(TestCommand.dataManage.toString());
   }
 
 }


[53/63] [abbrv] incubator-geode git commit: GEODE-11-Added-xml-support-for-analyzer-per-field

Posted by kl...@apache.org.
GEODE-11-Added-xml-support-for-analyzer-per-field


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/2a786ee7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/2a786ee7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/2a786ee7

Branch: refs/heads/feature/GEODE-1276
Commit: 2a786ee7aa3de5da4ffeeb841f6bbbcce55186f2
Parents: 8e74498
Author: Barry Oglesby <bo...@pivotal.io>
Authored: Fri Apr 29 15:58:49 2016 -0700
Committer: Barry Oglesby <bo...@pivotal.io>
Committed: Mon May 2 14:03:07 2016 -0700

----------------------------------------------------------------------
 .../gemfire/internal/i18n/LocalizedStrings.java |  2 +
 .../gemfire/cache/lucene/LuceneIndex.java       |  2 +-
 .../LuceneIndexForReplicatedRegion.java         |  2 +-
 .../cache/lucene/internal/LuceneIndexImpl.java  | 14 ++-
 .../lucene/internal/LuceneServiceImpl.java      | 21 +++--
 .../internal/xml/LuceneIndexCreation.java       | 42 +++++----
 .../internal/xml/LuceneIndexXmlGenerator.java   |  5 +
 .../lucene/internal/xml/LuceneXmlConstants.java |  1 +
 .../lucene/internal/xml/LuceneXmlParser.java    | 27 +++++-
 .../geode.apache.org/lucene/lucene-1.0.xsd      |  1 +
 ...uceneIndexXmlParserIntegrationJUnitTest.java | 52 +++++++++--
 .../xml/LuceneIndexXmlParserJUnitTest.java      | 97 ++++++++++++++------
 ...erIntegrationJUnitTest.createIndex.cache.xml |  7 +-
 ...nJUnitTest.parseIndexWithAnalyzers.cache.xml | 36 ++++++++
 14 files changed, 241 insertions(+), 68 deletions(-)
----------------------------------------------------------------------
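Besides the XML support, this commit threads a field-to-analyzer map through LuceneServiceImpl.createIndex. The following is a minimal programmatic sketch, assuming a running Cache, a region to be created at /region, and that the Map overload is exposed on LuceneService (as the @Override in LuceneServiceImpl suggests); index and field names are illustrative.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.core.KeywordAnalyzer;
    import org.apache.lucene.analysis.core.SimpleAnalyzer;

    import com.gemstone.gemfire.cache.Cache;
    import com.gemstone.gemfire.cache.lucene.LuceneService;
    import com.gemstone.gemfire.cache.lucene.LuceneServiceProvider;

    public class PerFieldAnalyzerSketch {
      public static void createIndex(Cache cache) {
        LuceneService service = LuceneServiceProvider.get(cache);
        Map<String, Analyzer> fieldAnalyzers = new HashMap<>();
        fieldAnalyzers.put("a", new KeywordAnalyzer()); // whole value as a single token
        fieldAnalyzers.put("b", new SimpleAnalyzer());  // letter-based tokenization
        // Each map key becomes a searchable field; its analyzer applies to that field only.
        // Invoke before creating the data region: the service finishes wiring the index
        // from a RegionListener once the region at /region is created.
        service.createIndex("perFieldIndex", "/region", fieldAnalyzers);
      }
    }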


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/2a786ee7/geode-core/src/main/java/com/gemstone/gemfire/internal/i18n/LocalizedStrings.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/i18n/LocalizedStrings.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/i18n/LocalizedStrings.java
index ff960ca..a6bbb86 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/i18n/LocalizedStrings.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/i18n/LocalizedStrings.java
@@ -2127,6 +2127,8 @@ public class LocalizedStrings extends ParentLocalizedStrings {
   public static final StringId SwaggerConfig_DOC_TITLE = new StringId(6621, "Apache Geode Documentation");
   public static final StringId SwaggerConfig_DOC_LINK = new StringId(6622, "http://geode.incubator.apache.org/docs/");
 
+  public static final StringId LuceneXmlParser_CLASS_0_IS_NOT_AN_INSTANCE_OF_ANALYZER = new StringId(6623, "Class \"{0}\" is not an instance of Analyzer.");
+
   /** Testing strings, messageId 90000-99999 **/
   
   /** These are simple messages for testing, translated with Babelfish. **/

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/2a786ee7/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/LuceneIndex.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/LuceneIndex.java b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/LuceneIndex.java
index 7475fde..743045b 100644
--- a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/LuceneIndex.java
+++ b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/LuceneIndex.java
@@ -54,6 +54,6 @@ public interface LuceneIndex {
   /**
    * @return the field to analyzer map
    */
-  public Map<String, Analyzer> getFieldAnalyzerMap();
+  public Map<String, Analyzer> getFieldAnalyzers();
   
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/2a786ee7/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexForReplicatedRegion.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexForReplicatedRegion.java b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexForReplicatedRegion.java
index 7c585cf..cd07672 100644
--- a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexForReplicatedRegion.java
+++ b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexForReplicatedRegion.java
@@ -41,7 +41,7 @@ public class LuceneIndexForReplicatedRegion extends LuceneIndexImpl {
   }
 
   @Override
-  public Map<String, Analyzer> getFieldAnalyzerMap() {
+  public Map<String, Analyzer> getFieldAnalyzers() {
     throw new UnsupportedOperationException("Lucene indexes on replicated regions is not yet implemented");
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/2a786ee7/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexImpl.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexImpl.java b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexImpl.java
index f869755..f530f8c 100644
--- a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexImpl.java
+++ b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexImpl.java
@@ -19,6 +19,7 @@
 
 package com.gemstone.gemfire.cache.lucene.internal;
 
+import java.util.Collections;
 import java.util.Map;
 
 import org.apache.logging.log4j.Logger;
@@ -47,6 +48,7 @@ public abstract class LuceneIndexImpl implements InternalLuceneIndex {
   protected String indexName;
   protected String regionPath;
   protected boolean hasInitialized = false;
+  protected Map<String, Analyzer> fieldAnalyzers;
 
   @Override
   public String getName() {
@@ -68,10 +70,8 @@ public abstract class LuceneIndexImpl implements InternalLuceneIndex {
   }
 
   @Override
-  public Map<String, Analyzer> getFieldAnalyzerMap() {
-    // TODO Auto-generated method stub
-    // Will do that later: Gester
-    return null;
+  public Map<String, Analyzer> getFieldAnalyzers() {
+    return this.fieldAnalyzers;
   }
 
   public RepositoryManager getRepositoryManager() {
@@ -90,6 +90,10 @@ public abstract class LuceneIndexImpl implements InternalLuceneIndex {
     return this.analyzer;
   }
 
+  public void setFieldAnalyzers(Map<String, Analyzer> fieldAnalyzers) {
+    this.fieldAnalyzers = Collections.unmodifiableMap(fieldAnalyzers);
+  }
+
   protected abstract void initialize();
   
   /**
@@ -101,7 +105,7 @@ public abstract class LuceneIndexImpl implements InternalLuceneIndex {
     creation.setName(this.getName());
     creation.addFieldNames(this.getFieldNames());
     creation.setRegion(dataRegion);
-    creation.setFieldFieldAnalyzerMap(this.getFieldAnalyzerMap());
+    creation.setFieldAnalyzers(this.getFieldAnalyzers());
     dataRegion.getExtensionPoint().addExtension(creation);
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/2a786ee7/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/LuceneServiceImpl.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/LuceneServiceImpl.java b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/LuceneServiceImpl.java
index 9d6aed4..58a9b20 100644
--- a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/LuceneServiceImpl.java
+++ b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/LuceneServiceImpl.java
@@ -22,6 +22,7 @@ package com.gemstone.gemfire.cache.lucene.internal;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.analysis.Analyzer;
@@ -102,19 +103,21 @@ public class LuceneServiceImpl implements InternalLuceneService {
   public void createIndex(String indexName, String regionPath, String... fields) {
     StandardAnalyzer analyzer = new StandardAnalyzer();
     
-    createIndex(indexName, regionPath, analyzer, fields);
+    createIndex(indexName, regionPath, analyzer, null, fields);
   }
   
   @Override
-  public void createIndex(String indexName, String regionPath, Map<String, Analyzer> analyzerPerField) {
-    Analyzer analyzer = new PerFieldAnalyzerWrapper(new StandardAnalyzer(), analyzerPerField);
-    String[] fields = (String[])analyzerPerField.keySet().toArray(new String[analyzerPerField.keySet().size()]);
+  public void createIndex(String indexName, String regionPath, Map<String, Analyzer> fieldAnalyzers) {
+    Analyzer analyzer = new PerFieldAnalyzerWrapper(new StandardAnalyzer(), fieldAnalyzers);
+    Set<String> fieldsSet = fieldAnalyzers.keySet();
+    String[] fields = (String[])fieldsSet.toArray(new String[fieldsSet.size()]);
 
-    createIndex(indexName, regionPath, analyzer, fields);
+    createIndex(indexName, regionPath, analyzer, fieldAnalyzers, fields);
   }
 
   private void createIndex(final String indexName, String regionPath,
-      final Analyzer analyzer, final String... fields) {
+      final Analyzer analyzer, final Map<String, Analyzer> fieldAnalyzers,
+      final String... fields) {
 
     if(!regionPath.startsWith("/")) {
       regionPath = "/" + regionPath;
@@ -143,7 +146,7 @@ public class LuceneServiceImpl implements InternalLuceneService {
       @Override
       public void afterCreate(Region region) {
         if(region.getFullPath().equals(dataRegionPath)) {
-          afterDataRegionCreated(indexName, analyzer, dataRegionPath, fields);
+          afterDataRegionCreated(indexName, analyzer, dataRegionPath, fieldAnalyzers, fields);
           cache.removeRegionListener(this);
         }
       }
@@ -158,11 +161,11 @@ public class LuceneServiceImpl implements InternalLuceneService {
    */
   public void afterDataRegionCreated(final String indexName,
       final Analyzer analyzer, final String dataRegionPath,
-      final String... fields) {
+      final Map<String, Analyzer> fieldAnalyzers, final String... fields) {
     LuceneIndexImpl index = createIndexRegions(indexName, dataRegionPath);
     index.setSearchableFields(fields);
-    // for this API, set index to use the default StandardAnalyzer for each field
     index.setAnalyzer(analyzer);
+    index.setFieldAnalyzers(fieldAnalyzers);
     index.initialize();
     registerIndex(index);
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/2a786ee7/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexCreation.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexCreation.java b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexCreation.java
index e664895..86a10e4 100644
--- a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexCreation.java
+++ b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexCreation.java
@@ -19,12 +19,10 @@
 
 package com.gemstone.gemfire.cache.lucene.internal.xml;
 
-import java.util.Arrays;
-import java.util.LinkedHashSet;
-import java.util.Map;
-import java.util.Set;
+import java.util.*;
 
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 
 import com.gemstone.gemfire.cache.Cache;
@@ -41,7 +39,7 @@ public class LuceneIndexCreation implements LuceneIndex, Extension<Region<?, ?>>
   private Region region;
   private String name;
   private Set<String> fieldNames = new LinkedHashSet<String>();
-  private Map<String, Analyzer> fieldFieldAnalyzerMap;
+  private Map<String, Analyzer> fieldAnalyzers;
 
   
   public void setRegion(Region region) {
@@ -52,18 +50,17 @@ public class LuceneIndexCreation implements LuceneIndex, Extension<Region<?, ?>>
     this.name = name;
   }
 
-  public Map<String, Analyzer> getFieldFieldAnalyzerMap() {
-    return fieldFieldAnalyzerMap;
-  }
-
-  public void setFieldFieldAnalyzerMap(
-      Map<String, Analyzer> fieldFieldAnalyzerMap) {
-    this.fieldFieldAnalyzerMap = fieldFieldAnalyzerMap;
+  public void setFieldAnalyzers(
+      Map<String, Analyzer> fieldAnalyzers) {
+    this.fieldAnalyzers = fieldAnalyzers;
   }
   
   @Override
-  public Map<String, Analyzer> getFieldAnalyzerMap() {
-    return this.fieldFieldAnalyzerMap;
+  public Map<String, Analyzer> getFieldAnalyzers() {
+    if (this.fieldAnalyzers == null) {
+      this.fieldAnalyzers = new HashMap<>();
+    }
+    return this.fieldAnalyzers;
   }
 
   public String getName() {
@@ -87,7 +84,6 @@ public class LuceneIndexCreation implements LuceneIndex, Extension<Region<?, ?>>
   @Override
   public void onCreate(Extensible<Region<?, ?>> source,
       Extensible<Region<?, ?>> target) {
-    target.getExtensionPoint().addExtension(this);
     Cache cache = target.getExtensionPoint().getTarget().getCache();
     LuceneServiceImpl service = (LuceneServiceImpl) LuceneServiceProvider.get(cache);
     Region region = target.getExtensionPoint().getTarget();
@@ -97,15 +93,25 @@ public class LuceneIndexCreation implements LuceneIndex, Extension<Region<?, ?>>
     //TODO - this may only work for PRs. We need to intercept the attributes
     //before the region is created with a RegionListener.
     region.getAttributesMutator().addAsyncEventQueueId(aeqId);
-    service.afterDataRegionCreated(getName(), new StandardAnalyzer(), getRegionPath(), getFieldNames());
+    Analyzer analyzer = null;
+    if (this.fieldAnalyzers == null) {
+      analyzer = new StandardAnalyzer();
+    } else {
+      analyzer = new PerFieldAnalyzerWrapper(new StandardAnalyzer(), this.fieldAnalyzers);
+    }
+    service.afterDataRegionCreated(getName(), analyzer, getRegionPath(), this.fieldAnalyzers, getFieldNames());
+  }
+
+  protected void addField(String name) {
+    this.fieldNames.add(name);
   }
 
-  public void addField(String name) {
+  protected void addFieldAndAnalyzer(String name, Analyzer analyzer) {
     this.fieldNames.add(name);
+    getFieldAnalyzers().put(name, analyzer);
   }
 
   public void addFieldNames(String[] fieldNames) {
     this.fieldNames.addAll(Arrays.asList(fieldNames));
-    
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/2a786ee7/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlGenerator.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlGenerator.java b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlGenerator.java
index 6399a80..37c9ca2 100644
--- a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlGenerator.java
+++ b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlGenerator.java
@@ -21,6 +21,7 @@ package com.gemstone.gemfire.cache.lucene.internal.xml;
 
 import static com.gemstone.gemfire.cache.lucene.internal.xml.LuceneXmlConstants.*;
 
+import org.apache.lucene.analysis.Analyzer;
 import org.xml.sax.ContentHandler;
 import org.xml.sax.SAXException;
 import org.xml.sax.helpers.AttributesImpl;
@@ -57,6 +58,10 @@ public class LuceneIndexXmlGenerator implements XmlGenerator<Region<?, ?>> {
     for(String field : index.getFieldNames()) {
       AttributesImpl fieldAttr = new AttributesImpl();
       XmlGeneratorUtils.addAttribute(fieldAttr, NAME, field);
+      Analyzer analyzer = index.getFieldAnalyzers().get(field);
+      if (analyzer != null) {
+        XmlGeneratorUtils.addAttribute(fieldAttr, ANALYZER, analyzer.getClass().getName());
+      }
       XmlGeneratorUtils.emptyElement(handler, PREFIX, FIELD, fieldAttr);
     }
     XmlGeneratorUtils.endElement(handler, PREFIX, INDEX);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/2a786ee7/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneXmlConstants.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneXmlConstants.java b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneXmlConstants.java
index bc80180..91d1643 100644
--- a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneXmlConstants.java
+++ b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneXmlConstants.java
@@ -27,5 +27,6 @@ public class LuceneXmlConstants {
   public static final String REGION = "index";
   public static final String INDEX = "index";
   public static final String FIELD = "field";
+  public static final String ANALYZER = "analyzer";
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/2a786ee7/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneXmlParser.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneXmlParser.java b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneXmlParser.java
index 764f461..40bf0ac 100644
--- a/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneXmlParser.java
+++ b/geode-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneXmlParser.java
@@ -21,6 +21,10 @@ package com.gemstone.gemfire.cache.lucene.internal.xml;
 
 import static com.gemstone.gemfire.cache.lucene.internal.xml.LuceneXmlConstants.*;
 
+import com.gemstone.gemfire.cache.CacheXmlException;
+import com.gemstone.gemfire.internal.InternalDataSerializer;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import org.apache.lucene.analysis.Analyzer;
 import org.xml.sax.Attributes;
 import org.xml.sax.SAXException;
 
@@ -58,7 +62,13 @@ public class LuceneXmlParser extends AbstractXmlParser {
     }
     LuceneIndexCreation creation = (LuceneIndexCreation) stack.peek();
     String name = atts.getValue(NAME);
-    creation.addField(name);
+    String className = atts.getValue(ANALYZER);
+    if (className == null) {
+      creation.addField(name);
+    } else {
+      Analyzer analyzer = createAnalyzer(className);
+      creation.addFieldAndAnalyzer(name, analyzer);
+    }
   }
 
   private void startIndex(Attributes atts) {
@@ -94,4 +104,19 @@ public class LuceneXmlParser extends AbstractXmlParser {
     //Remove the index creation from the stack
     stack.pop();
   }
+
+  private Analyzer createAnalyzer(String className) {
+    Object obj;
+    try {
+      Class c = InternalDataSerializer.getCachedClass(className);
+      obj = c.newInstance();
+    }
+    catch (Exception ex) {
+      throw new CacheXmlException(LocalizedStrings.CacheXmlParser_WHILE_INSTANTIATING_A_0.toLocalizedString(className), ex);
+    }
+    if (!(obj instanceof Analyzer)) {
+      throw new CacheXmlException(LocalizedStrings.LuceneXmlParser_CLASS_0_IS_NOT_AN_INSTANCE_OF_ANALYZER.toLocalizedString(className));
+    }
+    return (Analyzer) obj;
+  }
 }
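Because the parser above instantiates the analyzer attribute reflectively with a no-argument newInstance() call and then checks instanceof Analyzer, any custom analyzer referenced from cache XML needs to be a public class with a public zero-arg constructor that extends org.apache.lucene.analysis.Analyzer. A hypothetical example (class name, package, and tokenizer choice are illustrative, assuming the Lucene 5+ Analyzer API):

    package com.example.lucene;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.core.KeywordTokenizer;

    // Referenced from cache XML as:
    //   <lucene:field name="sku" analyzer="com.example.lucene.WholeValueAnalyzer"/>
    public class WholeValueAnalyzer extends Analyzer {

      public WholeValueAnalyzer() {
        // Public no-arg constructor is required: LuceneXmlParser calls newInstance().
      }

      @Override
      protected TokenStreamComponents createComponents(String fieldName) {
        // Index the entire field value as a single token.
        return new TokenStreamComponents(new KeywordTokenizer());
      }
    }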

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/2a786ee7/geode-lucene/src/main/resources/META-INF/schemas/geode.apache.org/lucene/lucene-1.0.xsd
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/resources/META-INF/schemas/geode.apache.org/lucene/lucene-1.0.xsd b/geode-lucene/src/main/resources/META-INF/schemas/geode.apache.org/lucene/lucene-1.0.xsd
index 6fd7306..ec82c2f 100644
--- a/geode-lucene/src/main/resources/META-INF/schemas/geode.apache.org/lucene/lucene-1.0.xsd
+++ b/geode-lucene/src/main/resources/META-INF/schemas/geode.apache.org/lucene/lucene-1.0.xsd
@@ -48,6 +48,7 @@ XML schema for Lucene indexes in Geode.
     	  <xsd:element name="field" maxOccurs="unbounded">
 			<xsd:complexType>
 				<xsd:attribute name="name" type="xsd:string" />
+				<xsd:attribute name="analyzer" type="xsd:string" />
 			</xsd:complexType>
     	  </xsd:element>
     	</xsd:sequence>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/2a786ee7/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.java b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.java
index 62b4f5a..f2972d4 100644
--- a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.java
+++ b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.java
@@ -27,6 +27,10 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.apache.lucene.analysis.core.SimpleAnalyzer;
+import org.apache.lucene.analysis.standard.ClassicAnalyzer;
 import org.junit.After;
 import org.junit.Rule;
 import org.junit.Test;
@@ -65,21 +69,55 @@ public class LuceneIndexXmlParserIntegrationJUnitTest {
    */
   @Test
   public void parseIndex() throws FileNotFoundException {
-    CacheXmlParser parser = CacheXmlParser.parse(new FileInputStream(getXmlFileForTest()));
-    CacheCreation cache = parser.getCacheCreation();
-    RegionCreation region = (RegionCreation) cache.getRegion("region");
+    RegionCreation region = createRegionCreation("region");
     Map<String, String[]> expectedIndexes = new HashMap<String, String[]>();
     expectedIndexes.put("index1", new String[] {"a", "b", "c", "d"});
-    expectedIndexes.put("index2", new String[] { "f", "g"});
+    expectedIndexes.put("index2", new String[] {"f", "g"});
+    validateExpectedIndexes(region, expectedIndexes);
+  }
+
+  @Test
+  public void parseIndexWithAnalyzers() throws FileNotFoundException {
+    RegionCreation region = createRegionCreation("region");
+
+    // Validate expected indexes
+    Map<String, String[]> expectedIndexes = new HashMap<String, String[]>();
+    expectedIndexes.put("index", new String[] {"a", "b", "c"});
+    validateExpectedIndexes(region, expectedIndexes);
+
+    // Validate expected analyzers
+    Map<String, Map<String,Class<? extends Analyzer>>> expectedIndexAnalyzers = new HashMap<>();
+    Map<String,Class<? extends Analyzer>> expectedFieldAnalyzers = new HashMap<>();
+    expectedFieldAnalyzers.put("a", KeywordAnalyzer.class);
+    expectedFieldAnalyzers.put("b", SimpleAnalyzer.class);
+    expectedFieldAnalyzers.put("c", ClassicAnalyzer.class);
+    expectedIndexAnalyzers.put("index", expectedFieldAnalyzers);
+    validateExpectedAnalyzers(region, expectedIndexAnalyzers);
+  }
+
+  private RegionCreation createRegionCreation(String regionName) throws FileNotFoundException {
+    CacheXmlParser parser = CacheXmlParser.parse(new FileInputStream(getXmlFileForTest()));
+    CacheCreation cache = parser.getCacheCreation();
+    return (RegionCreation) cache.getRegion(regionName);
+  }
+
+  private void validateExpectedIndexes(RegionCreation region, Map<String, String[]> expectedIndexes) {
     for(Extension extension : region.getExtensionPoint().getExtensions()) {
       LuceneIndexCreation index = (LuceneIndexCreation) extension;
       assertEquals("/region", index.getRegionPath());
       assertArrayEquals(expectedIndexes.remove(index.getName()), index.getFieldNames());
     }
-    
     assertEquals(Collections.emptyMap(),expectedIndexes);
   }
 
+  private void validateExpectedAnalyzers(RegionCreation region, Map<String, Map<String,Class<? extends Analyzer>>> expectedIndexAnalyzers) {
+    for(Extension extension : region.getExtensionPoint().getExtensions()) {
+      LuceneIndexCreation index = (LuceneIndexCreation) extension;
+      expectedIndexAnalyzers.remove(index.getName());
+    }
+    assertEquals(Collections.emptyMap(),expectedIndexAnalyzers);
+  }
+
   /**
    * Test that the Index creation objects get appropriately translated
    * into a real index.
@@ -93,11 +131,13 @@ public class LuceneIndexXmlParserIntegrationJUnitTest {
     Cache cache = cf.create();
 
     LuceneService service = LuceneServiceProvider.get(cache);
-    assertEquals(2, service.getAllIndexes().size());
+    assertEquals(3, service.getAllIndexes().size());
     LuceneIndex index1 = service.getIndex("index1", "/region");
     LuceneIndex index2 = service.getIndex("index2", "/region");
+    LuceneIndex index3 = service.getIndex("index3", "/region");
     assertArrayEquals(index1.getFieldNames(), new String[] {"a", "b", "c", "d"});
     assertArrayEquals(index2.getFieldNames(), new String[] { "f", "g"});
+    assertArrayEquals(index3.getFieldNames(), new String[] { "h", "i", "j"});
   }
 
   private String getXmlFileForTest() {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/2a786ee7/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserJUnitTest.java b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserJUnitTest.java
index 298c92f..cae2142 100644
--- a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserJUnitTest.java
+++ b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserJUnitTest.java
@@ -21,8 +21,13 @@ package com.gemstone.gemfire.cache.lucene.internal.xml;
 
 import static org.junit.Assert.*;
 
+import java.util.Map;
 import java.util.Stack;
 
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.xml.sax.SAXException;
@@ -35,38 +40,78 @@ import com.gemstone.gemfire.test.junit.categories.UnitTest;
 
 @Category(UnitTest.class)
 public class LuceneIndexXmlParserJUnitTest {
-  
-  @Test
-  public void generateWithFields() throws SAXException {
-    LuceneXmlParser parser = new LuceneXmlParser();
-    AttributesImpl attrs = new AttributesImpl();
+
+  private LuceneXmlParser parser;
+
+  private RegionCreation rc;
+
+  private Stack<Object> stack;
+
+  @Before
+  public void setUp() {
+    this.parser = new LuceneXmlParser();
     CacheCreation cache = new CacheCreation();
-    RegionCreation rc = new RegionCreation(cache, "region");
-    Stack<Object> stack = new Stack<Object>();
+    this.rc = new RegionCreation(cache, "region");
+    this.stack = new Stack<Object>();
     stack.push(cache);
     stack.push(rc);
-    parser.setStack(stack);
+    this.parser.setStack(stack);
+  }
+
+  @After
+  public void tearDown() {
+    this.parser = null;
+    this.rc = null;
+    this.stack = null;
+  }
+
+  @Test
+  public void generateWithFields() throws SAXException {
+    AttributesImpl attrs = new AttributesImpl();
     XmlGeneratorUtils.addAttribute(attrs, LuceneXmlConstants.NAME, "index");
-    parser.startElement(LuceneXmlConstants.NAMESPACE, LuceneXmlConstants.INDEX, null, attrs);
-    
-    AttributesImpl field1 = new AttributesImpl();
-    XmlGeneratorUtils.addAttribute(field1, LuceneXmlConstants.NAME, "field1");
-    AttributesImpl field2 = new AttributesImpl();
-    XmlGeneratorUtils.addAttribute(field2, LuceneXmlConstants.NAME, "field2");
-    
-    parser.startElement(LuceneXmlConstants.NAMESPACE, LuceneXmlConstants.FIELD, null, field1);
-    parser.endElement(LuceneXmlConstants.NAMESPACE, LuceneXmlConstants.FIELD, null);
-    parser.startElement(LuceneXmlConstants.NAMESPACE, LuceneXmlConstants.FIELD, null, field2);
-    parser.endElement(LuceneXmlConstants.NAMESPACE, LuceneXmlConstants.FIELD, null);
-    
-    
-    parser.endElement(LuceneXmlConstants.NAMESPACE, LuceneXmlConstants.INDEX, null);
-    assertEquals(rc, stack.peek());
+    this.parser.startElement(LuceneXmlConstants.NAMESPACE, LuceneXmlConstants.INDEX, null, attrs);
+
+    addField("field1");
+    addField("field2");
+    addField("field3", KeywordAnalyzer.class.getName());
+
+    this.parser.endElement(LuceneXmlConstants.NAMESPACE, LuceneXmlConstants.INDEX, null);
+    assertEquals(this.rc, this.stack.peek());
     
-    LuceneIndexCreation index = (LuceneIndexCreation) rc.getExtensionPoint().getExtensions().iterator().next();
+    LuceneIndexCreation index = (LuceneIndexCreation) this.rc.getExtensionPoint().getExtensions().iterator().next();
     assertEquals("index", index.getName());
-    assertArrayEquals(new String[] {"field1", "field2"}, index.getFieldNames());
+    assertArrayEquals(new String[] {"field1", "field2", "field3"}, index.getFieldNames());
+
+    // Assert analyzers
+    Map<String, Analyzer> fieldAnalyzers = index.getFieldAnalyzers();
+    assertEquals(1, fieldAnalyzers.size());
+    assertTrue(fieldAnalyzers.containsKey("field3"));
+    assertTrue(fieldAnalyzers.get("field3") instanceof KeywordAnalyzer);
+  }
+
+  @Test
+  public void attemptInvalidAnalyzerClass() throws SAXException {
+    AttributesImpl attrs = new AttributesImpl();
+    XmlGeneratorUtils.addAttribute(attrs, LuceneXmlConstants.NAME, "index");
+    this.parser.startElement(LuceneXmlConstants.NAMESPACE, LuceneXmlConstants.INDEX, null, attrs);
+    try {
+      addField("field", "some.invalid.class");
+      fail("Should not have been able to add a field with an invalid analyzer class name");
+    } catch (Exception e) {
+    }
   }
-  
 
+  private void addField(String fieldName) throws SAXException {
+    addField(fieldName, null);
+  }
+
+  private void addField(String fieldName, String analyzerClassName) throws SAXException {
+    AttributesImpl field = new AttributesImpl();
+    XmlGeneratorUtils.addAttribute(field, LuceneXmlConstants.NAME, fieldName);
+    if (analyzerClassName != null) {
+      XmlGeneratorUtils.addAttribute(field, LuceneXmlConstants.ANALYZER, analyzerClassName);
+    }
+    this.parser.startElement(LuceneXmlConstants.NAMESPACE, LuceneXmlConstants.FIELD, null, field);
+    this.parser.endElement(LuceneXmlConstants.NAMESPACE, LuceneXmlConstants.FIELD, null);
+  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/2a786ee7/geode-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.createIndex.cache.xml
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.createIndex.cache.xml b/geode-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.createIndex.cache.xml
index 89d5bef..acbf2c3 100644
--- a/geode-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.createIndex.cache.xml
+++ b/geode-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.createIndex.cache.xml
@@ -37,5 +37,10 @@
     	  <lucene:field name="f"/>
     	  <lucene:field name="g"/>
     	</lucene:index>
-    </region>    
+		<lucene:index name="index3">
+		  <lucene:field name="h" analyzer="org.apache.lucene.analysis.core.KeywordAnalyzer"/>
+		  <lucene:field name="i" analyzer="org.apache.lucene.analysis.core.SimpleAnalyzer"/>
+		  <lucene:field name="j" analyzer="org.apache.lucene.analysis.standard.ClassicAnalyzer"/>
+		</lucene:index>
+    </region>
 </cache>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/2a786ee7/geode-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.parseIndexWithAnalyzers.cache.xml
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.parseIndexWithAnalyzers.cache.xml b/geode-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.parseIndexWithAnalyzers.cache.xml
new file mode 100644
index 0000000..eed71fc
--- /dev/null
+++ b/geode-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.parseIndexWithAnalyzers.cache.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<cache
+    xmlns="http://geode.apache.org/schema/cache"
+    xmlns:lucene="http://geode.apache.org/schema/lucene"
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://geode.apache.org/schema/cache
+        http://geode.apache.org/schema/cache/cache-1.0.xsd
+        http://geode.apache.org/schema/lucene
+        http://geode.apache.org/schema/lucene/lucene-1.0.xsd"
+    version="1.0">
+
+	<region name="region" refid="PARTITION">
+		<lucene:index name="index">
+		  <lucene:field name="a" analyzer="org.apache.lucene.analysis.core.KeywordAnalyzer"/>
+		  <lucene:field name="b" analyzer="org.apache.lucene.analysis.core.SimpleAnalyzer"/>
+		  <lucene:field name="c" analyzer="org.apache.lucene.analysis.standard.ClassicAnalyzer"/>
+		</lucene:index>
+    </region>
+</cache>
\ No newline at end of file
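
The new parseIndexWithAnalyzers cache.xml above attaches a per-field Analyzer to each indexed field of a Lucene index declared on a PARTITION region. For comparison, a rough programmatic sketch of the same setup follows; it assumes a LuceneService#createIndex overload that accepts a Map of field names to Analyzer instances is available on this branch, so treat the exact signature as illustrative, not authoritative — the XML form is what the integration test actually exercises.

    // A minimal sketch only: assumes LuceneService#createIndex(String, String,
    // Map<String, Analyzer>) exists on this branch; the cache.xml above is the
    // form exercised by LuceneIndexXmlParserIntegrationJUnitTest.
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.core.KeywordAnalyzer;
    import org.apache.lucene.analysis.core.SimpleAnalyzer;
    import org.apache.lucene.analysis.standard.ClassicAnalyzer;

    import com.gemstone.gemfire.cache.Cache;
    import com.gemstone.gemfire.cache.lucene.LuceneService;
    import com.gemstone.gemfire.cache.lucene.LuceneServiceProvider;

    public class ParseIndexWithAnalyzersSketch {
      public static void defineIndex(Cache cache) {
        LuceneService luceneService = LuceneServiceProvider.get(cache);
        Map<String, Analyzer> analyzerPerField = new HashMap<String, Analyzer>();
        analyzerPerField.put("a", new KeywordAnalyzer());
        analyzerPerField.put("b", new SimpleAnalyzer());
        analyzerPerField.put("c", new ClassicAnalyzer());
        // The index must be defined before the "region" PARTITION region is
        // created, mirroring the ordering implied by the cache.xml above.
        luceneService.createIndex("index", "region", analyzerPerField);
      }
    }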


[52/63] [abbrv] incubator-geode git commit: GEODE-1259: Upgrade gradle version to 2.12

Posted by kl...@apache.org.
GEODE-1259: Upgrade gradle version to 2.12

This also addresses GEODE-1085 and GEODE-1261


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/8e744982
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/8e744982
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/8e744982

Branch: refs/heads/feature/GEODE-1276
Commit: 8e744982a1cc5da50d4eb5640a2dae8ed87dfd24
Parents: 152ef59
Author: Jason Huynh <hu...@gmail.com>
Authored: Mon Apr 25 11:00:16 2016 -0700
Committer: Jason Huynh <hu...@gmail.com>
Committed: Mon May 2 10:33:49 2016 -0700

----------------------------------------------------------------------
 BUILDING.md                              |   2 +-
 build.gradle                             |   4 ++++
 gradle.properties                        |   1 +
 gradle/wrapper/gradle-wrapper.jar        | Bin 53637 -> 53639 bytes
 gradle/wrapper/gradle-wrapper.properties |   2 +-
 settings.gradle                          |   1 -
 6 files changed, 7 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8e744982/BUILDING.md
----------------------------------------------------------------------
diff --git a/BUILDING.md b/BUILDING.md
index ababe2b..f2eeb62 100644
--- a/BUILDING.md
+++ b/BUILDING.md
@@ -39,7 +39,7 @@ or the `gfsh` script can be found in
     ```
     $ set JAVA_HOME="C:\Program Files\Java\jdk1.8.0_60"
     ```
-2. Install Gradle, version 2.3 or a more recent version.
+2. Install Gradle, version 2.12 or a more recent version.
 3. Download the project source from the Releases page at [Apache Geode (incubating)] (http://geode.incubator.apache.org), and unpack the source code.
 4. Within the folder containing the unpacked source code, build without the tests:
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8e744982/build.gradle
----------------------------------------------------------------------
diff --git a/build.gradle b/build.gradle
index 4f79eae..9a3ff10 100755
--- a/build.gradle
+++ b/build.gradle
@@ -29,6 +29,10 @@ buildscript {
 
 apply plugin: 'wrapper'
 
+wrapper {
+  gradleVersion = minimumGradleVersion
+}
+
 // Load all properties in dependency-version.properties as project properties, so all projects can read them
 Properties dependencyVersions = new Properties()
 dependencyVersions.load(new FileInputStream("${project.projectDir}/gradle/dependency-versions.properties"))

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8e744982/gradle.properties
----------------------------------------------------------------------
diff --git a/gradle.properties b/gradle.properties
index 669baed..ad39dc6 100755
--- a/gradle.properties
+++ b/gradle.properties
@@ -23,6 +23,7 @@ releaseType = .M3-SNAPSHOT
 org.gradle.daemon = true
 org.gradle.jvmargs = -Xmx2048m
 
+minimumGradleVersion = 2.12
 # Set this on the command line with -P or in ~/.gradle/gradle.properties
 # to change the buildDir location.  Use an absolute path.
 buildRoot=

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8e744982/gradle/wrapper/gradle-wrapper.jar
----------------------------------------------------------------------
diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar
index 05ef575..2c6137b 100644
Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8e744982/gradle/wrapper/gradle-wrapper.properties
----------------------------------------------------------------------
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
index 7b0d17a..72f7318 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -3,4 +3,4 @@ distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
 zipStoreBase=GRADLE_USER_HOME
 zipStorePath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-2.8-bin.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-2.12-bin.zip

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8e744982/settings.gradle
----------------------------------------------------------------------
diff --git a/settings.gradle b/settings.gradle
index c579dce..188faa4 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -35,7 +35,6 @@ include 'extensions/geode-modules-hibernate'
 include 'extensions/geode-modules-session'
 include 'extensions/geode-modules-assembly'
 
-def minimumGradleVersion = '2.3'
 if (GradleVersion.current() < GradleVersion.version(minimumGradleVersion)) {
   throw new GradleException('Running with unsupported Gradle Version. Use Gradle Wrapper or with Gradle version >= ' + minimumGradleVersion)
 }
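
Taken together, the hunks above move the minimum Gradle version into a single property: gradle.properties now declares minimumGradleVersion, build.gradle pins the wrapper task to it, and settings.gradle reuses the same property for its version check instead of a local def. Restating only what the diffs above add or keep, the resulting configuration is:

    # gradle.properties
    minimumGradleVersion = 2.12

    // build.gradle
    apply plugin: 'wrapper'
    wrapper {
      gradleVersion = minimumGradleVersion
    }

    // settings.gradle
    if (GradleVersion.current() < GradleVersion.version(minimumGradleVersion)) {
      throw new GradleException('Running with unsupported Gradle Version. Use Gradle Wrapper or with Gradle version >= ' + minimumGradleVersion)
    }

Re-running the wrapper task (gradle wrapper, or ./gradlew wrapper from an existing checkout) then regenerates gradle/wrapper/gradle-wrapper.properties with the gradle-2.12 distribution URL shown in the wrapper-properties hunk.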


[41/63] [abbrv] incubator-geode git commit: GEODE-1323: Extend PerTestClassLoaderRunner to work with Rules

Posted by kl...@apache.org.
GEODE-1323: Extend PerTestClassLoaderRunner to work with Rules


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/a20efb92
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/a20efb92
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/a20efb92

Branch: refs/heads/feature/GEODE-1276
Commit: a20efb927f29512677d4191094aa3d8445549225
Parents: bcae906
Author: Jens Deppe <jd...@pivotal.io>
Authored: Fri Apr 29 09:26:53 2016 -0700
Committer: Jens Deppe <jd...@pivotal.io>
Committed: Fri Apr 29 10:41:49 2016 -0700

----------------------------------------------------------------------
 .../session/junit/PerTestClassLoaderRunner.java | 136 ++++---------------
 1 file changed, 28 insertions(+), 108 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a20efb92/extensions/geode-modules-session/src/test/java/com/gemstone/gemfire/modules/session/junit/PerTestClassLoaderRunner.java
----------------------------------------------------------------------
diff --git a/extensions/geode-modules-session/src/test/java/com/gemstone/gemfire/modules/session/junit/PerTestClassLoaderRunner.java b/extensions/geode-modules-session/src/test/java/com/gemstone/gemfire/modules/session/junit/PerTestClassLoaderRunner.java
index 3e5c998..dda5307 100644
--- a/extensions/geode-modules-session/src/test/java/com/gemstone/gemfire/modules/session/junit/PerTestClassLoaderRunner.java
+++ b/extensions/geode-modules-session/src/test/java/com/gemstone/gemfire/modules/session/junit/PerTestClassLoaderRunner.java
@@ -36,9 +36,12 @@ package com.gemstone.gemfire.modules.session.junit;
 
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.internal.runners.statements.Fail;
 import org.junit.internal.runners.statements.RunAfters;
 import org.junit.internal.runners.statements.RunBefores;
+import org.junit.rules.MethodRule;
+import org.junit.rules.TestRule;
 import org.junit.runners.model.FrameworkMethod;
 import org.junit.runners.model.InitializationError;
 import org.junit.runners.model.Statement;
@@ -65,6 +68,9 @@ public class PerTestClassLoaderRunner extends NamedRunner {
   private TestClass testClassFromClassLoader;
   private Object beforeFromClassLoader;
   private Object afterFromClassLoader;
+  private Object ruleFromClassLoader;
+  private Object testRuleFromClassLoader;
+  private Object methodRuleFromClassLoader;
 
   /**
    * Instantiates a new test per class loader runner.
@@ -117,6 +123,9 @@ public class PerTestClassLoaderRunner extends NamedRunner {
     // See withAfters and withBefores for the reason.
     beforeFromClassLoader = classLoader.loadClass(Before.class.getName());
     afterFromClassLoader = classLoader.loadClass(After.class.getName());
+    ruleFromClassLoader = classLoader.loadClass(Rule.class.getName());
+    testRuleFromClassLoader = classLoader.loadClass(TestRule.class.getName());
+    methodRuleFromClassLoader = classLoader.loadClass(MethodRule.class.getName());
   }
 
   @Override
@@ -170,114 +179,25 @@ public class PerTestClassLoaderRunner extends NamedRunner {
     return new RunBefores(statement, befores, target);
   }
 
-//    /**
-//     * Gets the class path. This value is cached in a static variable for performance reasons.
-//     *
-//     * @return the class path
-//     */
-//    private static String getClassPath()
-//    {
-//        if (classPathDetermined)
-//        {
-//            return classPath;
-//        }
-//
-//        classPathDetermined = true;
-//        // running from maven, we have the classpath in this property.
-//        classPath = System.getProperty("surefire.test.class.path");
-//        if (classPath != null)
-//        {
-//            return classPath;
-//        }
-//
-//        // For a multi module project, running it from the top we have to find it using another way.
-//        // We also need to set useSystemClassLoader=true in the POM so that we gets a jar with the classpath in it.
-//        String booterClassPath = System.getProperty("java.class.path");
-//        Vector<String> pathItems = null;
-//        if (booterClassPath != null)
-//        {
-//            pathItems = scanPath(booterClassPath);
-//        }
-//        // Do we have just 1 entry as classpath which is a jar?
-//        if (pathItems != null && pathItems.size() == 1
-//                && isJar((String) pathItems.get(0)))
-//        {
-//            classPath = loadJarManifestClassPath((String) pathItems.get(0),
-//                    "META-INF/MANIFEST.MF");
-//        }
-//        return classPath;
-//
-//    }
+  @Override
+  protected List<MethodRule> rules(Object target) {
+    List<MethodRule> result = testClassFromClassLoader.getAnnotatedMethodValues(target,
+        (Class<? extends Annotation>) ruleFromClassLoader, (Class) methodRuleFromClassLoader);
+
+    result.addAll(testClassFromClassLoader.getAnnotatedFieldValues(target,
+        (Class<? extends Annotation>) ruleFromClassLoader, (Class) methodRuleFromClassLoader));
 
-//    /**
-//     * Load jar manifest class path.
-//     *
-//     * @param path the path
-//     * @param fileName the file name
-//     *
-//     * @return the string
-//     */
-//    private static String loadJarManifestClassPath(String path, String fileName)
-//    {
-//        File archive = new File(path);
-//        if (!archive.exists()) {
-//            return null;
-//        }
-//        ZipFile zipFile = null;
-//
-//        try {
-//            zipFile = new ZipFile(archive);
-//        } catch (IOException io) {
-//            return null;
-//        }
-//
-//        ZipEntry entry = zipFile.getEntry(fileName);
-//        if (entry == null) {
-//            return null;
-//        } try {
-//            Manifest mf = new Manifest();
-//            mf.read(zipFile.getInputStream(entry));
-//
-//            return mf.getMainAttributes().getValue(Attributes.Name.CLASS_PATH)
-//                    .replaceAll(" ", System.getProperty("path.separator"))
-//                    .replaceAll("file:/", "");
-//        } catch (MalformedURLException e) {
-//            LOGGER.throwing("ClassLoaderTestSuite", "loadJarManifestClassPath", e);
-//        } catch (IOException e) {
-//            LOGGER.throwing("ClassLoaderTestSuite", "loadJarManifestClassPath", e);
-//        }
-//        return null;
-//    }
-//
-//    /**
-//     * Checks if is jar.
-//     *
-//     * @param pathEntry the path entry
-//     *
-//     * @return true, if is jar
-//     */
-//    private static boolean isJar(String pathEntry)
-//    {
-//        return pathEntry.endsWith(".jar") || pathEntry.endsWith(".zip");
-//    }
-//
-//    /**
-//     * Scan path for all directories.
-//     *
-//     * @param classPath the class path
-//     *
-//     * @return the vector< string>
-//     */
-//    private static Vector<String> scanPath(String classPath)
-//    {
-//        String separator = System.getProperty("path.separator");
-//        Vector<String> pathItems = new Vector<String>(10);
-//        StringTokenizer st = new StringTokenizer(classPath, separator);
-//        while (st.hasMoreTokens())
-//        {
-//            pathItems.addElement(st.nextToken());
-//        }
-//        return pathItems;
-//    }
+    return result;
+  }
 
+  @Override
+  protected List<TestRule> getTestRules(Object target) {
+    List<TestRule> result = testClassFromClassLoader.getAnnotatedMethodValues(target,
+        (Class<? extends Annotation>) ruleFromClassLoader, (Class) testRuleFromClassLoader);
+
+    result.addAll(testClassFromClassLoader.getAnnotatedFieldValues(target,
+        (Class<? extends Annotation>) ruleFromClassLoader, (Class) testRuleFromClassLoader));
+
+    return result;
+  }
 }
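
With the rules() and getTestRules() overrides above, @Rule-annotated fields and methods are resolved through the same per-test class loader as the test class itself, so JUnit rules now take effect even though each test method runs against a freshly loaded class. A hypothetical test (not part of this commit) illustrating the usage this enables:

    import static org.junit.Assert.assertTrue;

    import java.io.File;

    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.TemporaryFolder;
    import org.junit.runner.RunWith;

    import com.gemstone.gemfire.modules.session.junit.PerTestClassLoaderRunner;

    @RunWith(PerTestClassLoaderRunner.class)
    public class PerTestClassLoaderRuleExampleTest {

      // TemporaryFolder is a TestRule, so it is collected by getTestRules(Object).
      @Rule
      public TemporaryFolder tempDir = new TemporaryFolder();

      @Test
      public void ruleIsAppliedInsideTheIsolatedClassLoader() throws Exception {
        File created = tempDir.newFile("example.txt");
        assertTrue(created.exists());
      }
    }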


[06/63] [abbrv] incubator-geode git commit: GEODE-1072: Removing HDFS related code

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/wan/GatewaySender.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/wan/GatewaySender.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/wan/GatewaySender.java
index 74efd51..c5b5d3a 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/wan/GatewaySender.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/wan/GatewaySender.java
@@ -96,8 +96,6 @@ public interface GatewaySender {
 
   public static final int DEFAULT_DISPATCHER_THREADS = 5;
   
-  public static final int DEFAULT_HDFS_DISPATCHER_THREADS = 5;
-  
   public static final OrderPolicy DEFAULT_ORDER_POLICY = OrderPolicy.KEY;
   /**
    * The default maximum amount of memory (MB) to allow in the queue before

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/DSFIDFactory.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/DSFIDFactory.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/DSFIDFactory.java
index 77f24a3..bd78f5a 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/DSFIDFactory.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/DSFIDFactory.java
@@ -52,7 +52,6 @@ import com.gemstone.gemfire.cache.client.internal.locator.LocatorStatusRequest;
 import com.gemstone.gemfire.cache.client.internal.locator.LocatorStatusResponse;
 import com.gemstone.gemfire.cache.client.internal.locator.QueueConnectionRequest;
 import com.gemstone.gemfire.cache.client.internal.locator.QueueConnectionResponse;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSGatewayEventImpl;
 import com.gemstone.gemfire.cache.query.QueryService;
 import com.gemstone.gemfire.cache.query.internal.CqEntry;
 import com.gemstone.gemfire.cache.query.internal.CumulativeNonDistinctResults;
@@ -1023,8 +1022,6 @@ public final class DSFIDFactory implements DataSerializableFixedID {
         RemoteFetchVersionMessage.FetchVersionReplyMessage.class);
     registerDSFID(RELEASE_CLEAR_LOCK_MESSAGE, ReleaseClearLockMessage.class);
     registerDSFID(PR_TOMBSTONE_MESSAGE, PRTombstoneMessage.class);
-    registerDSFID(HDFS_GATEWAY_EVENT_IMPL, HDFSGatewayEventImpl.class);
-    
     registerDSFID(REQUEST_RVV_MESSAGE, InitialImageOperation.RequestRVVMessage.class);
     registerDSFID(RVV_REPLY_MESSAGE, InitialImageOperation.RVVReplyMessage.class);
     registerDSFID(SNAPPY_COMPRESSED_CACHED_DESERIALIZABLE, SnappyCompressedCachedDeserializable.class);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/DataSerializableFixedID.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/DataSerializableFixedID.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/DataSerializableFixedID.java
index 5d52346..7427f90 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/DataSerializableFixedID.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/DataSerializableFixedID.java
@@ -103,7 +103,6 @@ public interface DataSerializableFixedID extends SerializationVersions {
   public static final short JOIN_RESPONSE = -143;
   public static final short JOIN_REQUEST = -142;
 
-  public static final short HDFS_GATEWAY_EVENT_IMPL = -141;
   public static final short SNAPPY_COMPRESSED_CACHED_DESERIALIZABLE = -140;
   
   public static final short GATEWAY_EVENT_IMPL = -136;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes.java
index 4d4197e..f8740db 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes.java
@@ -33,7 +33,6 @@ import com.gemstone.gemfire.cache.CacheLoader;
 import com.gemstone.gemfire.cache.CacheLoaderException;
 import com.gemstone.gemfire.cache.CacheWriter;
 import com.gemstone.gemfire.cache.CacheWriterException;
-import com.gemstone.gemfire.cache.CustomEvictionAttributes;
 import com.gemstone.gemfire.cache.CustomExpiry;
 import com.gemstone.gemfire.cache.DataPolicy;
 import com.gemstone.gemfire.cache.Declarable;
@@ -50,10 +49,7 @@ import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.RegionEvent;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache.SubscriptionAttributes;
-import com.gemstone.gemfire.compression.CompressionException;
 import com.gemstone.gemfire.compression.Compressor;
-import com.gemstone.gemfire.internal.InternalDataSerializer;
-import com.gemstone.gemfire.internal.Version;
 import com.gemstone.gemfire.internal.cache.EvictionAttributesImpl;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
 
@@ -108,8 +104,6 @@ public class RemoteRegionAttributes implements RegionAttributes,
   private String[] gatewaySendersDescs;
   private boolean isGatewaySenderEnabled = false;
   private String[] asyncEventQueueDescs;
-  private String hdfsStoreName;
-  private boolean hdfsWriteOnly;
   private String compressorDesc;
   private boolean offHeap;
 
@@ -161,8 +155,6 @@ public class RemoteRegionAttributes implements RegionAttributes,
     this.isDiskSynchronous = attr.isDiskSynchronous();
     this.gatewaySendersDescs = getDescs(attr.getGatewaySenderIds().toArray());
     this.asyncEventQueueDescs = getDescs(attr.getAsyncEventQueueIds().toArray());
-  	this.hdfsStoreName = attr.getHDFSStoreName();
-    this.hdfsWriteOnly = attr.getHDFSWriteOnly();
     this.compressorDesc = getDesc(attr.getCompressor());
     this.offHeap = attr.getOffHeap();
   }
@@ -419,7 +411,6 @@ public class RemoteRegionAttributes implements RegionAttributes,
   
     DataSerializer.writeString(this.compressorDesc, out);
     out.writeBoolean(this.offHeap);
-    DataSerializer.writeString(this.hdfsStoreName, out);
   }
   
   public void fromData(DataInput in) throws IOException, ClassNotFoundException {
@@ -468,7 +459,6 @@ public class RemoteRegionAttributes implements RegionAttributes,
   
     this.compressorDesc = DataSerializer.readString(in);
     this.offHeap = in.readBoolean();
-    this.hdfsStoreName = DataSerializer.readString(in);
   }
   
   private String[] getDescs(Object[] l) {
@@ -636,15 +626,6 @@ public class RemoteRegionAttributes implements RegionAttributes,
     return this.evictionAttributes;
   }
 
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public CustomEvictionAttributes getCustomEvictionAttributes() {
-    // TODO: HDFS: no support for custom eviction attributes from remote yet
-    return null;
-  }
-
   public boolean getCloningEnabled() {
     // TODO Auto-generated method stub
     return this.cloningEnable;
@@ -653,12 +634,6 @@ public class RemoteRegionAttributes implements RegionAttributes,
   public String getDiskStoreName() {
     return this.diskStoreName;
   }
-  public String getHDFSStoreName() {
-	    return this.hdfsStoreName;
-	  }
-  public boolean getHDFSWriteOnly() {
-    return this.hdfsWriteOnly;
-  }
   public boolean isDiskSynchronous() {
     return this.isDiskSynchronous;
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractBucketRegionQueue.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractBucketRegionQueue.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractBucketRegionQueue.java
index 1f8da88..92eaa01 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractBucketRegionQueue.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractBucketRegionQueue.java
@@ -34,8 +34,6 @@ import com.gemstone.gemfire.cache.Operation;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.RegionDestroyedException;
 import com.gemstone.gemfire.cache.TimeoutException;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSGatewayEventImpl;
 import com.gemstone.gemfire.internal.cache.lru.LRUStatistics;
 import com.gemstone.gemfire.internal.cache.versions.RegionVersionVector;
 import com.gemstone.gemfire.internal.cache.versions.VersionSource;
@@ -459,17 +457,8 @@ public abstract class AbstractBucketRegionQueue extends BucketRegion {
     }
     waitIfQueueFull();
     
-    int sizeOfHdfsEvent = -1;
     try {
-      if (this instanceof HDFSBucketRegionQueue) {
-        // need to fetch the size before event is inserted in queue.
-        // fix for #50016
-        if (this.getBucketAdvisor().isPrimary()) {
-          HDFSGatewayEventImpl hdfsEvent = (HDFSGatewayEventImpl)event.getValue();
-          sizeOfHdfsEvent = hdfsEvent.getSizeOnHDFSInBytes(!((HDFSBucketRegionQueue)this).isBucketSorted);
-        }
-      }
-      
+
       didPut = virtualPut(event, false, false, null, false, startPut, true);
       
       checkReadiness();
@@ -492,7 +481,7 @@ public abstract class AbstractBucketRegionQueue extends BucketRegion {
       destroyKey(key);
       didPut = false;
     } else {
-      addToEventQueue(key, didPut, event, sizeOfHdfsEvent);
+      addToEventQueue(key, didPut, event);
     }
     return didPut;
   }
@@ -522,8 +511,7 @@ public abstract class AbstractBucketRegionQueue extends BucketRegion {
   }
   
   protected abstract void clearQueues();
-  protected abstract void addToEventQueue(Object key, boolean didPut, EntryEventImpl event, 
-      int sizeOfHdfsEvent);
+  protected abstract void addToEventQueue(Object key, boolean didPut, EntryEventImpl event);
   
   @Override
   public void afterAcquiringPrimaryState() {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegion.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegion.java
index 10644cb..d37f025 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegion.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegion.java
@@ -32,7 +32,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.CopyOnWriteArraySet;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.logging.log4j.Logger;
@@ -46,7 +45,6 @@ import com.gemstone.gemfire.cache.CacheLoaderException;
 import com.gemstone.gemfire.cache.CacheStatistics;
 import com.gemstone.gemfire.cache.CacheWriter;
 import com.gemstone.gemfire.cache.CacheWriterException;
-import com.gemstone.gemfire.cache.CustomEvictionAttributes;
 import com.gemstone.gemfire.cache.CustomExpiry;
 import com.gemstone.gemfire.cache.DataPolicy;
 import com.gemstone.gemfire.cache.DiskWriteAttributes;
@@ -54,7 +52,6 @@ import com.gemstone.gemfire.cache.EntryExistsException;
 import com.gemstone.gemfire.cache.EntryNotFoundException;
 import com.gemstone.gemfire.cache.EvictionAttributes;
 import com.gemstone.gemfire.cache.EvictionAttributesMutator;
-import com.gemstone.gemfire.cache.EvictionCriteria;
 import com.gemstone.gemfire.cache.ExpirationAction;
 import com.gemstone.gemfire.cache.ExpirationAttributes;
 import com.gemstone.gemfire.cache.MembershipAttributes;
@@ -100,7 +97,6 @@ import com.gemstone.gemfire.internal.logging.LogService;
 import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
 import com.gemstone.gemfire.internal.util.ArrayUtils;
 import com.gemstone.gemfire.pdx.internal.PeerTypeRegistration;
-import com.google.common.util.concurrent.Service.State;
 
 /**
  * Takes care of RegionAttributes, AttributesMutator, and some no-brainer method
@@ -236,8 +232,6 @@ public abstract class AbstractRegion implements Region, RegionAttributes,
 
   protected EvictionAttributesImpl evictionAttributes = new EvictionAttributesImpl();
 
-  protected CustomEvictionAttributes customEvictionAttributes;
-
   /** The membership attributes defining required roles functionality */
   protected MembershipAttributes membershipAttributes;
 
@@ -260,10 +254,6 @@ public abstract class AbstractRegion implements Region, RegionAttributes,
   
   protected String poolName;
   
-  protected String hdfsStoreName;
-  
-  protected boolean hdfsWriteOnly;
-  
   protected Compressor compressor;
   
   /**
@@ -898,16 +888,6 @@ public abstract class AbstractRegion implements Region, RegionAttributes,
     return this.subscriptionAttributes;
   }
   
-  @Override
-  public final String getHDFSStoreName() {
-    return this.hdfsStoreName;
-  }
-  
-  @Override
-  public final boolean getHDFSWriteOnly() {
-    return this.hdfsWriteOnly;
-  }
-  
   /**
    * Get IndexManger for region
    */
@@ -1728,7 +1708,6 @@ public abstract class AbstractRegion implements Region, RegionAttributes,
       this.setEvictionController(this.evictionAttributes
           .createEvictionController(this, attrs.getOffHeap()));
     }
-    this.customEvictionAttributes = attrs.getCustomEvictionAttributes();
     storeCacheListenersField(attrs.getCacheListeners());
     assignCacheLoader(attrs.getCacheLoader());
     assignCacheWriter(attrs.getCacheWriter());
@@ -1786,8 +1765,6 @@ public abstract class AbstractRegion implements Region, RegionAttributes,
             + "when multiuser-authentication is true.");
       }
     }
-    this.hdfsStoreName = attrs.getHDFSStoreName();
-    this.hdfsWriteOnly = attrs.getHDFSWriteOnly();
 
     this.diskStoreName = attrs.getDiskStoreName();
     this.isDiskSynchronous = attrs.isDiskSynchronous();
@@ -1853,52 +1830,12 @@ public abstract class AbstractRegion implements Region, RegionAttributes,
     return this.evictionAttributes;
   }
 
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public CustomEvictionAttributes getCustomEvictionAttributes() {
-    return this.customEvictionAttributes;
-  }
-
   public EvictionAttributesMutator getEvictionAttributesMutator()
   {
     return this.evictionAttributes;
   }
 
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public CustomEvictionAttributes setCustomEvictionAttributes(long newStart,
-      long newInterval) {
-    checkReadiness();
-
-    if (this.customEvictionAttributes == null) {
-      throw new IllegalArgumentException(
-          LocalizedStrings.AbstractRegion_NO_CUSTOM_EVICTION_SET
-              .toLocalizedString(getFullPath()));
-    }
-
-    if (newStart == 0) {
-      newStart = this.customEvictionAttributes.getEvictorStartTime();
-    }
-    this.customEvictionAttributes = new CustomEvictionAttributesImpl(
-        this.customEvictionAttributes.getCriteria(), newStart, newInterval,
-        newStart == 0 && newInterval == 0);
-
-//    if (this.evService == null) {
-//      initilializeCustomEvictor();
-//    } else {// we are changing the earlier one which is already started.
-//      EvictorService service = getEvictorTask();
-//      service.changeEvictionInterval(newInterval);
-//      if (newStart != 0)
-//        service.changeStartTime(newStart);
-//    }
 
-    return this.customEvictionAttributes;
-  }
-  
   public void setEvictionController(LRUAlgorithm evictionController)
   {
     this.evictionController = evictionController;
@@ -2037,7 +1974,6 @@ public abstract class AbstractRegion implements Region, RegionAttributes,
   
   /**
   * @since 8.1
-  * property used to find region operations that reach out to HDFS multiple times
   */
   @Override
   public ExtensionPoint<Region<?, ?>> getExtensionPoint() {
@@ -2047,87 +1983,4 @@ public abstract class AbstractRegion implements Region, RegionAttributes,
   public boolean getOffHeap() {
     return this.offHeap;
   }
-  /**
-   * property used to find region operations that reach out to HDFS multiple times
-   */
-  private static final boolean DEBUG_HDFS_CALLS = Boolean.getBoolean("DebugHDFSCalls");
-
-  /**
-   * throws exception if region operation goes out to HDFS multiple times
-   */
-  private static final boolean THROW_ON_MULTIPLE_HDFS_CALLS = Boolean.getBoolean("throwOnMultipleHDFSCalls");
-
-  private ThreadLocal<CallLog> logHDFSCalls = DEBUG_HDFS_CALLS ? new ThreadLocal<CallLog>() : null;
-
-  public void hdfsCalled(Object key) {
-    if (!DEBUG_HDFS_CALLS) {
-      return;
-    }
-    logHDFSCalls.get().addStack(new Throwable());
-    logHDFSCalls.get().setKey(key);
-  }
-  public final void operationStart() {
-    if (!DEBUG_HDFS_CALLS) {
-      return;
-    }
-    if (logHDFSCalls.get() == null) {
-      logHDFSCalls.set(new CallLog());
-      //InternalDistributedSystem.getLoggerI18n().warning(LocalizedStrings.DEBUG, "SWAP:operationStart", new Throwable());
-    } else {
-      logHDFSCalls.get().incNestedCall();
-      //InternalDistributedSystem.getLoggerI18n().warning(LocalizedStrings.DEBUG, "SWAP:incNestedCall:", new Throwable());
-    }
-  }
-  public final void operationCompleted() {
-    if (!DEBUG_HDFS_CALLS) {
-      return;
-    }
-    //InternalDistributedSystem.getLoggerI18n().warning(LocalizedStrings.DEBUG, "SWAP:operationCompleted", new Throwable());
-    if (logHDFSCalls.get() != null && logHDFSCalls.get().decNestedCall() < 0) {
-      logHDFSCalls.get().assertCalls();
-      logHDFSCalls.set(null);
-    }
-  }
-
-  public static class CallLog {
-    private List<Throwable> stackTraces = new ArrayList<Throwable>();
-    private Object key;
-    private int nestedCall = 0;
-    public void incNestedCall() {
-      nestedCall++;
-    }
-    public int decNestedCall() {
-      return --nestedCall;
-    }
-    public void addStack(Throwable stack) {
-      this.stackTraces.add(stack);
-    }
-    public void setKey(Object key) {
-      this.key = key;
-    }
-    public void assertCalls() {
-      if (stackTraces.size() > 1) {
-        Throwable firstTrace = new Throwable();
-        Throwable lastTrace = firstTrace;
-        for (Throwable t : this.stackTraces) {
-          lastTrace.initCause(t);
-          lastTrace = t;
-        }
-        if (THROW_ON_MULTIPLE_HDFS_CALLS) {
-          throw new RuntimeException("SWAP:For key:"+key+" HDFS get called more than once: ", firstTrace);
-        } else {
-          InternalDistributedSystem.getLoggerI18n().warning(LocalizedStrings.DEBUG, "SWAP:For key:"+key+" HDFS get called more than once: ", firstTrace);
-        }
-      }
-    }
-  }
-
-  public EvictionCriteria getEvictionCriteria() {
-    EvictionCriteria criteria = null;
-    if (this.customEvictionAttributes != null
-        && !this.customEvictionAttributes.isEvictIncoming()) {
-      criteria = this.customEvictionAttributes.getCriteria();
-    }
-    return criteria;
-  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionEntry.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionEntry.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionEntry.java
index b936e3f..46a851d 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionEntry.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionEntry.java
@@ -870,15 +870,7 @@ public abstract class AbstractRegionEntry implements RegionEntry,
         removeEntry = true;
       }
 
-      // See #47887, we do not insert a tombstone for evicted HDFS
-      // entries since the value is still present in HDFS
-      // Check if we have to evict or just do destroy.
-      boolean forceRemoveEntry = 
-          (event.isEviction() || event.isExpiration()) 
-          && event.getRegion().isUsedForPartitionedRegionBucket()
-          && event.getRegion().getPartitionedRegion().isHDFSRegion();
-
-      if (removeEntry || forceRemoveEntry) {
+      if (removeEntry) {
         boolean isThisTombstone = isTombstone();
         if(inTokenMode && !event.getOperation().isEviction()) {
           setValue(region, Token.DESTROYED);  
@@ -1398,27 +1390,7 @@ public abstract class AbstractRegionEntry implements RegionEntry,
   /**
    * {@inheritDoc}
    */
-  @Override
-  public final boolean isMarkedForEviction() {
-    return areAnyBitsSet(MARKED_FOR_EVICTION);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public final void setMarkedForEviction() {
-    setBits(MARKED_FOR_EVICTION);
-  }
 
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public final void clearMarkedForEviction() {
-    clearBits(~MARKED_FOR_EVICTION);
-  }
-  
   @Override
   public final synchronized void decRefCount(NewLRUClockHand lruList, LocalRegion lr) {
     if (TXManagerImpl.decRefCount(this)) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java
index 3286373..75a1e32 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java
@@ -18,7 +18,6 @@
 package com.gemstone.gemfire.internal.cache;
 
 
-import java.io.IOException;
 import java.lang.reflect.Method;
 import java.util.Collection;
 import java.util.HashSet;
@@ -36,7 +35,6 @@ import com.gemstone.gemfire.InvalidDeltaException;
 import com.gemstone.gemfire.cache.CacheRuntimeException;
 import com.gemstone.gemfire.cache.CacheWriter;
 import com.gemstone.gemfire.cache.CacheWriterException;
-import com.gemstone.gemfire.cache.CustomEvictionAttributes;
 import com.gemstone.gemfire.cache.DiskAccessException;
 import com.gemstone.gemfire.cache.EntryExistsException;
 import com.gemstone.gemfire.cache.EntryNotFoundException;
@@ -83,9 +81,6 @@ import com.gemstone.gemfire.internal.offheap.annotations.Retained;
 import com.gemstone.gemfire.internal.offheap.annotations.Unretained;
 import com.gemstone.gemfire.internal.sequencelog.EntryLogger;
 import com.gemstone.gemfire.internal.util.concurrent.CustomEntryConcurrentHashMap;
-import com.gemstone.gemfire.pdx.PdxInstance;
-import com.gemstone.gemfire.pdx.PdxSerializationException;
-import com.gemstone.gemfire.pdx.internal.ConvertableToBytes;
 
 /**
  * Abstract implementation of {@link RegionMap}that has all the common
@@ -303,10 +298,6 @@ public abstract class AbstractRegionMap implements RegionMap {
 
   public RegionEntry getEntry(Object key) {
     RegionEntry re = (RegionEntry)_getMap().get(key);
-    if (re != null && re.isMarkedForEviction()) {
-      // entry has been faulted in from HDFS
-      return null;
-    }
     return re;
   }
 
@@ -337,16 +328,12 @@ public abstract class AbstractRegionMap implements RegionMap {
   @Override
   public final RegionEntry getOperationalEntryInVM(Object key) {
     RegionEntry re = (RegionEntry)_getMap().get(key);
-    if (re != null && re.isMarkedForEviction()) {
-      // entry has been faulted in from HDFS
-      return null;
-    }
     return re;
   }
  
 
   public final void removeEntry(Object key, RegionEntry re, boolean updateStat) {
-    if (re.isTombstone() && _getMap().get(key) == re && !re.isMarkedForEviction()){
+    if (re.isTombstone() && _getMap().get(key) == re){
       logger.fatal(LocalizedMessage.create(LocalizedStrings.AbstractRegionMap_ATTEMPT_TO_REMOVE_TOMBSTONE), new Exception("stack trace"));
       return; // can't remove tombstones except from the tombstone sweeper
     }
@@ -362,7 +349,7 @@ public abstract class AbstractRegionMap implements RegionMap {
       EntryEventImpl event, final LocalRegion owner,
       final IndexUpdater indexUpdater) {
     boolean success = false;
-    if (re.isTombstone()&& _getMap().get(key) == re && !re.isMarkedForEviction()) {
+    if (re.isTombstone()&& _getMap().get(key) == re) {
       logger.fatal(LocalizedMessage.create(LocalizedStrings.AbstractRegionMap_ATTEMPT_TO_REMOVE_TOMBSTONE), new Exception("stack trace"));
       return; // can't remove tombstones except from the tombstone sweeper
     }
@@ -371,18 +358,6 @@ public abstract class AbstractRegionMap implements RegionMap {
         indexUpdater.onEvent(owner, event, re);
       }
 
-      //This is messy, but custom eviction calls removeEntry
-      //rather than re.destroy I think to avoid firing callbacks, etc.
-      //However, the value still needs to be set to removePhase1
-      //in order to remove the entry from disk.
-      if(event.isCustomEviction() && !re.isRemoved()) {
-        try {
-          re.removePhase1(owner, false);
-        } catch (RegionClearedException e) {
-          //that's ok, we were just trying to do evict incoming eviction
-        }
-      }
-      
       if (_getMap().remove(key, re)) {
         re.removePhase2();
         success = true;
@@ -1169,7 +1144,7 @@ public abstract class AbstractRegionMap implements RegionMap {
                         // transaction conflict (caused by eviction) when the entry
                         // is being added to transaction state.
                         if (isEviction) {
-                          if (!confirmEvictionDestroy(oldRe) || (owner.getEvictionCriteria() != null && !owner.getEvictionCriteria().doEvict(event))) {
+                          if (!confirmEvictionDestroy(oldRe)) {
                             opCompleted = false;
                             return opCompleted;
                           }
@@ -1424,7 +1399,7 @@ public abstract class AbstractRegionMap implements RegionMap {
                   // See comment above about eviction checks
                   if (isEviction) {
                     assert expectedOldValue == null;
-                    if (!confirmEvictionDestroy(re) || (owner.getEvictionCriteria() != null && !owner.getEvictionCriteria().doEvict(event))) {
+                    if (!confirmEvictionDestroy(re)) {
                       opCompleted = false;
                       return opCompleted;
                     }
@@ -1506,12 +1481,6 @@ public abstract class AbstractRegionMap implements RegionMap {
                   }
                 } // !isRemoved
                 else { // already removed
-                  if (owner.isHDFSReadWriteRegion() && re.isRemovedPhase2()) {
-                    // For HDFS region there may be a race with eviction
-                    // so retry the operation. fixes bug 49150
-                    retry = true;
-                    continue;
-                  }
                   if (re.isTombstone() && event.getVersionTag() != null) {
                     // if we're dealing with a tombstone and this is a remote event
                     // (e.g., from cache client update thread) we need to update
@@ -2685,11 +2654,7 @@ public abstract class AbstractRegionMap implements RegionMap {
       boolean onlyExisting, boolean returnTombstone) {
     Object key = event.getKey();
     RegionEntry retVal = null;
-    if (event.isFetchFromHDFS()) {
-      retVal = getEntry(event);
-    } else {
-      retVal = getEntryInVM(key);
-    }
+    retVal = getEntry(event);
     if (onlyExisting) {
       if (!returnTombstone && (retVal != null && retVal.isTombstone())) {
         return null;
@@ -2988,47 +2953,6 @@ public abstract class AbstractRegionMap implements RegionMap {
                   else if (re != null && owner.isUsedForPartitionedRegionBucket()) {
                   BucketRegion br = (BucketRegion)owner;
                   CachePerfStats stats = br.getPartitionedRegion().getCachePerfStats();
-                  long startTime= stats.startCustomEviction();
-                  CustomEvictionAttributes csAttr = br.getCustomEvictionAttributes();
-                  // No need to update indexes if entry was faulted in but operation did not succeed. 
-                  if (csAttr != null && (csAttr.isEvictIncoming() || re.isMarkedForEviction())) {
-                    
-                    if (csAttr.getCriteria().doEvict(event)) {
-                      stats.incEvictionsInProgress();
-                      // set the flag on event saying the entry should be evicted 
-                      // and not indexed
-                      @Released EntryEventImpl destroyEvent = EntryEventImpl.create (owner, Operation.DESTROY, event.getKey(),
-                          null/* newValue */, null, false, owner.getMyId());
-                      try {
-
-                      destroyEvent.setOldValueFromRegion();
-                      destroyEvent.setCustomEviction(true);
-                      destroyEvent.setPossibleDuplicate(event.isPossibleDuplicate());
-                      if(logger.isDebugEnabled()) {
-                        logger.debug("Evicting the entry " + destroyEvent);
-                      }
-                      if(result != null) {
-                        removeEntry(event.getKey(),re, true, destroyEvent,owner, indexUpdater);
-                      }
-                      else{
-                        removeEntry(event.getKey(),re, true, destroyEvent,owner, null);
-                      }
-                      //mark the region entry for this event as evicted 
-                      event.setEvicted();
-                      stats.incEvictions();
-                      if(logger.isDebugEnabled()) {
-                        logger.debug("Evicted the entry " + destroyEvent);
-                      }
-                      //removeEntry(event.getKey(), re);
-                      } finally {
-                        destroyEvent.release();
-                        stats.decEvictionsInProgress();
-                      }
-                    } else {
-                      re.clearMarkedForEviction();
-                    }
-                  }
-                  stats.endCustomEviction(startTime);
                 }
               } // try
             }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketAdvisor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketAdvisor.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketAdvisor.java
index 3038059..c241c6b 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketAdvisor.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketAdvisor.java
@@ -1316,7 +1316,6 @@ public class BucketAdvisor extends CacheDistributionAdvisor  {
             ((BucketRegion)br).processPendingSecondaryExpires();
           }
           if (br instanceof BucketRegionQueue) { // Shouldn't it be AbstractBucketRegionQueue
-            // i.e. this stats is not getting incremented for HDFSBucketRegionQueue!!
             BucketRegionQueue brq = (BucketRegionQueue)br;
             brq.incQueueSize(brq.size());
           }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketRegion.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketRegion.java
index 6e4f426..f5ae0fb 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketRegion.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketRegion.java
@@ -26,7 +26,6 @@ import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.Lock;
 
 import org.apache.logging.log4j.Logger;
@@ -35,7 +34,6 @@ import com.gemstone.gemfire.CancelException;
 import com.gemstone.gemfire.CopyHelper;
 import com.gemstone.gemfire.DataSerializer;
 import com.gemstone.gemfire.DeltaSerializationException;
-import com.gemstone.gemfire.GemFireIOException;
 import com.gemstone.gemfire.InternalGemFireError;
 import com.gemstone.gemfire.InvalidDeltaException;
 import com.gemstone.gemfire.SystemFailure;
@@ -43,20 +41,16 @@ import com.gemstone.gemfire.cache.CacheClosedException;
 import com.gemstone.gemfire.cache.CacheException;
 import com.gemstone.gemfire.cache.CacheWriter;
 import com.gemstone.gemfire.cache.CacheWriterException;
-import com.gemstone.gemfire.cache.CustomEvictionAttributes;
 import com.gemstone.gemfire.cache.DiskAccessException;
 import com.gemstone.gemfire.cache.EntryNotFoundException;
 import com.gemstone.gemfire.cache.EvictionAction;
 import com.gemstone.gemfire.cache.EvictionAlgorithm;
 import com.gemstone.gemfire.cache.EvictionAttributes;
-import com.gemstone.gemfire.cache.EvictionCriteria;
 import com.gemstone.gemfire.cache.ExpirationAction;
 import com.gemstone.gemfire.cache.Operation;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.RegionDestroyedException;
 import com.gemstone.gemfire.cache.TimeoutException;
-import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer;
 import com.gemstone.gemfire.cache.partition.PartitionListener;
 import com.gemstone.gemfire.cache.query.internal.IndexUpdater;
 import com.gemstone.gemfire.distributed.DistributedMember;
@@ -90,13 +84,11 @@ import com.gemstone.gemfire.internal.cache.versions.VersionSource;
 import com.gemstone.gemfire.internal.cache.versions.VersionStamp;
 import com.gemstone.gemfire.internal.cache.versions.VersionTag;
 import com.gemstone.gemfire.internal.cache.wan.GatewaySenderEventImpl;
-import com.gemstone.gemfire.internal.cache.wan.parallel.ConcurrentParallelGatewaySenderQueue;
 import com.gemstone.gemfire.internal.concurrent.Atomics;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
 import com.gemstone.gemfire.internal.logging.LogService;
 import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
 import com.gemstone.gemfire.internal.logging.log4j.LogMarker;
-import com.gemstone.gemfire.internal.offheap.StoredObject;
 import com.gemstone.gemfire.internal.offheap.annotations.Released;
 import com.gemstone.gemfire.internal.offheap.annotations.Retained;
 import com.gemstone.gemfire.internal.offheap.annotations.Unretained;
@@ -233,8 +225,6 @@ implements Bucket
     return eventSeqNum;
   }
 
-  protected final AtomicReference<HoplogOrganizer> hoplog = new AtomicReference<HoplogOrganizer>();
-  
   public BucketRegion(String regionName, RegionAttributes attrs,
       LocalRegion parentRegion, GemFireCacheImpl cache,
       InternalRegionArguments internalRegionArgs) {
@@ -892,12 +882,6 @@ implements Bucket
 
     beginLocalWrite(event);
     try {
-      // increment the tailKey so that invalidate operations are written to HDFS
-      if (this.partitionedRegion.hdfsStoreName != null) {
-        /* MergeGemXDHDFSToGFE Disabled this while porting. Is this required? */
-        //assert this.partitionedRegion.isLocalParallelWanEnabled();
-        handleWANEvent(event);
-      }
       // which performs the local op.
       // The ARM then calls basicInvalidatePart2 with the entry synchronized.
       if ( !hasSeenEvent(event) ) {
@@ -1152,20 +1136,6 @@ implements Bucket
       if (this.partitionedRegion.isParallelWanEnabled()) {
         handleWANEvent(event);
       }
-      // In GemFire EVICT_DESTROY is not distributed, so in order to remove the entry
-      // from memory, allow the destroy to proceed. fixes #49784
-      if (event.isLoadedFromHDFS() && !getBucketAdvisor().isPrimary()) {
-        if (logger.isDebugEnabled()) {
-          logger.debug("Put the destory event in HDFS queue on secondary "
-              + "and return as event is HDFS loaded " + event);
-        }
-        notifyGatewaySender(EnumListenerEvent.AFTER_DESTROY, event);
-        return;
-      }else{
-        if (logger.isDebugEnabled()) {
-          logger.debug("Going ahead with the destroy on GemFire system");
-        }
-      }
       // This call should invoke AbstractRegionMap (aka ARM) destroy method
       // which calls the CacheWriter, then performs the local op.
       // The ARM then calls basicDestroyPart2 with the entry synchronized.
@@ -1364,39 +1334,7 @@ implements Bucket
   }
 
   @Override
-  public boolean isHDFSRegion() {
-    return this.partitionedRegion.isHDFSRegion();
-  }
-
-  @Override
-  public boolean isHDFSReadWriteRegion() {
-    return this.partitionedRegion.isHDFSReadWriteRegion();
-  }
-
-  @Override
-  protected boolean isHDFSWriteOnly() {
-    return this.partitionedRegion.isHDFSWriteOnly();
-  }
-
-  @Override
   public int sizeEstimate() {
-    if (isHDFSReadWriteRegion()) {
-      try {
-        checkForPrimary();
-        ConcurrentParallelGatewaySenderQueue q = getHDFSQueue();
-        if (q == null) return 0;
-        int hdfsBucketRegionSize = q.getBucketRegionQueue(
-            partitionedRegion, getId()).size();
-        int hoplogEstimate = (int) getHoplogOrganizer().sizeEstimate();
-        if (logger.isDebugEnabled()) {
-          logger.debug("for bucket " + getName() + " estimateSize returning "
-                  + (hdfsBucketRegionSize + hoplogEstimate));
-        }
-        return hdfsBucketRegionSize + hoplogEstimate;
-      } catch (ForceReattemptException e) {
-        throw new PrimaryBucketException(e.getLocalizedMessage(), e);
-      }
-    }
     return size();
   }
 
@@ -1453,14 +1391,14 @@ implements Bucket
    *                 if there is a serialization problem
    * see LocalRegion#getDeserializedValue(RegionEntry, KeyInfo, boolean, boolean,  boolean, EntryEventImpl, boolean, boolean, boolean)
    */
-  private RawValue getSerialized(Object key, boolean updateStats, boolean doNotLockEntry, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) 
+  private RawValue getSerialized(Object key,
+                                 boolean updateStats,
+                                 boolean doNotLockEntry,
+                                 EntryEventImpl clientEvent,
+                                 boolean returnTombstones)
       throws EntryNotFoundException, IOException {
     RegionEntry re = null;
-    if (allowReadFromHDFS) {
-      re = this.entries.getEntry(key);
-    } else {
-      re = this.entries.getOperationalEntryInVM(key);
-    }
+    re = this.entries.getEntry(key);
     if (re == null) {
       return NULLVALUE;
     }
@@ -1504,13 +1442,18 @@ implements Bucket
    * 
    * @param keyInfo
    * @param generateCallbacks
-   * @param clientEvent holder for the entry's version information 
+   * @param clientEvent holder for the entry's version information
    * @param returnTombstones TODO
    * @return serialized (byte) form
    * @throws IOException if the result is not serializable
    * @see LocalRegion#get(Object, Object, boolean, EntryEventImpl)
    */
-  public RawValue getSerialized(KeyInfo keyInfo, boolean generateCallbacks, boolean doNotLockEntry, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) throws IOException {
+  public RawValue getSerialized(KeyInfo keyInfo,
+                                boolean generateCallbacks,
+                                boolean doNotLockEntry,
+                                ClientProxyMembershipID requestingClient,
+                                EntryEventImpl clientEvent,
+                                boolean returnTombstones) throws IOException {
     checkReadiness();
     checkForNoAccess();
     CachePerfStats stats = getCachePerfStats();
@@ -1520,7 +1463,7 @@ implements Bucket
     try {
       RawValue valueBytes = NULLVALUE;
       boolean isCreate = false;
-      RawValue result = getSerialized(keyInfo.getKey(), true, doNotLockEntry, clientEvent, returnTombstones, allowReadFromHDFS);
+      RawValue result = getSerialized(keyInfo.getKey(), true, doNotLockEntry, clientEvent, returnTombstones);
       isCreate = result == NULLVALUE || (result.getRawValue() == Token.TOMBSTONE && !returnTombstones);
       miss = (result == NULLVALUE || Token.isInvalid(result.getRawValue()));
       if (miss) {
@@ -1532,7 +1475,7 @@ implements Bucket
             return REQUIRES_ENTRY_LOCK;
           }
           Object value = nonTxnFindObject(keyInfo, isCreate,
-              generateCallbacks, result.getRawValue(), true, true, requestingClient, clientEvent, false, allowReadFromHDFS);
+              generateCallbacks, result.getRawValue(), true, true, requestingClient, clientEvent, false);
           if (value != null) {
             result = new RawValue(value);
           }
@@ -2471,36 +2414,8 @@ implements Bucket
   }
 
   public void beforeAcquiringPrimaryState() {
-    try {
-      createHoplogOrganizer();
-    } catch (IOException e) {
-      // 48990: when HDFS was down, gemfirexd should still start normally
-      logger.warn(LocalizedStrings.HOPLOG_NOT_STARTED_YET, e);
-    } catch(Throwable e) {
-      /*MergeGemXDHDFSToGFE changed this code to checkReadiness*/
-      // SystemFailure.checkThrowable(e);
-      this.checkReadiness();
-      //49333 - no matter what, we should elect a primary.
-      logger.error(LocalizedStrings.LocalRegion_UNEXPECTED_EXCEPTION, e);
-    }
-  }
-
-  public HoplogOrganizer<?> createHoplogOrganizer() throws IOException {
-    if (getPartitionedRegion().isHDFSRegion()) {
-      HoplogOrganizer<?> organizer = hoplog.get();
-      if (organizer != null) {
-        //  hoplog is recreated by anther thread
-        return organizer;
-      }
-
-      HoplogOrganizer hdfs = hoplog.getAndSet(getPartitionedRegion().hdfsManager.create(getId()));
-      assert hdfs == null;
-      return hoplog.get();
-    } else {
-      return null;
-    }
   }
-  
+
   public void afterAcquiringPrimaryState() {
     
   }
@@ -2508,105 +2423,13 @@ implements Bucket
    * Invoked when a primary bucket is demoted.
    */
   public void beforeReleasingPrimaryLockDuringDemotion() {
-    releaseHoplogOrganizer();
   }
 
-  protected void releaseHoplogOrganizer() {
-    // release resources during a clean transition
-    HoplogOrganizer hdfs = hoplog.getAndSet(null);
-    if (hdfs != null) {
-      getPartitionedRegion().hdfsManager.close(getId());
-    }
-  }
-  
-  public HoplogOrganizer<?> getHoplogOrganizer() throws HDFSIOException {
-    HoplogOrganizer<?> organizer = hoplog.get();
-    if (organizer == null) {
-      synchronized (getBucketAdvisor()) {
-        checkForPrimary();
-        try {
-          organizer = createHoplogOrganizer();
-        } catch (IOException e) {
-          throw new HDFSIOException("Failed to create Hoplog organizer due to ", e);
-        }
-        if (organizer == null) {
-          throw new HDFSIOException("Hoplog organizer is not available for " + this);
-        }
-      }
-    }
-    return organizer;
-  }
-  
   @Override
   public RegionAttributes getAttributes() {
     return this;
   }
 
-  @Override
-  public void hdfsCalled(Object key) {
-    this.partitionedRegion.hdfsCalled(key);
-  }
-
-  @Override
-  protected void clearHDFSData() {
-    //clear the HDFS data if present
-    if (getPartitionedRegion().isHDFSReadWriteRegion()) {
-      // Clear the queue
-      ConcurrentParallelGatewaySenderQueue q = getHDFSQueue();
-      if (q == null) return;
-      q.clear(getPartitionedRegion(), this.getId());
-      HoplogOrganizer organizer = hoplog.get();
-      if (organizer != null) {
-        try {
-          organizer.clear();
-        } catch (IOException e) {
-          throw new GemFireIOException(LocalizedStrings.HOPLOG_UNABLE_TO_DELETE_HDFS_DATA.toLocalizedString(), e);
-        }
-      }
-    }
-  }
-  
-  public EvictionCriteria getEvictionCriteria() {
-    return this.partitionedRegion.getEvictionCriteria();
-  }
-  
-  public CustomEvictionAttributes getCustomEvictionAttributes() {
-    return this.partitionedRegion.getCustomEvictionAttributes();
-  }
-  
-  /**
-   * @return true if the evict destroy was done; false if it was not needed
-   */
-  public boolean customEvictDestroy(Object key)
-  {
-    checkReadiness();
-    @Released final EntryEventImpl event = 
-          generateCustomEvictDestroyEvent(key);
-    event.setCustomEviction(true);
-    boolean locked = false;
-    try {
-      locked = beginLocalWrite(event);
-      return mapDestroy(event,
-                        false, // cacheWrite
-                        true,  // isEviction
-                        null); // expectedOldValue
-    }
-    catch (CacheWriterException error) {
-      throw new Error(LocalizedStrings.LocalRegion_CACHE_WRITER_SHOULD_NOT_HAVE_BEEN_CALLED_FOR_EVICTDESTROY.toLocalizedString(), error);
-    }
-    catch (TimeoutException anotherError) {
-      throw new Error(LocalizedStrings.LocalRegion_NO_DISTRIBUTED_LOCK_SHOULD_HAVE_BEEN_ATTEMPTED_FOR_EVICTDESTROY.toLocalizedString(), anotherError);
-    }
-    catch (EntryNotFoundException yetAnotherError) {
-      throw new Error(LocalizedStrings.LocalRegion_ENTRYNOTFOUNDEXCEPTION_SHOULD_BE_MASKED_FOR_EVICTDESTROY.toLocalizedString(), yetAnotherError);
-    } finally {
-      if (locked) {
-        endLocalWrite(event);
-      }
-      event.release();
-    }
-  }
-
   public boolean areSecondariesPingable() {
     
     Set<InternalDistributedMember> hostingservers = this.partitionedRegion.getRegionAdvisor()

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketRegionQueue.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketRegionQueue.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketRegionQueue.java
index 0facd93..0243cde 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketRegionQueue.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketRegionQueue.java
@@ -441,7 +441,7 @@ public class BucketRegionQueue extends AbstractBucketRegionQueue {
     }
   }
 
-  protected void addToEventQueue(Object key, boolean didPut, EntryEventImpl event, int sizeOfHDFSEvent) {
+  protected void addToEventQueue(Object key, boolean didPut, EntryEventImpl event) {
     if (didPut) {
       if (this.initialized) {
         this.eventSeqNumQueue.add(key);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CacheDistributionAdvisor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CacheDistributionAdvisor.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CacheDistributionAdvisor.java
index 6f673c7..4a34771 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CacheDistributionAdvisor.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CacheDistributionAdvisor.java
@@ -38,8 +38,6 @@ import com.gemstone.gemfire.cache.InterestPolicy;
 import com.gemstone.gemfire.cache.RegionDestroyedException;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache.SubscriptionAttributes;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
 import com.gemstone.gemfire.distributed.Role;
 import com.gemstone.gemfire.distributed.internal.DistributionAdvisor;
 import com.gemstone.gemfire.distributed.internal.DistributionManager;
@@ -1228,30 +1226,16 @@ public class CacheDistributionAdvisor extends DistributionAdvisor  {
       public boolean include(final Profile profile) {
         if (profile instanceof CacheProfile) {
           final CacheProfile cp = (CacheProfile)profile;
-          /*Since HDFS queues are created only when a region is created, this check is 
-           * unnecessary. Also this check is creating problem because hdfs queue is not 
-           * created on an accessor. Hence removing this check for hdfs queues. */
-          Set<String> allAsyncEventIdsNoHDFS = removeHDFSQueues(allAsyncEventIds);
-          Set<String> profileQueueIdsNoHDFS = removeHDFSQueues(cp.asyncEventQueueIds);
-          if (allAsyncEventIdsNoHDFS.equals(profileQueueIdsNoHDFS)) {
+          if (allAsyncEventIds.equals(cp.asyncEventQueueIds)) {
             return true;
           }else{
-            differAsycnQueueIds.add(allAsyncEventIdsNoHDFS);
-            differAsycnQueueIds.add(profileQueueIdsNoHDFS);
+            differAsycnQueueIds.add(allAsyncEventIds);
+            differAsycnQueueIds.add(cp.asyncEventQueueIds);
             return false;
           }
         }
         return false;
       }
-      private Set<String> removeHDFSQueues(Set<String> queueIds){
-        Set<String> queueIdsWithoutHDFSQueues = new HashSet<String>();
-        for (String queueId: queueIds){
-          if (!queueId.startsWith(HDFSStoreFactoryImpl.DEFAULT_ASYNC_QUEUE_ID_FOR_HDFS)){
-            queueIdsWithoutHDFSQueues.add(queueId);
-          }
-        }
-        return queueIdsWithoutHDFSQueues;
-      }
     });
     return differAsycnQueueIds;
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CachePerfStats.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CachePerfStats.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CachePerfStats.java
index 382c537..ad84963 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CachePerfStats.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CachePerfStats.java
@@ -156,13 +156,6 @@ public class CachePerfStats {
   protected static final int compressionPreCompressedBytesId;
   protected static final int compressionPostCompressedBytesId;
   
-  protected static final int evictByCriteria_evictionsId;// total actual evictions (entries evicted)
-  protected static final int evictByCriteria_evictionTimeId;// total eviction time including product + user expr. 
-  protected static final int evictByCriteria_evictionsInProgressId;
-  protected static final int evictByCriteria_evaluationsId;// total eviction attempts
-  protected static final int evictByCriteria_evaluationTimeId;// time taken to evaluate user expression.
-  
-
   /** The Statistics object that we delegate most behavior to */
   protected final Statistics stats;
 
@@ -521,12 +514,6 @@ public class CachePerfStats {
     compressionDecompressionsId = type.nameToId("decompressions");
     compressionPreCompressedBytesId = type.nameToId("preCompressedBytes");
     compressionPostCompressedBytesId = type.nameToId("postCompressedBytes");
-    
-    evictByCriteria_evictionsId = type.nameToId("evictByCriteria_evictions");
-    evictByCriteria_evictionTimeId = type.nameToId("evictByCriteria_evictionTime"); 
-    evictByCriteria_evictionsInProgressId = type.nameToId("evictByCriteria_evictionsInProgress");
-    evictByCriteria_evaluationsId= type.nameToId("evictByCriteria_evaluations");
-    evictByCriteria_evaluationTimeId = type.nameToId("evictByCriteria_evaluationTime");
   }
   
   ////////////////////////  Constructors  ////////////////////////
@@ -1354,66 +1341,4 @@ public class CachePerfStats {
       stats.incLong(exportTimeId, getStatTime() - start);
     }
   }
-  
-//  // used for the case of evict on incoming
-  public long startCustomEviction() {
-    return NanoTimer.getTime();
-  }
-
-  // used for the case of evict on incoming
-  public void endCustomEviction(long start) {
-    long ts = NanoTimer.getTime();
-    stats.incLong(evictByCriteria_evictionTimeId, ts - start);
-  }
-
-  public void incEvictionsInProgress() {
-    this.stats.incLong(evictByCriteria_evictionsInProgressId, 1);
-  }
-
-  public void decEvictionsInProgress() {
-    this.stats.incLong(evictByCriteria_evictionsInProgressId, -1);
-  }
-
-  public void incEvictions() {
-    this.stats.incLong(evictByCriteria_evictionsId, 1);
-  }
-
-  public void incEvaluations() {
-    this.stats.incLong(evictByCriteria_evaluationsId, 1);
-  }
-
-  public void incEvaluations(int delta) {
-    this.stats.incLong(evictByCriteria_evaluationsId, delta);
-  }
-  
-  public long startEvaluation() {
-    return NanoTimer.getTime();
-  }
-
-  public void endEvaluation(long start, long notEvaluationTime) {
-    long ts = NanoTimer.getTime();
-    long totalTime = ts - start;
-    long evaluationTime = totalTime - notEvaluationTime;
-    stats.incLong(evictByCriteria_evaluationTimeId, evaluationTime);
-  }
-
-  public long getEvictions() {
-    return stats.getLong(evictByCriteria_evictionsId);
-  }
-
-  public long getEvictionsInProgress() {
-    return stats.getLong(evictByCriteria_evictionsInProgressId);
-  }
-
-  public long getEvictionsTime() {
-    return stats.getLong(evictByCriteria_evictionTimeId);
-  }
-
-  public long getEvaluations() {
-    return stats.getLong(evictByCriteria_evaluationsId);
-  }
-
-  public long getEvaluationTime() {
-    return stats.getLong(evictByCriteria_evaluationTimeId);
-  }
 }
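
For context on the statistics deleted above: the evictByCriteria counters followed the usual CachePerfStats timing convention of pairing a start method that captures a NanoTimer timestamp with an end method that adds the elapsed nanoseconds to a long statistic. A minimal sketch of that convention, assuming a hypothetical stat id (myOperationTimeId is illustrative, not an actual Geode statistic):

    // Sketch only -- mirrors the start/end pattern of the deleted
    // startCustomEviction()/endCustomEviction() methods.
    public long startMyOperation() {
      return NanoTimer.getTime();                    // nanosecond timestamp
    }

    public void endMyOperation(long start) {
      long elapsed = NanoTimer.getTime() - start;    // elapsed nanos
      stats.incLong(myOperationTimeId, elapsed);     // accumulate into the stat
    }
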

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ColocationHelper.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ColocationHelper.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ColocationHelper.java
index 1441144..72edc10 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ColocationHelper.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ColocationHelper.java
@@ -107,9 +107,6 @@ public class ColocationHelper {
   }
     private static PartitionedRegion getColocatedPR(
       final PartitionedRegion partitionedRegion, final String colocatedWith) {
-    logger.info(LocalizedMessage.create(
-        LocalizedStrings.HOPLOG_0_COLOCATE_WITH_REGION_1_NOT_INITIALIZED_YET,
-        new Object[] { partitionedRegion.getFullPath(), colocatedWith }));
     PartitionedRegion colocatedPR = (PartitionedRegion) partitionedRegion
         .getCache().getPartitionedRegion(colocatedWith, false);
     assert colocatedPR != null;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CustomEvictionAttributesImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CustomEvictionAttributesImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CustomEvictionAttributesImpl.java
deleted file mode 100644
index 0c82f97..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CustomEvictionAttributesImpl.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.gemstone.gemfire.internal.cache;
-
-import com.gemstone.gemfire.cache.CustomEvictionAttributes;
-import com.gemstone.gemfire.cache.EvictionCriteria;
-
-/**
- * Concrete instance of {@link CustomEvictionAttributes}.
- * 
- * @since gfxd 1.0
- */
-public final class CustomEvictionAttributesImpl extends
-    CustomEvictionAttributes {
-
-  public CustomEvictionAttributesImpl(EvictionCriteria<?, ?> criteria,
-      long startTime, long interval, boolean evictIncoming) {
-    super(criteria, startTime, interval, evictIncoming);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistTXState.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistTXState.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistTXState.java
index f8475ae..cafdb80 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistTXState.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistTXState.java
@@ -145,7 +145,7 @@ public class DistTXState extends TXState {
               } 
             } 
           } // end if primary
-        } // end non-hdfs buckets
+        }
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedCacheOperation.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedCacheOperation.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedCacheOperation.java
index a6d2488..6a7b4f2 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedCacheOperation.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedCacheOperation.java
@@ -863,8 +863,6 @@ public abstract class DistributedCacheOperation {
 
     private final static int INHIBIT_NOTIFICATIONS_MASK = 0x400;
 
-	protected final static short FETCH_FROM_HDFS = 0x200;
-    
     protected final static short IS_PUT_DML = 0x100;
 
     public boolean needsRouting;
@@ -1367,7 +1365,6 @@ public abstract class DistributedCacheOperation {
       if ((extBits & INHIBIT_NOTIFICATIONS_MASK) != 0) {
         this.inhibitAllNotifications = true;
 	  if (this instanceof PutAllMessage) {
-        ((PutAllMessage) this).setFetchFromHDFS((extBits & FETCH_FROM_HDFS) != 0);
         ((PutAllMessage) this).setPutDML((extBits & IS_PUT_DML) != 0);
       }
       }
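
The hunk above drops the FETCH_FROM_HDFS bit while keeping IS_PUT_DML; both were per-message booleans packed into the compressed extended-bits short. A rough, self-contained illustration of that encode/decode round trip (the class and method names are made up for this example; only the 0x100 mask value comes from the code above):

    final class ExtBitsSketch {
      static final short IS_PUT_DML = 0x100;

      static short encode(boolean isPutDML) {
        short bits = 0;
        if (isPutDML) {
          bits |= IS_PUT_DML;                 // set the flag bit when enabled
        }
        return bits;
      }

      static boolean decode(short extBits) {
        return (extBits & IS_PUT_DML) != 0;   // test the flag bit on receipt
      }
    }
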

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedPutAllOperation.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedPutAllOperation.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedPutAllOperation.java
index 2817fdd..b6aa1b6 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedPutAllOperation.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedPutAllOperation.java
@@ -856,7 +856,6 @@ public class DistributedPutAllOperation extends AbstractUpdateOperation
     PutAllMessage msg = new PutAllMessage();
     msg.eventId = event.getEventId();
     msg.context = event.getContext();
-	msg.setFetchFromHDFS(event.isFetchFromHDFS());
     msg.setPutDML(event.isPutDML());
     return msg;
   }
@@ -871,7 +870,7 @@ public class DistributedPutAllOperation extends AbstractUpdateOperation
   public PutAllPRMessage createPRMessagesNotifyOnly(int bucketId) {
     final EntryEventImpl event = getBaseEvent();
     PutAllPRMessage prMsg = new PutAllPRMessage(bucketId, putAllDataSize, true,
-        event.isPossibleDuplicate(), !event.isGenerateCallbacks(), event.getCallbackArgument(), false, false /*isPutDML*/);
+        event.isPossibleDuplicate(), !event.isGenerateCallbacks(), event.getCallbackArgument(), false /*isPutDML*/);
     if (event.getContext() != null) {
       prMsg.setBridgeContext(event.getContext());
     }
@@ -900,7 +899,7 @@ public class DistributedPutAllOperation extends AbstractUpdateOperation
       PutAllPRMessage prMsg = (PutAllPRMessage)prMsgMap.get(bucketId);
       if (prMsg == null) {
         prMsg = new PutAllPRMessage(bucketId.intValue(), putAllDataSize, false,
-            event.isPossibleDuplicate(), !event.isGenerateCallbacks(), event.getCallbackArgument(), event.isFetchFromHDFS(), event.isPutDML());
+            event.isPossibleDuplicate(), !event.isGenerateCallbacks(), event.getCallbackArgument(), event.isPutDML());
         prMsg.setTransactionDistributed(event.getRegion().getCache().getTxManager().isDistributed());
 
         // set dpao's context(original sender) into each PutAllMsg
@@ -1077,9 +1076,6 @@ public class DistributedPutAllOperation extends AbstractUpdateOperation
 
     protected EventID eventId = null;
     
-    // By default, fetchFromHDFS == true;
-    private transient boolean fetchFromHDFS = true;
-    
     private transient boolean isPutDML = false;
 
     protected static final short HAS_BRIDGE_CONTEXT = UNRESERVED_FLAGS_START;
@@ -1137,12 +1133,11 @@ public class DistributedPutAllOperation extends AbstractUpdateOperation
      *          the region the entry is put in
      */
     public void doEntryPut(PutAllEntryData entry, DistributedRegion rgn,
-        boolean requiresRegionContext, boolean fetchFromHDFS, boolean isPutDML) {
+        boolean requiresRegionContext, boolean isPutDML) {
       @Released EntryEventImpl ev = PutAllMessage.createEntryEvent(entry, getSender(), 
           this.context, rgn,
           requiresRegionContext, this.possibleDuplicate,
           this.needsRouting, this.callbackArg, true, skipCallbacks);
-	  ev.setFetchFromHDFS(fetchFromHDFS);
       ev.setPutDML(isPutDML);
       // we don't need to set old value here, because the msg is from remote. local old value will get from next step
       try {
@@ -1237,7 +1232,7 @@ public class DistributedPutAllOperation extends AbstractUpdateOperation
               logger.debug("putAll processing {} with {} sender={}", putAllData[i], putAllData[i].versionTag, sender);
             }
             putAllData[i].setSender(sender);
-            doEntryPut(putAllData[i], rgn, requiresRegionContext,  fetchFromHDFS, isPutDML);
+            doEntryPut(putAllData[i], rgn, requiresRegionContext,  isPutDML);
           }
         }
       }, ev.getEventId());
@@ -1366,10 +1361,6 @@ public class DistributedPutAllOperation extends AbstractUpdateOperation
       return Arrays.asList(ops);
     }
     
-    public void setFetchFromHDFS(boolean val) {
-      this.fetchFromHDFS = val;
-    }
-    
     public void setPutDML(boolean val) {
       this.isPutDML = val;
     }
@@ -1377,9 +1368,6 @@ public class DistributedPutAllOperation extends AbstractUpdateOperation
     @Override
     protected short computeCompressedExtBits(short bits) {
       bits = super.computeCompressedExtBits(bits);
-      if (fetchFromHDFS) {
-        bits |= FETCH_FROM_HDFS;
-      }
       if (isPutDML) {
         bits |= IS_PUT_DML;
       }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedRegion.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedRegion.java
index addba8e..226d914 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedRegion.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedRegion.java
@@ -17,8 +17,6 @@
 
 package com.gemstone.gemfire.internal.cache;
 
-import static com.gemstone.gemfire.internal.offheap.annotations.OffHeapIdentifier.ABSTRACT_REGION_ENTRY_FILL_IN_VALUE;
-
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.ArrayList;
@@ -113,8 +111,6 @@ import com.gemstone.gemfire.internal.cache.versions.ConcurrentCacheModificationE
 import com.gemstone.gemfire.internal.cache.versions.RegionVersionVector;
 import com.gemstone.gemfire.internal.cache.versions.VersionSource;
 import com.gemstone.gemfire.internal.cache.versions.VersionTag;
-import com.gemstone.gemfire.internal.cache.wan.AbstractGatewaySender;
-import com.gemstone.gemfire.internal.cache.wan.AbstractGatewaySenderEventProcessor;
 import com.gemstone.gemfire.internal.cache.wan.AsyncEventQueueConfigurationException;
 import com.gemstone.gemfire.internal.cache.wan.GatewaySenderConfigurationException;
 import com.gemstone.gemfire.internal.cache.wan.parallel.ConcurrentParallelGatewaySenderQueue;
@@ -1264,8 +1260,6 @@ public class DistributedRegion extends LocalRegion implements
   private final Set<DistributedMember> memoryThresholdReachedMembers =
     new CopyOnWriteArraySet<DistributedMember>();
 
-  private ConcurrentParallelGatewaySenderQueue hdfsQueue;
-
   /** Sets and returns giiMissingRequiredRoles */
   private boolean checkInitialImageForReliability(
       InternalDistributedMember imageTarget,
@@ -2424,9 +2418,16 @@ public class DistributedRegion extends LocalRegion implements
   /** @return the deserialized value */
   @Override
   @Retained
-  protected Object findObjectInSystem(KeyInfo keyInfo, boolean isCreate,
-      TXStateInterface txState, boolean generateCallbacks, Object localValue, boolean disableCopyOnRead,
-        boolean preferCD, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS)
+  protected Object findObjectInSystem(KeyInfo keyInfo,
+                                      boolean isCreate,
+                                      TXStateInterface txState,
+                                      boolean generateCallbacks,
+                                      Object localValue,
+                                      boolean disableCopyOnRead,
+                                      boolean preferCD,
+                                      ClientProxyMembershipID requestingClient,
+                                      EntryEventImpl clientEvent,
+                                      boolean returnTombstones)
       throws CacheLoaderException, TimeoutException
   {
     checkForLimitedOrNoAccess();
@@ -2545,18 +2546,6 @@ public class DistributedRegion extends LocalRegion implements
     }
   }
   
-  protected ConcurrentParallelGatewaySenderQueue getHDFSQueue() {
-    if (this.hdfsQueue == null) {
-      String asyncQId = this.getPartitionedRegion().getHDFSEventQueueName();
-      final AsyncEventQueueImpl asyncQ =  (AsyncEventQueueImpl)this.getCache().getAsyncEventQueue(asyncQId);
-      final AbstractGatewaySender gatewaySender = (AbstractGatewaySender)asyncQ.getSender();
-      AbstractGatewaySenderEventProcessor ep = gatewaySender.getEventProcessor();
-      if (ep == null) return null;
-      hdfsQueue = (ConcurrentParallelGatewaySenderQueue)ep.getQueue();
-    }
-    return hdfsQueue;
-  }
-
   /** hook for subclasses to note that a cache load was performed
    * @see BucketRegion#performedLoad
    */

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/EntryEventImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/EntryEventImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/EntryEventImpl.java
index 2b826ce..e241622 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/EntryEventImpl.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/EntryEventImpl.java
@@ -193,16 +193,8 @@ public class EntryEventImpl
   /** version tag for concurrency checks */
   protected VersionTag versionTag;
 
-  /** boolean to indicate that this operation should be optimized by not fetching from HDFS*/
-  private transient boolean fetchFromHDFS = true;
-  
   private transient boolean isPutDML = false;
 
-  /** boolean to indicate that the RegionEntry for this event was loaded from HDFS*/
-  private transient boolean loadedFromHDFS= false;
-  
-  private transient boolean isCustomEviction = false;
-  
   /** boolean to indicate that the RegionEntry for this event has been evicted*/
   private transient boolean isEvicted = false;
   
@@ -658,14 +650,6 @@ public class EntryEventImpl
     return this.op.isEviction();
   }
 
-  public final boolean isCustomEviction() {
-    return this.isCustomEviction;
-  }
-  
-  public final void setCustomEviction(boolean customEvict) {
-    this.isCustomEviction = customEvict;
-  }
-  
   public final void setEvicted() {
     this.isEvicted = true;
   }
@@ -3047,13 +3031,6 @@ public class EntryEventImpl
   public boolean isOldValueOffHeap() {
     return isOffHeapReference(this.oldValue);
   }
-  public final boolean isFetchFromHDFS() {
-    return fetchFromHDFS;
-  }
-
-  public final void setFetchFromHDFS(boolean fetchFromHDFS) {
-    this.fetchFromHDFS = fetchFromHDFS;
-  }
 
   public final boolean isPutDML() {
     return this.isPutDML;
@@ -3062,12 +3039,4 @@ public class EntryEventImpl
   public final void setPutDML(boolean val) {
     this.isPutDML = val;
   }
-
-  public final boolean isLoadedFromHDFS() {
-    return loadedFromHDFS;
-  }
-
-  public final void setLoadedFromHDFS(boolean loadedFromHDFS) {
-    this.loadedFromHDFS = loadedFromHDFS;
-  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/EvictorService.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/EvictorService.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/EvictorService.java
deleted file mode 100644
index 9054d6d..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/EvictorService.java
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.internal.cache;
-
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-import com.gemstone.gemfire.cache.CacheClosedException;
-import com.gemstone.gemfire.cache.EvictionCriteria;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.google.common.util.concurrent.AbstractScheduledService;
-import com.gemstone.gemfire.internal.offheap.Releasable;
-/**
- * Schedules each iteration periodically. EvictorService takes absolute time and
- * a period as input and schedules Eviction at absolute times by calculating the
- * interval. For scheduling the next eviction iteration it also takes into
- * account the time taken to complete one iteration. If an iteration takes more
- * time than the specified period then another iteration is scheduled
- * immediately.
- * 
- * 
- */
-
-public class EvictorService extends AbstractScheduledService {
-
-  private final EvictionCriteria<Object, Object> criteria;
-
-  // period is always in seconds
-  private long interval;
-
-  private volatile boolean stopScheduling;
-
-  private long nextScheduleTime;
-
-  private GemFireCacheImpl cache;
-
-  private Region region;
-  
-  private volatile ScheduledExecutorService executorService;
-
-  public EvictorService(EvictionCriteria<Object, Object> criteria,
-      long evictorStartTime, long evictorInterval, TimeUnit unit, Region r) {
-    this.criteria = criteria;
-    this.interval = unit.toSeconds(evictorInterval);
-    this.region = r;
-    try {
-      this.cache = GemFireCacheImpl.getExisting();
-    } catch (CacheClosedException cce) {
-      
-    }
-    //TODO: Unless we revisit System.currentTimeMillis or cacheTimeMillis keep the default
-//    long now = (evictorStartTime != 0 ? evictorStartTime
-//        + this.cache.getDistributionManager().getCacheTimeOffset() : this.cache
-//        .getDistributionManager().cacheTimeMillis()) / 1000;
-    long now = this.cache.getDistributionManager().cacheTimeMillis() / 1000;
-    if (this.cache.getLoggerI18n().fineEnabled()) {
-      this.cache.getLoggerI18n().fine(
-          "EvictorService: The startTime(now) is " + now + " evictorStartTime : " + evictorStartTime);
-    }
-    
-    this.nextScheduleTime = now + 10;
-
-    if (this.cache.getLoggerI18n().fineEnabled()) {
-      this.cache.getLoggerI18n().fine(
-          "EvictorService: The startTime is " + this.nextScheduleTime);
-    }
-  }
-
-  @Override
-  protected void runOneIteration() throws Exception {
-    if (this.cache.getLoggerI18n().fineEnabled()) {
-      this.cache.getLoggerI18n()
-          .fine(
-              "EvictorService: Running the iteration at "
-                  + cache.cacheTimeMillis());
-    }
-    if (stopScheduling || checkCancelled(cache)) {
-      stopScheduling(); // if check cancelled
-      if (this.cache.getLoggerI18n().fineEnabled()) {
-        this.cache
-            .getLoggerI18n()
-            .fine(
-                "EvictorService: Abort eviction since stopScheduling OR cancel in progress. Evicted 0 entries ");
-      }
-      return;
-    }
-    CachePerfStats stats = ((LocalRegion)this.region).getCachePerfStats();
-    long startEvictionTime = stats.startCustomEviction();
-    int evicted = 0;
-    long startEvaluationTime = stats.startEvaluation();
-    Iterator<Entry<Object, Object>> keysItr = null;
-    long totalIterationsTime = 0;
-    
-    keysItr = this.criteria.getKeysToBeEvicted(this.cache
-        .getDistributionManager().cacheTimeMillis(), this.region);
-    try {
-    stats.incEvaluations(this.region.size());
-    // if we have been asked to stop scheduling
-    // or the cache is closing stop in between.
-    
-    
-    while (keysItr.hasNext() && !stopScheduling && !checkCancelled(cache)) {
-      Map.Entry<Object, Object> entry = keysItr.next();
-      long startIterationTime = this.cache
-          .getDistributionManager().cacheTimeMillis();
-      Object routingObj = entry.getValue();
-      if (this.cache.getLoggerI18n().fineEnabled()) {
-        this.cache.getLoggerI18n().fine(
-            "EvictorService: Going to evict the following entry " + entry);
-      }
-      if (this.region instanceof PartitionedRegion) {
-        try {
-          PartitionedRegion pr = (PartitionedRegion)this.region;
-          stats.incEvictionsInProgress();
-          int bucketId = PartitionedRegionHelper.getHashKey(pr, routingObj);
-          BucketRegion br = pr.getDataStore().getLocalBucketById(bucketId);
-          // This has to be called on BucketRegion directly and not on the PR as
-          // PR doesn't allow operation on Secondary buckets.
-          if (br != null) {
-            if (this.cache.getLoggerI18n().fineEnabled()) {
-              this.cache.getLoggerI18n().fine(
-                  "EvictorService: Going to evict the following entry " + entry
-                      + " from bucket " + br);
-            }
-            if (br.getBucketAdvisor().isPrimary()) {
-              boolean succ = false;
-              try {
-                succ = br.customEvictDestroy(entry.getKey());
-              } catch (PrimaryBucketException e) {
-                if (this.cache.getLoggerI18n().fineEnabled()) {
-                  this.cache.getLoggerI18n().warning(
-                      LocalizedStrings.EVICTORSERVICE_CAUGHT_EXCEPTION_0, e);
-                }
-              }
-              
-              if (succ)
-                evicted++;
-              if (this.cache.getLoggerI18n().fineEnabled()) {
-                this.cache.getLoggerI18n()
-                    .fine(
-                        "EvictorService: Evicted the following entry " + entry
-                            + " from bucket " + br + " successfully " + succ
-                            + " the value in buk is " /*
-                                                       * +
-                                                       * br.get(entry.getKey())
-                                                       */);
-              }
-            }
-          }
-          stats.incEvictions();
-        } catch (Exception e) {
-          if (this.cache.getLoggerI18n().fineEnabled()) {
-            this.cache.getLoggerI18n().warning(
-                LocalizedStrings.EVICTORSERVICE_CAUGHT_EXCEPTION_0, e);
-          }
-          // TODO:
-          // Do the exception handling .
-          // Check if the bucket is present
-          // If the entry could not be evicted then log the warning
-          // Log any other exception.
-        }finally{
-          stats.decEvictionsInProgress();
-          long endIterationTime = this.cache
-              .getDistributionManager().cacheTimeMillis();
-          totalIterationsTime += (endIterationTime - startIterationTime);
-        }
-      }
-    }
-    }finally {
-      if(keysItr instanceof Releasable) {
-        ((Releasable)keysItr).release();
-      }
-    }
-    stats.endEvaluation(startEvaluationTime, totalIterationsTime);    
-    
-    if (this.cache.getLoggerI18n().fineEnabled()) {
-      this.cache.getLoggerI18n().fine(
-          "EvictorService: Completed an iteration at time "
-              + this.cache.getDistributionManager().cacheTimeMillis() / 1000
-              + ". Evicted " + evicted + " entries.");
-    }
-    stats.endCustomEviction(startEvictionTime);
-  }
-
-  private boolean checkCancelled(GemFireCacheImpl cache) {
-    if (cache.getCancelCriterion().cancelInProgress() != null) {
-      return true;
-    }
-    return false;
-  }
-
-  @Override
-  protected Scheduler scheduler() {
-    return new CustomScheduler() {
-      @Override
-      protected Schedule getNextSchedule() throws Exception {
-        // get the current time in seconds from DM.
-        // it takes care of clock skew etc in different VMs
-        long now = cache.getDistributionManager().cacheTimeMillis() / 1000;
-        if (cache.getLoggerI18n().fineEnabled()) {
-          cache.getLoggerI18n().fine("EvictorService: Now is " + now);
-        }
-        long delay = 0;
-        if (now < nextScheduleTime) {
-          delay = nextScheduleTime - now;
-        }
-        nextScheduleTime += interval;
-        // calculate the next immediate time i.e. schedule time in seconds
-        // set the schedule.delay to that scheduletime - currenttime
-        if (cache.getLoggerI18n().fineEnabled()) {
-          cache.getLoggerI18n().fine(
-              "EvictorService: Returning the next schedule with delay " + delay
-                  + " next schedule is at : " + nextScheduleTime);
-        }
-
-        return new Schedule(delay, TimeUnit.SECONDS);
-      }
-    };
-  }
-
-  /**
-   * Region.destroy and Region.close should make sure to call this method. This
-   * will be called here.
-   */
-  public void stopScheduling() {
-    this.stopScheduling = true;
-  }
-
-  // this will be called when we stop the service.
-  // not sure if we have to do any cleanup
-  // to stop the service call stop()
-  @Override
-  protected void shutDown() throws Exception {
-    this.executorService.shutdownNow();
-    this.region= null;
-    this.cache = null;
-  }
-
-  // This will be called when we start the service.
-  // not sure if we have to any intialization
-  @Override
-  protected void startUp() throws Exception {
-
-  }
-
-  public void changeEvictionInterval(long newInterval) {
-    this.interval = newInterval / 1000;
-    if (cache.getLoggerI18n().fineEnabled()) {
-      cache.getLoggerI18n().fine(
-          "EvictorService: New interval is " + this.interval);
-    }
-  }
-
-  public void changeStartTime(long newStart) {
-    this.nextScheduleTime = newStart/1000;
-    if (cache.getLoggerI18n().fineEnabled()) {
-      cache.getLoggerI18n().fine("EvictorService: New start time is " + this.nextScheduleTime);
-    }
-  }
-  
-  protected ScheduledExecutorService executor() {
-    this.executorService = super.executor();
-    return this.executorService;
-  }
-
-}
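
The class javadoc of the deleted EvictorService above describes its scheduling rule: iterations are anchored to absolute times, the next delay accounts for how long the previous iteration took, and an iteration that overruns its period is followed immediately by the next one. A simplified sketch of that rule, distilled from the deleted getNextSchedule() (field names are illustrative; all times are in seconds):

    // Sketch of the absolute-time scheduling used by the removed EvictorService.
    long nextScheduleTime;   // absolute time of the next planned iteration
    long interval;           // configured period between iterations

    long computeNextDelaySeconds(long nowSeconds) {
      long delay = 0;
      if (nowSeconds < nextScheduleTime) {
        delay = nextScheduleTime - nowSeconds;   // wait until the planned slot
      }
      nextScheduleTime += interval;              // advance the anchor by one period
      return delay;                              // 0 => previous iteration overran, run now
    }
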

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
index cc9727b..c477466 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
@@ -126,16 +126,6 @@ import com.gemstone.gemfire.cache.client.internal.ClientMetadataService;
 import com.gemstone.gemfire.cache.client.internal.ClientRegionFactoryImpl;
 import com.gemstone.gemfire.cache.client.internal.PoolImpl;
 import com.gemstone.gemfire.cache.execute.FunctionService;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSIntegrationUtil;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreCreation;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSFlushQueueFunction;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSForceCompactionFunction;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSLastCompactionTimeFunction;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSStoreDirector;
 import com.gemstone.gemfire.cache.query.QueryService;
 import com.gemstone.gemfire.cache.query.internal.DefaultQuery;
 import com.gemstone.gemfire.cache.query.internal.DefaultQueryService;
@@ -932,9 +922,6 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
         HARegionQueue.setMessageSyncInterval(HARegionQueue.DEFAULT_MESSAGE_SYNC_INTERVAL);
       }
       FunctionService.registerFunction(new PRContainsValueFunction());
-      FunctionService.registerFunction(new HDFSLastCompactionTimeFunction());
-      FunctionService.registerFunction(new HDFSForceCompactionFunction());
-      FunctionService.registerFunction(new HDFSFlushQueueFunction());
       this.expirationScheduler = new ExpirationScheduler(this.system);
 
       // uncomment following line when debugging CacheExistsException
@@ -2185,8 +2172,6 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
           closeDiskStores();
           diskMonitor.close();
           
-          closeHDFSStores();
-          
           // Close the CqService Handle.
           try {
             if (isDebugEnabled) {
@@ -2272,7 +2257,6 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
         } catch (CancelException e) {
           // make sure the disk stores get closed
           closeDiskStores();
-          closeHDFSStores();
           // NO DISTRIBUTED MESSAGING CAN BE DONE HERE!
 
           // okay, we're taking too long to do this stuff, so let's
@@ -3119,8 +3103,6 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
             future = (Future) this.reinitializingRegions.get(fullPath);
           }
           if (future == null) {
-            HDFSIntegrationUtil.createAndAddAsyncQueue(regionPath, attrs, this);
-            attrs = setEvictionAttributesForLargeRegion(attrs);
             if (internalRegionArgs.getInternalMetaRegion() != null) {
               rgn = internalRegionArgs.getInternalMetaRegion();
             } else if (isPartitionedRegion) {
@@ -3245,54 +3227,6 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
     }
   }
 
-  /**
-   * turn on eviction by default for HDFS regions
-   */
-  @SuppressWarnings("deprecation")
-  public <K, V> RegionAttributes<K, V> setEvictionAttributesForLargeRegion(
-      RegionAttributes<K, V> attrs) {
-    RegionAttributes<K, V> ra = attrs;
-    if (DISABLE_AUTO_EVICTION) {
-      return ra;
-    }
-    if (attrs.getDataPolicy().withHDFS()
-        || attrs.getHDFSStoreName() != null) {
-      // make the region overflow by default
-      EvictionAttributes evictionAttributes = attrs.getEvictionAttributes();
-      boolean hasNoEvictionAttrs = evictionAttributes == null
-          || evictionAttributes.getAlgorithm().isNone();
-      AttributesFactory<K, V> af = new AttributesFactory<K, V>(attrs);
-      String diskStoreName = attrs.getDiskStoreName();
-      // set the local persistent directory to be the same as that for
-      // HDFS store
-      if (attrs.getHDFSStoreName() != null) {
-        HDFSStoreImpl hdfsStore = findHDFSStore(attrs.getHDFSStoreName());
-        if (attrs.getPartitionAttributes().getLocalMaxMemory() != 0 && hdfsStore == null) {
-          // HDFS store expected to be found at this point
-          throw new IllegalStateException(
-              LocalizedStrings.HOPLOG_HDFS_STORE_NOT_FOUND
-                  .toLocalizedString(attrs.getHDFSStoreName()));
-        }
-        // if there is no disk store, use the one configured for hdfs queue
-        if (attrs.getPartitionAttributes().getLocalMaxMemory() != 0 && diskStoreName == null) {
-          diskStoreName = hdfsStore.getDiskStoreName();
-        }
-      }
-      // set LRU heap eviction with overflow to disk for HDFS stores with
-      // local Oplog persistence
-      // set eviction attributes only if not set
-      if (hasNoEvictionAttrs) {
-        if (diskStoreName != null) {
-          af.setDiskStoreName(diskStoreName);
-        }
-        af.setEvictionAttributes(EvictionAttributes.createLRUHeapAttributes(
-            ObjectSizer.DEFAULT, EvictionAction.OVERFLOW_TO_DISK));
-      }
-      ra = af.create();
-    }
-    return ra;
-  }
-
   public final Region getRegion(String path) {
     return getRegion(path, false);
   }
@@ -5403,39 +5337,6 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
     }
   }
   
-  public HDFSStoreFactory createHDFSStoreFactory(HDFSStoreCreation creation) {
-    return new HDFSStoreFactoryImpl(this, creation);
-  }
-  public void addHDFSStore(HDFSStoreImpl hsi) {
-    HDFSStoreDirector.getInstance().addHDFSStore(hsi);
-    //TODO:HDFS Add a resource event for hdfs store creation as well 
-    // like the following disk store event
-    //system.handleResourceEvent(ResourceEvent.DISKSTORE_CREATE, dsi);
-  }
-
-  public void removeHDFSStore(HDFSStoreImpl hsi) {
-    //hsi.destroy();
-    HDFSStoreDirector.getInstance().removeHDFSStore(hsi.getName());
-    //TODO:HDFS Add a resource event for hdfs store as well 
-    // like the following disk store event
-    //system.handleResourceEvent(ResourceEvent.DISKSTORE_REMOVE, dsi);
-  }
-
-  public void closeHDFSStores() {
-    HDFSRegionDirector.reset();
-    HDFSStoreDirector.getInstance().closeHDFSStores();
-  }
-
-  
-  public HDFSStoreImpl findHDFSStore(String name) {
-    return HDFSStoreDirector.getInstance().getHDFSStore(name);
-  }
-  
-  public Collection<HDFSStoreImpl> getHDFSStores() {
-    return HDFSStoreDirector.getInstance().getAllHDFSStores();
-  }
-  
-  
   public TemporaryResultSetFactory getResultSetFactory() {
     return this.resultSetFactory;
   }



[36/63] [abbrv] incubator-geode git commit: GEODE-17: enhance the GeodeSecurityUtil and review changes

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/PulseAbstractTest.java
----------------------------------------------------------------------
diff --git a/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/PulseAbstractTest.java b/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/PulseAbstractTest.java
index 392de4c..09bb7d7 100644
--- a/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/PulseAbstractTest.java
+++ b/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/PulseAbstractTest.java
@@ -16,11 +16,24 @@
  * limitations under the License.
  *
  */
+
 package com.vmware.gemfire.tools.pulse.tests;
 
+import java.io.BufferedReader;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.text.DecimalFormat;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.TimeUnit;
+
+import junit.framework.Assert;
+
 import com.gemstone.gemfire.management.internal.JettyHelper;
+import com.jayway.awaitility.Awaitility;
 import com.vmware.gemfire.tools.pulse.internal.data.PulseConstants;
-import junit.framework.Assert;
+
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.Ignore;
@@ -35,16 +48,8 @@ import org.openqa.selenium.support.ui.ExpectedCondition;
 import org.openqa.selenium.support.ui.ExpectedConditions;
 import org.openqa.selenium.support.ui.WebDriverWait;
 
-import java.io.BufferedReader;
-import java.io.FileReader;
-import java.io.IOException;
-import java.io.InputStream;
-import java.text.DecimalFormat;
-import java.util.List;
-import java.util.Properties;
-import java.util.concurrent.TimeUnit;
-
 public abstract class PulseAbstractTest extends PulseBaseTest {
+
   private static String jmxPropertiesFile;
   private static String path;
 
@@ -107,7 +112,7 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
   private static final String REGION_PERSISTENCE_LABEL = "regionPersistence";
   private static final String DATA_VIEW_USEDMEMORY = "memoryUsed";
   private static final String DATA_VIEW_TOTALMEMORY = "totalMemory";
-  
+
   private static final String DATA_BROWSER_LABEL = "Data Browser";
   private static final String DATA_BROWSER_REGIONName1 = "treeDemo_1_span";
   private static final String DATA_BROWSER_REGIONName2 = "treeDemo_2_span";
@@ -132,11 +137,12 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
 
   public static void setUpServer(String username, String password, String jsonAuthFile) throws Exception {
     ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+
     jmxPropertiesFile = classLoader.getResource("test.properties").getPath();
     path = getPulseWarPath();
     server = Server.createServer(9999, jmxPropertiesFile, jsonAuthFile);
 
-    String host = "localhost";// InetAddress.getLocalHost().getHostAddress();
+    String host = "localhost";
     int port = 8080;
     String context = "/pulse";
 
@@ -146,7 +152,7 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
 
     pulseURL = "http://" + host + ":" + port + context;
 
-    Thread.sleep(5000); // wait till the container settles down
+    Awaitility.await().until(()->jetty.isStarted());
 
     driver = new FirefoxDriver();
     driver.manage().window().maximize();
@@ -160,12 +166,12 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
 
     Thread.sleep(3000);
     WebElement userNameOnPulsePage = (new WebDriverWait(driver, 10))
-        .until(new ExpectedCondition<WebElement>() {
-          @Override
-          public WebElement apply(WebDriver d) {
-            return d.findElement(By.id("userName"));
-          }
-        });
+      .until(new ExpectedCondition<WebElement>() {
+        @Override
+        public WebElement apply(WebDriver d) {
+          return d.findElement(By.id("userName"));
+        }
+      });
     Assert.assertNotNull(userNameOnPulsePage);
     driver.navigate().refresh();
     Thread.sleep(7000);
@@ -215,44 +221,42 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
   }
 
   protected void searchByXPathAndClick(String xpath) {
-	WebElement element = driver.findElement(By.xpath(xpath));
-     Assert.assertNotNull(element);
+    WebElement element = driver.findElement(By.xpath(xpath));
+    Assert.assertNotNull(element);
     element.click();
   }
 
   protected void waitForElementByClassName(final String className, int seconds) {
     WebElement linkTextOnPulsePage1 = (new WebDriverWait(driver, seconds))
-        .until(new ExpectedCondition<WebElement>() {
-          @Override
-          public WebElement apply(WebDriver d) {
-            return d.findElement(By.className(className));
-          }
-        });
+      .until(new ExpectedCondition<WebElement>() {
+        @Override
+        public WebElement apply(WebDriver d) {
+          return d.findElement(By.className(className));
+        }
+      });
     Assert.assertNotNull(linkTextOnPulsePage1);
   }
 
   protected void waitForElementById(final String id, int seconds) {
     WebElement element = (new WebDriverWait(driver, 10))
-        .until(new ExpectedCondition<WebElement>() {
-          @Override
-          public WebElement apply(WebDriver d) {
-            return d.findElement(By.id(id));
-          }
-        });
+      .until(new ExpectedCondition<WebElement>() {
+        @Override
+        public WebElement apply(WebDriver d) {
+          return d.findElement(By.id(id));
+        }
+      });
     Assert.assertNotNull(element);
   }
-  
+
   protected void scrollbarVerticalDownScroll() {
     JavascriptExecutor js = (JavascriptExecutor) driver;
     js.executeScript("javascript:window.scrollBy(250,700)");
     WebElement pickerScroll = driver.findElement(By.className("jspDrag"));
     WebElement pickerScrollCorner = driver.findElement(By
-        .className("jspCorner"));
+      .className("jspCorner"));
     Actions builder = new Actions(driver);
-    Actions movePicker = builder.dragAndDrop(pickerScroll, pickerScrollCorner); // pickerscroll
-                                                                                // is
-                                                                                // the
-                                                                                // webelement
+    Actions movePicker = builder.dragAndDrop(pickerScroll, pickerScrollCorner);
+    // pickerscroll is the web element
     movePicker.perform();
   }
 
@@ -260,147 +264,144 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
     JavascriptExecutor js = (JavascriptExecutor) driver;
     js.executeScript("javascript:window.scrollBy(250,700)");
     WebElement pickerScroll = driver
-        .findElement(By
-            .xpath("//div[@id='gview_queryStatisticsList']/div[3]/div/div[3]/div[2]/div"));
+      .findElement(By
+        .xpath("//div[@id='gview_queryStatisticsList']/div[3]/div/div[3]/div[2]/div"));
     WebElement pickerScrollCorner = driver.findElement(By
-        .className("jspCorner"));
+      .className("jspCorner"));
     Actions builder = new Actions(driver);
-    Actions movePicker = builder.dragAndDrop(pickerScroll, pickerScrollCorner); // pickerscroll
-                                                                                // is
-                                                                                // the
-                                                                                // webelement
+    Actions movePicker = builder.dragAndDrop(pickerScroll, pickerScrollCorner);
+    // pickerscroll is the web element
     movePicker.perform();
   }
 
-  
-  
+
   @Test
   public void testClusterLocatorCount() throws IOException {
     String clusterLocators = driver
-        .findElement(By.id(CLUSTER_VIEW_LOCATORS_ID)).getText();
-   
-    String totallocators = JMXProperties.getInstance().getProperty("server.S1.locatorCount");  
+      .findElement(By.id(CLUSTER_VIEW_LOCATORS_ID)).getText();
+
+    String totallocators = JMXProperties.getInstance().getProperty("server.S1.locatorCount");
     Assert.assertEquals(totallocators, clusterLocators);
   }
 
- @Test
+  @Test
   public void testClusterRegionCount() {
     String clusterRegions = driver.findElement(By.id(CLUSTER_VIEW_REGIONS_ID))
-        .getText();
+      .getText();
     String totalregions = JMXProperties.getInstance().getProperty(
-        "server.S1.totalRegionCount");
+      "server.S1.totalRegionCount");
     Assert.assertEquals(totalregions, clusterRegions);
   }
 
- @Test
+  @Test
   public void testClusterMemberCount() {
-   String clusterMembers = driver.findElement(By.id(CLUSTER_VIEW_MEMBERS_ID)).getText();
-   String totalMembers = JMXProperties.getInstance().getProperty("server.S1.memberCount");
-   Assert.assertEquals(totalMembers, clusterMembers);
- }
+    String clusterMembers = driver.findElement(By.id(CLUSTER_VIEW_MEMBERS_ID)).getText();
+    String totalMembers = JMXProperties.getInstance().getProperty("server.S1.memberCount");
+    Assert.assertEquals(totalMembers, clusterMembers);
+  }
 
- @Test
+  @Test
   public void testClusterNumClient() {
     String clusterClients = driver.findElement(By.id(CLUSTER_CLIENTS_ID))
-        .getText();
+      .getText();
     String totalclients = JMXProperties.getInstance().getProperty(
-        "server.S1.numClients");
+      "server.S1.numClients");
     Assert.assertEquals(totalclients, clusterClients);
   }
 
   @Test
   public void testClusterNumRunningFunction() {
     String clusterFunctions = driver.findElement(By.id(CLUSTER_FUNCTIONS_ID))
-        .getText();
+      .getText();
     String totalfunctions = JMXProperties.getInstance().getProperty(
-        "server.S1.numRunningFunctions");
+      "server.S1.numRunningFunctions");
     Assert.assertEquals(totalfunctions, clusterFunctions);
   }
 
   @Test
   public void testClusterRegisteredCQCount() {
     String clusterUniqueCQs = driver.findElement(By.id(CLUSTER_UNIQUECQS_ID))
-        .getText();
+      .getText();
     String totaluniqueCQs = JMXProperties.getInstance().getProperty(
-        "server.S1.registeredCQCount");
+      "server.S1.registeredCQCount");
     Assert.assertEquals(totaluniqueCQs, clusterUniqueCQs);
   }
 
- @Test
+  @Test
   public void testClusterNumSubscriptions() {
     String clusterSubscriptions = driver.findElement(
-        By.id(CLUSTER_SUBSCRIPTION_ID)).getText();
+      By.id(CLUSTER_SUBSCRIPTION_ID)).getText();
     String totalSubscriptions = JMXProperties.getInstance().getProperty(
-        "server.S1.numSubscriptions");
+      "server.S1.numSubscriptions");
     Assert.assertEquals(totalSubscriptions, clusterSubscriptions);
   }
 
- @Test
+  @Test
   public void testClusterJVMPausesWidget() {
     String clusterJVMPauses = driver.findElement(By.id(CLUSTER_GCPAUSES_ID))
-        .getText();
+      .getText();
     String totalgcpauses = JMXProperties.getInstance().getProperty(
-        "server.S1.jvmPauses");
+      "server.S1.jvmPauses");
     Assert.assertEquals(totalgcpauses, clusterJVMPauses);
   }
 
   @Test
   public void testClusterAverageWritesWidget() {
     String clusterWritePerSec = driver.findElement(
-        By.id(CLUSTER_WRITEPERSEC_ID)).getText();
+      By.id(CLUSTER_WRITEPERSEC_ID)).getText();
     String totalwritepersec = JMXProperties.getInstance().getProperty(
-        "server.S1.averageWrites");
+      "server.S1.averageWrites");
     Assert.assertEquals(totalwritepersec, clusterWritePerSec);
   }
 
   @Test
   public void testClusterAverageReadsWidget() {
     String clusterReadPerSec = driver.findElement(By.id(CLUSTER_READPERSEC_ID))
-        .getText();
+      .getText();
     String totalreadpersec = JMXProperties.getInstance().getProperty(
-        "server.S1.averageReads");
+      "server.S1.averageReads");
     Assert.assertEquals(totalreadpersec, clusterReadPerSec);
   }
 
   @Test
   public void testClusterQuerRequestRateWidget() {
     String clusterQueriesPerSec = driver.findElement(
-        By.id(CLUSTER_QUERIESPERSEC_ID)).getText();
+      By.id(CLUSTER_QUERIESPERSEC_ID)).getText();
     String totalqueriespersec = JMXProperties.getInstance().getProperty(
-        "server.S1.queryRequestRate");
+      "server.S1.queryRequestRate");
     Assert.assertEquals(totalqueriespersec, clusterQueriesPerSec);
   }
-  
+
   @Test
   public void testClusterGridViewMemberID() throws InterruptedException {
-	 searchByIdAndClick("default_grid_button");
-	 List<WebElement> elements = driver.findElements(By.xpath("//table[@id='memberList']/tbody/tr")); //gives me 11 rows
-	 
-	 for(int memberCount = 1; memberCount<elements.size(); memberCount++){		  
-		  String memberId = driver.findElement(By.xpath("//table[@id='memberList']/tbody/tr[" + (memberCount + 1) + "]/td")).getText();		  
-		  String propertMemeberId= JMXProperties.getInstance().getProperty("member.M" + memberCount + ".id");		  
-		  Assert.assertEquals(memberId, propertMemeberId);
-	  }	 
+    searchByIdAndClick("default_grid_button");
+    List<WebElement> elements = driver.findElements(By.xpath("//table[@id='memberList']/tbody/tr")); //gives me 11 rows
+
+    for (int memberCount = 1; memberCount < elements.size(); memberCount++) {
+      String memberId = driver.findElement(By.xpath("//table[@id='memberList']/tbody/tr[" + (memberCount + 1) + "]/td")).getText();
+      String propertyMemberId = JMXProperties.getInstance().getProperty("member.M" + memberCount + ".id");
+      Assert.assertEquals(memberId, propertyMemberId);
+    }
   }
 
   @Test
   public void testClusterGridViewMemberName() {
-	  searchByIdAndClick("default_grid_button"); 
-	  List<WebElement> elements = driver.findElements(By.xpath("//table[@id='memberList']/tbody/tr"));  	  
-	  for (int memberNameCount = 1; memberNameCount < elements.size(); memberNameCount++) {
-		  String gridMemberName = driver.findElement(By.xpath("//table[@id='memberList']/tbody/tr[" + (memberNameCount + 1) + "]/td[2]")).getText();
-		  String memberName = JMXProperties.getInstance().getProperty("member.M" + memberNameCount + ".member");
-		  Assert.assertEquals(gridMemberName, memberName);
+    searchByIdAndClick("default_grid_button");
+    List<WebElement> elements = driver.findElements(By.xpath("//table[@id='memberList']/tbody/tr"));
+    for (int memberNameCount = 1; memberNameCount < elements.size(); memberNameCount++) {
+      String gridMemberName = driver.findElement(By.xpath("//table[@id='memberList']/tbody/tr[" + (memberNameCount + 1) + "]/td[2]")).getText();
+      String memberName = JMXProperties.getInstance().getProperty("member.M" + memberNameCount + ".member");
+      Assert.assertEquals(gridMemberName, memberName);
     }
   }
-  
+
 
   @Test
   public void testClusterGridViewMemberHost() {
-	  searchByIdAndClick("default_grid_button"); 
-	  List<WebElement> elements = driver.findElements(By.xpath("//table[@id='memberList']/tbody/tr")); 	  
+    searchByIdAndClick("default_grid_button");
+    List<WebElement> elements = driver.findElements(By.xpath("//table[@id='memberList']/tbody/tr"));
     for (int memberHostCount = 1; memberHostCount < elements.size(); memberHostCount++) {
-      String MemberHost = driver.findElement(By.xpath("//table[@id='memberList']/tbody/tr[" + (memberHostCount + 1) + "]/td[3]")).getText();     
+      String MemberHost = driver.findElement(By.xpath("//table[@id='memberList']/tbody/tr[" + (memberHostCount + 1) + "]/td[3]")).getText();
       String gridMemberHost = JMXProperties.getInstance().getProperty("member.M" + memberHostCount + ".host");
       Assert.assertEquals(gridMemberHost, MemberHost);
     }
@@ -408,14 +409,14 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
 
   @Test
   public void testClusterGridViewHeapUsage() {
-	searchByIdAndClick("default_grid_button"); 
+    searchByIdAndClick("default_grid_button");
     for (int i = 1; i <= 3; i++) {
       Float HeapUsage = Float.parseFloat(driver
-          .findElement(
-              By.xpath("//table[@id='memberList']/tbody/tr[" + (i + 1) + "]/td[5]")).getText());
+        .findElement(
+          By.xpath("//table[@id='memberList']/tbody/tr[" + (i + 1) + "]/td[5]")).getText());
       Float gridHeapUsagestring = Float.parseFloat(JMXProperties.getInstance()
-          .getProperty("member.M" + i + ".UsedMemory"));
-     Assert.assertEquals(gridHeapUsagestring, HeapUsage);
+        .getProperty("member.M" + i + ".UsedMemory"));
+      Assert.assertEquals(gridHeapUsagestring, HeapUsage);
     }
   }
 
@@ -424,7 +425,7 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
     searchByIdAndClick("default_grid_button");
     for (int i = 1; i <= 3; i++) {
       String CPUUsage = driver.findElement(By.xpath("//table[@id='memberList']/tbody/tr[" + (i + 1) + "]/td[6]"))
-          .getText();
+        .getText();
       String gridCPUUsage = JMXProperties.getInstance().getProperty("member.M" + i + ".cpuUsage");
       gridCPUUsage = gridCPUUsage.trim();
       Assert.assertEquals(gridCPUUsage, CPUUsage);
@@ -439,19 +440,19 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
   }
 
   @Test  // region count in properties file is 2 and UI is 1
-  public void testMemberTotalRegionCount() throws InterruptedException{
-	testRgraphWidget();
-    String RegionCount = driver.findElement(By.id(MEMBER_VIEW_REGION_ID)).getText();  
+  public void testMemberTotalRegionCount() throws InterruptedException {
+    testRgraphWidget();
+    String RegionCount = driver.findElement(By.id(MEMBER_VIEW_REGION_ID)).getText();
     String memberRegionCount = JMXProperties.getInstance().getProperty("member.M1.totalRegionCount");
     Assert.assertEquals(memberRegionCount, RegionCount);
   }
 
   @Test
-  public void testMemberNumThread()throws InterruptedException {
+  public void testMemberNumThread() throws InterruptedException {
     searchByIdAndClick("default_grid_button");
     searchByIdAndClick("M1&M1");
     String ThreadCount = driver.findElement(By.id(MEMBER_VIEW_THREAD_ID)).getText();
-    String memberThreadCount = JMXProperties.getInstance().getProperty("member.M1.numThreads");   
+    String memberThreadCount = JMXProperties.getInstance().getProperty("member.M1.numThreads");
     Assert.assertEquals(memberThreadCount, ThreadCount);
   }
 
@@ -460,69 +461,73 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
     searchByIdAndClick("default_grid_button");
     searchByIdAndClick("M1&M1");
     String SocketCount = driver.findElement(By.id(MEMBER_VIEW_SOCKETS_ID))
-        .getText();
+      .getText();
     String memberSocketCount = JMXProperties.getInstance().getProperty(
-        "member.M1.totalFileDescriptorOpen");
+      "member.M1.totalFileDescriptorOpen");
     Assert.assertEquals(memberSocketCount, SocketCount);
   }
 
- @Test
+  @Test
   public void testMemberLoadAverage() throws InterruptedException {
     searchByIdAndClick("default_grid_button");
     searchByIdAndClick("M1&M1");
     String LoadAvg = driver.findElement(By.id(MEMBER_VIEW_LOADAVG_ID))
-        .getText();
+      .getText();
     String memberLoadAvg = JMXProperties.getInstance().getProperty(
-        "member.M1.loadAverage");
+      "member.M1.loadAverage");
     Assert.assertEquals(df2.format(Double.valueOf(memberLoadAvg)), LoadAvg);
   }
 
   @Ignore("WIP") // May be useful in near future
   @Test
-  public void testOffHeapFreeSize(){	  
-	  
+  public void testOffHeapFreeSize() {
+
     String OffHeapFreeSizeString = driver.findElement(
-        By.id(MEMBER_VIEW_OFFHEAPFREESIZE_ID)).getText();
+      By.id(MEMBER_VIEW_OFFHEAPFREESIZE_ID)).getText();
     String OffHeapFreeSizetemp = OffHeapFreeSizeString.replaceAll("[a-zA-Z]",
-        "");
+      "");
     float OffHeapFreeSize = Float.parseFloat(OffHeapFreeSizetemp);
     float memberOffHeapFreeSize = Float.parseFloat(JMXProperties.getInstance()
-        .getProperty("member.M1.OffHeapFreeSize"));
+      .getProperty("member.M1.OffHeapFreeSize"));
     if (memberOffHeapFreeSize < 1048576) {
       memberOffHeapFreeSize = memberOffHeapFreeSize / 1024;
 
-    } else if (memberOffHeapFreeSize < 1073741824) {
+    }
+    else if (memberOffHeapFreeSize < 1073741824) {
       memberOffHeapFreeSize = memberOffHeapFreeSize / 1024 / 1024;
-    } else {
+    }
+    else {
       memberOffHeapFreeSize = memberOffHeapFreeSize / 1024 / 1024 / 1024;
     }
     memberOffHeapFreeSize = Float.parseFloat(new DecimalFormat("##.##")
-        .format(memberOffHeapFreeSize));
-    Assert.assertEquals(memberOffHeapFreeSize, OffHeapFreeSize); 
- 
+      .format(memberOffHeapFreeSize));
+    Assert.assertEquals(memberOffHeapFreeSize, OffHeapFreeSize);
+
   }
 
   @Ignore("WIP") // May be useful in near future
   @Test
   public void testOffHeapUsedSize() throws InterruptedException {
-	 
+
     String OffHeapUsedSizeString = driver.findElement(
-        By.id(MEMBER_VIEW_OFFHEAPUSEDSIZE_ID)).getText();
+      By.id(MEMBER_VIEW_OFFHEAPUSEDSIZE_ID)).getText();
     String OffHeapUsedSizetemp = OffHeapUsedSizeString.replaceAll("[a-zA-Z]",
-        "");
+      "");
     float OffHeapUsedSize = Float.parseFloat(OffHeapUsedSizetemp);
     float memberOffHeapUsedSize = Float.parseFloat(JMXProperties.getInstance()
-        .getProperty("member.M1.OffHeapUsedSize"));
+      .getProperty("member.M1.OffHeapUsedSize"));
     if (memberOffHeapUsedSize < 1048576) {
       memberOffHeapUsedSize = memberOffHeapUsedSize / 1024;
 
-    } else if (memberOffHeapUsedSize < 1073741824) {
+    }
+    else if (memberOffHeapUsedSize < 1073741824) {
       memberOffHeapUsedSize = memberOffHeapUsedSize / 1024 / 1024;
-    } else {
+    }
+    else {
       memberOffHeapUsedSize = memberOffHeapUsedSize / 1024 / 1024 / 1024;
     }
     memberOffHeapUsedSize = Float.parseFloat(new DecimalFormat("##.##")
-        .format(memberOffHeapUsedSize));
+      .format(memberOffHeapUsedSize));
     Assert.assertEquals(memberOffHeapUsedSize, OffHeapUsedSize);
   }
 
@@ -531,9 +536,9 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
     searchByIdAndClick("default_grid_button");
     searchByIdAndClick("M1&M1");
     String JVMPauses = driver.findElement(By.id(MEMBER_VIEW_JVMPAUSES_ID))
-        .getText();
+      .getText();
     String memberGcPausesAvg = JMXProperties.getInstance().getProperty(
-        "member.M1.JVMPauses");
+      "member.M1.JVMPauses");
     Assert.assertEquals(memberGcPausesAvg, JVMPauses);
   }
 
@@ -542,9 +547,9 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
     searchByIdAndClick("default_grid_button");
     searchByIdAndClick("M1&M1");
     String CPUUsagevalue = driver.findElement(By.id(MEMBER_VIEW_CPUUSAGE_ID))
-        .getText();
+      .getText();
     String memberCPUUsage = JMXProperties.getInstance().getProperty(
-        "member.M1.cpuUsage");
+      "member.M1.cpuUsage");
     Assert.assertEquals(memberCPUUsage, CPUUsagevalue);
   }
 
@@ -555,27 +560,27 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
     float ReadPerSec = Float.parseFloat(driver.findElement(By.id(MEMBER_VIEW_READPERSEC_ID)).getText());
     float memberReadPerSec = Float.parseFloat(JMXProperties.getInstance().getProperty("member.M1.averageReads"));
     memberReadPerSec = Float.parseFloat(new DecimalFormat("##.##")
-    .format(memberReadPerSec));
+      .format(memberReadPerSec));
     Assert.assertEquals(memberReadPerSec, ReadPerSec);
   }
 
- @Test
+  @Test
   public void testMemberAverageWrites() throws InterruptedException {
     testRgraphWidget();
     String WritePerSec = driver.findElement(By.id(MEMBER_VIEW_WRITEPERSEC_ID))
-        .getText();
+      .getText();
     String memberWritePerSec = JMXProperties.getInstance().getProperty(
-        "member.M1.averageWrites");
+      "member.M1.averageWrites");
     Assert.assertEquals(memberWritePerSec, WritePerSec);
   }
- 
+
 
   @Test
   public void testMemberGridViewData() throws InterruptedException {
     testRgraphWidget();
     searchByXPathAndClick(PulseTestLocators.MemberDetailsView.gridButtonXpath);
     // get the number of rows on the grid
-    List<WebElement> noOfRows = driver.findElements(By.xpath("//table[@id='memberRegionsList']/tbody/tr"));    
+    List<WebElement> noOfRows = driver.findElements(By.xpath("//table[@id='memberRegionsList']/tbody/tr"));
     String MemberRegionName = driver.findElement(By.xpath("//table[@id='memberRegionsList']/tbody/tr[2]/td[1]")).getText();
     String memberRegionName = JMXProperties.getInstance().getProperty("region.R1.name");
     Assert.assertEquals(memberRegionName, MemberRegionName);
@@ -583,7 +588,7 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
     String MemberRegionType = driver.findElement(By.xpath("//table[@id='memberRegionsList']/tbody/tr[2]/td[2]")).getText();
     String memberRegionType = JMXProperties.getInstance().getProperty("region.R1.regionType");
     Assert.assertEquals(memberRegionType, MemberRegionType);
-    
+
     String MemberRegionEntryCount = driver.findElement(By.xpath("//table[@id='memberRegionsList']/tbody/tr[2]/td[3]")).getText();
     String memberRegionEntryCount = JMXProperties.getInstance().getProperty("regionOnMember./R1.M1.entryCount");
     Assert.assertEquals(memberRegionEntryCount, MemberRegionEntryCount);
@@ -593,7 +598,7 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
   public void testDropDownList() throws InterruptedException {
     searchByIdAndClick("default_grid_button");
     searchByIdAndClick("M1&M1");
-  	searchByIdAndClick("memberName");
+    searchByIdAndClick("memberName");
     searchByLinkAndClick("M3");
     searchByIdAndClick("memberName");
     searchByLinkAndClick("M2");
@@ -615,7 +620,7 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
   public void testDataViewRegionPath() {
     String regionPath = driver.findElement(By.id(REGION_PATH_LABEL)).getText();
     String dataviewregionpath = JMXProperties.getInstance().getProperty(
-        "region.R1.fullPath");
+      "region.R1.fullPath");
     Assert.assertEquals(dataviewregionpath, regionPath);
   }
 
@@ -624,7 +629,7 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
   public void testDataViewRegionType() {
     String regionType = driver.findElement(By.id(REGION_TYPE_LABEL)).getText();
     String dataviewregiontype = JMXProperties.getInstance().getProperty(
-        "region.R1.regionType");
+      "region.R1.regionType");
     Assert.assertEquals(dataviewregiontype, regionType);
   }
 
@@ -632,9 +637,9 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
   @Test
   public void testDataViewEmptyNodes() {
     String regionEmptyNodes = driver.findElement(By.id(DATA_VIEW_EMPTYNODES))
-        .getText();
+      .getText();
     String dataviewEmptyNodes = JMXProperties.getInstance().getProperty(
-        "region.R1.emptyNodes");
+      "region.R1.emptyNodes");
     Assert.assertEquals(dataviewEmptyNodes, regionEmptyNodes);
   }
 
@@ -642,9 +647,9 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
   @Test
   public void testDataViewSystemRegionEntryCount() {
     String regionEntryCount = driver.findElement(By.id(DATA_VIEW_ENTRYCOUNT))
-        .getText();
+      .getText();
     String dataviewEntryCount = JMXProperties.getInstance().getProperty(
-        "region.R1.systemRegionEntryCount");
+      "region.R1.systemRegionEntryCount");
     Assert.assertEquals(dataviewEntryCount, regionEntryCount);
   }
 
@@ -652,9 +657,9 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
   @Test
   public void testDataViewPersistentEnabled() {
     String regionPersistence = driver.findElement(
-        By.id(REGION_PERSISTENCE_LABEL)).getText();
+      By.id(REGION_PERSISTENCE_LABEL)).getText();
     String dataviewregionpersistence = JMXProperties.getInstance().getProperty(
-        "region.R1.persistentEnabled");
+      "region.R1.persistentEnabled");
     Assert.assertEquals(dataviewregionpersistence, regionPersistence);
   }
 
@@ -662,9 +667,9 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
   @Test
   public void testDataViewDiskWritesRate() {
     String regionWrites = driver.findElement(By.id(DATA_VIEW_WRITEPERSEC))
-        .getText();
+      .getText();
     String dataviewRegionWrites = JMXProperties.getInstance().getProperty(
-        "region.R1.diskWritesRate");
+      "region.R1.diskWritesRate");
     Assert.assertEquals(dataviewRegionWrites, regionWrites);
   }
 
@@ -672,9 +677,9 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
   @Test
   public void testDataViewDiskReadsRate() {
     String regionReads = driver.findElement(By.id(DATA_VIEW_READPERSEC))
-        .getText();
+      .getText();
     String dataviewRegionReads = JMXProperties.getInstance().getProperty(
-        "region.R1.diskReadsRate");
+      "region.R1.diskReadsRate");
     Assert.assertEquals(dataviewRegionReads, regionReads);
   }
 
@@ -682,9 +687,9 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
   @Test
   public void testDataViewDiskUsage() {
     String regionMemoryUsed = driver.findElement(By.id(DATA_VIEW_USEDMEMORY))
-        .getText();
+      .getText();
     String dataviewMemoryUsed = JMXProperties.getInstance().getProperty(
-        "region.R1.diskUsage");
+      "region.R1.diskUsage");
     Assert.assertEquals(dataviewMemoryUsed, regionMemoryUsed);
     searchByLinkAndClick(QUERY_STATISTICS_LABEL);
   }
@@ -693,351 +698,350 @@ public abstract class PulseAbstractTest extends PulseBaseTest {
   @Test
   public void testDataViewGridValue() {
     String DataViewRegionName = driver.findElement(
-        By.xpath("//*[id('6')/x:td[1]]")).getText();
+      By.xpath("//*[id('6')/x:td[1]]")).getText();
     String dataViewRegionName = JMXProperties.getInstance().getProperty(
-        "region.R1.name");
+      "region.R1.name");
     Assert.assertEquals(dataViewRegionName, DataViewRegionName);
 
     String DataViewRegionType = driver.findElement(
-        By.xpath("//*[id('6')/x:td[2]")).getText();
+      By.xpath("//*[id('6')/x:td[2]")).getText();
     String dataViewRegionType = JMXProperties.getInstance().getProperty(
-        "region.R2.regionType");
+      "region.R2.regionType");
     Assert.assertEquals(dataViewRegionType, DataViewRegionType);
 
     String DataViewEntryCount = driver.findElement(
-        By.xpath("//*[id('6')/x:td[3]")).getText();
+      By.xpath("//*[id('6')/x:td[3]")).getText();
     String dataViewEntryCount = JMXProperties.getInstance().getProperty(
-        "region.R2.systemRegionEntryCount");
+      "region.R2.systemRegionEntryCount");
     Assert.assertEquals(dataViewEntryCount, DataViewEntryCount);
 
     String DataViewEntrySize = driver.findElement(
-        By.xpath("//*[id('6')/x:td[4]")).getText();
+      By.xpath("//*[id('6')/x:td[4]")).getText();
     String dataViewEntrySize = JMXProperties.getInstance().getProperty(
-        "region.R2.entrySize");
+      "region.R2.entrySize");
     Assert.assertEquals(dataViewEntrySize, DataViewEntrySize);
 
   }
-  
-  
+
+
   public void loadDataBrowserpage() {
-	  searchByLinkAndClick(DATA_BROWSER_LABEL);
-	  //Thread.sleep(7000);
+    searchByLinkAndClick(DATA_BROWSER_LABEL);
+    //Thread.sleep(7000);
   }
-  
+
   @Test
   public void testDataBrowserRegionName() throws InterruptedException {
-	  loadDataBrowserpage();
-	  String DataBrowserRegionName1 = driver.findElement(By.id(DATA_BROWSER_REGIONName1))
-			  .getText();
-	  String databrowserRegionNametemp1 = JMXProperties.getInstance().getProperty(
-		        "region.R1.name");
-	  String databrowserRegionName1 = databrowserRegionNametemp1.replaceAll("[\\/]", "");
-	  Assert.assertEquals(databrowserRegionName1, DataBrowserRegionName1);
-	  
-	  String DataBrowserRegionName2 = driver.findElement(By.id(DATA_BROWSER_REGIONName2))
-			  .getText();
-	  String databrowserRegionNametemp2 = JMXProperties.getInstance().getProperty(
-		        "region.R2.name");
-	  String databrowserRegionName2 = databrowserRegionNametemp2.replaceAll("[\\/]", "");
-	  Assert.assertEquals(databrowserRegionName2, DataBrowserRegionName2);
-	  
-	  String DataBrowserRegionName3 = driver.findElement(By.id(DATA_BROWSER_REGIONName3))
-			  .getText();
-	  String databrowserRegionNametemp3 = JMXProperties.getInstance().getProperty(
-		        "region.R3.name");
-	  String databrowserRegionName3 = databrowserRegionNametemp3.replaceAll("[\\/]", "");
-	  Assert.assertEquals(databrowserRegionName3, DataBrowserRegionName3);
-	        
+    loadDataBrowserpage();
+    String DataBrowserRegionName1 = driver.findElement(By.id(DATA_BROWSER_REGIONName1))
+      .getText();
+    String databrowserRegionNametemp1 = JMXProperties.getInstance().getProperty(
+      "region.R1.name");
+    String databrowserRegionName1 = databrowserRegionNametemp1.replaceAll("[\\/]", "");
+    Assert.assertEquals(databrowserRegionName1, DataBrowserRegionName1);
+
+    String DataBrowserRegionName2 = driver.findElement(By.id(DATA_BROWSER_REGIONName2))
+      .getText();
+    String databrowserRegionNametemp2 = JMXProperties.getInstance().getProperty(
+      "region.R2.name");
+    String databrowserRegionName2 = databrowserRegionNametemp2.replaceAll("[\\/]", "");
+    Assert.assertEquals(databrowserRegionName2, DataBrowserRegionName2);
+
+    String DataBrowserRegionName3 = driver.findElement(By.id(DATA_BROWSER_REGIONName3))
+      .getText();
+    String databrowserRegionNametemp3 = JMXProperties.getInstance().getProperty(
+      "region.R3.name");
+    String databrowserRegionName3 = databrowserRegionNametemp3.replaceAll("[\\/]", "");
+    Assert.assertEquals(databrowserRegionName3, DataBrowserRegionName3);
+
   }
-  
+
   @Test
   public void testDataBrowserRegionMembersVerificaition() throws InterruptedException {
-	  loadDataBrowserpage();
-	  searchByIdAndClick(DATA_BROWSER_REGION1_CHECKBOX);
-	  String DataBrowserMember1Name1 = driver.findElement(By.xpath("//label[@for='Member0']"))
-			  .getText();
-	  String DataBrowserMember1Name2 = driver.findElement(By.xpath("//label[@for='Member1']"))
-			  .getText();
-	  String DataBrowserMember1Name3 = driver.findElement(By.xpath("//label[@for='Member2']"))
-			  .getText();
-	  String databrowserMember1Names = JMXProperties.getInstance().getProperty(
-		        "region.R1.members");
-	  
-	  String databrowserMember1Names1 = databrowserMember1Names.substring(0, 2);
-	  Assert.assertEquals(databrowserMember1Names1, DataBrowserMember1Name1);
-	  
-	  String databrowserMember1Names2 = databrowserMember1Names.substring(3, 5);
-	  Assert.assertEquals(databrowserMember1Names2, DataBrowserMember1Name2);
-	  
-	  String databrowserMember1Names3 = databrowserMember1Names.substring(6, 8);
-	  Assert.assertEquals(databrowserMember1Names3, DataBrowserMember1Name3);
-	  searchByIdAndClick(DATA_BROWSER_REGION1_CHECKBOX);
-	  
-	  searchByIdAndClick(DATA_BROWSER_REGION2_CHECKBOX);
-	  String DataBrowserMember2Name1 = driver.findElement(By.xpath("//label[@for='Member0']"))
-			  .getText();
-	  String DataBrowserMember2Name2 = driver.findElement(By.xpath("//label[@for='Member1']"))
-			  .getText();
-	  String databrowserMember2Names = JMXProperties.getInstance().getProperty(
-		        "region.R2.members");
-	  
-	  String databrowserMember2Names1 = databrowserMember2Names.substring(0, 2);
-	  Assert.assertEquals(databrowserMember2Names1, DataBrowserMember2Name1);
-	  
-	  String databrowserMember2Names2 = databrowserMember2Names.substring(3, 5);
-	  Assert.assertEquals(databrowserMember2Names2, DataBrowserMember2Name2);
-	  searchByIdAndClick(DATA_BROWSER_REGION2_CHECKBOX);
-	  
-	  searchByIdAndClick(DATA_BROWSER_REGION3_CHECKBOX);
-	  String DataBrowserMember3Name1 = driver.findElement(By.xpath("//label[@for='Member0']"))
-			  .getText();
-	  String DataBrowserMember3Name2 = driver.findElement(By.xpath("//label[@for='Member1']"))
-			  .getText();
-	  String databrowserMember3Names = JMXProperties.getInstance().getProperty(
-		        "region.R3.members");
-	  
-	  String databrowserMember3Names1 = databrowserMember3Names.substring(0, 2);
-	  Assert.assertEquals(databrowserMember3Names1, DataBrowserMember3Name1);
-	  
-	  String databrowserMember3Names2 = databrowserMember3Names.substring(3, 5);
-	  Assert.assertEquals(databrowserMember3Names2, DataBrowserMember3Name2);
-	  searchByIdAndClick(DATA_BROWSER_REGION3_CHECKBOX);
+    loadDataBrowserpage();
+    searchByIdAndClick(DATA_BROWSER_REGION1_CHECKBOX);
+    String DataBrowserMember1Name1 = driver.findElement(By.xpath("//label[@for='Member0']"))
+      .getText();
+    String DataBrowserMember1Name2 = driver.findElement(By.xpath("//label[@for='Member1']"))
+      .getText();
+    String DataBrowserMember1Name3 = driver.findElement(By.xpath("//label[@for='Member2']"))
+      .getText();
+    String databrowserMember1Names = JMXProperties.getInstance().getProperty(
+      "region.R1.members");
+
+    String databrowserMember1Names1 = databrowserMember1Names.substring(0, 2);
+    Assert.assertEquals(databrowserMember1Names1, DataBrowserMember1Name1);
+
+    String databrowserMember1Names2 = databrowserMember1Names.substring(3, 5);
+    Assert.assertEquals(databrowserMember1Names2, DataBrowserMember1Name2);
+
+    String databrowserMember1Names3 = databrowserMember1Names.substring(6, 8);
+    Assert.assertEquals(databrowserMember1Names3, DataBrowserMember1Name3);
+    searchByIdAndClick(DATA_BROWSER_REGION1_CHECKBOX);
+
+    searchByIdAndClick(DATA_BROWSER_REGION2_CHECKBOX);
+    String DataBrowserMember2Name1 = driver.findElement(By.xpath("//label[@for='Member0']"))
+      .getText();
+    String DataBrowserMember2Name2 = driver.findElement(By.xpath("//label[@for='Member1']"))
+      .getText();
+    String databrowserMember2Names = JMXProperties.getInstance().getProperty(
+      "region.R2.members");
+
+    String databrowserMember2Names1 = databrowserMember2Names.substring(0, 2);
+    Assert.assertEquals(databrowserMember2Names1, DataBrowserMember2Name1);
+
+    String databrowserMember2Names2 = databrowserMember2Names.substring(3, 5);
+    Assert.assertEquals(databrowserMember2Names2, DataBrowserMember2Name2);
+    searchByIdAndClick(DATA_BROWSER_REGION2_CHECKBOX);
+
+    searchByIdAndClick(DATA_BROWSER_REGION3_CHECKBOX);
+    String DataBrowserMember3Name1 = driver.findElement(By.xpath("//label[@for='Member0']"))
+      .getText();
+    String DataBrowserMember3Name2 = driver.findElement(By.xpath("//label[@for='Member1']"))
+      .getText();
+    String databrowserMember3Names = JMXProperties.getInstance().getProperty(
+      "region.R3.members");
+
+    String databrowserMember3Names1 = databrowserMember3Names.substring(0, 2);
+    Assert.assertEquals(databrowserMember3Names1, DataBrowserMember3Name1);
+
+    String databrowserMember3Names2 = databrowserMember3Names.substring(3, 5);
+    Assert.assertEquals(databrowserMember3Names2, DataBrowserMember3Name2);
+    searchByIdAndClick(DATA_BROWSER_REGION3_CHECKBOX);
   }
-  
+
   @Test
   public void testDataBrowserColocatedRegions() throws InterruptedException {
-	  loadDataBrowserpage();
-	  String databrowserMemberNames1 = JMXProperties.getInstance().getProperty(
-		        "region.R1.members");
-	  String databrowserMemberNames2 = JMXProperties.getInstance().getProperty(
-		        "region.R2.members");
-	  String databrowserMemberNames3 = JMXProperties.getInstance().getProperty(
-		        "region.R3.members");
-	  
-	  if((databrowserMemberNames1.matches(databrowserMemberNames2+"(.*)"))) {
-		  if((databrowserMemberNames1.matches(databrowserMemberNames3+"(.*)"))) {
-			  if((databrowserMemberNames2.matches(databrowserMemberNames3+"(.*)"))) {
-				  System.out.println("R1, R2 and R3 are colocated regions");
-			  }   
-		  }
-	  }
-	  searchByIdAndClick(DATA_BROWSER_REGION1_CHECKBOX);
-	  searchByLinkAndClick(DATA_BROWSER_COLOCATED_REGION);
-	  String DataBrowserColocatedRegion1 = driver.findElement(By.id(DATA_BROWSER_COLOCATED_REGION_NAME1))
-			  .getText();
-	  String DataBrowserColocatedRegion2 = driver.findElement(By.id(DATA_BROWSER_COLOCATED_REGION_NAME2))
-			  .getText();
-	  String DataBrowserColocatedRegion3 = driver.findElement(By.id(DATA_BROWSER_COLOCATED_REGION_NAME3))
-			  .getText();
-	  
-	  String databrowserColocatedRegiontemp1 = JMXProperties.getInstance().getProperty(
-		        "region.R1.name");
-	  String databrowserColocatedRegion1 = databrowserColocatedRegiontemp1.replaceAll("[\\/]", "");
-	  
-	  String databrowserColocatedRegiontemp2 = JMXProperties.getInstance().getProperty(
-		        "region.R2.name");
-	  String databrowserColocatedRegion2 = databrowserColocatedRegiontemp2.replaceAll("[\\/]", "");
-	  
-	  String databrowserColocatedRegiontemp3 = JMXProperties.getInstance().getProperty(
-		        "region.R3.name");
-	  String databrowserColocatedRegion3 = databrowserColocatedRegiontemp3.replaceAll("[\\/]", "");
-	  
-	  Assert.assertEquals(databrowserColocatedRegion1, DataBrowserColocatedRegion1);
-	  Assert.assertEquals(databrowserColocatedRegion2, DataBrowserColocatedRegion2);
-	  Assert.assertEquals(databrowserColocatedRegion3, DataBrowserColocatedRegion3);
-	  
+    loadDataBrowserpage();
+    String databrowserMemberNames1 = JMXProperties.getInstance().getProperty(
+      "region.R1.members");
+    String databrowserMemberNames2 = JMXProperties.getInstance().getProperty(
+      "region.R2.members");
+    String databrowserMemberNames3 = JMXProperties.getInstance().getProperty(
+      "region.R3.members");
+
+    if ((databrowserMemberNames1.matches(databrowserMemberNames2 + "(.*)"))) {
+      if ((databrowserMemberNames1.matches(databrowserMemberNames3 + "(.*)"))) {
+        if ((databrowserMemberNames2.matches(databrowserMemberNames3 + "(.*)"))) {
+          System.out.println("R1, R2 and R3 are colocated regions");
+        }
+      }
+    }
+    searchByIdAndClick(DATA_BROWSER_REGION1_CHECKBOX);
+    searchByLinkAndClick(DATA_BROWSER_COLOCATED_REGION);
+    String DataBrowserColocatedRegion1 = driver.findElement(By.id(DATA_BROWSER_COLOCATED_REGION_NAME1))
+      .getText();
+    String DataBrowserColocatedRegion2 = driver.findElement(By.id(DATA_BROWSER_COLOCATED_REGION_NAME2))
+      .getText();
+    String DataBrowserColocatedRegion3 = driver.findElement(By.id(DATA_BROWSER_COLOCATED_REGION_NAME3))
+      .getText();
+
+    String databrowserColocatedRegiontemp1 = JMXProperties.getInstance().getProperty(
+      "region.R1.name");
+    String databrowserColocatedRegion1 = databrowserColocatedRegiontemp1.replaceAll("[\\/]", "");
+
+    String databrowserColocatedRegiontemp2 = JMXProperties.getInstance().getProperty(
+      "region.R2.name");
+    String databrowserColocatedRegion2 = databrowserColocatedRegiontemp2.replaceAll("[\\/]", "");
+
+    String databrowserColocatedRegiontemp3 = JMXProperties.getInstance().getProperty(
+      "region.R3.name");
+    String databrowserColocatedRegion3 = databrowserColocatedRegiontemp3.replaceAll("[\\/]", "");
+
+    Assert.assertEquals(databrowserColocatedRegion1, DataBrowserColocatedRegion1);
+    Assert.assertEquals(databrowserColocatedRegion2, DataBrowserColocatedRegion2);
+    Assert.assertEquals(databrowserColocatedRegion3, DataBrowserColocatedRegion3);
+
   }
 
   @Ignore("WIP") // clusterDetails element not found on Data Browser page. No assertions in test
   @Test
   public void testDataBrowserQueryValidation() throws IOException, InterruptedException {
-	  loadDataBrowserpage();
-	  WebElement textArea = driver.findElement(By.id("dataBrowserQueryText"));
-	  textArea.sendKeys("query1");
-	  WebElement executeButton = driver.findElement(By.id("btnExecuteQuery"));
-	  executeButton.click();
-	  String QueryResultHeader1 = driver.findElement(By.xpath("//div[@id='clusterDetails']/div/div/span[@class='n-title']")).getText();
-	  double count = 0,countBuffer=0,countLine=0;
-	  String lineNumber = "";
-	  String filePath = "E:\\springsource\\springsourceWS\\Pulse-Cedar\\src\\main\\resources\\testQueryResultSmall.txt";
-	  BufferedReader br;
-	  String line = "";
-	  br = new BufferedReader(new FileReader(filePath));
-	  while((line = br.readLine()) != null)
-	  {
-		  countLine++;
-          String[] words = line.split(" ");
-
-          for (String word : words) {
-            if (word.equals(QueryResultHeader1)) {
-              count++;
-              countBuffer++;
-            }
-          }
-	  }  
+    loadDataBrowserpage();
+    WebElement textArea = driver.findElement(By.id("dataBrowserQueryText"));
+    textArea.sendKeys("query1");
+    WebElement executeButton = driver.findElement(By.id("btnExecuteQuery"));
+    executeButton.click();
+    String QueryResultHeader1 = driver.findElement(By.xpath("//div[@id='clusterDetails']/div/div/span[@class='n-title']")).getText();
+    double count = 0, countBuffer = 0, countLine = 0;
+    String lineNumber = "";
+    String filePath = "E:\\springsource\\springsourceWS\\Pulse-Cedar\\src\\main\\resources\\testQueryResultSmall.txt";
+    BufferedReader br;
+    String line = "";
+    br = new BufferedReader(new FileReader(filePath));
+    while ((line = br.readLine()) != null) {
+      countLine++;
+      String[] words = line.split(" ");
+
+      for (String word : words) {
+        if (word.equals(QueryResultHeader1)) {
+          count++;
+          countBuffer++;
+        }
+      }
+    }
   }
-  
- public void testTreeMapPopUpData(String S1, String gridIcon) {
-	  for (int i = 1; i <=3; i++) {
-		  searchByLinkAndClick(CLUSTER_VIEW_LABEL);
-		  if (gridIcon.equals(SERVER_GROUP_GRID_ID)) {
-			  WebElement ServerGroupRadio = driver.findElement(By.xpath("//label[@for='radio-servergroups']"));
-			  ServerGroupRadio.click();
-		  }
-		  if (gridIcon.equals(REDUNDANCY_GRID_ID)) {
-			  WebElement ServerGroupRadio = driver.findElement(By.xpath("//label[@for='radio-redundancyzones']"));
-			  ServerGroupRadio.click();
-		  }
-		  searchByIdAndClick(gridIcon);
-		  WebElement TreeMapMember = driver.findElement(By.xpath("//div[@id='" + S1 + "M"+ (i) + "']/div"));
-		  Actions builder = new Actions(driver);
-		  builder.clickAndHold(TreeMapMember).perform();
-		  int j = 1;
-		  String CPUUsageM1temp = driver.findElement(By.xpath("//div[@id='_tooltip']/div/div/div[2]/div/div[2]/div"))
-				  .getText();
-		  String CPUUsageM1 = CPUUsageM1temp.replaceAll("[\\%]", "");
-		  String cpuUsageM1 = JMXProperties.getInstance().getProperty(
-			        "member.M" + (i) + ".cpuUsage");
-		  Assert.assertEquals(cpuUsageM1, CPUUsageM1);
-			  
-		  String MemoryUsageM1temp = driver.findElement(By.xpath("//div[@id='_tooltip']/div/div/div[2]/div[" + (j + 1) + "]/div[2]/div"))
-				  .getText();
-		  String MemoryUsageM1 = MemoryUsageM1temp.replaceAll("MB", "");
-		  String memoryUsageM1 = JMXProperties.getInstance().getProperty(
-				  "member.M" + (i) + ".UsedMemory");
-		  Assert.assertEquals(memoryUsageM1, MemoryUsageM1);
-		  
-		  String LoadAvgM1 = driver.findElement(By.xpath("//div[@id='_tooltip']/div/div/div[2]/div[" + (j + 2) + "]/div[2]/div"))
-				  .getText();
-		  String loadAvgM1 = JMXProperties.getInstance().getProperty(
-				  "member.M" + (i) + ".loadAverage");
-		  Assert.assertEquals(df2.format(Double.valueOf(loadAvgM1)), LoadAvgM1);
-		  
-		  
-		  String ThreadsM1 = driver.findElement(By.xpath("//div[@id='_tooltip']/div/div/div[2]/div[" + (j + 3) + "]/div[2]/div"))
-				  .getText();
-		  String threadsM1 = JMXProperties.getInstance().getProperty(
-				  "member.M" + (i) + ".numThreads");
-		  Assert.assertEquals(threadsM1, ThreadsM1);
-		  
-		  String SocketsM1 = driver.findElement(By.xpath("//div[@id='_tooltip']/div/div/div[2]/div[" + (j + 4) + "]/div[2]/div"))
-				  .getText();
-		  String socketsM1 = JMXProperties.getInstance().getProperty(
-				  "member.M" + (i) + ".totalFileDescriptorOpen");
-		  Assert.assertEquals(socketsM1, SocketsM1);
-          builder.moveToElement(TreeMapMember).release().perform();
-		  }
-	  }
-  
+
+  public void testTreeMapPopUpData(String S1, String gridIcon) {
+    for (int i = 1; i <= 3; i++) {
+      searchByLinkAndClick(CLUSTER_VIEW_LABEL);
+      if (gridIcon.equals(SERVER_GROUP_GRID_ID)) {
+        WebElement ServerGroupRadio = driver.findElement(By.xpath("//label[@for='radio-servergroups']"));
+        ServerGroupRadio.click();
+      }
+      if (gridIcon.equals(REDUNDANCY_GRID_ID)) {
+        WebElement ServerGroupRadio = driver.findElement(By.xpath("//label[@for='radio-redundancyzones']"));
+        ServerGroupRadio.click();
+      }
+      searchByIdAndClick(gridIcon);
+      WebElement TreeMapMember = driver.findElement(By.xpath("//div[@id='" + S1 + "M" + (i) + "']/div"));
+      Actions builder = new Actions(driver);
+      builder.clickAndHold(TreeMapMember).perform();
+      int j = 1;
+      String CPUUsageM1temp = driver.findElement(By.xpath("//div[@id='_tooltip']/div/div/div[2]/div/div[2]/div"))
+        .getText();
+      String CPUUsageM1 = CPUUsageM1temp.replaceAll("[\\%]", "");
+      String cpuUsageM1 = JMXProperties.getInstance().getProperty(
+        "member.M" + (i) + ".cpuUsage");
+      Assert.assertEquals(cpuUsageM1, CPUUsageM1);
+
+      String MemoryUsageM1temp = driver.findElement(By.xpath("//div[@id='_tooltip']/div/div/div[2]/div[" + (j + 1) + "]/div[2]/div"))
+        .getText();
+      String MemoryUsageM1 = MemoryUsageM1temp.replaceAll("MB", "");
+      String memoryUsageM1 = JMXProperties.getInstance().getProperty(
+        "member.M" + (i) + ".UsedMemory");
+      Assert.assertEquals(memoryUsageM1, MemoryUsageM1);
+
+      String LoadAvgM1 = driver.findElement(By.xpath("//div[@id='_tooltip']/div/div/div[2]/div[" + (j + 2) + "]/div[2]/div"))
+        .getText();
+      String loadAvgM1 = JMXProperties.getInstance().getProperty(
+        "member.M" + (i) + ".loadAverage");
+      Assert.assertEquals(df2.format(Double.valueOf(loadAvgM1)), LoadAvgM1);
+
+
+      String ThreadsM1 = driver.findElement(By.xpath("//div[@id='_tooltip']/div/div/div[2]/div[" + (j + 3) + "]/div[2]/div"))
+        .getText();
+      String threadsM1 = JMXProperties.getInstance().getProperty(
+        "member.M" + (i) + ".numThreads");
+      Assert.assertEquals(threadsM1, ThreadsM1);
+
+      String SocketsM1 = driver.findElement(By.xpath("//div[@id='_tooltip']/div/div/div[2]/div[" + (j + 4) + "]/div[2]/div"))
+        .getText();
+      String socketsM1 = JMXProperties.getInstance().getProperty(
+        "member.M" + (i) + ".totalFileDescriptorOpen");
+      Assert.assertEquals(socketsM1, SocketsM1);
+      builder.moveToElement(TreeMapMember).release().perform();
+    }
+  }
+
   @Test
   public void testTopologyPopUpData() {
-	  testTreeMapPopUpData("", CLUSTER_VIEW_GRID_ID); 
+    testTreeMapPopUpData("", CLUSTER_VIEW_GRID_ID);
   }
-  
+
   @Test
   public void testServerGroupTreeMapPopUpData() {
-	  testTreeMapPopUpData("SG1(!)", SERVER_GROUP_GRID_ID);
+    testTreeMapPopUpData("SG1(!)", SERVER_GROUP_GRID_ID);
   }
-  
+
   @Test
   public void testDataViewTreeMapPopUpData() {
-	  searchByLinkAndClick(CLUSTER_VIEW_LABEL);
-	  searchByLinkAndClick(DATA_DROPDOWN_ID);
-	  WebElement TreeMapMember = driver.findElement(By.id("GraphTreeMapClusterData-canvas"));
-	  Actions builder = new Actions(driver);
-	  builder.clickAndHold(TreeMapMember).perform();
-	  String RegionType = driver.findElement(By.xpath("//div[@id='_tooltip']/div/div/div[2]/div/div[2]/div"))
-			  .getText();
-	  String regionType = JMXProperties.getInstance().getProperty(
-			  "region.R2.regionType");
-	  Assert.assertEquals(regionType, RegionType);
-	  
-	  String EntryCount = driver.findElement(By.xpath("//div[@id='_tooltip']/div/div/div[2]/div[2]/div[2]/div"))
-			  .getText();
-	  String entryCount = JMXProperties.getInstance().getProperty(
-			  "region.R2.systemRegionEntryCount");
-	  Assert.assertEquals(entryCount, EntryCount);
-	  
-	  String EntrySizetemp = driver.findElement(By.xpath("//div[@id='_tooltip']/div/div/div[2]/div[3]/div[2]/div"))
-			  .getText();
-	  float EntrySize = Float.parseFloat(EntrySizetemp);
-	  float entrySize = Float.parseFloat(JMXProperties.getInstance().getProperty(
-			  "region.R2.entrySize"));
-	  entrySize = entrySize / 1024 / 1024;
-	  entrySize = Float.parseFloat(new DecimalFormat("##.####")
+    searchByLinkAndClick(CLUSTER_VIEW_LABEL);
+    searchByLinkAndClick(DATA_DROPDOWN_ID);
+    WebElement TreeMapMember = driver.findElement(By.id("GraphTreeMapClusterData-canvas"));
+    Actions builder = new Actions(driver);
+    builder.clickAndHold(TreeMapMember).perform();
+    String RegionType = driver.findElement(By.xpath("//div[@id='_tooltip']/div/div/div[2]/div/div[2]/div"))
+      .getText();
+    String regionType = JMXProperties.getInstance().getProperty(
+      "region.R2.regionType");
+    Assert.assertEquals(regionType, RegionType);
+
+    String EntryCount = driver.findElement(By.xpath("//div[@id='_tooltip']/div/div/div[2]/div[2]/div[2]/div"))
+      .getText();
+    String entryCount = JMXProperties.getInstance().getProperty(
+      "region.R2.systemRegionEntryCount");
+    Assert.assertEquals(entryCount, EntryCount);
+
+    String EntrySizetemp = driver.findElement(By.xpath("//div[@id='_tooltip']/div/div/div[2]/div[3]/div[2]/div"))
+      .getText();
+    float EntrySize = Float.parseFloat(EntrySizetemp);
+    float entrySize = Float.parseFloat(JMXProperties.getInstance().getProperty(
+      "region.R2.entrySize"));
+    entrySize = entrySize / 1024 / 1024;
+    entrySize = Float.parseFloat(new DecimalFormat("##.####")
       .format(entrySize));
-	  Assert.assertEquals(entrySize, EntrySize);  
-	  builder.moveToElement(TreeMapMember).release().perform();
+    Assert.assertEquals(entrySize, EntrySize);
+    builder.moveToElement(TreeMapMember).release().perform();
   }
-  
+
   @Test
   public void testRegionViewTreeMapPopUpData() {
-	  searchByLinkAndClick(CLUSTER_VIEW_LABEL);
-	  searchByLinkAndClick(DATA_DROPDOWN_ID);
-	  WebElement TreeMapMember = driver.findElement(By.id("GraphTreeMapClusterData-canvas"));
-	  TreeMapMember.click();
+    searchByLinkAndClick(CLUSTER_VIEW_LABEL);
+    searchByLinkAndClick(DATA_DROPDOWN_ID);
+    WebElement TreeMapMember = driver.findElement(By.id("GraphTreeMapClusterData-canvas"));
+    TreeMapMember.click();
   }
 
   @Ignore("WIP")
   @Test
-  public void testNumberOfRegions() throws InterruptedException{
-	  
-		driver.findElement(By.xpath("//a[text()='Data Browser']")).click();
-		
-		 Thread.sleep(1000);
-		 List<WebElement> regionList = driver.findElements(By.xpath("//ul[@id='treeDemo']/li"));		 
-		 String regions = JMXProperties.getInstance().getProperty("regions");
-		 String []regionName = regions.split(" ");
-		 for (String string : regionName) {
-		}
-		 //JMXProperties.getInstance().getProperty("region.R1.regionType");
-		int i=1; 
-		for (WebElement webElement : regionList) {
-			//webElement.getAttribute(arg0)
-			i++;
-		}
-		
-		driver.findElement(By.id("treeDemo_1_check")).click();		
-		
-		List<WebElement> memeberList = driver.findElements(By.xpath("//ul[@id='membersList']/li"));
-		int j=0;
-		for (WebElement webElement : memeberList) {
-			j++;
-		}  
+  public void testNumberOfRegions() throws InterruptedException {
+
+    driver.findElement(By.xpath("//a[text()='Data Browser']")).click();
+
+    Thread.sleep(1000);
+    List<WebElement> regionList = driver.findElements(By.xpath("//ul[@id='treeDemo']/li"));
+    String regions = JMXProperties.getInstance().getProperty("regions");
+    String[] regionName = regions.split(" ");
+    for (String string : regionName) {
+    }
+    //JMXProperties.getInstance().getProperty("region.R1.regionType");
+    int i = 1;
+    for (WebElement webElement : regionList) {
+      //webElement.getAttribute(arg0)
+      i++;
+    }
+
+    driver.findElement(By.id("treeDemo_1_check")).click();
+
+    List<WebElement> memberList = driver.findElements(By.xpath("//ul[@id='membersList']/li"));
+    int j = 0;
+    for (WebElement webElement : memberList) {
+      j++;
+    }
   }
 
   @Ignore("WIP")
   @Test
-  public void testDataBrowser(){
-	  
-	  driver.findElement(By.linkText("Data Browser")).click();
-	 // WebElement dataBrowserLabel = driver.findElement(By.xpath(""));
-	  WebDriverWait wait = new WebDriverWait(driver, 20);
-	  wait.until(ExpectedConditions.visibilityOf(driver.findElement(By.xpath("//label[text()='Data Browser']"))));
-	  
-	
-	// Verify all elements must be displayed on data browser screen 
-	  Assert.assertTrue(driver.findElement(By.xpath("//a[text()='Data Regions']")).isDisplayed());	
-	  Assert.assertTrue(driver.findElement(By.id("linkColocatedRegions")).isDisplayed());	  
-	  Assert.assertTrue(driver.findElement(By.linkText("All Regions")).isDisplayed());
-	  
-	  Assert.assertTrue(driver.findElement(By.xpath("//a[text()='Region Members']")).isDisplayed());
-	  
-	  Assert.assertTrue(driver.findElement(By.xpath("//a[text()='Queries']")).isDisplayed());
-	  Assert.assertTrue(driver.findElement(By.xpath("//label[text()='Query Editor']")).isDisplayed());
-	  Assert.assertTrue(driver.findElement(By.xpath("//label[text()='Result']")).isDisplayed());
-	  Assert.assertTrue(driver.findElement(By.xpath("//input[@value='Export Result']")).isDisplayed());
-	  Assert.assertTrue(driver.findElement(By.id("btnExecuteQuery")).isDisplayed());
-	  Assert.assertTrue(driver.findElement(By.xpath("//input[@value='Clear']")).isDisplayed());
-	  Assert.assertTrue(driver.findElement(By.id("dataBrowserQueryText")).isDisplayed());
-	  
-	  Assert.assertTrue(driver.findElement(By.id("historyIcon")).isDisplayed());
-	  
-	  //Actual query execution
-	  
-	  driver.findElement(By.id("dataBrowserQueryText")).sendKeys("Query1");
-
-	  // Assert data regions are displayed 
-	  Assert.assertTrue(driver.findElement(By.id("treeDemo_1")).isDisplayed());
+  public void testDataBrowser() {
+
+    driver.findElement(By.linkText("Data Browser")).click();
+    // WebElement dataBrowserLabel = driver.findElement(By.xpath(""));
+    WebDriverWait wait = new WebDriverWait(driver, 20);
+    wait.until(ExpectedConditions.visibilityOf(driver.findElement(By.xpath("//label[text()='Data Browser']"))));
+
+
+    // Verify all elements must be displayed on data browser screen
+    Assert.assertTrue(driver.findElement(By.xpath("//a[text()='Data Regions']")).isDisplayed());
+    Assert.assertTrue(driver.findElement(By.id("linkColocatedRegions")).isDisplayed());
+    Assert.assertTrue(driver.findElement(By.linkText("All Regions")).isDisplayed());
+
+    Assert.assertTrue(driver.findElement(By.xpath("//a[text()='Region Members']")).isDisplayed());
+
+    Assert.assertTrue(driver.findElement(By.xpath("//a[text()='Queries']")).isDisplayed());
+    Assert.assertTrue(driver.findElement(By.xpath("//label[text()='Query Editor']")).isDisplayed());
+    Assert.assertTrue(driver.findElement(By.xpath("//label[text()='Result']")).isDisplayed());
+    Assert.assertTrue(driver.findElement(By.xpath("//input[@value='Export Result']")).isDisplayed());
+    Assert.assertTrue(driver.findElement(By.id("btnExecuteQuery")).isDisplayed());
+    Assert.assertTrue(driver.findElement(By.xpath("//input[@value='Clear']")).isDisplayed());
+    Assert.assertTrue(driver.findElement(By.id("dataBrowserQueryText")).isDisplayed());
+
+    Assert.assertTrue(driver.findElement(By.id("historyIcon")).isDisplayed());
+
+    //Actual query execution
+
+    driver.findElement(By.id("dataBrowserQueryText")).sendKeys("Query1");
+
+    // Assert data regions are displayed
+    Assert.assertTrue(driver.findElement(By.id("treeDemo_1")).isDisplayed());
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/Region.java
----------------------------------------------------------------------
diff --git a/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/Region.java b/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/Region.java
index 1770dd5..ae8cc92 100644
--- a/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/Region.java
+++ b/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/Region.java
@@ -38,7 +38,7 @@ public class Region extends JMXBaseBean implements RegionMBean {
     "enableOffHeapMemory", "scope", "diskStoreName",
     "diskSynchronous" };
   private static OpenType[] regAttItemTypes = { SimpleType.STRING,
-    SimpleType.BOOLEAN, SimpleType.BOOLEAN, SimpleType.STRING, 
+    SimpleType.BOOLEAN, SimpleType.STRING,
     SimpleType.STRING, SimpleType.BOOLEAN };
   private static CompositeType listRegionAttributesCompData = null;
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-web/src/main/webapp/WEB-INF/gemfire-servlet.xml
----------------------------------------------------------------------
diff --git a/geode-web/src/main/webapp/WEB-INF/gemfire-servlet.xml b/geode-web/src/main/webapp/WEB-INF/gemfire-servlet.xml
index eb1a533..ce65933 100644
--- a/geode-web/src/main/webapp/WEB-INF/gemfire-servlet.xml
+++ b/geode-web/src/main/webapp/WEB-INF/gemfire-servlet.xml
@@ -53,7 +53,7 @@ limitations under the License.
   </mvc:annotation-driven>
 
   <mvc:interceptors>
-    <bean class="com.gemstone.gemfire.management.internal.web.controllers.support.EnvironmentVariablesHandlerInterceptor"/>
+    <bean class="com.gemstone.gemfire.management.internal.web.controllers.support.LoginHandlerInterceptor"/>
   </mvc:interceptors>
 
 </beans>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-web/src/test/java/com/gemstone/gemfire/management/internal/web/controllers/support/EnvironmentVariablesHandlerInterceptorJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-web/src/test/java/com/gemstone/gemfire/management/internal/web/controllers/support/EnvironmentVariablesHandlerInterceptorJUnitTest.java b/geode-web/src/test/java/com/gemstone/gemfire/management/internal/web/controllers/support/EnvironmentVariablesHandlerInterceptorJUnitTest.java
deleted file mode 100644
index 4b9e303..0000000
--- a/geode-web/src/test/java/com/gemstone/gemfire/management/internal/web/controllers/support/EnvironmentVariablesHandlerInterceptorJUnitTest.java
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.management.internal.web.controllers.support;
-
-import com.gemstone.gemfire.test.junit.categories.UnitTest;
-import edu.umd.cs.mtc.MultithreadedTestCase;
-import edu.umd.cs.mtc.TestFramework;
-import org.jmock.Expectations;
-import org.jmock.Mockery;
-import org.jmock.lib.concurrent.Synchroniser;
-import org.jmock.lib.legacy.ClassImposteriser;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import javax.servlet.http.HttpServletRequest;
-import java.util.Enumeration;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-
-import static org.junit.Assert.*;
-
-/**
- * The EnvironmentVariablesHandlerInterceptorJUnitTest class is a test suite of test cases to test the contract
- * and functionality of the Spring HandlerInterceptor, EnvironmentVariablesHandlerInterceptor class.
- * 
- * @see org.jmock.Mockery
- * @see org.junit.Assert
- * @see org.junit.Test
- * @since 8.0
- */
-@Category(UnitTest.class)
-public class EnvironmentVariablesHandlerInterceptorJUnitTest {
-
-  private Mockery mockContext;
-
-  @Before
-  public void setUp() {
-    mockContext = new Mockery();
-    mockContext.setImposteriser(ClassImposteriser.INSTANCE);
-    mockContext.setThreadingPolicy(new Synchroniser());
-  }
-
-  @After
-  public void tearDown() {
-    mockContext.assertIsSatisfied();
-    mockContext = null;
-  }
-
-  protected String createEnvironmentVariable(final String name) {
-    return (EnvironmentVariablesHandlerInterceptor.ENVIRONMENT_VARIABLE_REQUEST_PARAMETER_PREFIX + name);
-  }
-
-  protected <T> Enumeration<T> enumeration(final Iterator<T> iterator) {
-    return new Enumeration<T>() {
-      public boolean hasMoreElements() {
-        return iterator.hasNext();
-      }
-      public T nextElement() {
-        return iterator.next();
-      }
-    };
-  }
-
-  @Test
-  public void testPreHandleAfterCompletion() throws Exception {
-    final Map<String, String> requestParameters = new HashMap<>(2);
-    final Map<String, String> requestHeaders = new HashMap<>();
-
-    requestParameters.put("parameter", "one");
-    requestParameters.put(createEnvironmentVariable("variable"), "two");
-
-    final HttpServletRequest mockHttpRequest = mockContext.mock(HttpServletRequest.class, "testPreHandleAfterCompletion.HttpServletRequest");
-
-    mockContext.checking(new Expectations() {{
-      oneOf(mockHttpRequest).getParameterNames();
-      will(returnValue(enumeration(requestParameters.keySet().iterator())));
-      oneOf(mockHttpRequest).getHeaderNames();
-      will(returnValue(enumeration(requestHeaders.keySet().iterator())));
-      oneOf(mockHttpRequest).getParameter(with(equal(createEnvironmentVariable("variable"))));
-      will(returnValue(requestParameters.get(createEnvironmentVariable("variable"))));
-    }});
-
-    EnvironmentVariablesHandlerInterceptor handlerInterceptor = new EnvironmentVariablesHandlerInterceptor();
-
-    Map<String, String> envBefore = EnvironmentVariablesHandlerInterceptor.getEnvironment();
-
-    assertNotNull(envBefore);
-    assertTrue(envBefore.isEmpty());
-    assertTrue(handlerInterceptor.preHandle(mockHttpRequest, null, null));
-
-    Map<String, String> envSet = EnvironmentVariablesHandlerInterceptor.getEnvironment();
-
-    assertNotNull(envSet);
-    assertNotSame(envBefore, envSet);
-    assertEquals(1, envSet.size());
-    assertTrue(envSet.containsKey("variable"));
-    assertEquals("two", envSet.get("variable"));
-
-    handlerInterceptor.afterCompletion(mockHttpRequest, null, null, null);
-
-    Map<String, String> envAfter = EnvironmentVariablesHandlerInterceptor.getEnvironment();
-
-    assertNotNull(envAfter);
-    assertTrue(envAfter.isEmpty());
-  }
-
-  @Test
-  public void testHandlerInterceptorThreadSafety() throws Throwable {
-    TestFramework.runOnce(new HandlerInterceptorThreadSafetyMultiThreadedTestCase());
-  }
-
-  protected final class HandlerInterceptorThreadSafetyMultiThreadedTestCase extends MultithreadedTestCase {
-
-    private EnvironmentVariablesHandlerInterceptor handlerInterceptor;
-
-    private HttpServletRequest mockHttpRequestOne;
-    private HttpServletRequest mockHttpRequestTwo;
-
-    @Override
-    public void initialize() {
-      super.initialize();
-
-      final Map<String, String> requestParametersOne = new HashMap<>(3);
-      final Map<String, String> requestHeaders = new HashMap<>();
-
-      requestParametersOne.put("param", "one");
-      requestParametersOne.put(createEnvironmentVariable("STAGE"), "test");
-      requestParametersOne.put(createEnvironmentVariable("GEMFIRE"), "/path/to/gemfire/700");
-
-      mockHttpRequestOne = mockContext.mock(HttpServletRequest.class, "testHandlerInterceptorThreadSafety.HttpServletRequest.1");
-
-      mockContext.checking(new Expectations() {{
-        oneOf(mockHttpRequestOne).getParameterNames();
-        will(returnValue(enumeration(requestParametersOne.keySet().iterator())));
-        oneOf(mockHttpRequestOne).getHeaderNames();
-        will(returnValue(enumeration(requestHeaders.keySet().iterator())));
-        oneOf(mockHttpRequestOne).getParameter(with(equal(createEnvironmentVariable("STAGE"))));
-        will(returnValue(requestParametersOne.get(createEnvironmentVariable("STAGE"))));
-        oneOf(mockHttpRequestOne).getParameter(with(equal(createEnvironmentVariable("GEMFIRE"))));
-        will(returnValue(requestParametersOne.get(createEnvironmentVariable("GEMFIRE"))));
-      }});
-
-      mockHttpRequestTwo = mockContext.mock(HttpServletRequest.class, "testHandlerInterceptorThreadSafety.HttpServletRequest.2");
-
-      final Map<String, String> requestParametersTwo = new HashMap<>(3);
-
-      requestParametersTwo.put("parameter", "two");
-      requestParametersTwo.put(createEnvironmentVariable("HOST"), "localhost");
-      requestParametersTwo.put(createEnvironmentVariable("GEMFIRE"), "/path/to/gemfire/75");
-
-      mockContext.checking(new Expectations() {{
-        oneOf(mockHttpRequestTwo).getParameterNames();
-        will(returnValue(enumeration(requestParametersTwo.keySet().iterator())));
-        oneOf(mockHttpRequestTwo).getHeaderNames();
-        will(returnValue(enumeration(requestHeaders.keySet().iterator())));
-        oneOf(mockHttpRequestTwo).getParameter(with(equal(createEnvironmentVariable("HOST"))));
-        will(returnValue(requestParametersTwo.get(createEnvironmentVariable("HOST"))));
-        oneOf(mockHttpRequestTwo).getParameter(with(equal(createEnvironmentVariable("GEMFIRE"))));
-        will(returnValue(requestParametersTwo.get(createEnvironmentVariable("GEMFIRE"))));
-      }});
-
-      handlerInterceptor =  new EnvironmentVariablesHandlerInterceptor();
-    }
-
-    public void thread1() throws Exception {
-      assertTick(0);
-      Thread.currentThread().setName("HTTP Request Processing Thread 1");
-
-      Map<String, String> env = EnvironmentVariablesHandlerInterceptor.getEnvironment();
-
-      assertNotNull(env);
-      assertTrue(env.isEmpty());
-      assertTrue(handlerInterceptor.preHandle(mockHttpRequestOne, null, null));
-
-      env = EnvironmentVariablesHandlerInterceptor.getEnvironment();
-
-      assertNotNull(env);
-      assertEquals(2, env.size());
-      assertFalse(env.containsKey("param"));
-      assertFalse(env.containsKey("parameter"));
-      assertFalse(env.containsKey("HOST"));
-      assertEquals("test", env.get("STAGE"));
-      assertEquals("/path/to/gemfire/700", env.get("GEMFIRE"));
-
-      waitForTick(2);
-
-      env = EnvironmentVariablesHandlerInterceptor.getEnvironment();
-
-      assertNotNull(env);
-      assertEquals(2, env.size());
-      assertFalse(env.containsKey("param"));
-      assertFalse(env.containsKey("parameter"));
-      assertFalse(env.containsKey("HOST"));
-      assertEquals("test", env.get("STAGE"));
-      assertEquals("/path/to/gemfire/700", env.get("GEMFIRE"));
-
-      waitForTick(4);
-
-      env = EnvironmentVariablesHandlerInterceptor.getEnvironment();
-
-      assertNotNull(env);
-      assertEquals(2, env.size());
-      assertFalse(env.containsKey("param"));
-      assertFalse(env.containsKey("parameter"));
-      assertFalse(env.containsKey("HOST"));
-      assertEquals("test", env.get("STAGE"));
-      assertEquals("/path/to/gemfire/700", env.get("GEMFIRE"));
-
-      handlerInterceptor.afterCompletion(mockHttpRequestOne, null, null, null);
-
-      env = EnvironmentVariablesHandlerInterceptor.getEnvironment();
-
-      assertNotNull(env);
-      assertTrue(env.isEmpty());
-    }
-
-    public void thread2() throws Exception {
-      assertTick(0);
-      Thread.currentThread().setName("HTTP Request Processing Thread 2");
-      waitForTick(1);
-
-      Map<String, String> env = EnvironmentVariablesHandlerInterceptor.getEnvironment();
-
-      assertNotNull(env);
-      assertTrue(env.isEmpty());
-      assertTrue(handlerInterceptor.preHandle(mockHttpRequestTwo, null, null));
-
-      env = EnvironmentVariablesHandlerInterceptor.getEnvironment();
-
-      assertNotNull(env);
-      assertEquals(2, env.size());
-      assertFalse(env.containsKey("parameter"));
-      assertFalse(env.containsKey("param"));
-      assertFalse(env.containsKey("STAGE"));
-      assertEquals("localhost", env.get("HOST"));
-      assertEquals("/path/to/gemfire/75", env.get("GEMFIRE"));
-
-      waitForTick(3);
-
-      handlerInterceptor.afterCompletion(mockHttpRequestTwo, null, null, null);
-
-      env = EnvironmentVariablesHandlerInterceptor.getEnvironment();
-
-      assertNotNull(env);
-      assertTrue(env.isEmpty());
-    }
-
-    @Override
-    public void finish() {
-      super.finish();
-      handlerInterceptor = null;
-    }
-  }
-
-}
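
Both the test removed above and its replacement added below exercise the same contract: a Spring HandlerInterceptor that, in preHandle, copies specially prefixed request parameters into a per-thread "environment" map and, in afterCompletion, clears that map again. The following is a minimal, self-contained sketch of that contract for orientation only; the class name, prefix value, and internals are assumptions for illustration and are not Geode's actual implementation.

  import java.util.Collections;
  import java.util.Enumeration;
  import java.util.HashMap;
  import java.util.Map;

  import javax.servlet.http.HttpServletRequest;
  import javax.servlet.http.HttpServletResponse;

  import org.springframework.web.servlet.handler.HandlerInterceptorAdapter;

  // Hypothetical sketch only -- not the Geode class under test.
  public class EnvironmentCapturingInterceptor extends HandlerInterceptorAdapter {

    // Placeholder prefix; the real interceptor exposes its own constant.
    static final String PREFIX = "env.";

    private static final ThreadLocal<Map<String, String>> ENV =
        ThreadLocal.withInitial(Collections::emptyMap);

    public static Map<String, String> getEnvironment() {
      return ENV.get();
    }

    @Override
    public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) {
      Map<String, String> env = new HashMap<>();
      Enumeration<String> names = request.getParameterNames();
      while (names.hasMoreElements()) {
        String name = names.nextElement();
        if (name.startsWith(PREFIX)) {
          // Strip the prefix so callers see plain variable names such as "STAGE".
          env.put(name.substring(PREFIX.length()), request.getParameter(name));
        }
      }
      ENV.set(env);
      return true; // let the request continue to the handler
    }

    @Override
    public void afterCompletion(HttpServletRequest request, HttpServletResponse response, Object handler, Exception ex) {
      ENV.remove(); // reset to the empty per-thread environment so pooled threads can be reused safely
    }
  }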

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7c38f0d8/geode-web/src/test/java/com/gemstone/gemfire/management/internal/web/controllers/support/LoginHandlerInterceptorJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-web/src/test/java/com/gemstone/gemfire/management/internal/web/controllers/support/LoginHandlerInterceptorJUnitTest.java b/geode-web/src/test/java/com/gemstone/gemfire/management/internal/web/controllers/support/LoginHandlerInterceptorJUnitTest.java
new file mode 100644
index 0000000..ef405db
--- /dev/null
+++ b/geode-web/src/test/java/com/gemstone/gemfire/management/internal/web/controllers/support/LoginHandlerInterceptorJUnitTest.java
@@ -0,0 +1,274 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.web.controllers.support;
+
+import static org.junit.Assert.*;
+
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import javax.servlet.http.HttpServletRequest;
+
+import com.gemstone.gemfire.test.junit.categories.UnitTest;
+
+import edu.umd.cs.mtc.MultithreadedTestCase;
+import edu.umd.cs.mtc.TestFramework;
+
+import org.jmock.Expectations;
+import org.jmock.Mockery;
+import org.jmock.lib.concurrent.Synchroniser;
+import org.jmock.lib.legacy.ClassImposteriser;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * The LoginHandlerInterceptorJUnitTest class is a suite of test cases exercising the contract
+ * and functionality of the Spring HandlerInterceptor implementation, LoginHandlerInterceptor.
+ * 
+ * @see org.jmock.Mockery
+ * @see org.junit.Assert
+ * @see org.junit.Test
+ * @since 8.0
+ */
+@Category(UnitTest.class)
+public class LoginHandlerInterceptorJUnitTest {
+
+  private Mockery mockContext;
+
+  @Before
+  public void setUp() {
+    mockContext = new Mockery();
+    mockContext.setImposteriser(ClassImposteriser.INSTANCE);
+    mockContext.setThreadingPolicy(new Synchroniser());
+  }
+
+  @After
+  public void tearDown() {
+    mockContext.assertIsSatisfied();
+    mockContext = null;
+  }
+
+  protected String createEnvironmentVariable(final String name) {
+    return (LoginHandlerInterceptor.ENVIRONMENT_VARIABLE_REQUEST_PARAMETER_PREFIX + name);
+  }
+
+  protected <T> Enumeration<T> enumeration(final Iterator<T> iterator) {
+    return new Enumeration<T>() {
+      public boolean hasMoreElements() {
+        return iterator.hasNext();
+      }
+      public T nextElement() {
+        return iterator.next();
+      }
+    };
+  }
+
+  @Test
+  public void testPreHandleAfterCompletion() throws Exception {
+    final Map<String, String> requestParameters = new HashMap<>(2);
+    final Map<String, String> requestHeaders = new HashMap<>();
+
+    requestParameters.put("parameter", "one");
+    requestParameters.put(createEnvironmentVariable("variable"), "two");
+
+    final HttpServletRequest mockHttpRequest = mockContext.mock(HttpServletRequest.class, "testPreHandleAfterCompletion.HttpServletRequest");
+
+    mockContext.checking(new Expectations() {{
+      oneOf(mockHttpRequest).getParameterNames();
+      will(returnValue(enumeration(requestParameters.keySet().iterator())));
+      oneOf(mockHttpRequest).getHeaderNames();
+      will(returnValue(enumeration(requestHeaders.keySet().iterator())));
+      oneOf(mockHttpRequest).getParameter(with(equal(createEnvironmentVariable("variable"))));
+      will(returnValue(requestParameters.get(createEnvironmentVariable("variable"))));
+    }});
+
+    LoginHandlerInterceptor handlerInterceptor = new LoginHandlerInterceptor();
+
+    Map<String, String> envBefore = LoginHandlerInterceptor.getEnvironment();
+
+    assertNotNull(envBefore);
+    assertTrue(envBefore.isEmpty());
+    assertTrue(handlerInterceptor.preHandle(mockHttpRequest, null, null));
+
+    Map<String, String> envSet = LoginHandlerInterceptor.getEnvironment();
+
+    assertNotNull(envSet);
+    assertNotSame(envBefore, envSet);
+    assertEquals(1, envSet.size());
+    assertTrue(envSet.containsKey("variable"));
+    assertEquals("two", envSet.get("variable"));
+
+    handlerInterceptor.afterCompletion(mockHttpRequest, null, null, null);
+
+    Map<String, String> envAfter = LoginHandlerInterceptor.getEnvironment();
+
+    assertNotNull(envAfter);
+    assertTrue(envAfter.isEmpty());
+  }
+
+  @Test
+  public void testHandlerInterceptorThreadSafety() throws Throwable {
+    TestFramework.runOnce(new HandlerInterceptorThreadSafetyMultiThreadedTestCase());
+  }
+
+  protected final class HandlerInterceptorThreadSafetyMultiThreadedTestCase extends MultithreadedTestCase {
+
+    private LoginHandlerInterceptor handlerInterceptor;
+
+    private HttpServletRequest mockHttpRequestOne;
+    private HttpServletRequest mockHttpRequestTwo;
+
+    @Override
+    public void initialize() {
+      super.initialize();
+
+      final Map<String, String> requestParametersOne = new HashMap<>(3);
+      final Map<String, String> requestHeaders = new HashMap<>();
+
+      requestParametersOne.put("param", "one");
+      requestParametersOne.put(createEnvironmentVariable("STAGE"), "test");
+      requestParametersOne.put(createEnvironmentVariable("GEMFIRE"), "/path/to/gemfire/700");
+
+      mockHttpRequestOne = mockContext.mock(HttpServletRequest.class, "testHandlerInterceptorThreadSafety.HttpServletRequest.1");
+
+      mockContext.checking(new Expectations() {{
+        oneOf(mockHttpRequestOne).getParameterNames();
+        will(returnValue(enumeration(requestParametersOne.keySet().iterator())));
+        oneOf(mockHttpRequestOne).getHeaderNames();
+        will(returnValue(enumeration(requestHeaders.keySet().iterator())));
+        oneOf(mockHttpRequestOne).getParameter(with(equal(createEnvironmentVariable("STAGE"))));
+        will(returnValue(requestParametersOne.get(createEnvironmentVariable("STAGE"))));
+        oneOf(mockHttpRequestOne).getParameter(with(equal(createEnvironmentVariable("GEMFIRE"))));
+        will(returnValue(requestParametersOne.get(createEnvironmentVariable("GEMFIRE"))));
+      }});
+
+      mockHttpRequestTwo = mockContext.mock(HttpServletRequest.class, "testHandlerInterceptorThreadSafety.HttpServletRequest.2");
+
+      final Map<String, String> requestParametersTwo = new HashMap<>(3);
+
+      requestParametersTwo.put("parameter", "two");
+      requestParametersTwo.put(createEnvironmentVariable("HOST"), "localhost");
+      requestParametersTwo.put(createEnvironmentVariable("GEMFIRE"), "/path/to/gemfire/75");
+
+      mockContext.checking(new Expectations() {{
+        oneOf(mockHttpRequestTwo).getParameterNames();
+        will(returnValue(enumeration(requestParametersTwo.keySet().iterator())));
+        oneOf(mockHttpRequestTwo).getHeaderNames();
+        will(returnValue(enumeration(requestHeaders.keySet().iterator())));
+        oneOf(mockHttpRequestTwo).getParameter(with(equal(createEnvironmentVariable("HOST"))));
+        will(returnValue(requestParametersTwo.get(createEnvironmentVariable("HOST"))));
+        oneOf(mockHttpRequestTwo).getParameter(with(equal(createEnvironmentVariable("GEMFIRE"))));
+        will(returnValue(requestParametersTwo.get(createEnvironmentVariable("GEMFIRE"))));
+      }});
+
+      handlerInterceptor =  new LoginHandlerInterceptor();
+    }
+
+    public void thread1() throws Exception {
+      assertTick(0);
+      Thread.currentThread().setName("HTTP Request Processing Thread 1");
+
+      Map<String, String> env = LoginHandlerInterceptor.getEnvironment();
+
+      assertNotNull(env);
+      assertTrue(env.isEmpty());
+      assertTrue(handlerInterceptor.preHandle(mockHttpRequestOne, null, null));
+
+      env = LoginHandlerInterceptor.getEnvironment();
+
+      assertNotNull(env);
+      assertEquals(2, env.size());
+      assertFalse(env.containsKey("param"));
+      assertFalse(env.containsKey("parameter"));
+      assertFalse(env.containsKey("HOST"));
+      assertEquals("test", env.get("STAGE"));
+      assertEquals("/path/to/gemfire/700", env.get("GEMFIRE"));
+
+      waitForTick(2);
+
+      env = LoginHandlerInterceptor.getEnvironment();
+
+      assertNotNull(env);
+      assertEquals(2, env.size());
+      assertFalse(env.containsKey("param"));
+      assertFalse(env.containsKey("parameter"));
+      assertFalse(env.containsKey("HOST"));
+      assertEquals("test", env.get("STAGE"));
+      assertEquals("/path/to/gemfire/700", env.get("GEMFIRE"));
+
+      waitForTick(4);
+
+      env = LoginHandlerInterceptor.getEnvironment();
+
+      assertNotNull(env);
+      assertEquals(2, env.size());
+      assertFalse(env.containsKey("param"));
+      assertFalse(env.containsKey("parameter"));
+      assertFalse(env.containsKey("HOST"));
+      assertEquals("test", env.get("STAGE"));
+      assertEquals("/path/to/gemfire/700", env.get("GEMFIRE"));
+
+      handlerInterceptor.afterCompletion(mockHttpRequestOne, null, null, null);
+
+      env = LoginHandlerInterceptor.getEnvironment();
+
+      assertNotNull(env);
+      assertTrue(env.isEmpty());
+    }
+
+    public void thread2() throws Exception {
+      assertTick(0);
+      Thread.currentThread().setName("HTTP Request Processing Thread 2");
+      waitForTick(1);
+
+      Map<String, String> env = LoginHandlerInterceptor.getEnvironment();
+
+      assertNotNull(env);
+      assertTrue(env.isEmpty());
+      assertTrue(handlerInterceptor.preHandle(mockHttpRequestTwo, null, null));
+
+      env = LoginHandlerInterceptor.getEnvironment();
+
+      assertNotNull(env);
+      assertEquals(2, env.size());
+      assertFalse(env.containsKey("parameter"));
+      assertFalse(env.containsKey("param"));
+      assertFalse(env.containsKey("STAGE"));
+      assertEquals("localhost", env.get("HOST"));
+      assertEquals("/path/to/gemfire/75", env.get("GEMFIRE"));
+
+      waitForTick(3);
+
+      handlerInterceptor.afterCompletion(mockHttpRequestTwo, null, null, null);
+
+      env = LoginHandlerInterceptor.getEnvironment();
+
+      assertNotNull(env);
+      assertTrue(env.isEmpty());
+    }
+
+    @Override
+    public void finish() {
+      super.finish();
+      handlerInterceptor = null;
+    }
+  }
+
+}


[34/63] [abbrv] incubator-geode git commit: GEODE-17: geode-pulse test depends on geode-core test

Posted by kl...@apache.org.
GEODE-17: geode-pulse test depends on geode-core test


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/f446bbe8
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/f446bbe8
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/f446bbe8

Branch: refs/heads/feature/GEODE-1276
Commit: f446bbe8987b389e2263e04edd4df2471afd5ca2
Parents: 3d8f54c
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Thu Apr 28 13:25:36 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Thu Apr 28 13:25:36 2016 -0700

----------------------------------------------------------------------
 geode-pulse/build.gradle | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f446bbe8/geode-pulse/build.gradle
----------------------------------------------------------------------
diff --git a/geode-pulse/build.gradle b/geode-pulse/build.gradle
index 5d8e890..e53a698 100755
--- a/geode-pulse/build.gradle
+++ b/geode-pulse/build.gradle
@@ -70,7 +70,7 @@ dependencies {
 
   testCompile project(':geode-junit')
   testCompile project(':geode-core')
-  testCompile project(path: ':geode-core', configuration: 'testOutput')
+  testCompile files(project(':geode-core').sourceSets.test.output)
 
   testCompile 'org.seleniumhq.selenium:selenium-firefox-driver:' + project.'selenium.version'
   testCompile 'org.seleniumhq.selenium:selenium-api:' + project.'selenium.version'
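
For context on the one-line change above: both forms put geode-core's compiled test classes on geode-pulse's test classpath, but they get there differently. The sketch below is illustrative only (the surrounding build files are not shown in this mail), using only standard Gradle constructs.

  dependencies {
    // Old form: resolves a custom 'testOutput' configuration that the geode-core
    // build would have to declare and attach artifacts to.
    // testCompile project(path: ':geode-core', configuration: 'testOutput')

    // New form: bypasses any custom configuration and adds geode-core's compiled
    // test classes and resources directly to the test compile classpath.
    testCompile files(project(':geode-core').sourceSets.test.output)
  }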


[10/63] [abbrv] incubator-geode git commit: GEODE-1072: Removing HDFS related code

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer.java
deleted file mode 100644
index e8abb38..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer.java
+++ /dev/null
@@ -1,2004 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.EnumMap;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.NoSuchElementException;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ConcurrentSkipListSet;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import com.gemstone.gemfire.internal.hll.CardinalityMergeException;
-import com.gemstone.gemfire.internal.hll.HyperLogLog;
-import com.gemstone.gemfire.internal.hll.ICardinality;
-import com.gemstone.gemfire.internal.hll.MurmurHash;
-import org.apache.commons.lang.NotImplementedException;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.util.ShutdownHookManager;
-
-import com.gemstone.gemfire.InternalGemFireException;
-import com.gemstone.gemfire.cache.CacheClosedException;
-import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.internal.QueuedPersistentEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.SortedHoplogPersistedEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSCompactionManager.CompactionRequest;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog.HoplogReader;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog.HoplogReaderActivityListener;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog.HoplogWriter;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog.Meta;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.HoplogUtil;
-import com.gemstone.gemfire.internal.HeapDataOutputStream;
-import com.gemstone.gemfire.internal.cache.ForceReattemptException;
-import com.gemstone.gemfire.internal.cache.PrimaryBucketException;
-import com.gemstone.gemfire.internal.cache.execute.BucketMovedException;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics.IOOperation;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.TrackedReference;
-import com.gemstone.gemfire.internal.concurrent.ConcurrentHashSet;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
-import org.apache.hadoop.hbase.util.FSUtils;
-
-/**
- * Manages sorted oplog files for a bucket. An instance per bucket will exist in
- * each PR
- * 
- */
-public class HdfsSortedOplogOrganizer extends AbstractHoplogOrganizer<SortedHoplogPersistedEvent> {
-  public static final int AVG_NUM_KEYS_PER_INDEX_BLOCK = 200;
-  
-  // all valid sorted hoplogs will follow the following name pattern
-  public static final String SORTED_HOPLOG_REGEX = HOPLOG_NAME_REGEX + "("
-      + FLUSH_HOPLOG_EXTENSION + "|" + MINOR_HOPLOG_EXTENSION + "|"
-      + MAJOR_HOPLOG_EXTENSION + ")";
-  public static final Pattern SORTED_HOPLOG_PATTERN = Pattern.compile(SORTED_HOPLOG_REGEX);
-  
-  //Amount of time before deleting old temporary files
-  final long TMP_FILE_EXPIRATION_TIME_MS = Long.getLong(HoplogConfig.TMP_FILE_EXPIRATION, HoplogConfig.TMP_FILE_EXPIRATION_DEFAULT);
-  
-  static float RATIO = HoplogConfig.COMPACTION_FILE_RATIO_DEFAULT;
-
-  // Compacter for this bucket
-  private Compactor compacter;
-    
-  private final HoplogReadersController hoplogReadersController;
-  private AtomicLong previousCleanupTimestamp = new AtomicLong(Long.MIN_VALUE);
-
-  /**
-   * The default HLL constant; gives an accuracy of about 3.25%.
-   * Public only for testing the upgrade from 1.3 to 1.4.
-   */
-  public static double HLL_CONSTANT = 0.03;
-  /**
-   * This estimator keeps track of this buckets entry count. This value is
-   * affected by flush and compaction cycles
-   */
-  private volatile ICardinality bucketSize = new HyperLogLog(HLL_CONSTANT);
-  //A set of tmp files that existed when this bucket organizer was originally
-  //created. These may still be open by the old primary, or they may be
-  //abandoned files.
-  private LinkedList<FileStatus> oldTmpFiles;
-
-  private ConcurrentMap<Hoplog, Boolean> tmpFiles = new ConcurrentHashMap<Hoplog, Boolean>();
-
-  protected volatile boolean organizerClosed = false;
-
-  /**
-   * For the 1.4 release we are changing the HLL_CONSTANT which will make the
-   * old persisted HLLs incompatible with the new HLLs. To fix this we will
-   * force a major compaction when the system starts up so that we will only
-   * have new HLLs in the system (see bug 51403)
-   */
-  private boolean startCompactionOnStartup = false;
-
-  /**
-   * @param region
-   *          Region manager instance. Instances of hdfs listener instance,
-   *          stats collector, file system, etc are shared by all buckets of a
-   *          region and provided by region manager instance
-   * @param bucketId bucket id to be managed by this organizer
-   * @throws IOException
-   */
-  public HdfsSortedOplogOrganizer(HdfsRegionManager region, int bucketId) throws IOException{
-    super(region, bucketId);
-    
-    String val = System.getProperty(HoplogConfig.COMPACTION_FILE_RATIO);
-    try {
-      RATIO = Float.parseFloat(val);
-    } catch (Exception e) {
-    }
-
-    hoplogReadersController = new HoplogReadersController();
-    
-    // initialize with all the files in the directory
-    List<Hoplog> hoplogs = identifyAndLoadSortedOplogs(true);
-    if (logger.isDebugEnabled()) {
-      logger.debug("{}Initializing bucket with existing hoplogs, count = " + hoplogs.size(), logPrefix);
-    }
-    for (Hoplog hoplog : hoplogs) {
-      addSortedOplog(hoplog, false, true);
-    }
-
-    // initialize sequence to the current maximum
-    sequence = new AtomicInteger(findMaxSequenceNumber(hoplogs));
-    
-    initOldTmpFiles();
-    
-    FileSystem fs = store.getFileSystem();
-    Path cleanUpIntervalPath = new Path(store.getHomeDir(), HoplogConfig.CLEAN_UP_INTERVAL_FILE_NAME); 
-    if (!fs.exists(cleanUpIntervalPath)) {
-      long intervalDurationMillis = store.getPurgeInterval() * 60 * 1000;
-      HoplogUtil.exposeCleanupIntervalMillis(fs, cleanUpIntervalPath, intervalDurationMillis);
-    }
-
-    if (startCompactionOnStartup) {
-      forceCompactionOnVersionUpgrade();
-      if (logger.isInfoEnabled()) {
-        logger.info(LocalizedStrings.HOPLOG_MAJOR_COMPACTION_SCHEDULED_FOR_BETTER_ESTIMATE);
-      }
-    }
-  }
-
-  /**
-   * Iterates on the input buffer and persists it in a new sorted oplog. This operation is
-   * synchronous and blocks the thread.
-   */
-  @Override
-  public void flush(Iterator<? extends QueuedPersistentEvent> iterator, final int count)
-      throws IOException, ForceReattemptException {
-    assert iterator != null;
-
-    if (logger.isDebugEnabled())
-      logger.debug("{}Initializing flush operation", logPrefix);
-    
-    final Hoplog so = getTmpSortedOplog(null, FLUSH_HOPLOG_EXTENSION);
-    HoplogWriter writer = null;
-    ICardinality localHLL = new HyperLogLog(HLL_CONSTANT);
-    
-    // variables for updating stats
-    long start = stats.getFlush().begin();
-    int byteCount = 0;
-    
-    try {
-      /**MergeGemXDHDFSToGFE changed the following statement as the code of HeapDataOutputStream is not merged */
-      //HeapDataOutputStream out = new HeapDataOutputStream();
-      
-      try {
-        writer = this.store.getSingletonWriter().runSerially(new Callable<Hoplog.HoplogWriter>() {
-          @Override
-          public HoplogWriter call() throws Exception {
-            return so.createWriter(count);
-          }
-        });
-      } catch (Exception e) {
-        if (e instanceof IOException) {
-          throw (IOException)e;
-        }
-        throw new IOException(e);
-      }
-
-      while (iterator.hasNext() && !this.organizerClosed) {
-        HeapDataOutputStream out = new HeapDataOutputStream(1024, null);
-        
-        QueuedPersistentEvent item = iterator.next();
-        item.toHoplogEventBytes(out);
-        byte[] valueBytes = out.toByteArray();
-        writer.append(item.getRawKey(), valueBytes);
-        
-        // add key length and value length to stats byte counter
-        byteCount += (item.getRawKey().length + valueBytes.length);
-
-        // increment size only if entry is not deleted
-        if (!isDeletedEntry(valueBytes, 0)) {
-          int hash = MurmurHash.hash(item.getRawKey());
-          localHLL.offerHashed(hash);
-        }
-        /**MergeGemXDHDFSToGFE how to clear for reuse. Leaving it for Darrel to merge this change*/
-        //out.clearForReuse();
-      }
-      if (organizerClosed)
-        throw new BucketMovedException("The current bucket is moved BucketID: "+  
-            this.bucketId + " Region name: " +  this.regionManager.getRegion().getName());
-      
-      // append completed. provide cardinality and close writer
-      writer.close(buildMetaData(localHLL));
-      writer = null;
-    } catch (IOException e) {
-      stats.getFlush().error(start);
-      try {
-        e = handleWriteHdfsIOError(writer, so, e);
-      } finally {
-        //Set the writer to null because handleWriteHDFSIOError has
-        //already closed the writer.
-        writer = null;
-      }
-      throw e;
-    } catch (BucketMovedException e) {
-      stats.getFlush().error(start);
-      deleteTmpFile(writer, so);
-      writer = null;
-      throw e;
-    } finally {
-      if (writer != null) {
-        writer.close();
-      }
-    }
-
-    try{
-      
-      // ping secondaries before making the file a legitimate file to ensure 
-      // that in case of split brain, no other vm has taken up as primary. #50110.  
-      pingSecondaries();
-      
-      // rename file and check if renaming was successful
-      synchronized (changePrimarylockObject) {
-        if (!organizerClosed)
-          makeLegitimate(so);
-        else 
-          throw new BucketMovedException("The current bucket is moved BucketID: "+  
-              this.bucketId + " Region name: " +  this.regionManager.getRegion().getName());
-      }
-      try {
-        so.getSize();
-      } catch (IllegalStateException e) {
-        throw new IOException("Failed to rename hoplog file:" + so.getFileName());
-      }
-      
-      //Disabling this assertion due to bug 49740
-      // check to make sure the sequence number is correct
-//      if (ENABLE_INTEGRITY_CHECKS) {
-//        Assert.assertTrue(getSequenceNumber(so) == findMaxSequenceNumber(identifyAndLoadSortedOplogs(false)), 
-//            "Invalid sequence number detected for " + so.getFileName());
-//      }
-      
-      // record the file for future maintenance and reads
-      addSortedOplog(so, false, true);
-      stats.getFlush().end(byteCount, start);
-      incrementDiskUsage(so.getSize());
-    } catch (BucketMovedException e) {
-      stats.getFlush().error(start);
-      deleteTmpFile(writer, so);
-      writer = null;
-      throw e;
-    } catch (IOException e) {
-      stats.getFlush().error(start);
-      logger.warn(LocalizedStrings.HOPLOG_FLUSH_OPERATION_FAILED, e);
-      throw e;
-    }
-
-    submitCompactionRequests();
-  }
-
-
-  /**
-   * store cardinality information in metadata
-   * @param localHLL the hll estimate for this hoplog only
-   */
-  private EnumMap<Meta, byte[]> buildMetaData(ICardinality localHLL) throws IOException {
-    EnumMap<Meta, byte[]> map = new EnumMap<Hoplog.Meta, byte[]>(Meta.class);
-    map.put(Meta.LOCAL_CARDINALITY_ESTIMATE_V2, localHLL.getBytes());
-    return map;
-  }
-
-  private void submitCompactionRequests() throws IOException {
-    CompactionRequest req;
-    
-    // determine if a major compaction is needed and create a compaction request
-    // with compaction manager
-    if (store.getMajorCompaction()) {
-      if (isMajorCompactionNeeded()) {
-        req = new CompactionRequest(regionFolder, bucketId, getCompactor(), true);
-        HDFSCompactionManager.getInstance(store).submitRequest(req);
-      }
-    }
-    
-    // submit a minor compaction task. It will be ignored if there is no work to
-    // be done.
-    if (store.getMinorCompaction()) {
-      req = new CompactionRequest(regionFolder, bucketId, getCompactor(), false);
-      HDFSCompactionManager.getInstance(store).submitRequest(req);
-    }
-  }
-
-  /**
-   * @return true if the oldest hoplog was created 1 major compaction interval ago
-   */
-  private boolean isMajorCompactionNeeded() throws IOException {
-    // major compaction interval in milliseconds
-    
-    long majorCInterval = ((long)store.getMajorCompactionInterval()) * 60 * 1000;
-
-    Hoplog oplog = hoplogReadersController.getOldestHoplog();
-    if (oplog == null) {
-      return false;
-    }
-    
-    long oldestFileTime = oplog.getModificationTimeStamp();
-    long now = System.currentTimeMillis();
-    if (logger.isDebugEnabled()) {
-      logger.debug("{}Checking oldest hop " + oplog.getFileName()
-          + " for majorCompactionInterval=" + majorCInterval
-          + " + now=" + now, logPrefix);
-    }
-    if (oldestFileTime > 0l && oldestFileTime < (now - majorCInterval)) {
-      return true;
-    }
-    return false;
-  }
-
-  @Override
-  public SortedHoplogPersistedEvent read(byte[] key) throws IOException {
-    long startTime = stats.getRead().begin();
-    String user = logger.isDebugEnabled() ? "Read" : null;
-    
-    // collect snapshot of hoplogs
-    List<TrackedReference<Hoplog>> hoplogs = null;
-    hoplogs = hoplogReadersController.getTrackedSortedOplogList(user);
-    try {
-      // search for the key in order starting with the youngest oplog
-      for (TrackedReference<Hoplog> hoplog : hoplogs) {
-        HoplogReader reader = hoplog.get().getReader();
-        byte[] val = reader.read(key);
-        if (val != null) {
-          // value found in a younger hoplog. stop iteration
-          SortedHoplogPersistedEvent eventObj = deserializeValue(val);
-          stats.getRead().end(val.length, startTime);
-          return eventObj;
-        }
-      }
-    } catch (IllegalArgumentException e) {
-      if (IOException.class.isAssignableFrom(e.getCause().getClass())) {
-        throw handleIOError((IOException) e.getCause());
-      } else {
-        throw e;
-      }
-    } catch (IOException e) {
-      throw handleIOError(e);
-    } catch (HDFSIOException e) {
-        throw handleIOError(e);
-    } finally {
-      hoplogReadersController.releaseHoplogs(hoplogs, user);
-    }
-    
-    stats.getRead().end(0, startTime);
-    return null;
-  }
-
-  protected IOException handleIOError(IOException e) {
-    // expose the error wrapped inside remote exception
-    if (e instanceof RemoteException) {
-      return ((RemoteException) e).unwrapRemoteException();
-    } 
-    
-    checkForSafeError(e);
-    
-    // it is not a safe error. let the caller handle it
-    return e;
-  }
-  
-  protected HDFSIOException handleIOError(HDFSIOException e) {
-    checkForSafeError(e);
-    return e;
-  }
-
-  protected void checkForSafeError(Exception e) {
-    boolean safeError = ShutdownHookManager.get().isShutdownInProgress();
-    if (safeError) {
-      // IOException because of closed file system. This happens when member is
-      // shutting down
-      if (logger.isDebugEnabled())
-        logger.debug("IO error caused by filesystem shutdown", e);
-      throw new CacheClosedException("IO error caused by filesystem shutdown", e);
-    } 
-
-    if(isClosed()) {
-      //If the hoplog organizer is closed, throw an exception to indicate the 
-      //caller should retry on the new primary.
-      throw new PrimaryBucketException(e);
-    }
-  }
-  
-  protected IOException handleWriteHdfsIOError(HoplogWriter writer, Hoplog so, IOException e)
-      throws IOException {
-    if (logger.isDebugEnabled()) {
-      logger.debug("{}Handle write error:" + so, logPrefix);
-    }
-    
-    closeWriter(writer);
-    // add to the janitor queue
-    tmpFiles.put(so, Boolean.TRUE);
-
-    return handleIOError(e);
-  }
-
-  private void deleteTmpFile(HoplogWriter writer, Hoplog so) {
-    closeWriter(writer);
-    
-    // delete the temporary hoplog
-    try {
-      if (so != null) {
-        so.delete();
-      }
-    } catch (IOException e1) {
-      logger.info(e1);
-    }
-  }
-
-  private void closeWriter(HoplogWriter writer) {
-    if (writer != null) {
-      // close writer before deleting it
-      try {
-        writer.close();
-      } catch (Throwable e1) {
-        // error to close hoplog will happen if no connections to datanode are
-        // available. Try to delete the file on namenode
-        if(!isClosed()) {
-          logger.info(e1);
-        }
-      }
-    }
-  }
-
-  /**
-   * Closes hoplog and suppresses IO during reader close. Suppressing IO errors
-   * when the organizer is closing or an hoplog becomes inactive lets the system
-   * continue freeing other resources. It could potentially lead to socket
-   * leaks though.
-   */
-  private void closeReaderAndSuppressError(Hoplog hoplog, boolean clearCache) {
-    try {
-      hoplog.close();
-    } catch (IOException e) {
-      // expose the error wrapped inside remote exception
-      if (e instanceof RemoteException) {
-        e = ((RemoteException) e).unwrapRemoteException();
-      } 
-      logger.info(e);
-    }
-  }
-
-  @Override
-  public BucketIterator scan() throws IOException {
-    String user = logger.isDebugEnabled() ? "Scan" : null;
-    List<TrackedReference<Hoplog>> hoplogs = null;
-    BucketIterator iter = null;
-    try {
-      hoplogs = hoplogReadersController.getTrackedSortedOplogList(user);
-      iter = new BucketIterator(hoplogs);
-      return iter;
-    }  finally {
-      // Normally the hoplogs will be released when the iterator is closed. The
-      // hoplogs must be released only if creating the iterator has failed.
-      if (iter == null) {
-        hoplogReadersController.releaseHoplogs(hoplogs, user);
-      }
-    }
-  }
-
-  @Override
-  public BucketIterator scan(byte[] from, byte[] to) throws IOException {
-    throw new NotImplementedException();
-  }
-
-  @Override
-  public BucketIterator scan(byte[] from, boolean fromInclusive, byte[] to, boolean toInclusive) throws IOException {
-    throw new NotImplementedException();
-  }
-
-  @Override
-  public HoplogIterator<byte[], SortedHoplogPersistedEvent> scan(
-      long startOffset, long length) throws IOException {
-    throw new UnsupportedOperationException("Not supported for " + this.getClass().getSimpleName());
-  }
-
-  @Override
-  public void close() throws IOException {
-    super.close();
-    
-    synchronized (changePrimarylockObject) {
-      organizerClosed = true;
-    }
-    //Suspend compaction
-    getCompactor().suspend();
-    
-    //Close the readers controller.
-    hoplogReadersController.close();
-    
-    previousCleanupTimestamp.set(Long.MIN_VALUE);
-    
-  }
-
-  /**
-   * This method call will happen on secondary node. The secondary node needs to update its data
-   * structures
-   */
-  @Override
-  public void hoplogCreated(String region, int bucketId, Hoplog... oplogs)
-      throws IOException {
-    for (Hoplog oplog : oplogs) {
-      addSortedOplog(oplog, false, true);
-    }
-  }
-
-  @Override
-  public long sizeEstimate() {
-    return this.bucketSize.cardinality();
-  }
-
-  private void addSortedOplog(Hoplog so, boolean notify, boolean addsToBucketSize)
-  throws IOException {
-    if (!hoplogReadersController.addSortedOplog(so)) {
-      so.close();
-      throw new InternalGemFireException("Failed to add " + so);
-    }
-
-    String user = logger.isDebugEnabled() ? "Add" : null;
-    if (addsToBucketSize) {
-      TrackedReference<Hoplog> ref = null;
-      try {
-        ref = hoplogReadersController.trackHoplog(so, user);
-        synchronized (bucketSize) {
-          ICardinality localHLL = ref.get().getEntryCountEstimate();
-          if (localHLL != null) {
-            bucketSize = mergeHLL(bucketSize, localHLL);
-          }
-        }
-      } finally {
-        if (ref != null) {
-          hoplogReadersController.releaseHoplog(ref, user);
-        }
-      }
-    }
-
-    if (notify && listener != null) {
-      listener.hoplogCreated(regionFolder, bucketId, so);
-    }
-  }
-
-  private void reEstimateBucketSize() throws IOException {
-    ICardinality global = null;
-    String user = logger.isDebugEnabled() ? "HLL" : null;
-    List<TrackedReference<Hoplog>> hoplogs = null;
-    try {
-      hoplogs = hoplogReadersController.getTrackedSortedOplogList(user);
-      global = new HyperLogLog(HLL_CONSTANT);
-      for (TrackedReference<Hoplog> hop : hoplogs) {
-        global = mergeHLL(global, hop.get().getEntryCountEstimate());
-      }
-    } finally {
-      hoplogReadersController.releaseHoplogs(hoplogs, user);
-    }
-    bucketSize = global;
-  }
-
-  protected ICardinality mergeHLL(ICardinality global, ICardinality local)
-  /*throws IOException*/ {
-    try {
-      return global.merge(local);
-    } catch (CardinalityMergeException e) {
-      // uncomment this after the 1.4 release
-      //throw new InternalGemFireException(e.getLocalizedMessage(), e);
-      startCompactionOnStartup = true;
-      return global;
-    }
-  }
-
-  private void removeSortedOplog(TrackedReference<Hoplog> so, boolean notify) throws IOException {
-    hoplogReadersController.removeSortedOplog(so);
-    
-    // release lock before notifying listeners
-    if (notify && listener != null) {
-      listener.hoplogDeleted(regionFolder, bucketId, so.get());
-    }
-  }
-  
-  private void notifyCompactionListeners(boolean isMajor) {
-    listener.compactionCompleted(regionFolder, bucketId, isMajor);
-  }
-  
-  /**
-   * This method call will happen on secondary node. The secondary node needs to update its data
-   * structures
-   * @throws IOException 
-   */
-  @Override
-  public void hoplogDeleted(String region, int bucketId, Hoplog... oplogs) throws IOException {
-    throw new NotImplementedException();
-  }
-
-  @Override
-  public synchronized Compactor getCompactor() {
-    if (compacter == null) {
-      compacter = new HoplogCompactor();
-    }
-    return compacter;
-  }
-
-  @Override
-  protected Hoplog getHoplog(Path hoplogPath) throws IOException {
-    Hoplog so = new HFileSortedOplog(store, hoplogPath, store.getBlockCache(), stats, store.getStats());
-    return so;
-  }
-
-  /**
-   * locks sorted oplogs collection, removes oplog and renames for deletion later
-   * @throws IOException 
-   */
-  void markSortedOplogForDeletion(List<TrackedReference<Hoplog>> targets, boolean notify) throws IOException {
-    for (int i = targets.size(); i > 0; i--) {
-      TrackedReference<Hoplog> so = targets.get(i - 1);
-      removeSortedOplog(so, true);
-      if (!store.getFileSystem().exists(new Path(bucketPath, so.get().getFileName()))) {
-        // the hoplog does not even exist on file system. Skip remaining steps
-        continue;
-      }
-      addExpiryMarkerForAFile(so.get());
-    }
-  }
-  
-  /**
-   * Deletes expired hoplogs and expiry markers from the file system. Calculates
-   * a target timestamp based on cleanup interval. Then gets list of target
-   * hoplogs. It also updates the disk usage state
-   * 
-   * @return number of files deleted
-   */
-   synchronized int initiateCleanup() throws IOException {
-    int conf = store.getPurgeInterval();
-    // minutes to milliseconds
-    long intervalDurationMillis = conf * 60 * 1000;
-    // Any expired hoplog with timestamp less than targetTS is a delete
-    // candidate.
-    long targetTS = System.currentTimeMillis() - intervalDurationMillis;
-    if (logger.isDebugEnabled()) {
-      logger.debug("Target timestamp for expired hoplog deletion " + targetTS, logPrefix);
-    }
-    // avoid too frequent cleanup invocations. Exit cleanup invocation if the
-    // previous cleanup was executed within 10% range of cleanup interval
-    if (previousCleanupTimestamp.get() > targetTS
-        && (previousCleanupTimestamp.get() - targetTS) < (intervalDurationMillis / 10)) {
-      if (logger.isDebugEnabled()) {
-        logger.debug("Skip cleanup, previous " + previousCleanupTimestamp.get(), logPrefix);
-      }
-      return 0;
-    }
-
-    List<FileStatus> targets = getOptimizationTargets(targetTS);
-    return deleteExpiredFiles(targets);
-  }
-
-  protected int deleteExpiredFiles(List<FileStatus> targets) throws IOException {
-    if (targets == null) {
-      return 0;
-    }
-
-    for (FileStatus file : targets) {
-      if (logger.isDebugEnabled()) {
-        logger.debug("{}Deleting file: " + file.getPath(), logPrefix);
-      }
-      store.getFileSystem().delete(file.getPath(), false);
-      
-      if (isClosed()) {
-        if (logger.isDebugEnabled())
-          logger.debug("{}Expiry file cleanup interupted by bucket close", logPrefix);
-        return 0;
-      }
-      incrementDiskUsage(-1 * file.getLen());
-    }
-
-    previousCleanupTimestamp.set(System.currentTimeMillis());
-    return targets.size();
-  }
-
-  /**
-   * @param ts
-   *          target timestamp
-   * @return list of hoplogs, whose expiry markers were created before target
-   *         timestamp, and the expiry marker itself.
-   * @throws IOException
-   */
-  protected List<FileStatus> getOptimizationTargets(long ts) throws IOException {
-    if (logger.isDebugEnabled()) {
-      logger.debug("{}Identifying optimization targets " + ts, logPrefix);
-    }
-
-    List<FileStatus> deleteTargets = new ArrayList<FileStatus>();
-    FileStatus[] markers = getExpiryMarkers();
-    if (markers != null) {
-      for (FileStatus marker : markers) {
-        String name = truncateExpiryExtension(marker.getPath().getName());
-        long timestamp = marker.getModificationTime();
-
-        // expired minor compacted files are not being used anywhere. These can
-        // be removed immediately. All the other expired files should be removed
-        // when the files have aged
-        boolean isTarget = false;
-        
-        if (name.endsWith(MINOR_HOPLOG_EXTENSION)) {
-          isTarget = true;
-        } else if (timestamp < ts && name.endsWith(FLUSH_HOPLOG_EXTENSION)) {
-          isTarget = true;
-        } else if (timestamp < ts && name.endsWith(MAJOR_HOPLOG_EXTENSION)) {
-          long majorCInterval = ((long)store.getMajorCompactionInterval()) * 60 * 1000;
-          if (timestamp < (System.currentTimeMillis() - majorCInterval)) {
-            isTarget = true;
-          }
-        }
-        if (!isTarget) {
-          continue;
-        }
-        
-        // if the file is still being read, do not delete or rename it
-        TrackedReference<Hoplog> used = hoplogReadersController.getInactiveHoplog(name);
-        if (used != null) {
-          if (used.inUse() && logger.isDebugEnabled()) {
-            logger.debug("{}Optimizer: found active expired hoplog:" + name, logPrefix);
-          } else if (logger.isDebugEnabled()) {
-            logger.debug("{}Optimizer: found open expired hoplog:" + name, logPrefix);
-          }
-          continue;
-        }
-        
-        if (logger.isDebugEnabled()) {
-          logger.debug("{}Delete target identified " + marker.getPath(), logPrefix);
-        }
-        
-        deleteTargets.add(marker);
-        Path hoplogPath = new Path(bucketPath, name);
-        if (store.getFileSystem().exists(hoplogPath)) {
-          FileStatus hoplog = store.getFileSystem().getFileStatus(hoplogPath);
-          deleteTargets.add(hoplog);
-        }
-      }
-    }
-    return deleteTargets;
-  }
-
-  /**
-   * Returns a list of hoplogs present in the bucket's directory; expected to be called during
-   * hoplog set initialization.
-   */
-  List<Hoplog> identifyAndLoadSortedOplogs(boolean countSize) throws IOException {
-    FileSystem fs = store.getFileSystem();
-    if (! fs.exists(bucketPath)) {
-      return new ArrayList<Hoplog>();
-    }
-    
-    FileStatus allFiles[] = fs.listStatus(bucketPath);
-    ArrayList<FileStatus> validFiles = new ArrayList<FileStatus>();
-    for (FileStatus file : allFiles) {
-      // All hoplog files contribute to disk usage
-      Matcher matcher = HOPLOG_NAME_PATTERN.matcher(file.getPath().getName());
-      if (! matcher.matches()) {
-        // not a hoplog
-        continue;
-      }
-      
-      // account for the disk used by this file
-      if (countSize) {
-        incrementDiskUsage(file.getLen());
-      }
-      
-      // All valid hoplog files must match the regex
-      matcher = SORTED_HOPLOG_PATTERN.matcher(file.getPath().getName());
-      if (matcher.matches()) {
-        validFiles.add(file);
-      }
-    }
-    
-    FileStatus[] markers = getExpiryMarkers();
-    FileStatus[] validHoplogs = filterValidHoplogs(
-        validFiles.toArray(new FileStatus[validFiles.size()]), markers);
-
-    ArrayList<Hoplog> results = new ArrayList<Hoplog>();
-    if (validHoplogs == null || validHoplogs.length == 0) {
-      return results;
-    }
-
-    for (int i = 0; i < validHoplogs.length; i++) {
-      // Skip directories
-      if (validHoplogs[i].isDirectory()) {
-        continue;
-      }
-
-      final Path p = validHoplogs[i].getPath();
-      // skip empty file
-      if (fs.getFileStatus(p).getLen() <= 0) {
-        continue;
-      }
-
-      Hoplog hoplog = new HFileSortedOplog(store, p, store.getBlockCache(), stats, store.getStats());
-      results.add(hoplog);
-    }
-
-    return results;
-  }
-
-  private static int findMaxSequenceNumber(List<Hoplog> hoplogs) throws IOException {
-    int maxSeq = 0;
-    for (Hoplog hoplog : hoplogs) {
-      maxSeq = Math.max(maxSeq, getSequenceNumber(hoplog));
-    }
-    return maxSeq;
-  }
-
-  /**
-   * @return the sequence number associated with a hoplog file
-   */
-  static int getSequenceNumber(Hoplog hoplog) {
-    Matcher matcher = SORTED_HOPLOG_PATTERN.matcher(hoplog.getFileName());
-    boolean matched = matcher.find();
-    assert matched;
-    return Integer.valueOf(matcher.group(3));
-  }
-
-  protected FileStatus[] getExpiryMarkers() throws IOException {
-    FileSystem fs = store.getFileSystem();
-    if (hoplogReadersController.hoplogs == null
-        || hoplogReadersController.hoplogs.size() == 0) {
-      // there are no hoplogs in the system. May be the bucket is not existing
-      // at all.
-      if (!fs.exists(bucketPath)) {
-        if (logger.isDebugEnabled())
-          logger.debug("{}This bucket is unused, skipping expired hoplog check", logPrefix);
-        return null;
-      }
-    }
-    
-    FileStatus files[] = FSUtils.listStatus(fs, bucketPath, new PathFilter() {
-      @Override
-      public boolean accept(Path file) {
-        // All expired hoplog end with expire extension and must match the valid file regex
-        String fileName = file.getName();
-        if (! fileName.endsWith(EXPIRED_HOPLOG_EXTENSION)) {
-          return false;
-        }
-        fileName = truncateExpiryExtension(fileName);
-        Matcher matcher = SORTED_HOPLOG_PATTERN.matcher(fileName);
-        return matcher.find();
-      }
-
-    });
-    return files;
-  }
-  
-  @Override
-  public void clear() throws IOException {
-    //Suspend compaction while we are doing the clear. This
-    //aborts the currently in progress compaction.
-    getCompactor().suspend();
-    
-    // while compaction is suspended, clear method marks hoplogs for deletion
-    // only. Files will be removed by cleanup thread after active gets and
-    // iterations are completed
-    String user = logger.isDebugEnabled() ? "clear" : null;
-    List<TrackedReference<Hoplog>> oplogs = null;
-    try {
-      oplogs = hoplogReadersController.getTrackedSortedOplogList(user);
-      markSortedOplogForDeletion(oplogs, true);
-    } finally {
-      if (oplogs != null) {
-        hoplogReadersController.releaseHoplogs(oplogs, user);
-      }
-      //Resume compaction
-      getCompactor().resume();
-    }
-  }
-
-  /**
-   * Performs the following activities
-   * <UL>
-   * <LI>Submits compaction requests as needed
-   * <LI>Deletes tmp files which the system failed to remove earlier
-   */
-  @Override
-  public void performMaintenance() throws IOException {
-    long startTime = System.currentTimeMillis();
-    
-    if (logger.isDebugEnabled())
-      logger.debug("{}Executing bucket maintenance", logPrefix);
-
-    submitCompactionRequests();
-    hoplogReadersController.closeInactiveHoplogs();
-    initiateCleanup();
-    
-    cleanupTmpFiles();
-    
-    if (logger.isDebugEnabled()) {
-      logger.debug("{}Time spent in bucket maintenance (in ms): "
-          + (System.currentTimeMillis() - startTime), logPrefix);
-    }
-  }
-
-  @Override
-  public Future<CompactionStatus> forceCompaction(boolean isMajor) {
-    CompactionRequest request = new CompactionRequest(regionFolder, bucketId,
-        getCompactor(), isMajor, true/*force*/);
-    return HDFSCompactionManager.getInstance(store).submitRequest(request);
-  }
-
-  private Future<CompactionStatus> forceCompactionOnVersionUpgrade() {
-    CompactionRequest request = new CompactionRequest(regionFolder, bucketId, getCompactor(), true, true, true);
-    return HDFSCompactionManager.getInstance(store).submitRequest(request);
-  }
-
-  @Override
-  public long getLastMajorCompactionTimestamp() {
-    long ts = 0;
-    String user = logger.isDebugEnabled() ? "StoredProc" : null;
-    List<TrackedReference<Hoplog>> hoplogs = hoplogReadersController.getTrackedSortedOplogList(user);
-    try {
-      for (TrackedReference<Hoplog> hoplog : hoplogs) {
-        String fileName = hoplog.get().getFileName();
-        Matcher file = HOPLOG_NAME_PATTERN.matcher(fileName);
-        if (file.matches() && fileName.endsWith(MAJOR_HOPLOG_EXTENSION)) {
-          ts = getHoplogTimestamp(file);
-          break;
-        }
-      }
-    } finally {
-      hoplogReadersController.releaseHoplogs(hoplogs, user);
-    }
-    if (logger.isDebugEnabled()) {
-      logger.debug("{}HDFS: for bucket:"+getRegionBucketStr()+" returning last major compaction timestamp "+ts, logPrefix);
-    }
-    return ts;
-  }
-
-  private void initOldTmpFiles() throws IOException {
-    FileSystem fs = store.getFileSystem();
-    if (! fs.exists(bucketPath)) {
-      return;
-    }
-    
-    oldTmpFiles = new LinkedList<FileStatus>(Arrays.asList(fs.listStatus(bucketPath, new TmpFilePathFilter())));
-  }
-  
-  private void cleanupTmpFiles() throws IOException {
-    if(oldTmpFiles == null && tmpFiles == null) {
-      return;
-    }
-    
-    if (oldTmpFiles != null) {
-      FileSystem fs = store.getFileSystem();
-      long now = System.currentTimeMillis();
-      for (Iterator<FileStatus> itr = oldTmpFiles.iterator(); itr.hasNext();) {
-        FileStatus file = itr.next();
-        if(file.getModificationTime() + TMP_FILE_EXPIRATION_TIME_MS > now) {
-          if (logger.isDebugEnabled()) {
-            logger.debug("{}Deleting temporary file:" + file.getPath(), logPrefix);
-          }
-          fs.delete(file.getPath(), false);
-          itr.remove();
-        }
-      }
-    }
-    if (tmpFiles != null) {
-      for (Hoplog so : tmpFiles.keySet()) {
-        if (logger.isDebugEnabled()) {
-          logger.debug("{}Deleting temporary file:" + so.getFileName(), logPrefix);
-        }
-        deleteTmpFile(null, so);
-      }
-    }
-  }
-  
-  /**
-   * Executes tiered compaction of hoplog files. One instance of compacter per bucket will exist
-   */
-  protected class HoplogCompactor implements Compactor {
-    private volatile boolean suspend = false;
-    
-    // the following boolean will be used to synchronize minor compaction
-    private AtomicBoolean isMinorCompactionActive = new AtomicBoolean(false);
-    // the following boolean will be used to synchronize major compaction
-    private AtomicBoolean isMajorCompactionActive = new AtomicBoolean(false);
-    // the following integer tracks the max sequence number amongst the
-    // target files being major compacted. This value will be used to prevent
-    // concurrent MajorC and minorC. MinorC is preempted in case of an
-    // overlap. This object is also used as a lock. The lock is acquired before
-    // identifying compaction targets and before marking targets for expiry
-    final AtomicInteger maxMajorCSeqNum = new AtomicInteger(-1);
-
-    @Override
-    public void suspend() {
-      long wait = Long.getLong(HoplogConfig.SUSPEND_MAX_WAIT_MS, HoplogConfig.SUSPEND_MAX_WAIT_MS_DEFAULT);
-      this.suspend=true;
-      //this forces the compact method to finish.
-      while (isMajorCompactionActive.get() || isMinorCompactionActive.get()) {
-        if (wait < 0) {
-          wait = Long.getLong(HoplogConfig.SUSPEND_MAX_WAIT_MS, HoplogConfig.SUSPEND_MAX_WAIT_MS_DEFAULT);
-          String act = isMajorCompactionActive.get() ? "MajorC" : "MinorC";
-          logger.warn(LocalizedMessage.create(LocalizedStrings.HOPLOG_SUSPEND_OF_0_FAILED_IN_1, new Object[] {act, wait}));
-          break;
-        }
-        try {
-          TimeUnit.MILLISECONDS.sleep(50);
-          wait -= 50;
-        } catch (InterruptedException e) {
-          break;
-        }
-      }
-    }
-    
-    @Override
-    public void resume() {
-      this.suspend = false;
-    }
-    
-    @Override
-    public boolean isBusy(boolean isMajor) {
-      if (isMajor) {
-        return isMajorCompactionActive.get();
-      } else {
-        return isMinorCompactionActive.get();
-      }
-    }
-    
-    /**
-     * compacts hoplogs. The method takes a minor or major compaction "lock" to
-     * prevent concurrent execution of compaction cycles. A possible improvement
-     * is to allow parallel execution of minor compaction if the sets of
-     * hoplogs being compacted are disjoint.
-     */
-    @Override
-    public boolean compact(boolean isMajor, boolean isForced) throws IOException {
-      if(suspend) {
-        return false;
-      }
-
-      String extension = null;
-      IOOperation compactionStats = null;
-      long startTime = 0; 
-      final AtomicBoolean lock;
-      Hoplog compactedHoplog = null;
-      List<TrackedReference<Hoplog>> targets = null;
-      String user = logger.isDebugEnabled() ? (isMajor ? "MajorC" : "MinorC") : null;
-      
-      if (isMajor) {
-        lock = isMajorCompactionActive;
-        extension = MAJOR_HOPLOG_EXTENSION;
-        compactionStats = stats.getMajorCompaction();
-      } else {
-        lock = isMinorCompactionActive;
-        extension = MINOR_HOPLOG_EXTENSION;
-        compactionStats = stats.getMinorCompaction();
-      }
-
-      // final check before beginning compaction. Return if compaction is active
-      if (! lock.compareAndSet(false, true)) {
-        if (isMajor) {
-          if (logger.isDebugEnabled())
-            logger.debug("{}Major compaction already active. Ignoring new request", logPrefix);
-        } else {
-          if (logger.isDebugEnabled())
-            logger.debug("Minor compaction already active. Ignoring new request", logPrefix);
-        }
-        return false;
-      }
-      
-      try {
-        if(suspend) {
-          return false;
-        }
-        
-        // variables for updating stats
-        startTime = compactionStats.begin();
-        
-        int seqNum = -1;
-        int lastKnownMajorCSeqNum;
-        synchronized (maxMajorCSeqNum) {
-          lastKnownMajorCSeqNum = maxMajorCSeqNum.get();
-          targets = hoplogReadersController.getTrackedSortedOplogList(user);
-          getCompactionTargets(isMajor, targets, lastKnownMajorCSeqNum);
-          if (targets != null && targets.size() > 0) {
-            targets = Collections.unmodifiableList(targets);
-            seqNum = getSequenceNumber(targets.get(0).get());
-            if (isMajor) {
-              maxMajorCSeqNum.set(seqNum);
-            }
-          }
-        }
-        
-        if (targets == null || targets.isEmpty() || (!isMajor && targets.size() == 1 && !isForced)) {
-          if (logger.isDebugEnabled()){
-            logger.debug("{}Skipping compaction, too few hoplops to compact. Major?" + isMajor, logPrefix);
-          }
-            
-          compactionStats.end(0, startTime);
-          return true;
-        }
-        
-        // If there is only one major compacted file, there is no need to run
-        // major compaction and generate a copy of the same content
-        if (targets.size() == 1 && !isForced) {
-          String hoplogName = targets.get(0).get().getFileName();
-          if (hoplogName.endsWith(MAJOR_HOPLOG_EXTENSION)){
-            if (logger.isDebugEnabled()){
-              logger.debug("{}Skipping compaction, no need to compact a major compacted file. Major?" + isMajor, logPrefix);
-            }
-            compactionStats.end(0, startTime);
-            return true;
-          }
-        }
-        
-        if (logger.isDebugEnabled()) {
-          for (TrackedReference<Hoplog> target : targets) {
-            if (logger.isDebugEnabled()) {
-              fineLog("Target:", target, " size:", target.get().getSize());
-            }
-          }
-        }
-        
-        // Create a temporary hoplog for compacted hoplog. The compacted hoplog
-        // will have the seq number same as that of youngest target file. Any
-        // hoplog younger than target hoplogs will have a higher sequence number
-        compactedHoplog = getTmpSortedOplog(seqNum, extension);
-        
-        long byteCount;
-        try {
-          byteCount = fillCompactionHoplog(isMajor, targets, compactedHoplog, lastKnownMajorCSeqNum);
-          compactionStats.end(byteCount, startTime);
-        } catch (InterruptedException e) {
-          if (logger.isDebugEnabled())
-            logger.debug("{}Compaction execution suspended", logPrefix);
-          compactionStats.error(startTime);
-          return false;
-        } catch (ForceReattemptException e) {
-          if (logger.isDebugEnabled())
-            logger.debug("{}Compaction execution suspended", logPrefix);
-          compactionStats.error(startTime);
-          return false;
-        }
-        
-        // creation of compacted hoplog completed, it's time to use it for
-        // reading. Before using it, make sure minorC and majorC were not
-        // executing on overlapping sets of files. All targets can be marked for
-        // expiration. Notify listener if configured. Update bucket size
-        synchronized (maxMajorCSeqNum) {
-          if (!isMajor && isMinorMajorOverlap(targets, maxMajorCSeqNum.get())) {
-            // MajorC is higher priority. In case of any overlap kill minorC
-            if (logger.isDebugEnabled())
-              logger.debug("{}Interrupting MinorC for a concurrent MajorC", logPrefix);
-            compactionStats.error(startTime);
-            return false;
-          }
-          addSortedOplog(compactedHoplog, true, false);
-          markSortedOplogForDeletion(targets, true);
-        }
-      } catch (IOException e) {
-        compactionStats.error(startTime);
-        throw e;
-      } finally {
-        if (isMajor) {
-          maxMajorCSeqNum.set(-1);
-        }
-        lock.set(false);
-        hoplogReadersController.releaseHoplogs(targets, user);
-      }
-      
-      incrementDiskUsage(compactedHoplog.getSize());
-      reEstimateBucketSize();
-      
-      notifyCompactionListeners(isMajor);
-      return true;
-    }
-
-    /**
-     * Major compaction compacts all files. Seq number of the youngest file
-     * being MajorCed is known. If MinorC is operating on any file with a seq
-     * number less than this number, there is an overlap
-     * @param num 
-     */
-    boolean isMinorMajorOverlap(List<TrackedReference<Hoplog>> targets, int num) {
-      if (num < 0 || targets == null || targets.isEmpty()) {
-        return false;
-      }
-
-      for (TrackedReference<Hoplog> hop : targets) {
-        if (getSequenceNumber(hop.get()) <= num) {
-          return true;
-        }
-      }
-      
-      return false;
-    }
-
-    /**
-     * Iterates over targets and writes eligible targets to the output hoplog.
-     * Handles creation of iterators and writer and closing it in case of
-     * errors.
-     */
-    public long fillCompactionHoplog(boolean isMajor,
-        List<TrackedReference<Hoplog>> targets, Hoplog output, int majorCSeqNum)
-        throws IOException, InterruptedException, ForceReattemptException {
-
-      HoplogWriter writer = null;
-      ICardinality localHLL = new HyperLogLog(HLL_CONSTANT);
-      HoplogSetIterator mergedIter = null;
-      int byteCount = 0;
-      
-      try {
-        // create a merged iterator over the targets and write entries into
-        // output hoplog
-        mergedIter = new HoplogSetIterator(targets);
-        writer = output.createWriter(mergedIter.getRemainingEntryCount());
-
-        boolean interrupted = false;
-        for (; mergedIter.hasNext(); ) {
-          if (suspend) {
-            interrupted = true;
-            break;
-          } else if (!isMajor &&  maxMajorCSeqNum.get() > majorCSeqNum) {
-            // A new major compaction cycle is starting, quit minorC to avoid
-            // duplicate work and missing deletes
-            if (logger.isDebugEnabled())
-              logger.debug("{}Preempting MinorC, new MajorC cycle detected ", logPrefix);
-            interrupted = true;
-            break;
-          }
-
-          mergedIter.nextBB();
-          
-          ByteBuffer k = mergedIter.getKeyBB();
-          ByteBuffer v = mergedIter.getValueBB();
-          
-          boolean isDeletedEntry = isDeletedEntry(v.array(), v.arrayOffset());
-          if (isMajor && isDeletedEntry) {
-            // its major compaction, time to ignore deleted entries
-            continue;
-          }
-
-          if (!isDeletedEntry) {
-            int hash = MurmurHash.hash(k.array(), k.arrayOffset(), k.remaining(), -1);
-            localHLL.offerHashed(hash);
-          }
-
-          writer.append(k, v);
-          byteCount += (k.remaining() + v.remaining());
-        }
-
-        mergedIter.close();
-        mergedIter = null;
-
-        writer.close(buildMetaData(localHLL));
-        writer = null;
-
-        if (interrupted) {
-          // If we suspended compaction operations, delete the partially written
-          // file and return.
-          output.delete();
-          throw new InterruptedException();
-        }
-        
-        // ping secondaries before making the file a legitimate file to ensure 
-        // that in case of split brain, no other vm has taken over as primary. #50110.
-        pingSecondaries();
-        
-        makeLegitimate(output);
-        return byteCount;
-      } catch (IOException e) {
-        e = handleWriteHdfsIOError(writer, output, e);
-        writer = null;
-        throw e;
-      } catch (ForceReattemptException e) {
-        output.delete();
-        throw e;
-      }finally {
-        if (mergedIter != null) {
-          mergedIter.close();
-        }
-
-        if (writer != null) {
-          writer.close();
-        }
-      }
-    }
-
-    /**
-     * identifies targets. For major compaction all sorted oplogs will be
-     * iterated on. For minor compaction, a smaller, policy-driven subset of
-     * targets is selected.
-     */
-    protected void getCompactionTargets(boolean major,
-        List<TrackedReference<Hoplog>> targets, int majorCSeqNum) {
-      if (!major) {
-        getMinorCompactionTargets(targets, majorCSeqNum);
-      }
-    }
-
-    /**
-     * list of oplogs most suitable for compaction. The algorithm selects the m
-     * smallest oplogs which are not bigger than X in size. The target list is
-     * cleared if no valid candidates are found.
-     */
-    void getMinorCompactionTargets(List<TrackedReference<Hoplog>> targets, int majorCSeqNum) 
-    {
-      List<TrackedReference<Hoplog>> omittedHoplogs = new ArrayList<TrackedReference<Hoplog>>();
-
-      // reverse the order of hoplogs in list. the oldest file becomes the first file.
-      Collections.reverse(targets);
-
-      // hoplogs greater than this size will not be minor-compacted
-      final long MAX_COMPACTION_FILE_SIZE;
-      // maximum number of files to be included in any compaction cycle
-      final int MAX_FILE_COUNT_COMPACTION;
-      // minimum number of files that must be present for compaction to be worthwhile
-      final int MIN_FILE_COUNT_COMPACTION;
-      
-      MAX_COMPACTION_FILE_SIZE = ((long)store.getInputFileSizeMax()) * 1024 * 1024;
-      MAX_FILE_COUNT_COMPACTION = store.getInputFileCountMax();
-      MIN_FILE_COUNT_COMPACTION = store.getInputFileCountMin();
-
-      try {
-        // skip till first file smaller than the max compaction file size is
-        // found. And if MajorC is active, move to a file which is also outside
-        // scope of MajorC
-        for (Iterator<TrackedReference<Hoplog>> iterator = targets.iterator(); iterator.hasNext();) {
-          TrackedReference<Hoplog> oplog = iterator.next();
-          if (majorCSeqNum >= getSequenceNumber(oplog.get())) {
-            iterator.remove();
-            omittedHoplogs.add(oplog);
-            if (logger.isDebugEnabled()){
-              fineLog("Overlap with MajorC, excluding hoplog " + oplog.get());
-            }
-            continue;
-          }
-          
-          if (oplog.get().getSize() > MAX_COMPACTION_FILE_SIZE || oplog.get().getFileName().endsWith(MAJOR_HOPLOG_EXTENSION)) {
-            // a big file will not be included in minor compaction
-            // a major compacted file will not be converted to a minor compacted file
-            iterator.remove();
-            omittedHoplogs.add(oplog);
-            if (logger.isDebugEnabled()) {
-              fineLog("Excluding big hoplog from minor cycle:",
-                  oplog.get(), " size:", oplog.get().getSize(), " limit:",
-                  MAX_COMPACTION_FILE_SIZE);
-            }
-          } else {
-            // first small hoplog found, skip the loop
-            break;
-          }
-        }
-
-        // If there are too few files no need to perform compaction
-        if (targets.size() < MIN_FILE_COUNT_COMPACTION) {
-          if (logger.isDebugEnabled()){
-            logger.debug("{}Too few hoplogs for minor cycle:" + targets.size(), logPrefix);
-          }
-          omittedHoplogs.addAll(targets);
-          targets.clear();
-          return;
-        }
-        
-        float maxGain = Float.MIN_VALUE;
-        int bestFrom = -1; 
-        int bestTo = -1; 
-        
-        // for listSize=5 list, minFile=3; maxIndex=5-3. 
-        // so from takes values 0,1,2
-        int maxIndexForFrom = targets.size() - MIN_FILE_COUNT_COMPACTION;
-        for (int from = 0; from <= maxIndexForFrom ; from++) {
-          // for listSize=6 list, minFile=3, maxFile=5; minTo=0+3-1, maxTo=0+5-1
-          // so to takes values 2,3,4
-          int minIndexForTo = from + MIN_FILE_COUNT_COMPACTION - 1;
-          int maxIndexForTo = Math.min(from + MAX_FILE_COUNT_COMPACTION, targets.size());
-          
-          for (int i = minIndexForTo; i < maxIndexForTo; i++) {
-            Float gain = computeGain(from, i, targets);
-            if (gain == null) {
-              continue;
-            }
-            
-            if (gain > maxGain) {
-              maxGain = gain;
-              bestFrom = from;
-              bestTo = i;
-            }
-          }
-        }
-        
-        if (bestFrom == -1) {
-          if (logger.isDebugEnabled())
-            logger.debug("{}Failed to find optimal target set for MinorC", logPrefix);
-          omittedHoplogs.addAll(targets);
-          targets.clear();
-          return;
-        }
-
-        if (logger.isDebugEnabled()) {
-          fineLog("MinorCTarget optimal result from:", bestFrom, " to:", bestTo);
-        }
-
-        // remove hoplogs that do not fall in the bestFrom-bestTo range
-        int i = 0;
-        for (Iterator<TrackedReference<Hoplog>> iter = targets.iterator(); iter.hasNext();) {
-          TrackedReference<Hoplog> hop = iter.next();
-          if (i < bestFrom || i > bestTo) {
-            iter.remove();
-            omittedHoplogs.add(hop);
-          }
-          i++;
-        }
-      } finally {
-        // release readers of targets not included in the compaction cycle 
-        String user = logger.isDebugEnabled() ? "MinorC" : null;
-        hoplogReadersController.releaseHoplogs(omittedHoplogs, user);
-      }
-      
-      // restore the order, youngest file is the first file again
-      Collections.reverse(targets);
-    }
-
-    @Override
-    public HDFSStore getHdfsStore() {
-      return store;
-    }
-  }
-  
-  Float computeGain(int from, int to, List<TrackedReference<Hoplog>> targets) {
-    double SIZE_64K = 64.0 * 1024;
-    // TODO the base for log should depend on the average number of keys an index block will contain
-    double LOG_BASE = Math.log(AVG_NUM_KEYS_PER_INDEX_BLOCK);
-    
-    long totalSize = 0;
-    double costBefore = 0f;
-    for (int i = from; i <= to; i++) {
-      long size = targets.get(i).get().getSize();
-      if (size == 0) {
-        continue;
-      }
-      totalSize += size;
-      
-      // For each hoplog file, read cost is number of index block reads and 1
-      // data block read. Index blocks on an average contain N keys and are
-      // organized in a N-ary tree structure. Hence the number of index block
-      // reads will be logBaseN(number of data blocks)
-      costBefore += Math.ceil(Math.max(1.0, Math.log(size / SIZE_64K) / LOG_BASE)) + 1;
-    }
-    
-    // if the first file is too large relative to the rest, this set is bad for compaction
-    long firstFileSize = targets.get(from).get().getSize();
-    if (firstFileSize > (totalSize - firstFileSize) * RATIO) {
-      if (logger.isDebugEnabled()){
-        fineLog("First file too big:", firstFileSize, " totalSize:", totalSize);
-      }
-      return null;
-    }
-        
-    // compute size in mb so that the value of gain is in few decimals
-    long totalSizeInMb = totalSize / 1024 / 1024;
-    if (totalSizeInMb == 0) {
-      // the files are too small, just return the count. The more we compact
-      // the better it is
-      if (logger.isDebugEnabled()) {
-        logger.debug("{}Total size too small:" + totalSize, logPrefix);
-      }
-      return (float) costBefore;
-    }
-    
-    double costAfter = Math.ceil(Math.log(totalSize / SIZE_64K) / LOG_BASE) + 1;
-    return (float) ((costBefore - costAfter) / totalSizeInMb);
-  }
-  
-  /**
-   * Hoplog readers are accessed asynchronously. There could be a window in
-   * which, while a hoplog is being iterated on, it gets compacted and becomes
-   * expired or inactive. The reader of the hoplog must not be closed till the
-   * iterator completes. All such scenarios are managed by this class. It keeps
-   * all the readers, active and inactive, along with a reference counter for
-   * each reader. An inactive reader is closed once its reference count drops
-   * to 0.
-   * 
-   * One important point: only the compaction process makes a hoplog inactive.
-   * The compaction process in a bucket is single threaded, so compaction itself
-   * will not face a race condition. Read and scan operations on the bucket are
-   * affected, so the reference counter is incremented for each read and scan.
-   * 
-   */
-  private class HoplogReadersController implements HoplogReaderActivityListener {
-    private Integer maxOpenFilesLimit;
-
-    // sorted collection of all the active oplog files associated with this bucket. Instead of a
-    // queue, a set is used. New files created as part of compaction may be inserted after a few
-    // hoplogs were created. The compacted file is such a case but should not be treated as the newest.
-    private final ConcurrentSkipListSet<TrackedReference<Hoplog>> hoplogs;
-    
-    // list of all the hoplogs that have been compacted and need to be closed
-    // once the reference count reduces to 0
-    private final ConcurrentHashSet<TrackedReference<Hoplog>> inactiveHoplogs;
-    
-    // ReadWriteLock on list of oplogs to allow for consistent reads and scans
-    // while hoplog set changes. A write lock is needed on completion of
-    // compaction, addition of a new hoplog or on receiving updates message from
-    // other GF nodes
-    private final ReadWriteLock hoplogRWLock = new ReentrantReadWriteLock(true);
-
-    // tracks the number of active readers for hoplogs of this bucket
-    private AtomicInteger activeReaderCount = new AtomicInteger(0);
-    
-    public HoplogReadersController() {
-      HoplogComparator comp = new HoplogComparator();
-      hoplogs = new ConcurrentSkipListSet<TrackedReference<Hoplog>>(comp) {
-        private static final long serialVersionUID = 1L;
-
-        @Override
-        public boolean add(TrackedReference<Hoplog> e) {
-          // increment number of hoplogs active for this bucket
-          boolean result =  super.add(e);
-          if (result)
-            stats.incActiveFiles(1);
-          return result;
-        }
-        
-        @Override
-        public boolean remove(Object o) {
-          // decrement the number of hoplogs active for this bucket
-          boolean result =  super.remove(o);
-          if (result)
-            stats.incActiveFiles(-1);
-          return result;
-        }
-      };
-      
-      inactiveHoplogs = new ConcurrentHashSet<TrackedReference<Hoplog>>() {
-        private static final long serialVersionUID = 1L;
-        
-        @Override
-        public boolean add(TrackedReference<Hoplog> e) {
-          boolean result =  super.add(e);
-          if (result)
-            stats.incInactiveFiles(1);
-          return result;
-        }
-        
-        @Override
-        public boolean remove(Object o) {
-          boolean result =  super.remove(o);
-          if (result)
-            stats.incInactiveFiles(-1);
-          return result;
-        }
-      };
-      
-      maxOpenFilesLimit = Integer.getInteger(
-          HoplogConfig.BUCKET_MAX_OPEN_HFILES_CONF,
-          HoplogConfig.BUCKET_MAX_OPEN_HFILES_DEFAULT);
-    }
-    
-    Hoplog getOldestHoplog() {
-      if (hoplogs.isEmpty()) {
-        return null;
-      }
-      return hoplogs.last().get();
-    }
-
-    /**
-     * locks the sorted oplogs collection and performs the add operation
-     * @return true if the addition was successful
-     */
-    private boolean addSortedOplog(Hoplog so) throws IOException {
-      if (logger.isDebugEnabled()) {
-        logger.debug("{}Try add " + so, logPrefix);
-      }
-      hoplogRWLock.writeLock().lock();
-      try {
-        int size = hoplogs.size();
-        boolean result = hoplogs.add(new TrackedReference<Hoplog>(so));
-        so.setReaderActivityListener(this);
-        if (logger.isDebugEnabled()){
-          fineLog("Added: ", so, " Before:", size, " After:", hoplogs.size());
-        }
-        return result;
-      } finally {
-        hoplogRWLock.writeLock().unlock();
-      }
-    }
-    
-    /**
-     * locks the sorted oplogs collection, performs the remove operation and also updates readers
-     */
-    private void removeSortedOplog(TrackedReference<Hoplog> so) throws IOException {
-      if (logger.isDebugEnabled()) {
-        logger.debug("Try remove " + so, logPrefix);
-      }
-      hoplogRWLock.writeLock().lock();
-      try {
-        int size = hoplogs.size();
-        boolean result = hoplogs.remove(so);
-        if (result) {
-          inactiveHoplogs.add(so);
-          if (logger.isDebugEnabled()) {
-            fineLog("Removed: ", so, " Before:", size, " After:", hoplogs.size());
-          }
-        } else {
-          if (inactiveHoplogs.contains(so)) {
-            if (logger.isDebugEnabled()) {
-              logger.debug("{}Found a missing active hoplog in inactive list." + so, logPrefix);
-            }
-          } else {
-            so.get().close();
-            logger.warn(LocalizedMessage.create(LocalizedStrings.HOPLOG_MISSING_IN_BUCKET_FORCED_CLOSED, so.get()));
-          }
-        }
-      } finally {
-        hoplogRWLock.writeLock().unlock();
-      }
-    }
-    
-    private  void closeInactiveHoplogs() throws IOException {
-      hoplogRWLock.writeLock().lock();
-      try {
-        for (TrackedReference<Hoplog> hoplog : inactiveHoplogs) {
-          if (logger.isDebugEnabled()){
-            logger.debug("{}Try close inactive " + hoplog, logPrefix);
-          }
-
-          if (!hoplog.inUse()) {
-            int size = inactiveHoplogs.size();            
-            inactiveHoplogs.remove(hoplog);
-            closeReaderAndSuppressError(hoplog.get(), true);
-            if (logger.isDebugEnabled()){
-              fineLog("Closed inactive: ", hoplog.get(), " Before:", size,
-                  " After:", inactiveHoplogs.size());
-            }
-          }
-        }
-      } finally {
-        hoplogRWLock.writeLock().unlock();
-      }
-    }
-    
-    /**
-     * @param target
-     *          name of the hoplog file
-     * @return trackedReference if target exists in inactive hoplog list.
-     * @throws IOException
-     */
-    TrackedReference<Hoplog> getInactiveHoplog(String target) throws IOException {
-      hoplogRWLock.writeLock().lock();
-      try {
-        for (TrackedReference<Hoplog> hoplog : inactiveHoplogs) {
-          if (hoplog.get().getFileName().equals(target)) {
-            if (logger.isDebugEnabled()) {
-              logger.debug("{}Target found in inactive hoplogs list: " + hoplog, logPrefix);
-            }
-            return hoplog;
-          }
-        }
-        if (logger.isDebugEnabled()){
-          logger.debug("{}Target not found in inactive hoplogs list: " + target, logPrefix);
-        }
-        return null;
-      } finally {
-        hoplogRWLock.writeLock().unlock();
-      }
-    }
-    
-    /**
-     * force closes all readers
-     */
-    public void close() throws IOException {
-      hoplogRWLock.writeLock().lock();
-      try {
-        for (TrackedReference<Hoplog> hoplog : hoplogs) {
-          closeReaderAndSuppressError(hoplog.get(), true);
-        }
-        
-        for (TrackedReference<Hoplog> hoplog : inactiveHoplogs) {
-          closeReaderAndSuppressError(hoplog.get(), true);
-        }
-      } finally {
-        hoplogs.clear();
-        inactiveHoplogs.clear();
-        hoplogRWLock.writeLock().unlock();
-      }
-    }
-    
-    /**
-     * locks hoplogs to create a snapshot of active hoplogs. The reference count
-     * of each reader is incremented to keep it from being closed
-     * 
-     * @return ordered list of sorted oplogs
-     */
-    private List<TrackedReference<Hoplog>> getTrackedSortedOplogList(String user) {
-      List<TrackedReference<Hoplog>> oplogs = new ArrayList<TrackedReference<Hoplog>>();
-      hoplogRWLock.readLock().lock();
-      try {
-        for (TrackedReference<Hoplog> oplog : hoplogs) {
-          oplog.increment(user);
-          oplogs.add(oplog);
-          if (logger.isDebugEnabled()) {
-            logger.debug("{}Track ref " + oplog, logPrefix);
-          }
-        }
-      } finally {
-        hoplogRWLock.readLock().unlock();
-      }
-      return oplogs;
-    }
-
-    private TrackedReference<Hoplog> trackHoplog(Hoplog hoplog, String user) {
-      hoplogRWLock.readLock().lock();
-      try {
-        for (TrackedReference<Hoplog> oplog : hoplogs) {
-          if (oplog.get().getFileName().equals(hoplog.getFileName())) {
-            oplog.increment(user);
-            if (logger.isDebugEnabled()) {
-              logger.debug("{}Track " + oplog, logPrefix);
-            }
-            return oplog;
-          }
-        }
-      } finally {
-        hoplogRWLock.readLock().unlock();
-      }
-      throw new NoSuchElementException(hoplog.getFileName());
-    }
-    
-    public void releaseHoplogs(List<TrackedReference<Hoplog>> targets, String user) {
-      if (targets == null) {
-        return;
-      }
-      
-      for (int i = targets.size() - 1; i >= 0; i--) {
-        TrackedReference<Hoplog> hoplog = targets.get(i);
-        releaseHoplog(hoplog, user);
-      }
-    }
-
-    public void releaseHoplog(TrackedReference<Hoplog> target, String user) {
-      if (target ==  null) {
-        return;
-      }
-      
-      target.decrement(user);
-      if (logger.isDebugEnabled()) {
-        logger.debug("{}Try release " + target, logPrefix);
-      }
-      if (target.inUse()) {
-        return;
-      }
-      
-      // there are no users of this hoplog. if it is inactive close it.
-      hoplogRWLock.writeLock().lock();
-      try {
-        if (!target.inUse()) {
-          if (inactiveHoplogs.contains(target) ) {
-            int sizeBefore = inactiveHoplogs.size();
-            inactiveHoplogs.remove(target);
-            closeReaderAndSuppressError(target.get(), true);
-            if (logger.isDebugEnabled()) {
-              fineLog("Closed inactive: ", target, " totalBefore:", sizeBefore,
-                  " totalAfter:", inactiveHoplogs.size());
-            }
-          } else if (hoplogs.contains(target)) {
-            closeExcessReaders();              
-          }
-        }
-      } catch (IOException e) {
-        logger.warn(LocalizedMessage.create(LocalizedStrings.HOPLOG_IO_ERROR, 
-            "Close reader: " + target.get().getFileName()), e);
-      } finally {
-        hoplogRWLock.writeLock().unlock();
-      }
-    }
-
-    /*
-     * detects if the total number of open hdfs readers is more than the configured
-     * max file limit. In case the limit is exceeded, some readers need to be
-     * closed to avoid a datanode receiver overflow error.
-     */
-    private void closeExcessReaders() throws IOException {
-      if (logger.isDebugEnabled()) {
-        logger.debug("{}Close excess readers. Size:" + hoplogs.size()
-            + " activeReaders:" + activeReaderCount.get() + " limit:"
-            + maxOpenFilesLimit, logPrefix);
-      }
-
-      if (hoplogs.size() <= maxOpenFilesLimit) {
-        return;
-      }
-      
-      if (activeReaderCount.get() <= maxOpenFilesLimit) {
-        return;
-      }
-      
-      for (TrackedReference<Hoplog> hoplog : hoplogs.descendingSet()) {
-        if (!hoplog.inUse() && !hoplog.get().isClosed()) {
-          hoplog.get().close(false);
-          if (logger.isDebugEnabled()) {
-            logger.debug("{}Excess reader closed " + hoplog, logPrefix);
-          }
-        }
-        
-        if (activeReaderCount.get() <= maxOpenFilesLimit) {
-          return;
-        }
-      }
-    }
-
-    @Override
-    public void readerCreated() {
-      activeReaderCount.incrementAndGet();
-      stats.incActiveReaders(1);
-      if (logger.isDebugEnabled())
-        logger.debug("{}ActiveReader++", logPrefix);
-    }
-
-    @Override
-    public void readerClosed() {
-      activeReaderCount.decrementAndGet(); 
-      stats.incActiveReaders(-1);
-      if (logger.isDebugEnabled())
-        logger.debug("{}ActiveReader--", logPrefix);
-    }
-  }
-
-  /**
-   * returns an ordered list of oplogs, FOR TESTING ONLY
-   */
-  public List<TrackedReference<Hoplog>> getSortedOplogs() throws IOException {
-    List<TrackedReference<Hoplog>> oplogs = new ArrayList<TrackedReference<Hoplog>>();
-    for (TrackedReference<Hoplog> oplog : hoplogReadersController.hoplogs) {
-        oplogs.add(oplog);
-    }
-    return oplogs;
-  }
-
-  /**
-   * Merged iterator on a list of hoplogs. 
-   */
-  public class BucketIterator implements HoplogIterator<byte[], SortedHoplogPersistedEvent> {
-    // list of hoplogs to be iterated on.
-    final List<TrackedReference<Hoplog>> hoplogList;
-    HoplogSetIterator mergedIter;
-
-    public BucketIterator(List<TrackedReference<Hoplog>> hoplogs) throws IOException {
-      this.hoplogList = hoplogs;
-      try {
-        mergedIter = new HoplogSetIterator(this.hoplogList);
-        if (logger.isDebugEnabled()) {
-          for (TrackedReference<Hoplog> hoplog : hoplogs) {
-            logger.debug("{}BucketIter target hop:" + hoplog.get().getFileName(), logPrefix);
-          }
-        }
-      } catch (IllegalArgumentException e) {
-        if (IOException.class.isAssignableFrom(e.getCause().getClass())) {
-          throw handleIOError((IOException) e.getCause());
-        } else {
-          throw e;
-        }
-      } catch (IOException e) {
-        throw handleIOError(e);
-      } catch (HDFSIOException e) {
-        throw handleIOError(e);
-      } 
-    }
-
-    @Override
-    public boolean hasNext() {
-      return mergedIter.hasNext();
-    }
-
-    @Override
-    public byte[] next() throws IOException {
-      try {
-        return HFileSortedOplog.byteBufferToArray(mergedIter.next());
-      } catch (IllegalArgumentException e) {
-        if (IOException.class.isAssignableFrom(e.getCause().getClass())) {
-          throw handleIOError((IOException) e.getCause());
-        } else {
-          throw e;
-        }
-      } catch (IOException e) {
-        throw handleIOError(e);
-      }  
-    }
-
-    @Override
-    public byte[] getKey() {
-      // merged iterator returns a byte[]. This needs to be deserialized to the object that was
-      // provided during the flush operation
-      return HFileSortedOplog.byteBufferToArray(mergedIter.getKey());
-    }
-
-    @Override
-    public SortedHoplogPersistedEvent getValue() {
-      // merged iterator returns a byte[]. This needs to be deserialized to the
-      // object that was provided during the flush operation
-      try {
-        return deserializeValue(HFileSortedOplog.byteBufferToArray(mergedIter.getValue()));
-      } catch (IOException e) {
-        throw new HDFSIOException("Failed to deserialize byte while iterating on partition", e);
-      }
-    }
-
-    @Override
-    public void remove() {
-      mergedIter.remove();
-    }
-
-    @Override
-    public void close() {
-      // TODO release the closed iterators early
-      String user = logger.isDebugEnabled() ? "Scan" : null;
-      hoplogReadersController.releaseHoplogs(hoplogList, user);
-    }
-  }
-  
-  /**
-   * This utility class is used to filter temporary hoplogs in a bucket
-   * directory
-   * 
-   */
-  private static class TmpFilePathFilter implements PathFilter {
-    @Override
-    public boolean accept(Path path) {
-      Matcher matcher = HOPLOG_NAME_PATTERN.matcher(path.getName());
-      if (matcher.matches() && path.getName().endsWith(TEMP_HOPLOG_EXTENSION)) {
-        return true;
-      }
-      return false;
-    }
-  }
-
-  private void fineLog(Object... strings) {
-    if (logger.isDebugEnabled()) {
-      StringBuffer sb = concatString(strings);
-      logger.debug(logPrefix + sb.toString());
-    }
-  }
-
-  private StringBuffer concatString(Object... strings) {
-    StringBuffer sb = new StringBuffer();
-    for (Object str : strings) {
-      sb.append(str.toString());
-    }
-    return sb;
-  }
-
-  @Override
-  public void compactionCompleted(String region, int bucket, boolean isMajor) {
-    // do nothing for compaction events. Hoplog Organizer depends on addition
-    // and deletion of hoplogs only
-  }
-}
-
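
Note on the gain heuristic used by the removed computeGain method above: it scores a candidate
window of hoplogs by comparing the estimated read cost of the separate files with the cost of one
merged file, normalized by the window's total size in megabytes. Below is a minimal standalone
sketch of that idea; the 64 KB block size mirrors the constant in the diff, while the index
fan-out of 200, the class and method names, and the file sizes are illustrative assumptions only.

// Standalone sketch of the compaction gain heuristic described above.
// KEYS_PER_INDEX_BLOCK and the sample file sizes are hypothetical values.
public class CompactionGainSketch {
  static final double BLOCK_SIZE = 64.0 * 1024;      // one data block, as in the removed code
  static final double KEYS_PER_INDEX_BLOCK = 200;    // assumed N-ary index fan-out

  // read cost of one hoplog: index block reads (log base N of data blocks) plus one data block read
  static double readCost(long sizeInBytes) {
    double dataBlocks = Math.max(1.0, sizeInBytes / BLOCK_SIZE);
    return Math.ceil(Math.max(1.0, Math.log(dataBlocks) / Math.log(KEYS_PER_INDEX_BLOCK))) + 1;
  }

  public static void main(String[] args) {
    long[] fileSizes = { 2L << 20, 4L << 20, 8L << 20 }; // three small hoplogs (2, 4, 8 MB)
    double costBefore = 0;
    long totalSize = 0;
    for (long size : fileSizes) {
      costBefore += readCost(size);                    // cost of reading each file separately
      totalSize += size;
    }
    double costAfter = readCost(totalSize);            // cost of one compacted hoplog
    double gain = (costBefore - costAfter) / (totalSize / 1024.0 / 1024.0);
    System.out.printf("costBefore=%.1f costAfter=%.1f gain=%.3f%n", costBefore, costAfter, gain);
  }
}

Under these assumptions a larger gain means more read cost saved per megabyte rewritten, which is
why the removed selection loop keeps the bestFrom-bestTo window with the maximum gain.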

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/Hoplog.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/Hoplog.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/Hoplog.java
deleted file mode 100644
index e622749..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/Hoplog.java
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import com.gemstone.gemfire.internal.hll.ICardinality;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.EnumMap;
-
-
-/**
- * Ordered sequence file
- */
-public interface Hoplog extends Closeable, Comparable<Hoplog>  {
-  public static final boolean NOP_WRITE = Boolean.getBoolean("Hoplog.NOP_WRITE");
-  
-  /** the gemfire magic number for sorted oplogs */
-  public static final byte[] MAGIC = new byte[] { 0x47, 0x53, 0x4F, 0x50 };
-
-  /**
-   * @return an instance of the cached reader, creating one if it does not exist
-   * @throws IOException
-   */
-  HoplogReader getReader() throws IOException;
-
-  /**
-   * Creates a new sorted writer.
-   * 
-   * @param keys
-   *          an estimate of the number of keys to be written
-   * @return the writer
-   * @throws IOException
-   *           error creating writer
-   */
-  HoplogWriter createWriter(int keys) throws IOException;
-
-  /**
-   * @param listener listener of reader's activity
-   */
-  void setReaderActivityListener(HoplogReaderActivityListener listener);
-  
-  /**
-   * @return file name
-   */
-  String getFileName();
-
-  /**
-   * @return Entry count estimate for this hoplog
-   */
-  public ICardinality getEntryCountEstimate() throws IOException;
-
-  /**
-   * renames the file to the input name
-   * 
-   * @throws IOException
-   */
-  void rename(String name) throws IOException;
-
-  /**
-   * Deletes the sorted oplog file
-   */
-  void delete() throws IOException;
-  
-  /**
-   * Returns true if the hoplog is closed for reads.
-   * @return true if closed
-   */
-  boolean isClosed();
-  
-  /**
-   * @param clearCache clear this sorted oplog's cache if true
-   * @throws IOException 
-   */
-  void close(boolean clearCache) throws IOException;
-  
-  /**
-   * @return the modification timestamp of the file
-   */
-  long getModificationTimeStamp();
-  
-  /**
-   * @return the size of file
-   */
-  long getSize();
-
-  /**
-   * Reads sorted oplog file.
-   */
-  public interface HoplogReader extends HoplogSetReader<byte[], byte[]> {
-    /**
-     * Returns a byte buffer based view of the value linked to the key
-     */
-    ByteBuffer get(byte[] key) throws IOException;
-
-    /**
-     * @return Returns the bloom filter associated with this sorted oplog file.
-     */
-    BloomFilter getBloomFilter() throws IOException;
-
-    /**
-     * @return number of KV pairs in the file, including tombstone entries
-     */
-    long getEntryCount();
-
-    /**
-     * Returns the {@link ICardinality} implementation that is useful for
-     * estimating the size of this Hoplog.
-     * 
-     * @return the cardinality estimator
-     */
-    ICardinality getCardinalityEstimator();
-  }
-
-  /**
-   * Provides hoplog's reader's activity related events to owners
-   * 
-   */
-  public interface HoplogReaderActivityListener {
-    /**
-     * Invoked when a reader is created and an active reader did not exist
-     * earlier
-     */
-    public void readerCreated();
-    
-    /**
-     * Invoked when an active reader is closed
-     */
-    public void readerClosed();
-  }
-
-  /**
-   * Writes key/value pairs in a sorted oplog file. Each entry that is appended must have a key that
-   * is greater than or equal to the previous key.
-   */
-  public interface HoplogWriter extends Closeable {
-    /**
-     * Appends another key and value. The key is expected to be greater than or equal to the last
-     * key that was appended.
-     * @param key
-     * @param value
-     */
-    void append(byte[] key, byte[] value) throws IOException;
-
-    /**
-     * Appends another key and value. The key is expected to be greater than or equal to the last
-     * key that was appended.
-     */
-    void append(ByteBuffer key, ByteBuffer value) throws IOException;
-
-    void close(EnumMap<Meta, byte[]> metadata) throws IOException;
-    
-    /**
-     * flushes all outstanding data into the OS buffers on all DN replicas 
-     * @throws IOException
-     */
-    void hsync() throws IOException;
-    
-    /**
-     * Gets the size of the data that has already been written
-     * to the writer.  
-     * 
-     * @return number of bytes already written to the writer
-     */
-    public long getCurrentSize() throws IOException; 
-  }
-
-  /**
-   * Identifies the gemfire sorted oplog versions.
-   */
-  public enum HoplogVersion {
-    V1;
-
-    /**
-     * Returns the version string as bytes.
-     * 
-     * @return the byte form
-     */
-    public byte[] toBytes() {
-      return name().getBytes();
-    }
-
-    /**
-     * Constructs the version from a byte array.
-     * 
-     * @param version
-     *          the byte form of the version
-     * @return the version enum
-     */
-    public static HoplogVersion fromBytes(byte[] version) {
-      return HoplogVersion.valueOf(new String(version));
-    }
-  }
-
-  /**
-   * Names the available metadata keys that will be stored in the sorted oplog.
-   */
-  public enum Meta {
-    /** identifies the soplog as a gemfire file, required */
-    GEMFIRE_MAGIC,
-
-    /** identifies the soplog version, required */
-    SORTED_OPLOG_VERSION,
-    
-    /** identifies the gemfire version the soplog was created with */
-    GEMFIRE_VERSION,
-
-    /** identifies the statistics data */
-    STATISTICS,
-
-    /** identifies the embedded comparator types */
-    COMPARATORS,
-    
-    /** identifies the pdx type data, optional */
-    PDX,
-
-    /**
-     * identifies the hyperLogLog byte[] which estimates the cardinality for
-     * only one hoplog
-     */
-    LOCAL_CARDINALITY_ESTIMATE,
-
-    /**
-     * represents the hyperLogLog byte[] after upgrading the constant from
-     * 0.1 to 0.03 (in gfxd 1.4)
-     */
-    LOCAL_CARDINALITY_ESTIMATE_V2
-    ;
-
-    /**
-     * Converts the metadata name to bytes.
-     */
-    public byte[] toBytes() {
-      return ("gemfire." + name()).getBytes();
-    }
-
-    /**
-     * Converts the byte form of the name to an enum.
-     * 
-     * @param key
-     *          the key as bytes
-     * @return the enum form
-     */
-    public static Meta fromBytes(byte[] key) {
-      return Meta.valueOf(new String(key).substring("gemfire.".length()));
-    }
-  }
-}
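
The Meta enum above stores each metadata key as the bytes of "gemfire." plus the enum name. A
small self-contained sketch of that round trip is below; the enum is reduced to three members and
wrapped in a hypothetical class so the snippet compiles on its own.

// Illustration of the Meta key round-trip defined in the removed Hoplog.Meta enum.
public class MetaKeySketch {
  enum Meta {
    GEMFIRE_MAGIC, SORTED_OPLOG_VERSION, STATISTICS;

    // key stored in the hoplog metadata as "gemfire.<NAME>" bytes
    byte[] toBytes() {
      return ("gemfire." + name()).getBytes();
    }

    // reverse mapping: strip the "gemfire." prefix and look up the enum constant
    static Meta fromBytes(byte[] key) {
      return Meta.valueOf(new String(key).substring("gemfire.".length()));
    }
  }

  public static void main(String[] args) {
    byte[] raw = Meta.STATISTICS.toBytes();
    System.out.println(new String(raw));      // prints: gemfire.STATISTICS
    System.out.println(Meta.fromBytes(raw));  // prints: STATISTICS
  }
}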

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogConfig.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogConfig.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogConfig.java
deleted file mode 100644
index 7b8415e..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogConfig.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-
-/**
- * This interface defines all the hoplog configuration-related constants. Keeping
- * them in one location simplifies searching for a constant.
- * 
- */
-public interface HoplogConfig {
-  // max number of open files per bucket. By default each region has 113
-  // buckets. A typical hdfs deployment has 5 DNs, each allowing 4096 open
-  // files. The intent is to use around 40% of these, hence the default
-  // value is 72
-  public static final String BUCKET_MAX_OPEN_HFILES_CONF = "hoplog.bucket.max.open.files";
-  public final Integer BUCKET_MAX_OPEN_HFILES_DEFAULT = 72;
-  
-  public static final String HFILE_BLOCK_SIZE_CONF = "hoplog.hfile.block.size";
-  
-  // Region maintenance activity interval. default is 2 mins
-  public static final String JANITOR_INTERVAL_SECS = "hoplog.janitor.interval.secs";
-  public static final long JANITOR_INTERVAL_SECS_DEFAULT = 120l;
-  
-  // Maximum number of milliseconds to wait for suspension action to complete
-  public static final String SUSPEND_MAX_WAIT_MS = "hoplog.suspend.max.wait.ms";
-  public static final long SUSPEND_MAX_WAIT_MS_DEFAULT = 1000l;
-  
-  // Compaction request queue limit configuration
-  public static final String COMPCATION_QUEUE_CAPACITY = "hoplog.compaction.queue.capacity";
-  public static final int COMPCATION_QUEUE_CAPACITY_DEFAULT = 500;
-  
-  // Compaction file ratio configuration
-  public static final String COMPACTION_FILE_RATIO = "hoplog.compaction.file.ratio";
-  public static final float COMPACTION_FILE_RATIO_DEFAULT = 1.3f;
-  
-  // Amount of time before deleting old temporary files
-  public static final String TMP_FILE_EXPIRATION = "hoplog.tmp.file.expiration.ms";
-  public static final long TMP_FILE_EXPIRATION_DEFAULT = 10 * 60 * 1000;
-  
-  // If this property is set as true, GF will let DFS client cache FS objects
-  public static final String USE_FS_CACHE = "hoplog.use.fs.cache";
-
-  // If set hdfs store will be able to connect to local file System
-  public static final String ALLOW_LOCAL_HDFS_PROP = "hoplog.ALLOW_LOCAL_HDFS";
-  
-  // The following constants are used to read kerberos authentication related
-  // configuration. Currently these configurations are provided via a client config
-  // file when the hdfs store is created
-  public static final String KERBEROS_PRINCIPAL = "gemfirexd.kerberos.principal";
-  public static final String KERBEROS_KEYTAB_FILE= "gemfirexd.kerberos.keytab.file";
-  public static final String PERFORM_SECURE_HDFS_CHECK_PROP = "gemfire.PERFORM_SECURE_HDFS_CHECK";
-  
-  // clean up interval file that is exposed to MapReduce jobs
-  public static final String CLEAN_UP_INTERVAL_FILE_NAME = "cleanUpInterval";
-  // Compression settings
-  public static final String COMPRESSION = "hoplog.compression.algorithm";
-  public static final String COMPRESSION_DEFAULT = "NONE";
-  
-}
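
The constants above are JVM system property names; the removed hoplog code reads them with
Long.getLong and Integer.getInteger so a built-in default applies whenever a property is unset
(for example in the suspend() wait loop and the open-file limit shown earlier in this diff). A
minimal sketch of that lookup pattern is below; the two property names and defaults are copied
from the diff, while the wrapper class itself is purely illustrative.

// Sketch of the system-property lookup pattern used with the removed HoplogConfig constants.
public class HoplogConfigLookupSketch {
  static final String SUSPEND_MAX_WAIT_MS = "hoplog.suspend.max.wait.ms";
  static final long SUSPEND_MAX_WAIT_MS_DEFAULT = 1000L;
  static final String BUCKET_MAX_OPEN_HFILES_CONF = "hoplog.bucket.max.open.files";
  static final int BUCKET_MAX_OPEN_HFILES_DEFAULT = 72;

  public static void main(String[] args) {
    // override one property; leave the other unset so its default is used
    System.setProperty(SUSPEND_MAX_WAIT_MS, "2500");

    long wait = Long.getLong(SUSPEND_MAX_WAIT_MS, SUSPEND_MAX_WAIT_MS_DEFAULT);
    int maxOpen = Integer.getInteger(BUCKET_MAX_OPEN_HFILES_CONF, BUCKET_MAX_OPEN_HFILES_DEFAULT);

    System.out.println("suspend wait ms = " + wait);     // 2500 (overridden)
    System.out.println("max open hfiles = " + maxOpen);  // 72 (default)
  }
}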

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46535f28/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogListener.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogListener.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogListener.java
deleted file mode 100644
index 7c3de03..0000000
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogListener.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.IOException;
-
-/**
- * Defines an observer of asynchronous operations on sorted oplog files associated with a bucket.
- */
-public interface HoplogListener {
-  /**
-   * Notifies creation of new sorted oplog files. A new file will be created after compaction or
-   * other bucket maintenance activities
-   * 
-   * @throws IOException
-   */
-  void hoplogCreated(String regionFolder, int bucketId, Hoplog... oplogs) throws IOException;
-
-  /**
-   * Notifies file deletion. A file becomes redundant after compaction or other bucket maintenance
-   * activities
-   * @throws IOException 
-   */
-  void hoplogDeleted(String regionFolder, int bucketId, Hoplog... oplogs) throws IOException;
-  
-  /**
-   * Notifies completion of a hoplog compaction cycle. 
-   * @param region Region on which compaction was performed
-   * @param bucket bucket id
-   * @param isMajor true if major compaction was executed
-   */
-  void compactionCompleted(String region, int bucket, boolean isMajor);
-}
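
HoplogListener above is a plain observer interface: the hoplog organizer reports file creation,
file deletion and compaction completion for a bucket. A self-contained sketch of that pattern
follows; the Hoplog varargs are replaced with file-name strings so the snippet compiles on its
own, and the counting listener is a hypothetical example, not part of the removed code.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

// Minimal observer sketch mirroring the removed HoplogListener callbacks.
public class HoplogListenerSketch {
  interface Listener {
    void hoplogCreated(String regionFolder, int bucketId, String... files) throws IOException;
    void hoplogDeleted(String regionFolder, int bucketId, String... files) throws IOException;
    void compactionCompleted(String region, int bucket, boolean isMajor);
  }

  // hypothetical listener that just tracks how many files are currently active
  static class CountingListener implements Listener {
    final AtomicInteger activeFiles = new AtomicInteger();

    public void hoplogCreated(String regionFolder, int bucketId, String... files) {
      activeFiles.addAndGet(files.length);
    }

    public void hoplogDeleted(String regionFolder, int bucketId, String... files) {
      activeFiles.addAndGet(-files.length);
    }

    public void compactionCompleted(String region, int bucket, boolean isMajor) {
      System.out.println("compaction done, major=" + isMajor
          + ", active files=" + activeFiles.get());
    }
  }

  public static void main(String[] args) {
    CountingListener l = new CountingListener();
    l.hoplogCreated("/region", 7, "a.hop", "b.hop");   // flush creates two files
    l.hoplogCreated("/region", 7, "ab.chop");          // compaction output
    l.hoplogDeleted("/region", 7, "a.hop", "b.hop");   // compacted targets expire
    l.compactionCompleted("/region", 7, false);        // prints active files=1
  }
}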



[27/63] [abbrv] incubator-geode git commit: GEODE-1262: Removed VM5-VM7 in AsyncEventQueueTestBase

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ecbbf766/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
index cec93fa..a099617 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
@@ -76,49 +76,49 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
   public void testSerialAsyncEventQueueAttributes() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
-    vm4.invoke(createCacheRunnable(lnPort));
+    vm1.invoke(createCacheRunnable(lnPort));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 150, true, true, "testDS", true ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventQueueAttributes( "ln", 100, 150, AsyncEventQueueFactoryImpl.DEFAULT_BATCH_TIME_INTERVAL, true, "testDS", true, true ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventQueueAttributes( "ln", 100, 150, AsyncEventQueueFactoryImpl.DEFAULT_BATCH_TIME_INTERVAL, true, "testDS", true, true ));
   }
   
   public void testSerialAsyncEventQueueSize() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
+    vm3.invoke(createCacheRunnable(lnPort));
     vm4.invoke(createCacheRunnable(lnPort));
-    vm5.invoke(createCacheRunnable(lnPort));
-    vm6.invoke(createCacheRunnable(lnPort));
-    vm7.invoke(createCacheRunnable(lnPort));
 
+    vm1.invoke(createAsyncEventQueueRunnable());
+    vm2.invoke(createAsyncEventQueueRunnable());
+    vm3.invoke(createAsyncEventQueueRunnable());
     vm4.invoke(createAsyncEventQueueRunnable());
-    vm5.invoke(createAsyncEventQueueRunnable());
-    vm6.invoke(createAsyncEventQueueRunnable());
-    vm7.invoke(createAsyncEventQueueRunnable());
 
+    vm1.invoke(createReplicatedRegionRunnable());
+    vm2.invoke(createReplicatedRegionRunnable());
+    vm3.invoke(createReplicatedRegionRunnable());
     vm4.invoke(createReplicatedRegionRunnable());
-    vm5.invoke(createReplicatedRegionRunnable());
-    vm6.invoke(createReplicatedRegionRunnable());
-    vm7.invoke(createReplicatedRegionRunnable());
 
-    vm4
+    vm1
         .invoke(pauseAsyncEventQueueRunnable());
-    vm5
+    vm2
         .invoke(pauseAsyncEventQueueRunnable());
-    vm6
+    vm3
         .invoke(pauseAsyncEventQueueRunnable());
-    vm7
+    vm4
         .invoke(pauseAsyncEventQueueRunnable());
     Wait.pause(1000);// pause at least for the batchTimeInterval
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
         1000 ));
 
-    int vm4size = (Integer)vm4.invoke(() -> AsyncEventQueueTestBase.getAsyncEventQueueSize( "ln" ));
-    int vm5size = (Integer)vm5.invoke(() -> AsyncEventQueueTestBase.getAsyncEventQueueSize( "ln" ));
-    assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm4size);
-    assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm5size);
+    int vm1size = (Integer)vm1.invoke(() -> AsyncEventQueueTestBase.getAsyncEventQueueSize( "ln" ));
+    int vm2size = (Integer)vm2.invoke(() -> AsyncEventQueueTestBase.getAsyncEventQueueSize( "ln" ));
+    assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm1size);
+    assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm2size);
   }
 
   protected SerializableRunnableIF pauseAsyncEventQueueRunnable() {
@@ -145,35 +145,35 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
   public void testConcurrentSerialAsyncEventQueueSize() {
 	Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+	vm1.invoke(createCacheRunnable(lnPort));
+	vm2.invoke(createCacheRunnable(lnPort));
+	vm3.invoke(createCacheRunnable(lnPort));
 	vm4.invoke(createCacheRunnable(lnPort));
-	vm5.invoke(createCacheRunnable(lnPort));
-	vm6.invoke(createCacheRunnable(lnPort));
-	vm7.invoke(createCacheRunnable(lnPort));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 150, true, false, null, false, 2, OrderPolicy.KEY ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 150, true, false, null, false, 2, OrderPolicy.KEY ));
 
+	vm1.invoke(createReplicatedRegionRunnable());
+	vm2.invoke(createReplicatedRegionRunnable());
+	vm3.invoke(createReplicatedRegionRunnable());
 	vm4.invoke(createReplicatedRegionRunnable());
-	vm5.invoke(createReplicatedRegionRunnable());
-	vm6.invoke(createReplicatedRegionRunnable());
-	vm7.invoke(createReplicatedRegionRunnable());
 
-	vm4
+	vm1
 	  .invoke(pauseAsyncEventQueueRunnable());
-	vm5
+	vm2
 	  .invoke(pauseAsyncEventQueueRunnable());
 
 	Wait.pause(1000);// pause at least for the batchTimeInterval
 
-	vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
+	vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
 		1000 ));
 
-	int vm4size = (Integer)vm4.invoke(() -> AsyncEventQueueTestBase.getAsyncEventQueueSize( "ln" ));
-	int vm5size = (Integer)vm5.invoke(() -> AsyncEventQueueTestBase.getAsyncEventQueueSize( "ln" ));
-	assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm4size);
-	assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm5size);
+	int vm1size = (Integer)vm1.invoke(() -> AsyncEventQueueTestBase.getAsyncEventQueueSize( "ln" ));
+	int vm2size = (Integer)vm2.invoke(() -> AsyncEventQueueTestBase.getAsyncEventQueueSize( "ln" ));
+	assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm1size);
+	assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm2size);
   }
   
   /**
@@ -186,28 +186,28 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
   public void testReplicatedSerialAsyncEventQueue() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
+    vm3.invoke(createCacheRunnable(lnPort));
     vm4.invoke(createCacheRunnable(lnPort));
-    vm5.invoke(createCacheRunnable(lnPort));
-    vm6.invoke(createCacheRunnable(lnPort));
-    vm7.invoke(createCacheRunnable(lnPort));
 
+    vm1.invoke(createAsyncEventQueueRunnable());
+    vm2.invoke(createAsyncEventQueueRunnable());
+    vm3.invoke(createAsyncEventQueueRunnable());
     vm4.invoke(createAsyncEventQueueRunnable());
-    vm5.invoke(createAsyncEventQueueRunnable());
-    vm6.invoke(createAsyncEventQueueRunnable());
-    vm7.invoke(createAsyncEventQueueRunnable());
 
+    vm1.invoke(createReplicatedRegionRunnable());
+    vm2.invoke(createReplicatedRegionRunnable());
+    vm3.invoke(createReplicatedRegionRunnable());
     vm4.invoke(createReplicatedRegionRunnable());
-    vm5.invoke(createReplicatedRegionRunnable());
-    vm6.invoke(createReplicatedRegionRunnable());
-    vm7.invoke(createReplicatedRegionRunnable());
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
         1000 ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1000 ));// primary sender
-    vm5.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
-    vm6.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
-    vm7.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1000 ));// primary sender
+    vm2.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm3.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
   }
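
The validateAsyncEventListener("ln", n) calls above assert how many events the listener on each member has seen: the member hosting the primary serial queue processes all 1000 puts while the secondaries process none. The MyAsyncEventListener classes registered by the test base are not part of this diff; a listener in the same spirit simply records every event it is handed, roughly as in this illustrative sketch (not the actual test class):

    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    import com.gemstone.gemfire.cache.asyncqueue.AsyncEvent;
    import com.gemstone.gemfire.cache.asyncqueue.AsyncEventListener;

    public class RecordingAsyncEventListener implements AsyncEventListener {
      private final Map<Object, Object> eventsMap = new ConcurrentHashMap<Object, Object>();

      public boolean processEvents(List<AsyncEvent> events) {
        for (AsyncEvent event : events) {
          eventsMap.put(event.getKey(), event.getDeserializedValue());
        }
        return true; // batch handled, queue may remove it
      }

      public void close() {
      }

      public int getEventCount() {
        return eventsMap.size(); // what a validate/map-size style check would read
      }
    }
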
   
   /**
@@ -217,32 +217,32 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
   public void testReplicatedSerialAsyncEventQueueWithCacheLoader() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
+    vm3.invoke(createCacheRunnable(lnPort));
     vm4.invoke(createCacheRunnable(lnPort));
-    vm5.invoke(createCacheRunnable(lnPort));
-    vm6.invoke(createCacheRunnable(lnPort));
-    vm7.invoke(createCacheRunnable(lnPort));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, false, false, null, false, "MyAsyncEventListener_CacheLoader" ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, false, false, null, false, "MyAsyncEventListener_CacheLoader" ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm3.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, false, false, null, false, "MyAsyncEventListener_CacheLoader" ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, false, false, null, false, "MyAsyncEventListener_CacheLoader" ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithCacheLoaderAndAsyncEventQueue( getTestMethodName() + "_RR", "ln" ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithCacheLoaderAndAsyncEventQueue( getTestMethodName() + "_RR", "ln" ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithCacheLoaderAndAsyncEventQueue( getTestMethodName() + "_RR", "ln" ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithCacheLoaderAndAsyncEventQueue( getTestMethodName() + "_RR", "ln" ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithCacheLoaderAndAsyncEventQueue( getTestMethodName() + "_RR", "ln" ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithCacheLoaderAndAsyncEventQueue( getTestMethodName() + "_RR", "ln" ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createReplicatedRegionWithCacheLoaderAndAsyncEventQueue( getTestMethodName() + "_RR", "ln" ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.doGets( getTestMethodName() + "_RR",
+    vm1.invoke(() -> AsyncEventQueueTestBase.doGets( getTestMethodName() + "_RR",
         10 ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventForOperationDetail( "ln", 10, true, false ));// primary sender
-    vm5.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventForOperationDetail( "ln", 0, true, false ));// secondary
-    vm6.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventForOperationDetail( "ln", 0, true, false ));// secondary
-    vm7.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventForOperationDetail( "ln", 0, true, false ));// secondary
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventForOperationDetail( "ln", 10, true, false ));// primary sender
+    vm2.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventForOperationDetail( "ln", 0, true, false ));// secondary
+    vm3.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventForOperationDetail( "ln", 0, true, false ));// secondary
+    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventForOperationDetail( "ln", 0, true, false ));// secondary
   }
   
   /**
@@ -260,47 +260,47 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
   public void testReplicatedSerialAsyncEventQueue_ExceptionScenario() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
+    vm3.invoke(createCacheRunnable(lnPort));
     vm4.invoke(createCacheRunnable(lnPort));
-    vm5.invoke(createCacheRunnable(lnPort));
-    vm6.invoke(createCacheRunnable(lnPort));
-    vm7.invoke(createCacheRunnable(lnPort));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueueWithCustomListener( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueueWithCustomListener( "ln",
         false, 100, 100, false, false, null, false, 1 ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueueWithCustomListener( "ln",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueueWithCustomListener( "ln",
         false, 100, 100, false, false, null, false, 1 ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueueWithCustomListener( "ln",
+    vm3.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueueWithCustomListener( "ln",
         false, 100, 100, false, false, null, false, 1 ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueueWithCustomListener( "ln",
+    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueueWithCustomListener( "ln",
         false, 100, 100, false, false, null, false, 1 ));
 
+    vm1.invoke(createReplicatedRegionRunnable());
+    vm2.invoke(createReplicatedRegionRunnable());
+    vm3.invoke(createReplicatedRegionRunnable());
     vm4.invoke(createReplicatedRegionRunnable());
-    vm5.invoke(createReplicatedRegionRunnable());
-    vm6.invoke(createReplicatedRegionRunnable());
-    vm7.invoke(createReplicatedRegionRunnable());
     
-    vm4
+    vm1
         .invoke(pauseAsyncEventQueueRunnable());
-    vm5
+    vm2
         .invoke(pauseAsyncEventQueueRunnable());
-    vm6
+    vm3
         .invoke(pauseAsyncEventQueueRunnable());
-    vm7
+    vm4
         .invoke(pauseAsyncEventQueueRunnable());
     Wait.pause(2000);// pause at least for the batchTimeInterval
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
         100 ));
     
+    vm1.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
     vm4.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
-
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateCustomAsyncEventListener( "ln", 100 ));// primary sender
-    vm5.invoke(() -> AsyncEventQueueTestBase.validateCustomAsyncEventListener( "ln", 0 ));// secondary
-    vm6.invoke(() -> AsyncEventQueueTestBase.validateCustomAsyncEventListener( "ln", 0 ));// secondary
-    vm7.invoke(() -> AsyncEventQueueTestBase.validateCustomAsyncEventListener( "ln", 0 ));// secondary
+
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateCustomAsyncEventListener( "ln", 100 ));// primary sender
+    vm2.invoke(() -> AsyncEventQueueTestBase.validateCustomAsyncEventListener( "ln", 0 ));// secondary
+    vm3.invoke(() -> AsyncEventQueueTestBase.validateCustomAsyncEventListener( "ln", 0 ));// secondary
+    vm4.invoke(() -> AsyncEventQueueTestBase.validateCustomAsyncEventListener( "ln", 0 ));// secondary
   }
 
   /**
@@ -312,32 +312,32 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
   public void testReplicatedSerialAsyncEventQueueWithConflationEnabled() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
+    vm3.invoke(createCacheRunnable(lnPort));
     vm4.invoke(createCacheRunnable(lnPort));
-    vm5.invoke(createCacheRunnable(lnPort));
-    vm6.invoke(createCacheRunnable(lnPort));
-    vm7.invoke(createCacheRunnable(lnPort));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm3.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false ));
 
+    vm1.invoke(createReplicatedRegionRunnable());
+    vm2.invoke(createReplicatedRegionRunnable());
+    vm3.invoke(createReplicatedRegionRunnable());
     vm4.invoke(createReplicatedRegionRunnable());
-    vm5.invoke(createReplicatedRegionRunnable());
-    vm6.invoke(createReplicatedRegionRunnable());
-    vm7.invoke(createReplicatedRegionRunnable());
 
-    vm4
+    vm1
         .invoke(pauseAsyncEventQueueRunnable());
-    vm5
+    vm2
         .invoke(pauseAsyncEventQueueRunnable());
-    vm6
+    vm3
         .invoke(pauseAsyncEventQueueRunnable());
-    vm7
+    vm4
         .invoke(pauseAsyncEventQueueRunnable());
     Wait.pause(1000);// pause at least for the batchTimeInterval
 
@@ -347,11 +347,11 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
       keyValues.put(i, i);
     }
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
+    vm1.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
         getTestMethodName() + "_RR", keyValues ));
 
     Wait.pause(1000);
-    vm4.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize(
+    vm1.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize(
         "ln", keyValues.size() ));
 
     for (int i = 0; i < 500; i++) {
@@ -360,29 +360,29 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
 
     // Put the update events and check the queue size.
     // There should be no conflation with the previous create events.
-    vm4.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
+    vm1.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
         getTestMethodName() + "_RR", updateKeyValues ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize(
+    vm1.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize(
         "ln", keyValues.size() + updateKeyValues.size() ));
 
     // Put the update events again and check the queue size.
     // There should be conflation with the previous update events.
-    vm4.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
+    vm1.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
         getTestMethodName() + "_RR", updateKeyValues ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize(
+    vm1.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize(
         "ln", keyValues.size() + updateKeyValues.size() ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
     vm4.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
-
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1000 ));// primary sender
-    vm5.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
-    vm6.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
-    vm7.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1000 ));// primary sender
+    vm2.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm3.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
   }
 
   
@@ -398,28 +398,28 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
   @Ignore("Disabled until I can sort out the hydra dependencies - see bug 52214")
   public void DISABLED_testReplicatedSerialAsyncEventQueueWithoutLocator() {
     int mPort = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
+    vm1.invoke(() -> AsyncEventQueueTestBase.createCacheWithoutLocator( mPort ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createCacheWithoutLocator( mPort ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createCacheWithoutLocator( mPort ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createCacheWithoutLocator( mPort ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createCacheWithoutLocator( mPort ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createCacheWithoutLocator( mPort ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createCacheWithoutLocator( mPort ));
 
+    vm1.invoke(createAsyncEventQueueRunnable());
+    vm2.invoke(createAsyncEventQueueRunnable());
+    vm3.invoke(createAsyncEventQueueRunnable());
     vm4.invoke(createAsyncEventQueueRunnable());
-    vm5.invoke(createAsyncEventQueueRunnable());
-    vm6.invoke(createAsyncEventQueueRunnable());
-    vm7.invoke(createAsyncEventQueueRunnable());
 
+    vm1.invoke(createReplicatedRegionRunnable());
+    vm2.invoke(createReplicatedRegionRunnable());
+    vm3.invoke(createReplicatedRegionRunnable());
     vm4.invoke(createReplicatedRegionRunnable());
-    vm5.invoke(createReplicatedRegionRunnable());
-    vm6.invoke(createReplicatedRegionRunnable());
-    vm7.invoke(createReplicatedRegionRunnable());
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
         1000 ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1000 ));// primary sender
-    vm5.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
-    vm6.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
-    vm7.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1000 ));// primary sender
+    vm2.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm3.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
   }
 
   /**
@@ -434,31 +434,31 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
   public void testReplicatedSerialAsyncEventQueueWithPeristenceEnabled() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
+    vm3.invoke(createCacheRunnable(lnPort));
     vm4.invoke(createCacheRunnable(lnPort));
-    vm5.invoke(createCacheRunnable(lnPort));
-    vm6.invoke(createCacheRunnable(lnPort));
-    vm7.invoke(createCacheRunnable(lnPort));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm3.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false ));
 
+    vm1.invoke(createReplicatedRegionRunnable());
+    vm2.invoke(createReplicatedRegionRunnable());
+    vm3.invoke(createReplicatedRegionRunnable());
     vm4.invoke(createReplicatedRegionRunnable());
-    vm5.invoke(createReplicatedRegionRunnable());
-    vm6.invoke(createReplicatedRegionRunnable());
-    vm7.invoke(createReplicatedRegionRunnable());
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
         1000 ));
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1000 ));// primary sender
-    vm5.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
-    vm6.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
-    vm7.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1000 ));// primary sender
+    vm2.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm3.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
   }
 
   /**
@@ -474,32 +474,32 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
   public void DISABLED_testReplicatedSerialAsyncEventQueueWithPeristenceEnabled_Restart() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
+    vm3.invoke(createCacheRunnable(lnPort));
     vm4.invoke(createCacheRunnable(lnPort));
-    vm5.invoke(createCacheRunnable(lnPort));
-    vm6.invoke(createCacheRunnable(lnPort));
-    vm7.invoke(createCacheRunnable(lnPort));
 
-    String firstDStore = (String)vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueueWithDiskStore( "ln", false, 100,
+    String firstDStore = (String)vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueueWithDiskStore( "ln", false, 100,
             100, true, null ));
 
-    vm4.invoke(createReplicatedRegionRunnable());
+    vm1.invoke(createReplicatedRegionRunnable());
 
     // pause async channel and then do the puts
-    vm4
+    vm1
         .invoke(pauseAsyncEventQueueRunnable());
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
         1000 ));
 
-    // ------------------ KILL VM4 AND REBUILD
+    // ------------------ KILL VM1 AND REBUILD
     // ------------------------------------------
-    vm4.invoke(() -> AsyncEventQueueTestBase.killSender());
+    vm1.invoke(() -> AsyncEventQueueTestBase.killSender());
 
-    vm4.invoke(createCacheRunnable(lnPort));
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueueWithDiskStore( "ln", false, 100, 100, true, firstDStore ));
-    vm4.invoke(createReplicatedRegionRunnable());
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueueWithDiskStore( "ln", false, 100, 100, true, firstDStore ));
+    vm1.invoke(createReplicatedRegionRunnable());
     // -----------------------------------------------------------------------------------
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1000 ));// primary sender
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1000 ));// primary sender
   }
 
   /**
@@ -514,36 +514,36 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
   public void DISABLED_testReplicatedSerialAsyncEventQueueWithPeristenceEnabled_Restart2() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
-    vm4.invoke(createCacheRunnable(lnPort));
-    vm5.invoke(createCacheRunnable(lnPort));
-    vm6.invoke(createCacheRunnable(lnPort));
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
+    vm3.invoke(createCacheRunnable(lnPort));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueueWithDiskStore( "ln", false, 100, 100, true, null ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueueWithDiskStore( "ln", false, 100, 100, true, null ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueueWithDiskStore( "ln", false, 100, 100, true, null ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueueWithDiskStore( "ln", false, 100, 100, true, null ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueueWithDiskStore( "ln", false, 100, 100, true, null ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueueWithDiskStore( "ln", false, 100, 100, true, null ));
 
-    vm4.invoke(createReplicatedRegionRunnable());
-    vm4.invoke(() -> AsyncEventQueueTestBase.addCacheListenerAndCloseCache( getTestMethodName() + "_RR" ));
-    vm5.invoke(createReplicatedRegionRunnable());
-    vm6.invoke(createReplicatedRegionRunnable());
+    vm1.invoke(createReplicatedRegionRunnable());
+    vm1.invoke(() -> AsyncEventQueueTestBase.addCacheListenerAndCloseCache( getTestMethodName() + "_RR" ));
+    vm2.invoke(createReplicatedRegionRunnable());
+    vm3.invoke(createReplicatedRegionRunnable());
 
-    vm5.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR", 2000 ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR", 2000 ));
 
     // -----------------------------------------------------------------------------------
-    vm5.invoke(() -> AsyncEventQueueTestBase.waitForSenderToBecomePrimary( AsyncEventQueueImpl
+    vm2.invoke(() -> AsyncEventQueueTestBase.waitForSenderToBecomePrimary( AsyncEventQueueImpl
             .getSenderIdFromAsyncEventQueueId("ln") ));
     
-    vm5.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
 
-    int vm4size = (Integer)vm4.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln" ));
-    int vm5size = (Integer)vm5.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln" ));
+    int vm1size = (Integer)vm1.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln" ));
+    int vm2size = (Integer)vm2.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln" ));
 
-    LogWriterUtils.getLogWriter().info("vm4 size is: " + vm4size);
-    LogWriterUtils.getLogWriter().info("vm5 size is: " + vm5size);
+    LogWriterUtils.getLogWriter().info("vm1 size is: " + vm1size);
+    LogWriterUtils.getLogWriter().info("vm2 size is: " + vm2size);
     // verify that there is no event loss
     assertTrue(
-        "Total number of entries in events map on vm4 and vm5 should be at least 2000",
-        (vm4size + vm5size) >= 2000);
+        "Total number of entries in events map on vm1 and vm2 should be at least 2000",
+        (vm1size + vm2size) >= 2000);
   }
   
   /**
@@ -557,31 +557,31 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
   public void testConcurrentSerialAsyncEventQueueWithReplicatedRegion() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
+    vm3.invoke(createCacheRunnable(lnPort));
     vm4.invoke(createCacheRunnable(lnPort));
-    vm5.invoke(createCacheRunnable(lnPort));
-    vm6.invoke(createCacheRunnable(lnPort));
-    vm7.invoke(createCacheRunnable(lnPort));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false, 3, OrderPolicy.KEY ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false, 3, OrderPolicy.KEY ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm3.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false, 3, OrderPolicy.KEY ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm4.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false, 3, OrderPolicy.KEY ));
 
+    vm1.invoke(createReplicatedRegionRunnable());
+    vm2.invoke(createReplicatedRegionRunnable());
+    vm3.invoke(createReplicatedRegionRunnable());
     vm4.invoke(createReplicatedRegionRunnable());
-    vm5.invoke(createReplicatedRegionRunnable());
-    vm6.invoke(createReplicatedRegionRunnable());
-    vm7.invoke(createReplicatedRegionRunnable());
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
         1000 ));
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 1000 ));// primary sender
-    vm5.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 0 ));// secondary
-    vm6.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 0 ));// secondary
-    vm7.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 0 ));// secondary
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 1000 ));// primary sender
+    vm2.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 0 ));// secondary
+    vm3.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 0 ));// secondary
+    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 0 ));// secondary
   }
   
   /**
@@ -595,37 +595,37 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
   public void testConcurrentSerialAsyncEventQueueWithReplicatedRegion_2() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
+    vm3.invoke(createCacheRunnable(lnPort));
     vm4.invoke(createCacheRunnable(lnPort));
-    vm5.invoke(createCacheRunnable(lnPort));
-    vm6.invoke(createCacheRunnable(lnPort));
-    vm7.invoke(createCacheRunnable(lnPort));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false, 3, OrderPolicy.THREAD ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false, 3, OrderPolicy.THREAD ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm3.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false, 3, OrderPolicy.THREAD ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm4.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false, 3, OrderPolicy.THREAD ));
 
+    vm1.invoke(createReplicatedRegionRunnable());
+    vm2.invoke(createReplicatedRegionRunnable());
+    vm3.invoke(createReplicatedRegionRunnable());
     vm4.invoke(createReplicatedRegionRunnable());
-    vm5.invoke(createReplicatedRegionRunnable());
-    vm6.invoke(createReplicatedRegionRunnable());
-    vm7.invoke(createReplicatedRegionRunnable());
 
-    vm4.invokeAsync(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
+    vm1.invokeAsync(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
         500 ));
-    vm4.invokeAsync(() -> AsyncEventQueueTestBase.doNextPuts( getTestMethodName() + "_RR",
+    vm1.invokeAsync(() -> AsyncEventQueueTestBase.doNextPuts( getTestMethodName() + "_RR",
       500, 1000 ));
     //Async invocation which was bound to fail
-//    vm4.invokeAsync(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
+//    vm1.invokeAsync(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
 //      1000, 1500 ));
     
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 1000 ));// primary sender
-    vm5.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 0 ));// secondary
-    vm6.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 0 ));// secondary
-    vm7.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 0 ));// secondary
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 1000 ));// primary sender
+    vm2.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 0 ));// secondary
+    vm3.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 0 ));// secondary
+    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 0 ));// secondary
   }
   
   /**
@@ -635,31 +635,31 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
   public void testConcurrentSerialAsyncEventQueueWithoutOrderPolicy() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
+    vm3.invoke(createCacheRunnable(lnPort));
     vm4.invoke(createCacheRunnable(lnPort));
-    vm5.invoke(createCacheRunnable(lnPort));
-    vm6.invoke(createCacheRunnable(lnPort));
-    vm7.invoke(createCacheRunnable(lnPort));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false, 3, null ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false, 3, null ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm3.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false, 3, null ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+    vm4.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false, 3, null ));
 
+    vm1.invoke(createReplicatedRegionRunnable());
+    vm2.invoke(createReplicatedRegionRunnable());
+    vm3.invoke(createReplicatedRegionRunnable());
     vm4.invoke(createReplicatedRegionRunnable());
-    vm5.invoke(createReplicatedRegionRunnable());
-    vm6.invoke(createReplicatedRegionRunnable());
-    vm7.invoke(createReplicatedRegionRunnable());
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_RR",
         1000 ));
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 1000 ));// primary sender
-    vm5.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 0 ));// secondary
-    vm6.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 0 ));// secondary
-    vm7.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 0 ));// secondary
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 1000 ));// primary sender
+    vm2.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 0 ));// secondary
+    vm3.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 0 ));// secondary
+    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener("ln", 0 ));// secondary
   }
 
   /**
@@ -671,29 +671,29 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
   public void testPartitionedSerialAsyncEventQueue() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
+    vm3.invoke(createCacheRunnable(lnPort));
     vm4.invoke(createCacheRunnable(lnPort));
-    vm5.invoke(createCacheRunnable(lnPort));
-    vm6.invoke(createCacheRunnable(lnPort));
-    vm7.invoke(createCacheRunnable(lnPort));
 
+    vm1.invoke(createAsyncEventQueueRunnable());
+    vm2.invoke(createAsyncEventQueueRunnable());
+    vm3.invoke(createAsyncEventQueueRunnable());
     vm4.invoke(createAsyncEventQueueRunnable());
-    vm5.invoke(createAsyncEventQueueRunnable());
-    vm6.invoke(createAsyncEventQueueRunnable());
-    vm7.invoke(createAsyncEventQueueRunnable());
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_PR",
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_PR",
         500 ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.doPutsFrom(
+    vm2.invoke(() -> AsyncEventQueueTestBase.doPutsFrom(
         getTestMethodName() + "_PR", 500, 1000 ));
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1000 ));// primary sender
-    vm5.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
-    vm6.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
-    vm7.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1000 ));// primary sender
+    vm2.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm3.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
   }
 
   /**
@@ -705,32 +705,32 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
   public void testPartitionedSerialAsyncEventQueueWithConflationEnabled() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
+    vm3.invoke(createCacheRunnable(lnPort));
     vm4.invoke(createCacheRunnable(lnPort));
-    vm5.invoke(createCacheRunnable(lnPort));
-    vm6.invoke(createCacheRunnable(lnPort));
-    vm7.invoke(createCacheRunnable(lnPort));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm3.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, true, false, null, false ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
 
-    vm4
+    vm1
         .invoke(pauseAsyncEventQueueRunnable());
-    vm5
+    vm2
         .invoke(pauseAsyncEventQueueRunnable());
-    vm6
+    vm3
         .invoke(pauseAsyncEventQueueRunnable());
-    vm7
+    vm4
         .invoke(pauseAsyncEventQueueRunnable());
     
     Wait.pause(2000);
@@ -741,10 +741,10 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
       keyValues.put(i, i);
     }
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
+    vm1.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
         getTestMethodName() + "_PR", keyValues ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize(
+    vm1.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize(
         "ln", keyValues.size() ));
 
     for (int i = 0; i < 500; i++) {
@@ -753,29 +753,29 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
 
     // Put the update events and check the queue size.
     // There should be no conflation with the previous create events.
-    vm5.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
+    vm2.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
         getTestMethodName() + "_PR", updateKeyValues ));
 
-    vm5.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize(
+    vm2.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize(
         "ln", keyValues.size() + updateKeyValues.size() ));
 
     // Put the update events again and check the queue size.
     // There should be conflation with the previous update events.
-    vm5.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
+    vm2.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
       getTestMethodName() + "_PR", updateKeyValues ));
 
-    vm5.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize(
+    vm2.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize(
       "ln", keyValues.size() + updateKeyValues.size() ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
     vm4.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
-
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1000 ));// primary sender
-    vm5.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
-    vm6.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
-    vm7.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1000 ));// primary sender
+    vm2.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm3.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
   }
 
   /**
@@ -789,33 +789,33 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
   public void testPartitionedSerialAsyncEventQueueWithPeristenceEnabled() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
+    vm3.invoke(createCacheRunnable(lnPort));
     vm4.invoke(createCacheRunnable(lnPort));
-    vm5.invoke(createCacheRunnable(lnPort));
-    vm6.invoke(createCacheRunnable(lnPort));
-    vm7.invoke(createCacheRunnable(lnPort));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, false, true, null, false ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, false, true, null, false ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm3.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, false, true, null, false ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         false, 100, 100, false, true, null, false ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_PR",
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_PR",
         500 ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.doPutsFrom(
+    vm2.invoke(() -> AsyncEventQueueTestBase.doPutsFrom(
         getTestMethodName() + "_PR", 500, 1000 ));
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1000 ));// primary sender
-    vm5.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
-    vm6.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
-    vm7.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1000 ));// primary sender
+    vm2.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm3.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
+    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 0 ));// secondary
   }
 
   /**
@@ -829,54 +829,54 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
   public void testPartitionedSerialAsyncEventQueueWithPeristenceEnabled_Restart() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
+    vm3.invoke(createCacheRunnable(lnPort));
     vm4.invoke(createCacheRunnable(lnPort));
-    vm5.invoke(createCacheRunnable(lnPort));
-    vm6.invoke(createCacheRunnable(lnPort));
-    vm7.invoke(createCacheRunnable(lnPort));
 
-    String firstDStore = (String)vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueueWithDiskStore( "ln", false, 100,
+    String firstDStore = (String)vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueueWithDiskStore( "ln", false, 100,
             100, true, null ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
 
     // pause async channel and then do the puts
-    vm4
+    vm1
         .invoke(() -> AsyncEventQueueTestBase.pauseAsyncEventQueueAndWaitForDispatcherToPause( "ln" ));
   
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_PR",
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_PR",
         1000 ));
 
-    // ------------------ KILL VM4 AND REBUILD
+    // ------------------ KILL VM1 AND REBUILD
     // ------------------------------------------
-    vm4.invoke(() -> AsyncEventQueueTestBase.killSender());
+    vm1.invoke(() -> AsyncEventQueueTestBase.killSender());
 
-    vm4.invoke(createCacheRunnable(lnPort));
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueueWithDiskStore( "ln", false, 100, 100, true, firstDStore ));
-    vm4.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueueWithDiskStore( "ln", false, 100, 100, true, firstDStore ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
     // -----------------------------------------------------------------------------------
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1000 ));// primary sender
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventListener( "ln", 1000 ));// primary sender
   }
 
   public void testParallelAsyncEventQueueWithReplicatedRegion() {
     try {
       Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+      vm1.invoke(createCacheRunnable(lnPort));
+      vm2.invoke(createCacheRunnable(lnPort));
+      vm3.invoke(createCacheRunnable(lnPort));
       vm4.invoke(createCacheRunnable(lnPort));
-      vm5.invoke(createCacheRunnable(lnPort));
-      vm6.invoke(createCacheRunnable(lnPort));
-      vm7.invoke(createCacheRunnable(lnPort));
 
-      vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue(
+      vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue(
           "ln", true, 100, 100, true, false, null, false ));
-      vm5.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue(
+      vm2.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue(
           "ln", true, 100, 100, true, false, null, false ));
-      vm6.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue(
+      vm3.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue(
           "ln", true, 100, 100, true, false, null, false ));
-      vm7.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue(
+      vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue(
           "ln", true, 100, 100, true, false, null, false ));
 
-      vm4.invoke(createReplicatedRegionRunnable());
+      vm1.invoke(createReplicatedRegionRunnable());
       fail("Expected GatewaySenderConfigException where parallel async event queue can not be used with replicated region");
     }
     catch (Exception e) {
@@ -890,77 +890,77 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
   public void testParallelAsyncEventQueue() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
+    vm3.invoke(createCacheRunnable(lnPort));
     vm4.invoke(createCacheRunnable(lnPort));
-    vm5.invoke(createCacheRunnable(lnPort));
-    vm6.invoke(createCacheRunnable(lnPort));
-    vm7.invoke(createCacheRunnable(lnPort));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         true, 100, 100, false, false, null, false ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         true, 100, 100, false, false, null, false ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm3.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         true, 100, 100, false, false, null, false ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         true, 100, 100, false, false, null, false ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_PR",
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_PR",
         256 ));
     
+    vm1.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
     vm4.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
     
+    int vm1size = (Integer)vm1.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
+    int vm2size = (Integer)vm2.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
+    int vm3size = (Integer)vm3.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
     int vm4size = (Integer)vm4.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
-    int vm5size = (Integer)vm5.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
-    int vm6size = (Integer)vm6.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
-    int vm7size = (Integer)vm7.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
     
-    assertEquals(vm4size + vm5size + vm6size + vm7size, 256);
+    assertEquals(vm1size + vm2size + vm3size + vm4size, 256);
   }
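
Unlike the serial cases above, this queue is created with parallel=true and hangs off a partitioned region, so every member dispatches the events for the buckets it hosts as primary; that is why the four per-member listener map sizes are summed and compared against the 256 puts instead of expecting a single primary member to see them all. Wiring a parallel queue to a partitioned region through the public API looks roughly like the sketch below; the queue id and region name are just the ones used in this test, and the cache plus the "ln" queue are assumed to have been created already.

    import com.gemstone.gemfire.cache.Cache;
    import com.gemstone.gemfire.cache.Region;
    import com.gemstone.gemfire.cache.RegionShortcut;

    public class ParallelQueueRegionSketch {
      // assumes an async event queue with id "ln" was created on this cache via
      // cache.createAsyncEventQueueFactory().setParallel(true). ... .create("ln", listener)
      static Region<Object, Object> createRegionWithQueue(Cache cache) {
        return cache.createRegionFactory(RegionShortcut.PARTITION)
            .addAsyncEventQueueId("ln")
            .create("testParallelAsyncEventQueue_PR");
      }
    }
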
 
   public void testParallelAsyncEventQueueWithSubstitutionFilter() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
-    vm4.invoke(createCacheRunnable(lnPort));
+    vm1.invoke(createCacheRunnable(lnPort));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         true, 100, 100, false, false, null, false, "MyAsyncEventListener", "MyGatewayEventSubstitutionFilter" ));
 
     String regionName = getTestMethodName() + "_PR";
-    vm4.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( regionName, "ln", isOffHeap() ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( regionName, "ln", isOffHeap() ));
 
     int numPuts = 10;
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( regionName, numPuts ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( regionName, numPuts ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
 
-    vm4.invoke(() -> verifySubstitutionFilterInvocations( "ln" ,numPuts ));
+    vm1.invoke(() -> verifySubstitutionFilterInvocations( "ln" ,numPuts ));
   }
 
   public void testParallelAsyncEventQueueWithSubstitutionFilterNoSubstituteValueToDataInvocations() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
-    vm4.invoke(createCacheRunnable(lnPort));
+    vm1.invoke(createCacheRunnable(lnPort));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         true, 100, 100, false, false, null, false, "MyAsyncEventListener", "SizeableGatewayEventSubstitutionFilter" ));
 
     String regionName = getTestMethodName() + "_PR";
-    vm4.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( regionName, "ln", isOffHeap() ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( regionName, "ln", isOffHeap() ));
 
     int numPuts = 10;
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( regionName, numPuts ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( regionName, numPuts ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
 
-    vm4.invoke(() -> verifySubstitutionFilterToDataInvocations( "ln" ,0 ));
+    vm1.invoke(() -> verifySubstitutionFilterToDataInvocations( "ln" ,0 ));
   }
 
   /**
@@ -970,73 +970,73 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
   public void testParallelAsyncEventQueueWithCacheLoader() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
+    vm3.invoke(createCacheRunnable(lnPort));
     vm4.invoke(createCacheRunnable(lnPort));
-    vm5.invoke(createCacheRunnable(lnPort));
-    vm6.invoke(createCacheRunnable(lnPort));
-    vm7.invoke(createCacheRunnable(lnPort));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
     	true, 100, 100, false, false, null, false, "MyAsyncEventListener_CacheLoader" ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
     	true, 100, 100, false, false, null, false, "MyAsyncEventListener_CacheLoader" ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm3.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
     	true, 100, 100, false, false, null, false, "MyAsyncEventListener_CacheLoader" ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
     	true, 100, 100, false, false, null, false, "MyAsyncEventListener_CacheLoader" ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithCacheLoaderAndAsyncQueue( getTestMethodName() + "_PR", "ln" ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithCacheLoaderAndAsyncQueue( getTestMethodName() + "_PR", "ln" ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithCacheLoaderAndAsyncQueue( getTestMethodName() + "_PR", "ln" ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithCacheLoaderAndAsyncQueue( getTestMethodName() + "_PR", "ln" ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithCacheLoaderAndAsyncQueue( getTestMethodName() + "_PR", "ln" ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithCacheLoaderAndAsyncQueue( getTestMethodName() + "_PR", "ln" ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithCacheLoaderAndAsyncQueue( getTestMethodName() + "_PR", "ln" ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPutAll( getTestMethodName() + "_PR",
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPutAll( getTestMethodName() + "_PR",
     	100, 10 ));
+    vm1.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventForOperationDetail( "ln", 250, false, true ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventForOperationDetail( "ln", 250, false, true ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventForOperationDetail( "ln", 250, false, true ));
     vm4.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventForOperationDetail( "ln", 250, false, true ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventForOperationDetail( "ln", 250, false, true ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventForOperationDetail( "ln", 250, false, true ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.validateAsyncEventForOperationDetail( "ln", 250, false, true ));
   }
   
   public void testParallelAsyncEventQueueSize() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
+    vm3.invoke(createCacheRunnable(lnPort));
     vm4.invoke(createCacheRunnable(lnPort));
-    vm5.invoke(createCacheRunnable(lnPort));
-    vm6.invoke(createCacheRunnable(lnPort));
-    vm7.invoke(createCacheRunnable(lnPort));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         true, 100, 100, false, false, null, false ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         true, 100, 100, false, false, null, false ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm3.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         true, 100, 100, false, false, null, false ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         true, 100, 100, false, false, null, false ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
 
-    vm4
+    vm1
         .invoke(pauseAsyncEventQueueRunnable());
-    vm5
+    vm2
         .invoke(pauseAsyncEventQueueRunnable());
-    vm6
+    vm3
         .invoke(pauseAsyncEventQueueRunnable());
-    vm7
+    vm4
         .invoke(pauseAsyncEventQueueRunnable());
     Wait.pause(1000);// pause at least for the batchTimeInterval
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_PR",
+    vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_PR",
         1000 ));
 
-    int vm4size = (Integer)vm4.invoke(() -> AsyncEventQueueTestBase.getAsyncEventQueueSize( "ln" ));
-    int vm5size = (Integer)vm5.invoke(() -> AsyncEventQueueTestBase.getAsyncEventQueueSize( "ln" ));
+    int vm1size = (Integer)vm1.invoke(() -> AsyncEventQueueTestBase.getAsyncEventQueueSize( "ln" ));
+    int vm2size = (Integer)vm2.invoke(() -> AsyncEventQueueTestBase.getAsyncEventQueueSize( "ln" ));
     
-    assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm4size);
-    assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm5size);
+    assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm1size);
+    assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm2size);
   }
   
   /**
@@ -1046,74 +1046,74 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
   public void testConcurrentParallelAsyncEventQueueSize() {
 	Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+	vm1.invoke(createCacheRunnable(lnPort));
+	vm2.invoke(createCacheRunnable(lnPort));
+	vm3.invoke(createCacheRunnable(lnPort));
 	vm4.invoke(createCacheRunnable(lnPort));
-	vm5.invoke(createCacheRunnable(lnPort));
-	vm6.invoke(createCacheRunnable(lnPort));
-	vm7.invoke(createCacheRunnable(lnPort));
 
-	vm4.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+	vm1.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
 	  true, 100, 100, false, false, null, false, 2, OrderPolicy.KEY ));
-	vm5.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+	vm2.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
 	  true, 100, 100, false, false, null, false, 2, OrderPolicy.KEY ));
-	vm6.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+	vm3.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
 	  true, 100, 100, false, false, null, false, 2, OrderPolicy.KEY ));
-	vm7.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
+	vm4.invoke(() -> AsyncEventQueueTestBase.createConcurrentAsyncEventQueue( "ln",
 	  true, 100, 100, false, false, null, false, 2, OrderPolicy.KEY ));
 
+	vm1.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
+	vm2.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
+	vm3.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
 	vm4.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-	vm5.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-	vm6.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-	vm7.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
 
-	vm4
+	vm1
 	  .invoke(pauseAsyncEventQueueRunnable());
-	vm5
+	vm2
 	  .invoke(pauseAsyncEventQueueRunnable());
-	vm6
+	vm3
 	  .invoke(pauseAsyncEventQueueRunnable());
-	vm7
+	vm4
 	  .invoke(pauseAsyncEventQueueRunnable());
 	Wait.pause(1000);// pause at least for the batchTimeInterval
 
-	vm4.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_PR",
+	vm1.invoke(() -> AsyncEventQueueTestBase.doPuts( getTestMethodName() + "_PR",
 	  1000 ));
 
-	int vm4size = (Integer)vm4.invoke(() -> AsyncEventQueueTestBase.getAsyncEventQueueSize( "ln" ));
-	int vm5size = (Integer)vm5.invoke(() -> AsyncEventQueueTestBase.getAsyncEventQueueSize( "ln" ));
+	int vm1size = (Integer)vm1.invoke(() -> AsyncEventQueueTestBase.getAsyncEventQueueSize( "ln" ));
+	int vm2size = (Integer)vm2.invoke(() -> AsyncEventQueueTestBase.getAsyncEventQueueSize( "ln" ));
 	    
-	assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm4size);
-	assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm5size);
+	assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm1size);
+	assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm2size);
   }
   
   public void testParallelAsyncEventQueueWithConflationEnabled() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
+    vm3.invoke(createCacheRunnable(lnPort));
     vm4.invoke(createCacheRunnable(lnPort));
-    vm5.invoke(createCacheRunnable(lnPort));
-    vm6.invoke(createCacheRunnable(lnPort));
-    vm7.invoke(createCacheRunnable(lnPort));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         true, 100, 100, true, false, null, false ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         true, 100, 100, true, false, null, false ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm3.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         true, 100, 100, true, false, null, false ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         true, 100, 100, true, false, null, false ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createPartitionedRegionWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
 
-    vm4
+    vm1
         .invoke(pauseAsyncEventQueueRunnable());
-    vm5
+    vm2
         .invoke(pauseAsyncEventQueueRunnable());
-    vm6
+    vm3
         .invoke(pauseAsyncEventQueueRunnable());
-    vm7
+    vm4
         .invoke(pauseAsyncEventQueueRunnable());
 
     Wait.pause(2000);// pause for the batchTimeInterval to ensure that all the
@@ -1125,45 +1125,45 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
       keyValues.put(i, i);
     }
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
+    vm1.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
         getTestMethodName() + "_PR", keyValues ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize(
+    vm1.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize(
         "ln", keyValues.size() ));
 
     for (int i = 0; i < 500; i++) {
       updateKeyValues.put(i, i + "_updated");
     }
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
+    vm1.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
         getTestMethodName() + "_PR", updateKeyValues ));
 
  
-    vm4.invoke(() -> AsyncEventQueueTestBase.waitForAsyncEventQueueSize(
+    vm1.invoke(() -> AsyncEventQueueTestBase.waitForAsyncEventQueueSize(
         "ln", keyValues.size() + updateKeyValues.size() )); // no conflation of creates
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
+    vm1.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
         getTestMethodName() + "_PR", updateKeyValues ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.waitForAsyncEventQueueSize(
+    vm1.invoke(() -> AsyncEventQueueTestBase.waitForAsyncEventQueueSize(
         "ln", keyValues.size() + updateKeyValues.size() )); // conflation of updates
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
     vm4.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
     vm4.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
     
+    int vm1size = (Integer)vm1.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
+    int vm2size = (Integer)vm2.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
+    int vm3size = (Integer)vm3.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
     int vm4size = (Integer)vm4.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
-    int vm5size = (Integer)vm5.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
-    int vm6size = (Integer)vm6.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
-    int vm7size = (Integer)vm7.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
     
-    assertEquals(vm4size + vm5size + vm6size + vm7size, keyValues.size());
+    assertEquals(vm1size + vm2size + vm3size + vm4size, keyValues.size());
   }
 
   /**
@@ -1172,33 +1172,29 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
   public void testParallelAsyncEventQueueWithConflationEnabled_bug47213() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
+    vm3.invoke(createCacheRunnable(lnPort));
     vm4.invoke(createCacheRunnable(lnPort));
-    vm5.invoke(createCacheRunnable(lnPort));
-    vm6.invoke(createCacheRunnable(lnPort));
-    vm7.invoke(createCacheRunnable(lnPort));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm1.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         true, 100, 100, true, false, null, false ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm2.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         true, 100, 100, true, false, null, false ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm3.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         true, 100, 100, true, false, null, false ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
+    vm4.invoke(() -> AsyncEventQueueTestBase.createAsyncEventQueue( "ln",
         true, 100, 100, true, false, null, false ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.createPRWithRedundantCopyWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.createPRWithRedundantCopyWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.createPRWithRedundantCopyWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
     vm4.invoke(() -> AsyncEventQueueTestBase.createPRWithRedundantCopyWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.createPRWithRedundantCopyWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.createPRWithRedundantCopyWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.createPRWithRedundantCopyWithAsyncEventQueue( getTestMethodName() + "_PR", "ln", isOffHeap() ));
 
-    vm4
-        .invoke(pauseAsyncEventQueueRunnable());
-    vm5
-        .invoke(pauseAsyncEventQueueRunnable());
-    vm6
-        .invoke(pauseAsyncEventQueueRunnable());
-    vm7
-        .invoke(pauseAsyncEventQueueRunnable());
+    vm1.invoke(pauseAsyncEventQueueRunnable());
+    vm2.invoke(pauseAsyncEventQueueRunnable());
+    vm3.invoke(pauseAsyncEventQueueRunnable());
+    vm4.invoke(pauseAsyncEventQueueRunnable());
 
     Wait.pause(2000);// pause for the batchTimeInterval to ensure that all the
     // senders are paused
@@ -1209,308 +1205,302 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
       keyValues.put(i, i);
     }
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
+    vm1.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
         getTestMethodName() + "_PR", keyValues ));
 
     Wait.pause(2000);
-    vm4.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize(
+    vm1.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize(
         "ln", keyValues.size() ));
 
     for (int i = 0; i < 500; i++) {
       updateKeyValues.put(i, i + "_updated");
     }
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
+    vm1.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
         getTestMethodName() + "_PR", updateKeyValues ));
 
-    vm4.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
+    vm1.invoke(() -> AsyncEventQueueTestBase.putGivenKeyValue(
         getTestMethodName() + "_PR", updateKeyValues ));
 
     // pause to ensure that events have been conflated.
     Wait.pause(2000);
-    vm4.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize(
+    vm1.invoke(() -> AsyncEventQueueTestBase.checkAsyncEventQueueSize(
         "ln", keyValues.size() + updateKeyValues.size() ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
     vm4.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.resumeAsyncEventQueue( "ln" ));
 
+    vm1.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
+    vm2.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
+    vm3.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
     vm4.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
-    vm5.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
-    vm6.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
-    vm7.invoke(() -> AsyncEventQueueTestBase.waitForAsyncQueueToGetEmpty( "ln" ));
     
+    int vm1size = (Integer)vm1.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
+    int vm2size = (Integer)vm2.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
+    int vm3size = (Integer)vm3.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
     int vm4size = (Integer)vm4.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
-    int vm5size = (Integer)vm5.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
-    int vm6size = (Integer)vm6.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
-    int vm7size = (Integer)vm7.invoke(() -> AsyncEventQueueTestBase.getAsyncEventListenerMapSize( "ln"));
     
-    assertEquals(vm4size + vm5size + vm6size + vm7size, keyValues.size());
+    assertEquals(vm1size + vm2size + vm3size + vm4size, keyValues.size());
     
   }
 
   public void testParallelAsyncEventQueueWithOneAccessor() {
     Integer lnPort = (Integer)vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId( 1 ));
 
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
     vm3.invoke(createCacheRunnable(lnPort));
     vm4.invoke(createCacheRunnable(lnPort));
-    vm5.invoke(createCacheRunnable(lnPort))

<TRUNCATED>


[48/63] [abbrv] incubator-geode git commit: GEODE-1326: fix compilation errors

Posted by kl...@apache.org.
GEODE-1326: fix compilation errors


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/29fde0dc
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/29fde0dc
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/29fde0dc

Branch: refs/heads/feature/GEODE-1276
Commit: 29fde0dcc8ff483312a6b95a1c3203485ec27edb
Parents: a3f308a
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Fri Apr 29 14:31:32 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Fri Apr 29 14:31:32 2016 -0700

----------------------------------------------------------------------
 .../internal/cli/commands/FunctionCommandsDUnitTest.java         | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/29fde0dc/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/FunctionCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/FunctionCommandsDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/FunctionCommandsDUnitTest.java
index 86c0273..bc92409 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/FunctionCommandsDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/FunctionCommandsDUnitTest.java
@@ -171,7 +171,7 @@ public class FunctionCommandsDUnitTest extends CliCommandTestBase {
 
   @Test
   public void testExecuteFunctionOnRegionWithCustomResultCollector() {
-    createDefaultSetup(null);
+    setUpJmxManagerOnVm0ThenConnect(null);
 
     final Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_RETURN_ARGS);
     Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
@@ -384,7 +384,7 @@ public class FunctionCommandsDUnitTest extends CliCommandTestBase {
     Properties localProps = new Properties();
     localProps.setProperty(DistributionConfig.NAME_NAME, "Manager");
     localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group1");
-    createDefaultSetup(localProps);
+    setUpJmxManagerOnVm0ThenConnect(localProps);
     Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_RETURN_ARGS);
     FunctionService.registerFunction(function);
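
For reference, a minimal, hypothetical sketch of the renamed setup call. Only the setUpJmxManagerOnVm0ThenConnect(Properties) call, the @Test annotation, and the DistributionConfig property name come from the hunks above; the class and test method names are illustrative, and the sketch assumes it lives alongside CliCommandTestBase in the same test package.

  import java.util.Properties;

  import org.junit.Test;

  import com.gemstone.gemfire.distributed.internal.DistributionConfig;

  public class SetupRenameUsageSketch extends CliCommandTestBase {

    @Test
    public void connectsWithDefaultProperties() {
      // Previously createDefaultSetup(null); after GEODE-1326 the equivalent call is:
      setUpJmxManagerOnVm0ThenConnect(null);
    }

    @Test
    public void connectsWithCustomProperties() {
      Properties localProps = new Properties();
      localProps.setProperty(DistributionConfig.NAME_NAME, "Manager");
      setUpJmxManagerOnVm0ThenConnect(localProps);
    }
  }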
 


[23/63] [abbrv] incubator-geode git commit: GEODE-1059: PRQueryDUnitHelper no longer inherits PartitionedRegionDUnitTestCase class

Posted by kl...@apache.org.
GEODE-1059: PRQueryDUnitHelper no longer inherits PartitionedRegionDUnitTestCase class

* The PRQueryDUnitHelper class no longer inherits PartitionedRegionDUnitTestCase, so there are no longer any calls to that class's constructor.
* All of the helper methods for creating Portfolio and PortfolioData test objects were moved to the Utils class as static methods (see the usage sketch after this message).
* Removed the file PRQueryPerfDUnitTest.java.

This closes #132
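
For illustration, a minimal, hypothetical sketch (not part of the commit) of how a test can now obtain test data through the static helpers on Utils instead of instantiating PRQueryDUnitHelper. Only the Utils.createPortfolioData signature and the PortfolioData type are taken from the hunks below; the class name and the counts are made up.

  import static com.gemstone.gemfire.cache.query.Utils.createPortfolioData;

  import com.gemstone.gemfire.cache.query.data.PortfolioData;

  public class UtilsUsageSketch {
    public static void main(String[] args) {
      // Fills indices 0 (inclusive) through 100 (exclusive) with new PortfolioData(k).
      PortfolioData[] portfolio = createPortfolioData(0, 100);
      System.out.println("Created " + portfolio.length + " portfolio entries");
    }
  }

Tests that previously called new PRQueryDUnitHelper("").createPortfoliosAndPositions(n) now use the corresponding static import, as the QueryUsingFunctionContextDUnitTest and QueryREUpdateInProgressJUnitTest hunks below show.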


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/6fb84d96
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/6fb84d96
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/6fb84d96

Branch: refs/heads/feature/GEODE-1276
Commit: 6fb84d96294e50f3f4d1914a66d82dd1da4dceff
Parents: 6b4cdb1
Author: nabarun <nn...@pivotal.io>
Authored: Wed Apr 13 15:38:44 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Wed Apr 27 15:59:51 2016 -0700

----------------------------------------------------------------------
 .../com/gemstone/gemfire/cache/query/Utils.java |  38 +
 .../dunit/QueryDataInconsistencyDUnitTest.java  |   2 -
 .../QueryUsingFunctionContextDUnitTest.java     |   8 +-
 .../QueryREUpdateInProgressJUnitTest.java       |  12 +-
 ...rrentIndexInitOnOverflowRegionDUnitTest.java |  27 +-
 ...ndexOperationsOnOverflowRegionDUnitTest.java |  97 +--
 ...pdateWithInplaceObjectModFalseDUnitTest.java |  46 +-
 ...ConcurrentIndexUpdateWithoutWLDUnitTest.java |  48 +-
 ...itializeIndexEntryDestroyQueryDUnitTest.java |  96 +--
 .../PRBasicIndexCreationDUnitTest.java          | 302 +++----
 .../PRBasicIndexCreationDeadlockDUnitTest.java  |  42 +-
 .../PRBasicMultiIndexCreationDUnitTest.java     | 267 ++----
 .../partitioned/PRBasicQueryDUnitTest.java      |  36 +-
 .../PRBasicRemoveIndexDUnitTest.java            |  37 +-
 .../PRColocatedEquiJoinDUnitTest.java           | 106 +--
 .../partitioned/PRInvalidQueryDUnitTest.java    |  26 +-
 .../partitioned/PRQueryCacheCloseDUnitTest.java |  81 +-
 .../query/partitioned/PRQueryDUnitHelper.java   | 818 +++----------------
 .../query/partitioned/PRQueryDUnitTest.java     |  87 +-
 .../query/partitioned/PRQueryPerfDUnitTest.java | 504 ------------
 .../PRQueryRegionCloseDUnitTest.java            |  28 +-
 .../PRQueryRegionDestroyedDUnitTest.java        |  36 +-
 .../PRQueryRemoteNodeExceptionDUnitTest.java    |  48 +-
 .../gemfire/management/QueryDataDUnitTest.java  |   6 +-
 24 files changed, 805 insertions(+), 1993 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/Utils.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/Utils.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/Utils.java
index ddd3a16..a34d049 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/Utils.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/Utils.java
@@ -25,6 +25,12 @@ package com.gemstone.gemfire.cache.query;
 import java.util.Collection;
 import java.util.Iterator;
 
+import com.gemstone.gemfire.cache.query.data.Portfolio;
+import com.gemstone.gemfire.cache.query.data.PortfolioData;
+import com.gemstone.gemfire.cache.query.data.Position;
+
+import parReg.query.unittest.NewPortfolio;
+
 /**
  *
  */
@@ -53,4 +59,36 @@ public class Utils {
       sb.append(r);
     return sb.toString();
   }
+  /**
+   * Creates an array of PortfolioData objects, filling indices
+   * cnt (inclusive) through cntDest (exclusive).
+   *
+   * @return an array of PortfolioData objects
+   */
+
+  public static PortfolioData[] createPortfolioData(final int cnt, final int cntDest) {
+    PortfolioData[] portfolio = new PortfolioData[cntDest];
+    for (int k = cnt; k < cntDest; k++) {
+      portfolio[k] = new PortfolioData(k);
+    }
+    return portfolio;
+  }
+
+  public static Portfolio[] createPortfoliosAndPositions(int count) {
+    Position.cnt = 0; // reset Portfolio counter
+    Portfolio[] portfolios = new Portfolio[count];
+    for (int i = 0; i < count; i++) {
+      portfolios[i] = new Portfolio(i);
+    }
+    return portfolios;
+  }
+
+  public static NewPortfolio[] createNewPortfoliosAndPositions(int count) {
+    Position.cnt = 0; // reset Portfolio counter
+    NewPortfolio[] portfolios = new NewPortfolio[count];
+    for (int i = 0; i < count; i++) {
+      portfolios[i] = new NewPortfolio("" + i, i);
+    }
+    return portfolios;
+  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryDataInconsistencyDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryDataInconsistencyDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryDataInconsistencyDUnitTest.java
index c5f5140..475ad49 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryDataInconsistencyDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryDataInconsistencyDUnitTest.java
@@ -84,8 +84,6 @@ public class QueryDataInconsistencyDUnitTest extends CacheTestCase {
   public static String[] queriesForRR = new String[] { "<trace> select * from /"
       + repRegionName + " where ID=1" };
 
-  private static PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
-
   public static volatile boolean hooked = false;
   /**
    * @param name

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingFunctionContextDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingFunctionContextDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingFunctionContextDUnitTest.java
index 08626de..1d60010 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingFunctionContextDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingFunctionContextDUnitTest.java
@@ -16,6 +16,8 @@
  */
 package com.gemstone.gemfire.cache.query.dunit;
 
+import static com.gemstone.gemfire.cache.query.Utils.createPortfoliosAndPositions;
+
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
@@ -125,13 +127,11 @@ public class QueryUsingFunctionContextDUnitTest extends CacheTestCase {
   
   public static String[] queriesForRR = new String[]{"<trace> select * from /"+repRegionName+" where ID>=0"};
 
-  private static PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
   /**
    * @param name
    */
   public QueryUsingFunctionContextDUnitTest(String name) {
     super(name);
-
   }
 
   @Override
@@ -671,7 +671,7 @@ public class QueryUsingFunctionContextDUnitTest extends CacheTestCase {
 
   public void fillValuesInRegions() {
     //Create common Portflios and NewPortfolios
-    final Portfolio[] portfolio = PRQHelp.createPortfoliosAndPositions(cntDest);
+    final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
 
     //Fill local region
     server1.invoke(getCacheSerializableRunnableForPRPuts(localRegionName,
@@ -1015,7 +1015,7 @@ public class QueryUsingFunctionContextDUnitTest extends CacheTestCase {
           region.put(new Integer(j), portfolio[j]);
         LogWriterUtils.getLogWriter()
             .info(
-                "PRQueryDUnitHelper#getCacheSerializableRunnableForPRPuts: Inserted Portfolio data on Region "
+                "getCacheSerializableRunnableForPRPuts: Inserted Portfolio data on Region "
                     + regionName);
       }
     };

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/functional/QueryREUpdateInProgressJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/functional/QueryREUpdateInProgressJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/functional/QueryREUpdateInProgressJUnitTest.java
index e7681b5..9a48929 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/functional/QueryREUpdateInProgressJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/functional/QueryREUpdateInProgressJUnitTest.java
@@ -19,6 +19,7 @@
  */
 package com.gemstone.gemfire.cache.query.functional;
 
+import static com.gemstone.gemfire.cache.query.Utils.createPortfoliosAndPositions;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -39,7 +40,6 @@ import com.gemstone.gemfire.cache.query.CacheUtils;
 import com.gemstone.gemfire.cache.query.QueryService;
 import com.gemstone.gemfire.cache.query.SelectResults;
 import com.gemstone.gemfire.cache.query.data.Portfolio;
-import com.gemstone.gemfire.cache.query.partitioned.PRQueryDUnitHelper;
 import com.gemstone.gemfire.cache.query.types.ObjectType;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.LocalRegion.NonTXEntry;
@@ -368,21 +368,21 @@ public class QueryREUpdateInProgressJUnitTest {
       if ((r[j][0] != null) && (r[j][1] != null)) {
         type1 = ((SelectResults) r[j][0]).getCollectionType().getElementType();
         assertNotNull(
-            "PRQueryDUnitHelper#compareTwoQueryResults: Type 1 is NULL "
+            "#compareTwoQueryResults: Type 1 is NULL "
                 + type1, type1);
         type2 = ((SelectResults) r[j][1]).getCollectionType().getElementType();
         assertNotNull(
-            "PRQueryDUnitHelper#compareTwoQueryResults: Type 2 is NULL "
+            "#compareTwoQueryResults: Type 2 is NULL "
                 + type2, type2);
         if ( !(type1.getClass().getName()).equals(type2.getClass().getName()) ) {
-          fail("PRQueryDUnitHelper#compareTwoQueryResults: FAILED:Search result Type is different in both the cases: " 
+          fail("#compareTwoQueryResults: FAILED:Search result Type is different in both the cases: "
               + type1.getClass().getName() + " "
               + type2.getClass().getName());
         }
         int size0 = ((SelectResults) r[j][0]).size();
         int size1 = ((SelectResults) r[j][1]).size();
         if (size0 != size1) {
-          fail("PRQueryDUnitHelper#compareTwoQueryResults: FAILED:Search resultSet size are different in both cases; size0="
+          fail("#compareTwoQueryResults: FAILED:Search resultSet size are different in both cases; size0="
               + size0 + ";size1=" + size1 + ";j=" + j);
         }
       }
@@ -401,7 +401,7 @@ public class QueryREUpdateInProgressJUnitTest {
 
   private void putREWithUpdateInProgressTrue(String region) {
     Region reg = CacheUtils.getRegion(region);
-    Portfolio[] values = new PRQueryDUnitHelper("").createPortfoliosAndPositions(numOfEntries);
+    Portfolio[] values = createPortfoliosAndPositions(numOfEntries);
 
     int i=0;
     for (Object val: values) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexInitOnOverflowRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexInitOnOverflowRegionDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexInitOnOverflowRegionDUnitTest.java
index 19b1dfb..4dc2890 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexInitOnOverflowRegionDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexInitOnOverflowRegionDUnitTest.java
@@ -39,7 +39,6 @@ import com.gemstone.gemfire.cache.query.QueryService;
 import com.gemstone.gemfire.cache.query.data.Portfolio;
 import com.gemstone.gemfire.cache.query.data.PortfolioData;
 import com.gemstone.gemfire.cache.query.internal.index.IndexManager.TestHook;
-import com.gemstone.gemfire.cache.query.partitioned.PRQueryDUnitHelper;
 import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
@@ -58,8 +57,6 @@ import com.gemstone.gemfire.test.dunit.Wait;
  */
 public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase {
 
-  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
-
   String name;
 
   final int redundancy = 0;
@@ -92,7 +89,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
         "Create local region with synchronous index maintenance") {
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
         Region partitionRegion = null;
         IndexManager.testHook = null;
         try {
@@ -140,10 +137,10 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
 
         // Do a put in region.
-        Region r = PRQHelp.getCache().getRegion(name);
+        Region r = getCache().getRegion(name);
 
         for (int i = 0; i < 100; i++) {
           r.put(i, new PortfolioData(i));
@@ -153,7 +150,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
         IndexManager.testHook = new IndexManagerTestHook();
 
         // Destroy one of the values.
-        PRQHelp.getCache().getLogger().fine("Destroying the value");
+        getCache().getLogger().fine("Destroying the value");
         r.destroy(1);
 
         IndexManager.testHook = null;
@@ -165,7 +162,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
 
         while (!hooked) {
           Wait.pause(100);
@@ -204,7 +201,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
         "Create local region with synchronous index maintenance") {
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
         
         Region partitionRegion = null;
         IndexManager.testHook = null;
@@ -305,7 +302,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
 
         while (!hooked) {
           Wait.pause(100);
@@ -355,7 +352,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
       
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
         Region region = cache.createRegionFactory(RegionShortcut.LOCAL).create(regionName);
         QueryService qService = cache.getQueryService();
         
@@ -391,7 +388,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
       @Override
       public void run2() throws CacheException {
         
-        Region region = PRQHelp.getCache().getRegion(regionName);
+        Region region = getCache().getRegion(regionName);
         for (int i=0; i<100; i++) {
           if (i == 50) IndexManager.testHook = new LocalTestHook();
           region.put(i, new Portfolio(i));
@@ -405,7 +402,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
       
       @Override
       public void run2() throws CacheException {
-        Region region = PRQHelp.getCache().getRegion(regionName);
+        Region region = getCache().getRegion(regionName);
         
         while(!hooked) {
           Wait.pause(100);
@@ -417,7 +414,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
         }
 
         try {
-            QueryService qservice = PRQHelp.getCache().getQueryService();
+            QueryService qservice = getCache().getQueryService();
             Index index = qservice.getIndex(region, "idIndex");
             if (((CompactRangeIndex)index).getIndexStorage().size() > 1) {
               fail("After clear region size is supposed to be zero as all index updates are blocked. Current region size is: "+ region.size());
@@ -436,7 +433,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
       
       @Override
       public void run2() throws CacheException {
-        Region region = PRQHelp.getCache().getRegion(regionName);
+        Region region = getCache().getRegion(regionName);
         if (region.size() > 50) {
           fail("After clear region size is supposed to be 50 as all index updates are blocked " + region.size());
         }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexOperationsOnOverflowRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexOperationsOnOverflowRegionDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexOperationsOnOverflowRegionDUnitTest.java
index 25e4166..466483d 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexOperationsOnOverflowRegionDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexOperationsOnOverflowRegionDUnitTest.java
@@ -29,7 +29,6 @@ import com.gemstone.gemfire.cache.DataPolicy;
 import com.gemstone.gemfire.cache.DiskStore;
 import com.gemstone.gemfire.cache.EvictionAction;
 import com.gemstone.gemfire.cache.EvictionAlgorithm;
-import com.gemstone.gemfire.cache.EvictionAttributes;
 import com.gemstone.gemfire.cache.PartitionAttributesFactory;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.RegionFactory;
@@ -37,13 +36,9 @@ import com.gemstone.gemfire.cache.query.Index;
 import com.gemstone.gemfire.cache.query.Query;
 import com.gemstone.gemfire.cache.query.SelectResults;
 import com.gemstone.gemfire.cache.query.data.PortfolioData;
-import com.gemstone.gemfire.cache.query.internal.QueryObserverAdapter;
-import com.gemstone.gemfire.cache.query.internal.QueryObserverHolder;
-import com.gemstone.gemfire.cache.query.partitioned.PRQueryDUnitHelper;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.internal.cache.EvictionAttributesImpl;
-import com.gemstone.gemfire.internal.cache.PartitionedRegionQueryEvaluator.TestHook;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.LogWriterUtils;
@@ -64,16 +59,8 @@ import com.gemstone.gemfire.test.dunit.Wait;
 public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
     CacheTestCase {
 
-  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
-
   String name;
 
-  final int redundancy = 0;
-
-  private int cnt=0;
-
-  private int cntDest=1;
-
   public static volatile boolean hooked = false;
 
   /**
@@ -95,7 +82,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
     vm0.invoke(new CacheSerializableRunnable("Create local region with synchronous index maintenance") {
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
         Region partitionRegion = null;
         IndexManager.testHook = null;
         try {
@@ -138,10 +125,10 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
 
         // Do a put in region.
-        Region r = PRQHelp.getCache().getRegion(name);
+        Region r = getCache().getRegion(name);
 
         for (int i=0; i<100; i++) {
           r.put(i, new PortfolioData(i));
@@ -151,7 +138,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
         IndexManager.testHook = new IndexManagerTestHook();
 
         // Destroy one of the values.
-        PRQHelp.getCache().getLogger().fine("Destroying the value");
+        getCache().getLogger().fine("Destroying the value");
         r.destroy(1);
 
         IndexManager.testHook = null;
@@ -162,16 +149,16 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
 
-        Query statusQuery = PRQHelp.getCache().getQueryService()
+        Query statusQuery = getCache().getQueryService()
             .newQuery("select * from /" + name + " p where p.ID > -1");
 
         while (!hooked) {
           Wait.pause(100);
         }
         try {
-          PRQHelp.getCache().getLogger().fine("Querying the region");
+          getCache().getLogger().fine("Querying the region");
           SelectResults results = (SelectResults)statusQuery.execute();
           assertEquals(100, results.size());
         } catch (Exception e) {
@@ -197,7 +184,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
     vm0.invoke(new CacheSerializableRunnable("Create local region with synchronous index maintenance") {
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
         Region partitionRegion = null;
         IndexManager.testHook = null;
         try {
@@ -240,10 +227,10 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
 
         // Do a put in region.
-        Region r = PRQHelp.getCache().getRegion(name);
+        Region r = getCache().getRegion(name);
 
         for (int i=0; i<100; i++) {
           r.put(i, new PortfolioData(i));
@@ -253,7 +240,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
         IndexManager.testHook = new IndexManagerTestHook();
 
         // Destroy one of the values.
-        PRQHelp.getCache().getLogger().fine("Destroying the value");
+        getCache().getLogger().fine("Destroying the value");
         r.destroy(1);
 
         IndexManager.testHook = null;
@@ -264,16 +251,16 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
 
-        Query statusQuery = PRQHelp.getCache().getQueryService()
+        Query statusQuery = getCache().getQueryService()
             .newQuery("select * from /" + name + " p where p.ID > -1");
 
         while (!hooked) {
           Wait.pause(100);
         }
         try {
-          PRQHelp.getCache().getLogger().fine("Querying the region");
+          getCache().getLogger().fine("Querying the region");
           SelectResults results = (SelectResults)statusQuery.execute();
           assertEquals(100, results.size());
         } catch (Exception e) {
@@ -300,7 +287,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
         "Create local region with synchronous index maintenance") {
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
         Region partitionRegion = null;
         IndexManager.testHook = null;
         try {
@@ -348,10 +335,8 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
-
         // Do a put in region.
-        Region r = PRQHelp.getCache().getRegion(name);
+        Region r = getCache().getRegion(name);
 
         for (int i = 0; i < 100; i++) {
           r.put(i, new PortfolioData(i));
@@ -361,7 +346,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
         IndexManager.testHook = new IndexManagerTestHook();
 
         // Destroy one of the values.
-        PRQHelp.getCache().getLogger().fine("Destroying the value");
+        getCache().getLogger().fine("Destroying the value");
         r.destroy(1);
 
         IndexManager.testHook = null;
@@ -373,16 +358,14 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
-
-        Query statusQuery = PRQHelp.getCache().getQueryService()
+        Query statusQuery = getCache().getQueryService()
             .newQuery("select * from /" + name + " p where p.ID > -1");
 
         while (!hooked) {
           Wait.pause(100);
         }
         try {
-          PRQHelp.getCache().getLogger().fine("Querying the region");
+          getCache().getLogger().fine("Querying the region");
           SelectResults results = (SelectResults)statusQuery.execute();
           assertEquals(100, results.size());
         } catch (Exception e) {
@@ -409,7 +392,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
         "Create local region with synchronous index maintenance") {
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
         Region partitionRegion = null;
         IndexManager.testHook = null;
         try {
@@ -457,10 +440,8 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
-
         // Do a put in region.
-        Region r = PRQHelp.getCache().getRegion(name);
+        Region r = getCache().getRegion(name);
 
         for (int i = 0; i < 100; i++) {
           r.put(i, new PortfolioData(i));
@@ -470,7 +451,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
         IndexManager.testHook = new IndexManagerTestHook();
 
         // Destroy one of the values.
-        PRQHelp.getCache().getLogger().fine("Destroying the value");
+        getCache().getLogger().fine("Destroying the value");
         r.destroy(1);
 
         IndexManager.testHook = null;
@@ -482,16 +463,14 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
-
-        Query statusQuery = PRQHelp.getCache().getQueryService()
+        Query statusQuery = getCache().getQueryService()
             .newQuery("select * from /" + name + " p where p.ID > -1");
 
         while (!hooked) {
           Wait.pause(100);
         }
         try {
-          PRQHelp.getCache().getLogger().fine("Querying the region");
+          getCache().getLogger().fine("Querying the region");
           SelectResults results = (SelectResults)statusQuery.execute();
           assertEquals(100, results.size());
         } catch (Exception e) {
@@ -517,7 +496,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
    vm0.invoke(new CacheSerializableRunnable("Create local region with synchronous index maintenance") {
      @Override
      public void run2() throws CacheException {
-       Cache cache = PRQHelp.getCache();
+       Cache cache = getCache();
        Region partitionRegion = null;
        IndexManager.testHook = null;
        try {
@@ -551,10 +530,10 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
      @Override
      public void run2() throws CacheException {
-       Cache cache = PRQHelp.getCache();
+       Cache cache = getCache();
 
        // Do a put in region.
-       Region r = PRQHelp.getCache().getRegion(name);
+       Region r = getCache().getRegion(name);
 
        for (int i=0; i<100; i++) {
          r.put(i, new PortfolioData(i));
@@ -564,7 +543,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
        IndexManager.testHook = new IndexManagerNoWaitTestHook();
 
        // Destroy one of the values.
-       PRQHelp.getCache().getLogger().fine("Destroying the value");
+       getCache().getLogger().fine("Destroying the value");
        r.destroy(1);
 
        IndexManager.testHook = null;
@@ -575,16 +554,16 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
      @Override
      public void run2() throws CacheException {
-       Cache cache = PRQHelp.getCache();
+       Cache cache = getCache();
 
-       Query statusQuery = PRQHelp.getCache().getQueryService()
+       Query statusQuery = getCache().getQueryService()
            .newQuery("select * from /" + name + " p where p.ID > -1");
 
        while (!hooked) {
          Wait.pause(10);
        }
        try {
-         PRQHelp.getCache().getLogger().fine("Querying the region");
+         getCache().getLogger().fine("Querying the region");
          SelectResults results = (SelectResults)statusQuery.execute();
          assertEquals(100, results.size());
        } catch (Exception e) {
@@ -610,7 +589,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
    vm0.invoke(new CacheSerializableRunnable("Create local region with synchronous index maintenance") {
      @Override
      public void run2() throws CacheException {
-       Cache cache = PRQHelp.getCache();
+       Cache cache = getCache();
        Region partitionRegion = null;
        IndexManager.testHook = null;
        try {
@@ -644,10 +623,8 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
      @Override
      public void run2() throws CacheException {
-       Cache cache = PRQHelp.getCache();
-
        // Do a put in region.
-       Region r = PRQHelp.getCache().getRegion(name);
+       Region r = getCache().getRegion(name);
 
        for (int i=0; i<100; i++) {
          r.put(i, new PortfolioData(i));
@@ -657,7 +634,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
        IndexManager.testHook = new IndexManagerNoWaitTestHook();
 
        // Destroy one of the values.
-       PRQHelp.getCache().getLogger().fine("Destroying the value");
+       getCache().getLogger().fine("Destroying the value");
        r.destroy(1);
 
        IndexManager.testHook = null;
@@ -668,16 +645,14 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
 
      @Override
      public void run2() throws CacheException {
-       Cache cache = PRQHelp.getCache();
-
-       Query statusQuery = PRQHelp.getCache().getQueryService()
+       Query statusQuery = getCache().getQueryService()
            .newQuery("select * from /" + name + " p where p.ID > -1");
 
        while (!hooked) {
          Wait.pause(10);
        }
        try {
-         PRQHelp.getCache().getLogger().fine("Querying the region");
+         getCache().getLogger().fine("Querying the region");
          SelectResults results = (SelectResults)statusQuery.execute();
          assertEquals(100, results.size());
        } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest.java
index 8034931..6a49628 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest.java
@@ -25,6 +25,8 @@ import org.junit.experimental.categories.Category;
 
 import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.CacheException;
+import com.gemstone.gemfire.cache.CacheExistsException;
+import com.gemstone.gemfire.cache.CacheFactory;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.query.Index;
 import com.gemstone.gemfire.cache.query.IndexStatistics;
@@ -64,10 +66,10 @@ import com.gemstone.gemfire.test.junit.categories.FlakyTest;
  */
 public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends DistributedTestCase {
   
-  PRQueryDUnitHelper helper = new PRQueryDUnitHelper("ConcurrentIndexUpdateWithoutWLDUnitTest");
+  PRQueryDUnitHelper helper = new PRQueryDUnitHelper();
   private static String regionName = "Portfolios";
   private int redundancy = 1;
-  
+
   // CompactRangeIndex
   private String indexName = "idIndex";
   private String indexedExpression = "ID";
@@ -81,6 +83,31 @@ public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends Dis
 
   int stepSize = 10;
   private int totalDataSize = 50;
+  public void setCacheInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> getAvailableCacheElseCreateCache());
+    }
+  }
+  private final void getAvailableCacheElseCreateCache() {
+    synchronized(ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest.class) {
+      try {
+        Cache newCache = GemFireCacheImpl.getInstance();
+        if(null == newCache) {
+          System.setProperty("gemfire.DISABLE_DISCONNECT_DS_ON_CACHE_CLOSE", "true");
+          newCache = CacheFactory.create(getSystem());
+        }
+        PRQueryDUnitHelper.setCache(newCache);
+      } catch (CacheExistsException e) {
+        Assert.fail("the cache already exists", e); // TODO: remove error handling
+      } catch (RuntimeException ex) {
+        throw ex;
+      } catch (Exception ex) {
+        Assert.fail("Checked exception while initializing cache??", ex);
+      } finally {
+        System.clearProperty("gemfire.DISABLE_DISCONNECT_DS_ON_CACHE_CLOSE");
+      }
+    }
+  }
 
   /**
    * @param name
@@ -132,8 +159,8 @@ public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends Dis
     // Create a Local Region.
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-    
-    vm0.invoke(helper.getCacheSerializableRunnableForReplicatedRegionCreation(regionName, Portfolio.class));
+    setCacheInVMs(vm0);
+    vm0.invoke(helper.getCacheSerializableRunnableForReplicatedRegionCreation(regionName));
     
     vm0.invoke(helper.getCacheSerializableRunnableForPRIndexCreate(regionName, indexName, indexedExpression, fromClause, alias));
     
@@ -177,10 +204,9 @@ public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends Dis
   public void testRangeIndex() {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    
-    vm0.invoke(helper.getCacheSerializableRunnableForReplicatedRegionCreation(regionName, Portfolio.class));
-    
+    setCacheInVMs(vm0);
+    vm0.invoke(helper.getCacheSerializableRunnableForReplicatedRegionCreation(regionName));
+
     vm0.invoke(helper.getCacheSerializableRunnableForPRIndexCreate(regionName, rindexName, rindexedExpression, rfromClause, ralias));
     
     AsyncInvocation[] asyncInvs = new AsyncInvocation[2];
@@ -209,7 +235,7 @@ public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends Dis
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);    
-
+    setCacheInVMs(vm0,vm1, vm2, vm3);
     vm0.invoke(helper.getCacheSerializableRunnableForPRAccessorCreate(regionName, redundancy, Portfolio.class));
     
     vm1.invoke(helper.getCacheSerializableRunnableForPRCreate(regionName, redundancy, Portfolio.class));
@@ -272,7 +298,7 @@ public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends Dis
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);    
-
+    setCacheInVMs(vm0, vm1, vm2, vm3);
     vm0.invoke(helper.getCacheSerializableRunnableForPRAccessorCreate(regionName, redundancy, Portfolio.class));
     
     vm1.invoke(helper.getCacheSerializableRunnableForPRCreate(regionName, redundancy, Portfolio.class));
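The refactored tests above no longer construct PRQueryDUnitHelper with a test name; instead each VM installs its cache through the static PRQueryDUnitHelper.setCache(...) before any helper runnable executes. The helper side of that call is not part of this diff; a minimal sketch of such a static cache holder, with everything except setCache assumed rather than taken from the source, could look like:

    import java.io.Serializable;
    import com.gemstone.gemfire.cache.Cache;

    public class PRQueryDUnitHelper implements Serializable {
      // Cache installed per VM (see setCacheInVMs above) before any runnable executes.
      private static Cache cache;

      public static void setCache(Cache newCache) {
        cache = newCache;   // remember the cache created in this VM
      }

      public static Cache getCache() {
        return cache;       // helper runnables in this VM read the shared cache
      }
    }
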

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithoutWLDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithoutWLDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithoutWLDUnitTest.java
index 28a8f77..7aa3307 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithoutWLDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithoutWLDUnitTest.java
@@ -24,6 +24,8 @@ import java.util.Collection;
 
 import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.CacheException;
+import com.gemstone.gemfire.cache.CacheExistsException;
+import com.gemstone.gemfire.cache.CacheFactory;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.query.Index;
 import com.gemstone.gemfire.cache.query.IndexStatistics;
@@ -62,7 +64,7 @@ import com.gemstone.gemfire.test.dunit.ThreadUtils;
 public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
     DistributedTestCase {
   
-  PRQueryDUnitHelper helper = new PRQueryDUnitHelper("ConcurrentIndexUpdateWithoutWLDUnitTest");
+  PRQueryDUnitHelper helper = new PRQueryDUnitHelper();
   private static String regionName = "Portfolios";
   private int redundancy = 1;
   
@@ -87,6 +89,31 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
     super(name);
   }
 
+  public void setCacheInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> getAvailableCacheElseCreateCache());
+    }
+  }
+  private final void getAvailableCacheElseCreateCache() {
+    synchronized(ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest.class) {
+      try {
+        Cache newCache = GemFireCacheImpl.getInstance();
+        if(null == newCache) {
+          System.setProperty("gemfire.DISABLE_DISCONNECT_DS_ON_CACHE_CLOSE", "true");
+          newCache = CacheFactory.create(getSystem());
+        }
+        PRQueryDUnitHelper.setCache(newCache);
+      } catch (CacheExistsException e) {
+        Assert.fail("the cache already exists", e); // TODO: remove error handling
+      } catch (RuntimeException ex) {
+        throw ex;
+      } catch (Exception ex) {
+        Assert.fail("Checked exception while initializing cache??", ex);
+      } finally {
+        System.clearProperty("gemfire.DISABLE_DISCONNECT_DS_ON_CACHE_CLOSE");
+      }
+    }
+  }
   /**
    * Tear down a PartitionedRegionTestCase by cleaning up the existing cache
    * (mainly because we want to destroy any existing PartitionedRegions)
@@ -109,8 +136,8 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
     // Create a Local Region.
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-    
-    vm0.invoke(helper.getCacheSerializableRunnableForReplicatedRegionCreation(regionName, Portfolio.class));
+    setCacheInVMs(vm0);
+    vm0.invoke(helper.getCacheSerializableRunnableForReplicatedRegionCreation(regionName));
     
     vm0.invoke(helper.getCacheSerializableRunnableForPRIndexCreate(regionName, indexName, indexedExpression, fromClause, alias));
     
@@ -138,8 +165,8 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
     // Create a Local Region.
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(1);
-    
-    vm0.invoke(helper.getCacheSerializableRunnableForReplicatedRegionCreation(regionName, Portfolio.class));
+    setCacheInVMs(vm0);
+    vm0.invoke(helper.getCacheSerializableRunnableForReplicatedRegionCreation(regionName));
  
     ArrayList<String> names = new ArrayList<String>();
     names.add(indexName);
@@ -193,9 +220,8 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
   public void testRangeIndex() {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    
-    vm0.invoke(helper.getCacheSerializableRunnableForReplicatedRegionCreation(regionName, Portfolio.class));
+    setCacheInVMs(vm0);
+    vm0.invoke(helper.getCacheSerializableRunnableForReplicatedRegionCreation(regionName));
     
     vm0.invoke(helper.getCacheSerializableRunnableForPRIndexCreate(regionName, rindexName, rindexedExpression, rfromClause, ralias));
     
@@ -225,7 +251,7 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);    
-
+    setCacheInVMs(vm0, vm1, vm2, vm3);
     vm0.invoke(helper.getCacheSerializableRunnableForPRAccessorCreate(regionName, redundancy, Portfolio.class));
     
     vm1.invoke(helper.getCacheSerializableRunnableForPRCreate(regionName, redundancy, Portfolio.class));
@@ -287,7 +313,7 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);    
-
+    setCacheInVMs(vm0, vm1, vm2, vm3);
     vm0.invoke(helper.getCacheSerializableRunnableForPRAccessorCreate(regionName, redundancy, Portfolio.class));
     
     vm1.invoke(helper.getCacheSerializableRunnableForPRCreate(regionName, redundancy, Portfolio.class));
@@ -348,7 +374,7 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);    
-
+    setCacheInVMs(vm0, vm1, vm2, vm3);
     vm0.invoke(helper.getCacheSerializableRunnableForPRAccessorCreate(regionName, redundancy, Portfolio.class));
     
     vm1.invoke(helper.getCacheSerializableRunnableForPRCreate(regionName, redundancy, Portfolio.class));

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/InitializeIndexEntryDestroyQueryDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/InitializeIndexEntryDestroyQueryDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/InitializeIndexEntryDestroyQueryDUnitTest.java
index 0311f38..6e064f1 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/InitializeIndexEntryDestroyQueryDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/InitializeIndexEntryDestroyQueryDUnitTest.java
@@ -16,6 +16,8 @@
  */
 package com.gemstone.gemfire.cache.query.internal.index;
 
+import static com.gemstone.gemfire.cache.query.Utils.createPortfolioData;
+
 import java.util.Arrays;
 
 import org.junit.experimental.categories.Category;
@@ -52,7 +54,7 @@ import com.gemstone.gemfire.test.junit.categories.FlakyTest;
  */
 public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
 
-  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
+  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper();
 
   String name;
 
@@ -71,16 +73,21 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
   public InitializeIndexEntryDestroyQueryDUnitTest(String name) {
     super(name);
   }
-
+  public void setCacheInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> PRQueryDUnitHelper.setCache(getCache()));
+    }
+  }
   public void testAsyncIndexInitDuringEntryDestroyAndQuery() {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
+    setCacheInVMs(vm0);
     name = "PartionedPortfolios";
     //Create Local Region
     vm0.invoke(new CacheSerializableRunnable("Create local region with asynchronous index maintenance") {
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
         Region localRegion = null;
         try {
           AttributesFactory attr = new AttributesFactory();
@@ -100,7 +107,7 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
     });
 
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
@@ -110,11 +117,9 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
       @Override
       public void run2() throws CacheException {
 
-        Region r = PRQHelp.getCache().getRegion(name);
-
         for (int i=0; i<cntDest; i++) {
           //Create Index first to go in hook.
-          Cache cache = PRQHelp.getCache();
+          Cache cache = getCache();
           Index index = null;
           try {
             index = cache.getQueryService().createIndex("statusIndex", "p.status", "/"+name+" p");
@@ -126,7 +131,7 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
 
           Wait.pause(100);
 
-          PRQHelp.getCache().getQueryService().removeIndex(index);
+          getCache().getQueryService().removeIndex(index);
 
           Wait.pause(100);
         }
@@ -138,22 +143,20 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
-
         // Do a put in region.
-        Region r = PRQHelp.getCache().getRegion(name);
+        Region r = getCache().getRegion(name);
 
         for (int i=0, j=0; i<1000; i++,j++) {
 
           PortfolioData p = (PortfolioData)r.get(j);
 
-          PRQHelp.getCache().getLogger().fine("Going to destroy the value" + p);
+          getCache().getLogger().fine("Going to destroy the value" + p);
           r.destroy(j);
 
           Wait.pause(100);
 
           //Put the value back again.
-          PRQHelp.getCache().getLogger().fine("Putting the value back" + p);
+          getCache().getLogger().fine("Putting the value back" + p);
           r.put(j, p);
 
           //Reset j
@@ -168,12 +171,10 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
-
         // Do a put in region.
-        Region r = PRQHelp.getCache().getRegion(name);
+        Region r = getCache().getRegion(name);
 
-        Query query = PRQHelp.getCache().getQueryService().newQuery("select * from /"+name+" p where p.status = 'active'");
+        Query query = getCache().getQueryService().newQuery("select * from /"+name+" p where p.status = 'active'");
 
         //Now run the query
         SelectResults results = null;
@@ -182,7 +183,7 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
         for (int i=0; i<500; i++) {
 
           try {
-            PRQHelp.getCache().getLogger().fine("Querying the region");
+            getCache().getLogger().fine("Querying the region");
             results = (SelectResults)query.execute();
           } catch (Exception e) {
             e.printStackTrace();
@@ -212,13 +213,13 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
   public void testAsyncIndexInitDuringEntryDestroyAndQueryOnPR() {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-
+    setCacheInVMs(vm0);
     name = "PartionedPortfoliosPR";
     //Create Local Region
     vm0.invoke(new CacheSerializableRunnable("Create local region with asynchronous index maintenance") {
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
         Region partitionRegion = null;
         try {
           AttributesFactory attr = new AttributesFactory();
@@ -237,7 +238,7 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
     });
 
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
@@ -246,12 +247,9 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
 
       @Override
       public void run2() throws CacheException {
-
-        Region r = PRQHelp.getCache().getRegion(name);
-
         for (int i=0; i<cntDest; i++) {
           //Create Index first to go in hook.
-          Cache cache = PRQHelp.getCache();
+          Cache cache = getCache();
           Index index = null;
           try {
             index = cache.getQueryService().createIndex("statusIndex", "p.status", "/"+name+" p");
@@ -261,11 +259,8 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
           }
           assertNotNull(index);
 
-          //pause(100);
+          getCache().getQueryService().removeIndex(index);
 
-          PRQHelp.getCache().getQueryService().removeIndex(index);
-
-          //pause(100);
         }
       }
     });
@@ -275,22 +270,20 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
-
         // Do a put in region.
-        Region r = PRQHelp.getCache().getRegion(name);
+        Region r = getCache().getRegion(name);
 
         for (int i=0, j=0; i<1000; i++,j++) {
 
           PortfolioData p = (PortfolioData)r.get(j);
 
-          PRQHelp.getCache().getLogger().fine("Going to destroy the value" + p);
+          getCache().getLogger().fine("Going to destroy the value" + p);
           r.destroy(j);
 
           Wait.pause(20);
 
           //Put the value back again.
-          PRQHelp.getCache().getLogger().fine("Putting the value back" + p);
+          getCache().getLogger().fine("Putting the value back" + p);
           r.put(j, p);
 
           //Reset j
@@ -305,12 +298,8 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
-
         // Do a put in region.
-        Region r = PRQHelp.getCache().getRegion(name);
-
-        Query query = PRQHelp.getCache().getQueryService().newQuery("select * from /"+name+" p where p.status = 'active'");
+        Query query = getCache().getQueryService().newQuery("select * from /"+name+" p where p.status = 'active'");
 
         //Now run the query
         SelectResults results = null;
@@ -319,7 +308,7 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
         for (int i=0; i<500; i++) {
 
           try {
-            PRQHelp.getCache().getLogger().fine("Querying the region");
+            getCache().getLogger().fine("Querying the region");
             results = (SelectResults)query.execute();
           } catch (Exception e) {
             e.printStackTrace(); // TODO: eats exceptions
@@ -348,13 +337,13 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
   public void testConcurrentRemoveIndexAndQueryOnPR() {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
-
+    setCacheInVMs(vm0);
     name = "PartionedPortfoliosPR";
     //Create Local Region
     vm0.invoke(new CacheSerializableRunnable("Create local region with asynchronous index maintenance") {
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
+        Cache cache = getCache();
         Region partitionRegion = null;
         try {
           AttributesFactory attr = new AttributesFactory();
@@ -374,7 +363,7 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
     });
 
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio, cnt, cntDest));
 
@@ -383,10 +372,8 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
       @Override
       public void run2() throws CacheException {
 
-        Region r = PRQHelp.getCache().getRegion(name);
-
           //Create Index first to go in hook.
-          Cache cache = PRQHelp.getCache();
+          Cache cache = getCache();
           Index sindex = null;
           Index iindex = null;
           Index pkindex = null;
@@ -408,19 +395,15 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
 
       @Override
       public void run2() throws CacheException {
-        Cache cache = PRQHelp.getCache();
-
         // Do a put in region.
-        Region r = PRQHelp.getCache().getRegion(name);
-
-        Query query = PRQHelp.getCache().getQueryService().newQuery("select * from /"+name+" p where p.status = 'active' and p.ID > 0 and p.pk != ' ' ");
+        Query query = getCache().getQueryService().newQuery("select * from /"+name+" p where p.status = 'active' and p.ID > 0 and p.pk != ' ' ");
         //Now run the query
         SelectResults results = null;
 
         for (int i=0; i<10; i++) {
 
           try {
-            PRQHelp.getCache().getLogger().fine("Querying the region with " + query);
+            getCache().getLogger().fine("Querying the region with " + query);
             results = (SelectResults)query.execute();
           } catch (Exception e) {
             Assert.fail("Query: " + query + " execution failed with exception", e);
@@ -440,15 +423,12 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
       @Override
       public void run2() throws CacheException {
 
-        Region r = PRQHelp.getCache().getRegion(name);
+        Region r = getCache().getRegion(name);
 
           //Create Index first to go in hook.
-          Cache cache = PRQHelp.getCache();
-       
-          PRQHelp.getCache().getQueryService().removeIndexes(r);
+          getCache().getQueryService().removeIndexes(r);
+
 
-          //pause(100);
-        
       }
     });
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6fb84d96/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDUnitTest.java
index 3ce1952..2cf8c3c 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDUnitTest.java
@@ -16,18 +16,24 @@
  */
 package com.gemstone.gemfire.cache.query.partitioned;
 
+import static com.gemstone.gemfire.cache.query.Utils.*;
+
+
 import java.util.Collection;
 
 import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.CacheException;
 import com.gemstone.gemfire.cache.query.Index;
+import com.gemstone.gemfire.cache.query.IndexNameConflictException;
 import com.gemstone.gemfire.cache.query.data.Portfolio;
 import com.gemstone.gemfire.cache.query.data.PortfolioData;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
 import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
 import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.ThreadUtils;
@@ -50,11 +56,23 @@ public class PRBasicIndexCreationDUnitTest extends
     super(name);
   }
 
+  public void setCacheInVMsUsingXML(String xmlFile, VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> GemFireCacheImpl.testCacheXml = PRQHelp.findFile(xmlFile));
+      vm.invoke(() -> PRQueryDUnitHelper.setCache(getCache()));
+    }
+  }
+
+  public void setCacheInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> PRQueryDUnitHelper.setCache(getCache()));
+    }
+  }
   // int totalNumBuckets = 131;
 
   int queryTestCycle = 10;
 
-  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper("");
+  PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper();
 
   final String name = "PartionedPortfolios";
 
@@ -76,27 +94,23 @@ public class PRBasicIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
+    setCacheInVMs(vm0,vm1,vm2,vm3);
     LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreationDUnitTest.testPRBasicIndexCreate started ....");
-
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-        redundancy));
-    // Creating local region on vm0 to compare the results of query.
-    // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(localName,
-    // Scope.DISTRIBUTED_ACK, redundancy));
+        redundancy, PortfolioData.class));
 
     // Creating the Datastores Nodes in the VM1.
     LogWriterUtils.getLogWriter()
         .info("PRBasicIndexCreationDUnitTest : creating all the prs ");
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
@@ -126,21 +140,20 @@ public class PRBasicIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
+    setCacheInVMs(vm0,vm1,vm2,vm3);
 
     LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreation.testPRMultiIndexCreation Test Started");
-
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-        redundancy));
-
+        redundancy, PortfolioData.class));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
@@ -168,21 +181,21 @@ public class PRBasicIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
+    setCacheInVMs(vm0,vm1,vm2,vm3);
     LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreation.testPRMultiIndexCreation Test Started");
 
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
 
     vm1.invoke(PRQHelp
-        .getCacheSerializableRunnableForPRCreate(name, redundancy));
+        .getCacheSerializableRunnableForPRCreate(name, redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp
-        .getCacheSerializableRunnableForPRCreate(name, redundancy));
+        .getCacheSerializableRunnableForPRCreate(name, redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp
-        .getCacheSerializableRunnableForPRCreate(name, redundancy));
+        .getCacheSerializableRunnableForPRCreate(name, redundancy, PortfolioData.class));
 
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
@@ -241,14 +254,13 @@ public class PRBasicIndexCreationDUnitTest extends
    */
   public void testCreatePartitionedIndexThroughXML() throws Exception
   {
-
+    IgnoredException ie = IgnoredException.addIgnoredException(IndexNameConflictException.class.getName());
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-//    VM vm2 = host.getVM(2);
-//    VM vm3 = host.getVM(3);
-    // closeAllCache();
+    closeCache();
     final String fileName = "PRIndexCreation.xml";
+    setCacheInVMsUsingXML(fileName, vm0, vm1);
     LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreation.testCreatePartitionedIndexThroughXML started");
     LogWriterUtils.getLogWriter().info(
@@ -256,9 +268,10 @@ public class PRBasicIndexCreationDUnitTest extends
     LogWriterUtils.getLogWriter().info(
         "Starting a pr asynchronously using an xml file name : " + fileName);
     AsyncInvocation asyInvk0 = vm0.invokeAsync(PRQHelp
-        .getCacheSerializableRunnableForPRCreateThrougXML(name, fileName));
+        .getCacheSerializableRunnableForPRCreate(name));
     AsyncInvocation asyInvk1 = vm1.invokeAsync(PRQHelp
-        .getCacheSerializableRunnableForPRCreateThrougXML(name, fileName));
+        .getCacheSerializableRunnableForPRCreate(name));
+
     ThreadUtils.join(asyInvk1, 30 * 1000);
     if (asyInvk1.exceptionOccurred()) {
       Assert.fail("asyInvk1 failed", asyInvk1.getException());
@@ -267,7 +280,9 @@ public class PRBasicIndexCreationDUnitTest extends
     if (asyInvk0.exceptionOccurred()) {
       Assert.fail("asyInvk0 failed", asyInvk0.getException());
     }
-    // printing all the indexes are created.
+
+    ie.remove();
+//    // printing all the indexes are created.
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
     LogWriterUtils.getLogWriter().info(
@@ -288,20 +303,18 @@ public class PRBasicIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
+    setCacheInVMs(vm0,vm1,vm2,vm3);
 
-    // final String fileName = "PRIndexCreation.xml";
-    // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
-    // fileName));
     LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testCreatePartitionedRegionThroughXMLAndAPI started ");
     // creating all the prs
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
         "PrIndexOnStatus", "p.status",null, "p"));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
@@ -311,9 +324,9 @@ public class PRBasicIndexCreationDUnitTest extends
         "PrIndexOnPKID", "p.pkid",null, "p"));
 //  adding a new node to an already existing system.
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     // putting some data in.
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
@@ -334,40 +347,27 @@ public class PRBasicIndexCreationDUnitTest extends
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-//    VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
-    // final String fileName = "PRIndexCreation.xml";
-    // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
-    // fileName));
+    setCacheInVMs(vm0,vm1,vm3);
     LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testCreatePartitionedIndexWithNoAliasAfterPuts started ");
     // creating all the prs
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
         "PrIndexOnStatus", "status",null, ""));
-    //vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
-    //    "PrIndexOnId", "p.ID", "p"));
-
-    //vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
-    //    "PrIndexOnPKID", "p.pkid", "p"));
-//  adding a new node to an already existing system.
-    //vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-    //    Scope.DISTRIBUTED_ACK, redundancy));
     // putting some data in.
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
-  //  vm2.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
   }
   
@@ -380,74 +380,28 @@ public class PRBasicIndexCreationDUnitTest extends
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-//    VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
-    // final String fileName = "PRIndexCreation.xml";
-    // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
-    // fileName));
+    setCacheInVMs(vm0,vm1,vm3);
     LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testCreatePartitionedIndexWithNoAliasBeforePuts started ");
     // creating all the prs
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
-
-    // vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
-    // "PrIndexOnId", "p.ID", "p"));
+        redundancy, PortfolioData.class));
 
-    // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
-    // "PrIndexOnPKID", "p.pkid", "p"));
-    // adding a new node to an already existing system.
-    // vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-    // Scope.DISTRIBUTED_ACK, redundancy));
     // putting some data in.
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     // Putting the data into the PR's created
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
         "PrIndexOnStatus", "status",null, ""));
-    /*
-    vm1.invoke(new CacheSerializableRunnable("IndexCreationOnPosition") {
-      public void run2(){
-        try {
-          Cache cache = getCache();
-          QueryService qs = cache.getQueryService();
-          Region region = cache.getRegion(name);
-          LogWriter logger = cache.getLogger();
-         // logger.info("Test Creating index with Name : [ "+indexName+" ] " +
-         //               "IndexedExpression : [ "+indexedExpression+" ] Alias : [ "+alias+" ] FromClause : [ "+region.getFullPath() + " " + alias+" ] " );
-          Index parIndex = qs.createIndex("IndexOnPotionMktValue", IndexType.FUNCTIONAL, "pVal.mktValue"
-              ,region.getFullPath()+" pf, pf.positions pVal TYPE Position", "import parReg.\"query\".Position;");
-          logger.info(
-              "Index creted on partitioned region : " + parIndex);
-          logger.info(
-              "Number of buckets indexed in the partitioned region locally : "
-                  + "" + ((PartitionedIndex)parIndex).getNumberOfIndexedBucket()
-                  + " and remote buckets indexed : "
-                  + ((PartitionedIndex)parIndex).getNumRemoteBucketsIndexed());
-          /*
-           * assertEquals("Max num of buckets in the partiotion regions and
-           * the " + "buckets indexed should be equal",
-           * ((PartitionedRegion)region).getTotalNumberOfBuckets(),
-           * (((PartionedIndex)parIndex).getNumberOfIndexedBucket()+((PartionedIndex)parIndex).getNumRemtoeBucketsIndexed()));
-           * should put all the assetion in a seperate function.
-           */ 
-       /* } 
-        catch (Exception ex) {
-          fail("Creating Index in this vm failed : ", ex);
-        }
-      
-      }
-    });*/
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
-    // vm2.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
   } 
   
@@ -461,38 +415,35 @@ public class PRBasicIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
-    // final String fileName = "PRIndexCreation.xml";
-    // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
-    // fileName));
+    setCacheInVMs(vm0,vm1,vm2,vm3);
     LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testPartitionedIndexUsageWithPRQuery started ");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
         "PrIndexOnId", "p.ID",null, "p"));
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
     vm0.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
         portfolio, cnt, cntDest));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
         name, localName));
     // validation on index usage with queries over a pr
-    vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
-    vm1.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
-    vm2.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
-    vm3.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
+    vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck());
+    vm1.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck());
+    vm2.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck());
+    vm3.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck());
     LogWriterUtils.getLogWriter()
     .info(
         "PRBasicIndexCreationDUnitTest.testPartitionedIndexUsageWithPRQuery done ");
@@ -507,32 +458,28 @@ public class PRBasicIndexCreationDUnitTest extends
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
+    setCacheInVMs(vm0,vm1);
 
-    // final String fileName = "PRIndexCreation.xml";
-    // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
-    // fileName));
-    
     int redundancy = 1;
     LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testPartitionedIndexCreationDuringPersistentRecovery started ");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPersistentPRCreate(name,
-        redundancy, PRQHelp.valueConstraint));
+        redundancy, PortfolioData.class));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPersistentPRCreate(name,
-        redundancy, PRQHelp.valueConstraint));
-//    vm2.invoke(PRQHelp.getCacheSerializableRunnableForPersistentPRCreate(name,
-//        redundancy, PRQHelp.valueConstraint));
-    
+        redundancy, PortfolioData.class));
+
     
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
     
     
     //Restart a single member
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForCloseCache());
+    setCacheInVMs(vm0);
     AsyncInvocation regionCreateFuture = vm0.invokeAsync(PRQHelp.getCacheSerializableRunnableForPersistentPRCreate(name,
-        redundancy, PRQHelp.valueConstraint));
+        redundancy, PortfolioData.class));
     
     //Ok, I want to do this in parallel
     AsyncInvocation indexCreateFuture = vm1.invokeAsync(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
@@ -543,15 +490,11 @@ public class PRBasicIndexCreationDUnitTest extends
     indexCreateFuture.getResult(20 * 1000);
     
     vm0.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName,PortfolioData.class));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
         portfolio, cnt, cntDest));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
         name, localName));
-    // validation on index usage with queries over a pr
-    //The indexes may not have been completely created yet, because the buckets
-    //may still be recovering from disk.
-//    vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
     LogWriterUtils.getLogWriter()
     .info(
         "PRBasicIndexCreationDUnitTest.testPartitionedIndexCreationDuringPersistentRecovery done ");
@@ -569,26 +512,26 @@ public class PRBasicIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-
+    setCacheInVMs(vm0,vm1,vm2,vm3);
     LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testPartitionedQueryWithIndexOnIdBug37089 started ");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
         "PrIndexOnId", "p.ID",null, "p"));
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
     vm0.invoke(PRQHelp
-        .getCacheSerializableRunnableForLocalRegionCreation(localName));
+        .getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
         portfolio, cnt, cntDest));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
@@ -608,9 +551,7 @@ public class PRBasicIndexCreationDUnitTest extends
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-//    VM vm2 = host.getVM(2);
-//    VM vm3 = host.getVM(3);
-    // closeAllCache();
+    setCacheInVMs(vm0,vm1);
     final String fileName = "PRIndexCreation.xml";
     LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreation.testCreatePartitionedIndexThroughXML started");
@@ -618,35 +559,12 @@ public class PRBasicIndexCreationDUnitTest extends
         "Starting and initializing partitioned regions and indexes using xml");
     LogWriterUtils.getLogWriter().info(
         "Starting a pr asynchronously using an xml file name : " + fileName);
-   // AsyncInvocation asyInvk0 = vm0.invokeAsync(PRQHelp
-   //     .getCacheSerializableRunnableForPRCreateThrougXML(name, fileName));
-   // AsyncInvocation asyInvk1 = vm1.invokeAsync(PRQHelp
-   //     .getCacheSerializableRunnableForPRCreateThrougXML(name, fileName));
-   // asyInvk1.join();
-   // if (asyInvk1.exceptionOccurred()) {
-   //   fail("asyInvk1 failed", asyInvk1.getException());
-   // }
-   // asyInvk0.join();
-   // if (asyInvk0.exceptionOccurred()) {
-    //  fail("asyInvk0 failed", asyInvk0.getException());
-   // }
-    // printing all the indexes are created.
-    //vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
-    //vm1.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
-    /*
-    <index name="index8">
-    <functional from-clause="/PartionedPortfolios.keys k" expression="k" />
-  </index> */
-  //  vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name, fileName));
-    
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
-//    vm0.invoke(PRQHelp
-//        .getCacheSerializableRunnableForLocalRegionCreation(localName));
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+        redundancy, PortfolioData.class));
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
@@ -656,8 +574,6 @@ public class PRBasicIndexCreationDUnitTest extends
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
         "index7","nvl(k.status.toString(),'nopes')", "/PartionedPortfolios.values k" , ""));
     
-    //vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(localName,
-    //    "index8","k", "/LocalPortfolios.keys k" , ""));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
     
@@ -677,18 +593,19 @@ public class PRBasicIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
+    setCacheInVMs(vm0,vm1,vm2,vm3);
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     
     // create more vms to host data.
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     //  Putting the data into the PR's created
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
      cnt, cntDest));
@@ -717,14 +634,15 @@ public class PRBasicIndexCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
+    setCacheInVMs(vm0,vm1,vm2,vm3);
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     
-    final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
+    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
     //  Putting the data into the PR's created
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
      cnt, cntDest));
@@ -734,7 +652,7 @@ public class PRBasicIndexCreationDUnitTest extends
     
     // create an accessor vm.
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
-        redundancy));
+        redundancy, PortfolioData.class));
     
     
   }
@@ -766,7 +684,7 @@ public class PRBasicIndexCreationDUnitTest extends
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    VM vm3 = host.getVM(3);
-
+   setCacheInVMs(vm0,vm1,vm2,vm3);
    // Creating PR's on the participating VM's
    LogWriterUtils.getLogWriter()
      .info(
@@ -796,7 +714,7 @@ public class PRBasicIndexCreationDUnitTest extends
    // Generating portfolio object array to be populated across the PR's & Local
    // Regions
 
-   final Portfolio[] portfoliosAndPositions = PRQHelp.createPortfoliosAndPositions(totalDataSize);
+   final Portfolio[] portfoliosAndPositions = createPortfoliosAndPositions(totalDataSize);
 
    // Putting the data into the PR's created
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfoliosAndPositions,
@@ -864,7 +782,7 @@ public class PRBasicIndexCreationDUnitTest extends
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    VM vm3 = host.getVM(3);
-
+   setCacheInVMs(vm0,vm1,vm2,vm3);
    // Creating PR's on the participating VM's
    LogWriterUtils.getLogWriter()
    .info(
@@ -894,7 +812,7 @@ public class PRBasicIndexCreationDUnitTest extends
    // Generating portfolio object array to be populated across the PR's & Local
    // Regions
 
-   final Portfolio[] portfoliosAndPositions = PRQHelp.createPortfoliosAndPositions(totalDataSize);
+   final Portfolio[] portfoliosAndPositions = createPortfoliosAndPositions(totalDataSize);
 
    // Putting the data into the PR's created
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfoliosAndPositions,
@@ -952,7 +870,7 @@ public void testIndexQueryingWithOrderByLimit() throws Exception
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    VM vm3 = host.getVM(3);
-
+   setCacheInVMs(vm0,vm1,vm2,vm3);
    // Creating PR's on the participating VM's
    LogWriterUtils.getLogWriter()
      .info(
@@ -982,7 +900,7 @@ public void testIndexQueryingWithOrderByLimit() throws Exception
    // Generating portfolio object array to be populated across the PR's & Local
    // Regions
 
-   final Portfolio[] portfoliosAndPositions = PRQHelp.createPortfoliosAndPositions(totalDataSize);
+   final Portfolio[] portfoliosAndPositions = createPortfoliosAndPositions(totalDataSize);
 
    // Putting the data into the PR's created
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfoliosAndPositions,


[59/63] [abbrv] incubator-geode git commit: GEODE-1329 auto-reconnect attempts cease if kicked out during boot-up of the cache

Posted by kl...@apache.org.
GEODE-1329 auto-reconnect attempts cease if kicked out during boot-up of the cache

This is a follow-up to the fix for GEODE-1329 that removes the old
reconnectCancelledLock variable and makes reconnectCancelled volatile.
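
For context, the change replaces a lock object that only guarded reads and writes of a single boolean with a volatile field: volatile reads and writes of a boolean are atomic and visible across threads, so the extra monitor added no safety, while the separate reconnectLock is still used only to park and wake the waiting thread. Below is a minimal sketch of that pattern; the class and method names are illustrative only and are not Geode's actual API.

    // Sketch of a volatile cancellation flag driving a timed wait loop.
    // Names are hypothetical; this is not the InternalDistributedSystem code.
    public class ReconnectFlagSketch {

      // volatile makes a write by the cancelling thread immediately visible
      // to the thread polling the flag; no dedicated lock object is needed
      // to guard a single boolean.
      private volatile boolean reconnectCancelled = false;

      // Monitor used only to wait for / signal progress, not to guard the flag.
      private final Object reconnectLock = new Object();

      public boolean isReconnectCancelled() {
        return reconnectCancelled;        // plain volatile read
      }

      public void cancelReconnect() {
        reconnectCancelled = true;        // plain volatile write
        synchronized (reconnectLock) {
          reconnectLock.notifyAll();      // wake any thread waiting below
        }
      }

      // Waits until cancelled or the timeout elapses, re-checking the flag
      // each time the wait returns so a cancellation is never missed.
      public void waitForReconnect(long timeoutMillis) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        synchronized (reconnectLock) {
          while (!reconnectCancelled) {
            long remaining = deadline - System.currentTimeMillis();
            if (remaining <= 0) {
              break;
            }
            reconnectLock.wait(remaining);
          }
        }
      }
    }

The design choice mirrors the patch below: the flag itself needs only visibility, which volatile provides, while the wait/notify handshake keeps its own monitor.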


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/4a6c779d
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/4a6c779d
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/4a6c779d

Branch: refs/heads/feature/GEODE-1276
Commit: 4a6c779d386f818306062bc1a84276858592384c
Parents: b8fc3c7
Author: Bruce Schuchardt <bs...@pivotal.io>
Authored: Tue May 3 13:57:47 2016 -0700
Committer: Bruce Schuchardt <bs...@pivotal.io>
Committed: Tue May 3 14:00:24 2016 -0700

----------------------------------------------------------------------
 .../internal/InternalDistributedSystem.java     | 21 ++++++--------------
 1 file changed, 6 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/4a6c779d/geode-core/src/main/java/com/gemstone/gemfire/distributed/internal/InternalDistributedSystem.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/distributed/internal/InternalDistributedSystem.java b/geode-core/src/main/java/com/gemstone/gemfire/distributed/internal/InternalDistributedSystem.java
index df85417..91fa558 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/distributed/internal/InternalDistributedSystem.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/distributed/internal/InternalDistributedSystem.java
@@ -1445,9 +1445,7 @@ public class InternalDistributedSystem
    * the attempt has been cancelled.
    */
   public boolean isReconnectCancelled() {
-    synchronized(reconnectCancelledLock) {
-      return reconnectCancelled;
-    }
+    return reconnectCancelled;
   }
 
   /**
@@ -2476,17 +2474,14 @@ public class InternalDistributedSystem
   /**
    * If true then this DS will never reconnect.
    */
-  private boolean reconnectCancelled = false;
-  private Object reconnectCancelledLock = new Object();
+  private volatile boolean reconnectCancelled = false;
 
   /** Make sure this instance of DS never does a reconnect.
    * Also if reconnect is in progress cancel it.
    */
   public void cancelReconnect() {
 //    (new ManagerLogWriter(LogWriterImpl.FINE_LEVEL, System.out)).fine("cancelReconnect invoked", new Exception("stack trace"));
-    synchronized(this.reconnectCancelledLock) {
-      this.reconnectCancelled = true;
-    }
+    this.reconnectCancelled = true;
     if (isReconnecting()) {
       synchronized (this.reconnectLock) { // should the synchronized be first on this and
     	  // then on this.reconnectLock.
@@ -3024,10 +3019,8 @@ public class InternalDistributedSystem
       InternalDistributedSystem recon = this.reconnectDS;
 
       while (isReconnecting()) {
-        synchronized(this.reconnectCancelledLock) {
-          if (this.reconnectCancelled) {
-            break;
-          }
+        if (this.reconnectCancelled) {
+          break;
         }
         if (time != 0) {
           this.reconnectLock.wait(sleepTime);
@@ -3050,9 +3043,7 @@ public class InternalDistributedSystem
   @Override
   public void stopReconnecting() {
 //    (new ManagerLogWriter(LogWriterImpl.FINE_LEVEL, System.out)).fine("stopReconnecting invoked", new Exception("stack trace"));
-    synchronized(this.reconnectCancelledLock) {
-      this.reconnectCancelled = true;
-    }
+    this.reconnectCancelled = true;
     synchronized(this.reconnectLock) {
       this.reconnectLock.notify();
     }