Posted to commits@geode.apache.org by up...@apache.org on 2016/04/27 22:49:47 UTC

[01/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Repository: incubator-geode
Updated Branches:
  refs/heads/feature/GEODE-10 [created] 9f3f10fd2


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt
----------------------------------------------------------------------
diff --git a/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt b/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt
index b427ed3..222e63d 100644
--- a/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt
+++ b/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt
@@ -51,7 +51,7 @@ com/gemstone/gemfire/admin/jmx/internal/GemFireHealthConfigJmxImpl,true,14827196
 com/gemstone/gemfire/admin/jmx/internal/ManagedResourceType,true,3752874768667480449,ordinal:int
 com/gemstone/gemfire/admin/jmx/internal/RefreshNotificationType,true,4376763592395613794,ordinal:int
 com/gemstone/gemfire/admin/jmx/internal/StatisticAttributeInfo,true,28022387514935560,stat:com/gemstone/gemfire/admin/Statistic
-com/gemstone/gemfire/cache/AttributesFactory$RegionAttributesImpl,true,-3663000883567530374,asyncEventQueueIds:java/util/Set,cacheListeners:java/util/ArrayList,cacheLoader:com/gemstone/gemfire/cache/CacheLoader,cacheWriter:com/gemstone/gemfire/cache/CacheWriter,compressor:com/gemstone/gemfire/compression/Compressor,concurrencyChecksEnabled:boolean,concurrencyLevel:int,customEntryIdleTimeout:com/gemstone/gemfire/cache/CustomExpiry,customEntryTimeToLive:com/gemstone/gemfire/cache/CustomExpiry,dataPolicy:com/gemstone/gemfire/cache/DataPolicy,diskDirs:java/io/File[],diskSizes:int[],diskStoreName:java/lang/String,diskSynchronous:boolean,diskWriteAttributes:com/gemstone/gemfire/cache/DiskWriteAttributes,earlyAck:boolean,enableAsyncConflation:boolean,enableSubscriptionConflation:boolean,entryIdleTimeout:int,entryIdleTimeoutExpirationAction:com/gemstone/gemfire/cache/ExpirationAction,entryTimeToLive:int,entryTimeToLiveExpirationAction:com/gemstone/gemfire/cache/ExpirationAction,evictionAttr
 ibutes:com/gemstone/gemfire/internal/cache/EvictionAttributesImpl,gatewaySenderIds:java/util/Set,ignoreJTA:boolean,indexMaintenanceSynchronous:boolean,initialCapacity:int,isBucketRegion:boolean,isCloningEnabled:boolean,isLockGrantor:boolean,keyConstraint:java/lang/Class,loadFactor:float,membershipAttributes:com/gemstone/gemfire/cache/MembershipAttributes,multicastEnabled:boolean,offHeap:boolean,partitionAttributes:com/gemstone/gemfire/cache/PartitionAttributes,poolName:java/lang/String,publisher:boolean,regionIdleTimeout:int,regionIdleTimeoutExpirationAction:com/gemstone/gemfire/cache/ExpirationAction,regionTimeToLive:int,regionTimeToLiveExpirationAction:com/gemstone/gemfire/cache/ExpirationAction,scope:com/gemstone/gemfire/cache/Scope,statisticsEnabled:boolean,subscriptionAttributes:com/gemstone/gemfire/cache/SubscriptionAttributes,valueConstraint:java/lang/Class
+com/gemstone/gemfire/cache/AttributesFactory$RegionAttributesImpl,true,-3663000883567530374,asyncEventQueueIds:java/util/Set,cacheListeners:java/util/ArrayList,cacheLoader:com/gemstone/gemfire/cache/CacheLoader,cacheWriter:com/gemstone/gemfire/cache/CacheWriter,compressor:com/gemstone/gemfire/compression/Compressor,concurrencyChecksEnabled:boolean,concurrencyLevel:int,customEntryIdleTimeout:com/gemstone/gemfire/cache/CustomExpiry,customEntryTimeToLive:com/gemstone/gemfire/cache/CustomExpiry,dataPolicy:com/gemstone/gemfire/cache/DataPolicy,diskDirs:java/io/File[],diskSizes:int[],diskStoreName:java/lang/String,diskSynchronous:boolean,diskWriteAttributes:com/gemstone/gemfire/cache/DiskWriteAttributes,earlyAck:boolean,enableAsyncConflation:boolean,enableSubscriptionConflation:boolean,entryIdleTimeout:int,entryIdleTimeoutExpirationAction:com/gemstone/gemfire/cache/ExpirationAction,entryTimeToLive:int,entryTimeToLiveExpirationAction:com/gemstone/gemfire/cache/ExpirationAction,evictionAttr
 ibutes:com/gemstone/gemfire/internal/cache/EvictionAttributesImpl,gatewaySenderIds:java/util/Set,hdfsStoreName:java/lang/String,hdfsWriteOnly:boolean,ignoreJTA:boolean,indexMaintenanceSynchronous:boolean,initialCapacity:int,isBucketRegion:boolean,isCloningEnabled:boolean,isLockGrantor:boolean,keyConstraint:java/lang/Class,loadFactor:float,membershipAttributes:com/gemstone/gemfire/cache/MembershipAttributes,multicastEnabled:boolean,offHeap:boolean,partitionAttributes:com/gemstone/gemfire/cache/PartitionAttributes,poolName:java/lang/String,publisher:boolean,regionIdleTimeout:int,regionIdleTimeoutExpirationAction:com/gemstone/gemfire/cache/ExpirationAction,regionTimeToLive:int,regionTimeToLiveExpirationAction:com/gemstone/gemfire/cache/ExpirationAction,scope:com/gemstone/gemfire/cache/Scope,statisticsEnabled:boolean,subscriptionAttributes:com/gemstone/gemfire/cache/SubscriptionAttributes,valueConstraint:java/lang/Class
 com/gemstone/gemfire/cache/CacheClosedException,true,-6479561694497811262
 com/gemstone/gemfire/cache/CacheException,false
 com/gemstone/gemfire/cache/CacheExistsException,true,4090002289325418100
@@ -128,6 +128,18 @@ com/gemstone/gemfire/cache/execute/EmtpyRegionFunctionException,true,1
 com/gemstone/gemfire/cache/execute/FunctionAdapter,false
 com/gemstone/gemfire/cache/execute/FunctionException,true,4893171227542647452
 com/gemstone/gemfire/cache/execute/FunctionInvocationTargetException,true,1,id:com/gemstone/gemfire/distributed/DistributedMember
+com/gemstone/gemfire/cache/hdfs/HDFSIOException,false
+com/gemstone/gemfire/cache/hdfs/StoreExistsException,true,1
+com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreConfigHolder,false,autoMajorCompact:boolean,batchIntervalMillis:int,batchSize:int,blockCacheSize:float,clientConfigFile:java/lang/String,diskStoreName:java/lang/String,diskSynchronous:boolean,dispatcherThreads:int,fileRolloverInterval:int,homeDir:java/lang/String,isAutoCompact:boolean,isPersistenceEnabled:boolean,logPrefix:java/lang/String,majorCompactionConcurrency:int,majorCompactionIntervalMins:int,maxConcurrency:int,maxFileSize:int,maxInputFileCount:int,maxInputFileSizeMB:int,maximumQueueMemory:int,minInputFileCount:int,name:java/lang/String,namenodeURL:java/lang/String,oldFileCleanupIntervalMins:int
+com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManager$CompactionIsDisabled,true,1
+com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSFlushQueueFunction,false
+com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionFunction,false
+com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSLastCompactionTimeFunction,false
+com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer$HoplogReadersController$1,true,1,this$1:com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer$HoplogReadersController,val$this$0:com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer
+com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer$HoplogReadersController$2,true,1,this$1:com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer$HoplogReadersController,val$this$0:com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer
+com/gemstone/gemfire/cache/hdfs/internal/hoplog/Hoplog$HoplogVersion,false
+com/gemstone/gemfire/cache/hdfs/internal/hoplog/Hoplog$Meta,false
+com/gemstone/gemfire/cache/hdfs/internal/org/apache/hadoop/io/SequenceFile$CompressionType,false
 com/gemstone/gemfire/cache/partition/PartitionNotAvailableException,true,1
 com/gemstone/gemfire/cache/persistence/ConflictingPersistentDataException,true,-2629287782021455875
 com/gemstone/gemfire/cache/persistence/PartitionOfflineException,true,-6471045959318795870,offlineMembers:java/util/Set
@@ -293,12 +305,13 @@ com/gemstone/gemfire/internal/cache/Oplog$OkToSkipResult,false
 com/gemstone/gemfire/internal/cache/Oplog$OplogFileType,false
 com/gemstone/gemfire/internal/cache/PRContainsValueFunction,false
 com/gemstone/gemfire/internal/cache/PRHARedundancyProvider$ArrayListWithClearState,true,1,wasCleared:boolean
-com/gemstone/gemfire/internal/cache/PartitionedRegion$7,true,0,this$0:com/gemstone/gemfire/internal/cache/PartitionedRegion,val$bucketId:int
+com/gemstone/gemfire/internal/cache/PartitionedRegion$8,true,0,this$0:com/gemstone/gemfire/internal/cache/PartitionedRegion,val$bucketId:int
 com/gemstone/gemfire/internal/cache/PartitionedRegion$PRIdMap,true,3667357372967498179,cleared:boolean
 com/gemstone/gemfire/internal/cache/PartitionedRegion$SizeEntry,false,isPrimary:boolean,size:int
 com/gemstone/gemfire/internal/cache/PartitionedRegionDataStore$CreateBucketResult,false,nowExists:boolean
 com/gemstone/gemfire/internal/cache/PartitionedRegionException,true,5113786059279106007
 com/gemstone/gemfire/internal/cache/PartitionedRegionQueryEvaluator$MemberResultsList,false,isLastChunkReceived:boolean
+com/gemstone/gemfire/internal/cache/PartitionedRegionQueryEvaluator$TaintableArrayList,false,isPoison:boolean
 com/gemstone/gemfire/internal/cache/PartitionedRegionStatus,true,-6755318987122602065,numberOfLocalEntries:int
 com/gemstone/gemfire/internal/cache/PrimaryBucketException,true,1
 com/gemstone/gemfire/internal/cache/PutAllPartialResultException,false,result:com/gemstone/gemfire/internal/cache/PutAllPartialResultException$PutAllPartialResult
@@ -322,7 +335,10 @@ com/gemstone/gemfire/internal/cache/execute/InternalFunctionException,false
 com/gemstone/gemfire/internal/cache/execute/InternalFunctionInvocationTargetException,false,failedIds:java/util/Set
 com/gemstone/gemfire/internal/cache/execute/MemberMappedArgument,true,-6465867775653599576,defaultArgument:java/lang/Object,memberToArgMap:java/util/Map
 com/gemstone/gemfire/internal/cache/execute/NoResult,true,-4901369422864228848
+com/gemstone/gemfire/internal/cache/execute/util/CommitFunction,true,7851518767859544501
 com/gemstone/gemfire/internal/cache/execute/util/FindRestEnabledServersFunction,true,7851518767859544678
+com/gemstone/gemfire/internal/cache/execute/util/NestedTransactionFunction,true,1400965724856341543
+com/gemstone/gemfire/internal/cache/execute/util/RollbackFunction,true,1377183180063184795
 com/gemstone/gemfire/internal/cache/ha/ThreadIdentifier$Bits,false,position:int,width:int
 com/gemstone/gemfire/internal/cache/ha/ThreadIdentifier$WanType,false
 com/gemstone/gemfire/internal/cache/lru/HeapLRUCapacityController,true,4970685814429530675,perEntryOverhead:int,sizer:com/gemstone/gemfire/cache/util/ObjectSizer
@@ -338,6 +354,7 @@ com/gemstone/gemfire/internal/cache/partitioned/RedundancyAlreadyMetException,fa
 com/gemstone/gemfire/internal/cache/partitioned/rebalance/PartitionedRegionLoadModel$RefusalReason,false
 com/gemstone/gemfire/internal/cache/persistence/OplogType,false,prefix:java/lang/String
 com/gemstone/gemfire/internal/cache/persistence/PersistentMemberState,false
+com/gemstone/gemfire/internal/cache/persistence/soplog/SortedReader$Metadata,false
 com/gemstone/gemfire/internal/cache/snapshot/ClientExporter$ClientArgs,true,1,options:com/gemstone/gemfire/cache/snapshot/SnapshotOptions,prSingleHop:boolean,region:java/lang/String
 com/gemstone/gemfire/internal/cache/snapshot/ClientExporter$ProxyExportFunction,true,1
 com/gemstone/gemfire/internal/cache/snapshot/RegionSnapshotServiceImpl$1,true,1
@@ -363,7 +380,7 @@ com/gemstone/gemfire/internal/cache/wan/GatewaySenderException,true,809014315356
 com/gemstone/gemfire/internal/cache/wan/parallel/BucketRegionQueueUnavailableException,false
 com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlVersion,false,namespace:java/lang/String,publicId:java/lang/String,schemaLocation:java/lang/String,systemId:java/lang/String,version:java/lang/String
 com/gemstone/gemfire/internal/cache/xmlcache/DiskStoreAttributesCreation,false
-com/gemstone/gemfire/internal/cache/xmlcache/RegionAttributesCreation,true,2241078661206355376,asyncEventQueueIds:java/util/Set,cacheListeners:java/util/ArrayList,cacheLoader:com/gemstone/gemfire/cache/CacheLoader,cacheWriter:com/gemstone/gemfire/cache/CacheWriter,cloningEnabled:boolean,compressor:com/gemstone/gemfire/compression/Compressor,concurrencyChecksEnabled:boolean,concurrencyLevel:int,customEntryIdleTimeout:com/gemstone/gemfire/cache/CustomExpiry,customEntryTimeToLive:com/gemstone/gemfire/cache/CustomExpiry,dataPolicy:com/gemstone/gemfire/cache/DataPolicy,diskDirs:java/io/File[],diskSizes:int[],diskStoreName:java/lang/String,diskWriteAttributes:com/gemstone/gemfire/cache/DiskWriteAttributes,earlyAck:boolean,enableAsyncConflation:boolean,enableSubscriptionConflation:boolean,entryIdleTimeout:com/gemstone/gemfire/cache/ExpirationAttributes,entryTimeToLive:com/gemstone/gemfire/cache/ExpirationAttributes,evictionAttributes:com/gemstone/gemfire/internal/cache/EvictionAttributesIm
 pl,gatewaySenderIds:java/util/Set,id:java/lang/String,ignoreJTA:boolean,indexMaintenanceSynchronous:boolean,initialCapacity:int,isDiskSynchronous:boolean,isLockGrantor:boolean,keyConstraint:java/lang/Class,loadFactor:float,membershipAttributes:com/gemstone/gemfire/cache/MembershipAttributes,multicastEnabled:boolean,offHeap:boolean,partitionAttributes:com/gemstone/gemfire/cache/PartitionAttributes,poolName:java/lang/String,publisher:boolean,refid:java/lang/String,regionIdleTimeout:com/gemstone/gemfire/cache/ExpirationAttributes,regionTimeToLive:com/gemstone/gemfire/cache/ExpirationAttributes,scope:com/gemstone/gemfire/cache/Scope,statisticsEnabled:boolean,subscriptionAttributes:com/gemstone/gemfire/cache/SubscriptionAttributes,valueConstraint:java/lang/Class
+com/gemstone/gemfire/internal/cache/xmlcache/RegionAttributesCreation,true,2241078661206355376,asyncEventQueueIds:java/util/Set,cacheListeners:java/util/ArrayList,cacheLoader:com/gemstone/gemfire/cache/CacheLoader,cacheWriter:com/gemstone/gemfire/cache/CacheWriter,cloningEnabled:boolean,compressor:com/gemstone/gemfire/compression/Compressor,concurrencyChecksEnabled:boolean,concurrencyLevel:int,customEntryIdleTimeout:com/gemstone/gemfire/cache/CustomExpiry,customEntryTimeToLive:com/gemstone/gemfire/cache/CustomExpiry,dataPolicy:com/gemstone/gemfire/cache/DataPolicy,diskDirs:java/io/File[],diskSizes:int[],diskStoreName:java/lang/String,diskWriteAttributes:com/gemstone/gemfire/cache/DiskWriteAttributes,earlyAck:boolean,enableAsyncConflation:boolean,enableSubscriptionConflation:boolean,entryIdleTimeout:com/gemstone/gemfire/cache/ExpirationAttributes,entryTimeToLive:com/gemstone/gemfire/cache/ExpirationAttributes,evictionAttributes:com/gemstone/gemfire/internal/cache/EvictionAttributesIm
 pl,gatewaySenderIds:java/util/Set,hdfsStoreName:java/lang/String,hdfsWriteOnly:boolean,id:java/lang/String,ignoreJTA:boolean,indexMaintenanceSynchronous:boolean,initialCapacity:int,isDiskSynchronous:boolean,isLockGrantor:boolean,keyConstraint:java/lang/Class,loadFactor:float,membershipAttributes:com/gemstone/gemfire/cache/MembershipAttributes,multicastEnabled:boolean,offHeap:boolean,partitionAttributes:com/gemstone/gemfire/cache/PartitionAttributes,poolName:java/lang/String,publisher:boolean,refid:java/lang/String,regionIdleTimeout:com/gemstone/gemfire/cache/ExpirationAttributes,regionTimeToLive:com/gemstone/gemfire/cache/ExpirationAttributes,scope:com/gemstone/gemfire/cache/Scope,statisticsEnabled:boolean,subscriptionAttributes:com/gemstone/gemfire/cache/SubscriptionAttributes,valueConstraint:java/lang/Class
 com/gemstone/gemfire/internal/concurrent/AtomicLong5,true,-1915700199064062938
 com/gemstone/gemfire/internal/concurrent/CompactConcurrentHashSet2,true,7249069246763182397
 com/gemstone/gemfire/internal/concurrent/CompactConcurrentHashSet2$Segment,true,2249069246763182397,loadFactor:float
@@ -656,7 +673,7 @@ com/gemstone/gemfire/management/internal/cli/domain/MemberConfigurationInfo,fals
 com/gemstone/gemfire/management/internal/cli/domain/MemberInformation,true,1,cacheServerList:java/util/List,cacheXmlFilePath:java/lang/String,clientCount:int,cpuUsage:double,groups:java/lang/String,heapUsage:java/lang/String,host:java/lang/String,hostedRegions:java/util/Set,id:java/lang/String,initHeapSize:java/lang/String,isServer:boolean,locatorBindAddress:java/lang/String,locatorPort:int,locators:java/lang/String,logFilePath:java/lang/String,maxHeapSize:java/lang/String,name:java/lang/String,offHeapMemorySize:java/lang/String,processId:java/lang/String,serverBindAddress:java/lang/String,statArchiveFilePath:java/lang/String,workingDirPath:java/lang/String
 com/gemstone/gemfire/management/internal/cli/domain/MemberResult,true,1,errorMessage:java/lang/String,exceptionMessage:java/lang/String,isSuccessful:boolean,memberNameOrId:java/lang/String,opPossible:boolean,successMessage:java/lang/String
 com/gemstone/gemfire/management/internal/cli/domain/PartitionAttributesInfo,true,1,colocatedWith:java/lang/String,fpaInfoList:java/util/List,localMaxMemory:int,nonDefaultAttributes:java/util/Map,partitionResolverName:java/lang/String,recoveryDelay:long,redundantCopies:int,startupRecoveryDelay:long,totalNumBuckets:int
-com/gemstone/gemfire/management/internal/cli/domain/RegionAttributesInfo,true,1,cacheListenerClassNames:java/util/List,cacheLoaderClassName:java/lang/String,cacheWriterClassName:java/lang/String,cloningEnabled:boolean,compressorClassName:java/lang/String,concurrencyChecksEnabled:boolean,concurrencyLevel:int,dataPolicy:com/gemstone/gemfire/cache/DataPolicy,diskStoreName:java/lang/String,enableAsyncConflation:boolean,enableSubscriptionConflation:boolean,entryIdleTimeout:int,entryIdleTimeoutAction:java/lang/String,entryTimeToLive:int,entryTimeToLiveAction:java/lang/String,evictionAttributesInfo:com/gemstone/gemfire/management/internal/cli/domain/EvictionAttributesInfo,ignoreJTA:boolean,indexMaintenanceSynchronous:boolean,initialCapacity:int,isLockGrantor:boolean,loadFactor:float,multicastEnabled:boolean,nonDefaultAttributes:java/util/Map,offHeap:boolean,partitionAttributesInfo:com/gemstone/gemfire/management/internal/cli/domain/PartitionAttributesInfo,poolName:java/lang/String,regionId
 leTimeout:int,regionIdleTimeoutAction:java/lang/String,regionTimeToLive:int,regionTimeToLiveAction:java/lang/String,scope:com/gemstone/gemfire/cache/Scope,statisticsEnabled:boolean
+com/gemstone/gemfire/management/internal/cli/domain/RegionAttributesInfo,true,1,cacheListenerClassNames:java/util/List,cacheLoaderClassName:java/lang/String,cacheWriterClassName:java/lang/String,cloningEnabled:boolean,compressorClassName:java/lang/String,concurrencyChecksEnabled:boolean,concurrencyLevel:int,dataPolicy:com/gemstone/gemfire/cache/DataPolicy,diskStoreName:java/lang/String,enableAsyncConflation:boolean,enableSubscriptionConflation:boolean,entryIdleTimeout:int,entryIdleTimeoutAction:java/lang/String,entryTimeToLive:int,entryTimeToLiveAction:java/lang/String,evictionAttributesInfo:com/gemstone/gemfire/management/internal/cli/domain/EvictionAttributesInfo,hdfsStoreName:java/lang/String,hdfsWriteOnly:java/lang/Boolean,ignoreJTA:boolean,indexMaintenanceSynchronous:boolean,initialCapacity:int,isLockGrantor:boolean,loadFactor:float,multicastEnabled:boolean,nonDefaultAttributes:java/util/Map,offHeap:boolean,partitionAttributesInfo:com/gemstone/gemfire/management/internal/cli/do
 main/PartitionAttributesInfo,poolName:java/lang/String,regionIdleTimeout:int,regionIdleTimeoutAction:java/lang/String,regionTimeToLive:int,regionTimeToLiveAction:java/lang/String,scope:com/gemstone/gemfire/cache/Scope,statisticsEnabled:boolean
 com/gemstone/gemfire/management/internal/cli/domain/RegionDescription,true,1,cndEvictionAttributes:java/util/Map,cndPartitionAttributes:java/util/Map,cndRegionAttributes:java/util/Map,dataPolicy:com/gemstone/gemfire/cache/DataPolicy,haslocalDataStorage:boolean,isAccessor:boolean,isLocal:boolean,isPartition:boolean,isPersistent:boolean,isReplicate:boolean,isReplicatedProxy:boolean,name:java/lang/String,regionDescPerMemberMap:java/util/Map,scope:com/gemstone/gemfire/cache/Scope
 com/gemstone/gemfire/management/internal/cli/domain/RegionDescriptionPerMember,true,1,hostingMember:java/lang/String,isAccessor:boolean,name:java/lang/String,regionAttributesInfo:com/gemstone/gemfire/management/internal/cli/domain/RegionAttributesInfo,size:int
 com/gemstone/gemfire/management/internal/cli/domain/RegionInformation,true,1,dataPolicy:com/gemstone/gemfire/cache/DataPolicy,isRoot:boolean,name:java/lang/String,parentRegion:java/lang/String,path:java/lang/String,scope:com/gemstone/gemfire/cache/Scope,subRegionInformationSet:java/util/Set
@@ -693,6 +710,7 @@ com/gemstone/gemfire/management/internal/cli/functions/DataCommandFunction$Selec
 com/gemstone/gemfire/management/internal/cli/functions/DataCommandFunction$SelectQuitStep,true,1
 com/gemstone/gemfire/management/internal/cli/functions/DeployFunction,true,1
 com/gemstone/gemfire/management/internal/cli/functions/DescribeDiskStoreFunction,false
+com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunction,true,1
 com/gemstone/gemfire/management/internal/cli/functions/DestroyDiskStoreFunction,true,1
 com/gemstone/gemfire/management/internal/cli/functions/DestroyIndexFunction,true,1
 com/gemstone/gemfire/management/internal/cli/functions/ExportConfigFunction,true,1
@@ -753,6 +771,7 @@ com/gemstone/gemfire/management/internal/cli/shell/jline/ANSIHandler$ANSIStyle,f
 com/gemstone/gemfire/management/internal/cli/util/DiskStoreNotFoundException,false
 com/gemstone/gemfire/management/internal/cli/util/EvictionAttributesInfo,true,1,evictionAction:java/lang/String,evictionAlgorithm:java/lang/String,evictionMaxValue:int
 com/gemstone/gemfire/management/internal/cli/util/FixedPartitionAttributesInfo,false,isPrimary:boolean,numBuckets:int,partitionName:java/lang/String
+com/gemstone/gemfire/management/internal/cli/util/HDFSStoreNotFoundException,false
 com/gemstone/gemfire/management/internal/cli/util/JConsoleNotFoundException,false
 com/gemstone/gemfire/management/internal/cli/util/MemberInformation,true,1,cacheXmlFilePath:java/lang/String,cpuUsage:java/lang/String,groups:java/lang/String,heapUsage:java/lang/String,host:java/lang/String,id:java/lang/String,initHeapSize:java/lang/String,locatorBindAddress:java/lang/String,locatorPort:int,locators:java/lang/String,logFilePath:java/lang/String,maxHeapSize:java/lang/String,name:java/lang/String,processId:java/lang/String,serverBindAddress:java/lang/String,statArchiveFilePath:java/lang/String,workingDirPath:java/lang/String
 com/gemstone/gemfire/management/internal/cli/util/MemberNotFoundException,false
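
For reference, the rows in this file appear to follow the pattern <class-name-with-slashes>,<has-serialVersionUID>[,<serialVersionUID>][,<field>:<type>,...], where the trailing list names the class's serialized fields. A minimal illustrative sketch, using a hypothetical class (neither the class nor its entry comes from this commit):

    package example;  // hypothetical package, for illustration only

    import java.io.Serializable;

    // Hypothetical serializable class, for illustration only.
    public class SampleHdfsConfig implements Serializable {
      private static final long serialVersionUID = 42L;

      private String homeDir;     // would appear as homeDir:java/lang/String
      private boolean writeOnly;  // would appear as writeOnly:boolean
    }

    // Its sanctioned entry would then read:
    // example/SampleHdfsConfig,true,42,homeDir:java/lang/String,writeOnly:boolean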

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-lucene/build.gradle
----------------------------------------------------------------------
diff --git a/geode-lucene/build.gradle b/geode-lucene/build.gradle
index b7c449b..699c0b5 100644
--- a/geode-lucene/build.gradle
+++ b/geode-lucene/build.gradle
@@ -32,6 +32,10 @@ dependencies {
     testCompile 'org.apache.lucene:lucene-test-framework:' + project.'lucene.version'
     testCompile 'org.apache.lucene:lucene-codecs:' + project.'lucene.version'
     testCompile files(project(':geode-core').sourceSets.test.output)
+
+    // the following test dependencies are needed for mocking cache instance
+    testRuntime 'org.apache.hadoop:hadoop-common:' + project.'hadoop.version'
+    testRuntime 'org.apache.hadoop:hadoop-hdfs:' + project.'hadoop.version'
 }
 
 //The lucene integration tests don't have any issues that requiring forking

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/data/Cluster.java
----------------------------------------------------------------------
diff --git a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/data/Cluster.java b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/data/Cluster.java
index 215b063..dd89cdb 100644
--- a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/data/Cluster.java
+++ b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/data/Cluster.java
@@ -1516,6 +1516,7 @@ public class Cluster extends Thread {
     private boolean diskSynchronous;
     private boolean enableOffHeapMemory;
     private String compressionCodec = "";
+    private boolean hdfsWriteOnly;
 
     private List<String> memberName = new ArrayList<String>();
     private List<RegionOnMember> regionOnMembers  = new ArrayList<RegionOnMember>();
@@ -1768,6 +1769,14 @@ public class Cluster extends Thread {
       this.compressionCodec = compressionCodec;
     }
 
+    public boolean isHdfsWriteOnly() {
+      return hdfsWriteOnly;
+    }
+
+    public void setHdfsWriteOnly(boolean hdfsWriteOnly) {
+      this.hdfsWriteOnly = hdfsWriteOnly;
+    }
+
     public Cluster.RegionOnMember[] getRegionOnMembers() {
       Cluster.RegionOnMember[] regionOnMembers = null;
       synchronized (this.regionOnMembers) {
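
The new hdfsWriteOnly flag on the region bean pairs with the COMPOSITE_DATA_KEY_HDFSWRITEONLY constant added in the next hunk. A hedged sketch of how a Pulse updater could copy the JMX attribute into the bean; the helper class, its method name, and the assumption that the enclosing inner class is Cluster.Region are illustrative, not taken from this commit:

    import javax.management.openmbean.CompositeData;

    import com.vmware.gemfire.tools.pulse.internal.data.Cluster;

    // Hypothetical helper, for illustration only.
    final class HdfsAttributeMapper {
      static void applyHdfsWriteOnly(CompositeData regionAttributes, Cluster.Region regionBean) {
        // "hdfsWriteOnly" == PulseConstants.COMPOSITE_DATA_KEY_HDFSWRITEONLY (next hunk)
        String key = "hdfsWriteOnly";
        if (regionAttributes.containsKey(key) && regionAttributes.get(key) != null) {
          regionBean.setHdfsWriteOnly((Boolean) regionAttributes.get(key));
        }
      }
    }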

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/data/PulseConstants.java
----------------------------------------------------------------------
diff --git a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/data/PulseConstants.java b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/data/PulseConstants.java
index e442b8e..c2999f8 100644
--- a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/data/PulseConstants.java
+++ b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/data/PulseConstants.java
@@ -297,6 +297,7 @@ public class PulseConstants {
   public static final String COMPOSITE_DATA_KEY_DISKSTORENAME = "diskStoreName";
   public static final String COMPOSITE_DATA_KEY_DISKSYNCHRONOUS = "diskSynchronous";
   public static final String COMPOSITE_DATA_KEY_COMPRESSIONCODEC = "compressionCodec";
+  public static final String COMPOSITE_DATA_KEY_HDFSWRITEONLY = "hdfsWriteOnly";
   public static final String COMPOSITE_DATA_KEY_ENABLEOFFHEAPMEMORY = "enableOffHeapMemory";
   public static final String COMPOSITE_DATA_KEY_CONNECTIONSACTIVE = "connectionsActive";
   public static final String COMPOSITE_DATA_KEY_CONNECTED = "connected";

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterRegionService.java
----------------------------------------------------------------------
diff --git a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterRegionService.java b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterRegionService.java
index 350846c..5537c28 100644
--- a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterRegionService.java
+++ b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterRegionService.java
@@ -162,6 +162,17 @@ public class ClusterRegionService implements PulseService {
         regionJSON.put("isEnableOffHeapMemory", VALUE_OFF);
       }
 
+      Boolean isHDFSWriteOnly = reg.isHdfsWriteOnly();
+      if (regionType.startsWith("HDFS")) {
+        if (isHDFSWriteOnly) {
+          regionJSON.put("isHDFSWriteOnly", VALUE_ON);
+        } else {
+          regionJSON.put("isHDFSWriteOnly", VALUE_OFF);
+        }
+      } else {
+        regionJSON.put("isHDFSWriteOnly", VALUE_NA);
+      }
+
       String regCompCodec = reg.getCompressionCodec();
       if (StringUtils.isNotNullNotEmptyNotWhiteSpace(regCompCodec)) {
         regionJSON.put("compressionCodec", reg.getCompressionCodec());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterRegionsService.java
----------------------------------------------------------------------
diff --git a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterRegionsService.java b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterRegionsService.java
index 3da4e59..bd38b8d 100644
--- a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterRegionsService.java
+++ b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterRegionsService.java
@@ -156,6 +156,17 @@ public class ClusterRegionsService implements PulseService {
         regionJSON.put("isEnableOffHeapMemory", this.VALUE_OFF);
       }
 
+      Boolean isHDFSWriteOnly = reg.isHdfsWriteOnly();
+      if (regionType.startsWith("HDFS")) {
+        if (isHDFSWriteOnly) {
+          regionJSON.put("isHDFSWriteOnly", this.VALUE_ON);
+        } else {
+          regionJSON.put("isHDFSWriteOnly", this.VALUE_OFF);
+        }
+      } else {
+        regionJSON.put("isHDFSWriteOnly", this.VALUE_NA);
+      }
+
       String regCompCodec = reg.getCompressionCodec();
       if (StringUtils.isNotNullNotEmptyNotWhiteSpace(regCompCodec)) {
         regionJSON.put("compressionCodec", reg.getCompressionCodec());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterSelectedRegionService.java
----------------------------------------------------------------------
diff --git a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterSelectedRegionService.java b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterSelectedRegionService.java
index 39a67cf..35e15c6 100644
--- a/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterSelectedRegionService.java
+++ b/geode-pulse/src/main/java/com/vmware/gemfire/tools/pulse/internal/service/ClusterSelectedRegionService.java
@@ -188,6 +188,12 @@ public class ClusterSelectedRegionService implements PulseService {
 
       regionJSON.put("isEnableOffHeapMemory", reg.isEnableOffHeapMemory() ? PulseService.VALUE_ON : PulseService.VALUE_OFF);
 
+      if (regionType.startsWith("HDFS")) {
+        regionJSON.put("isHDFSWriteOnly", reg.isHdfsWriteOnly() ? PulseService.VALUE_ON : PulseService.VALUE_OFF);
+      } else {
+        regionJSON.put("isHDFSWriteOnly", PulseService.VALUE_NA);
+      }
+
       String regCompCodec = reg.getCompressionCodec();
       if (StringUtils.isNotNullNotEmptyNotWhiteSpace(regCompCodec)) {
         regionJSON.put("compressionCodec", reg.getCompressionCodec());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-pulse/src/main/webapp/scripts/pulsescript/PulseCallbacks.js
----------------------------------------------------------------------
diff --git a/geode-pulse/src/main/webapp/scripts/pulsescript/PulseCallbacks.js b/geode-pulse/src/main/webapp/scripts/pulsescript/PulseCallbacks.js
index e19ddf4..adfe90c 100644
--- a/geode-pulse/src/main/webapp/scripts/pulsescript/PulseCallbacks.js
+++ b/geode-pulse/src/main/webapp/scripts/pulsescript/PulseCallbacks.js
@@ -1284,6 +1284,7 @@ function updateDataViewDetails(clusterRegions) {
         "persistence" : clusterRegions[i].persistence,
         "isEnableOffHeapMemory" : clusterRegions[i].isEnableOffHeapMemory,
         "compressionCodec" : clusterRegions[i].compressionCodec,
+        "isHDFSWriteOnly" : clusterRegions[i].isHDFSWriteOnly,
         "memberNames" : clusterRegions[i].memberNames,
         "memoryWritesTrend" : clusterRegions[i].memoryWritesTrend,
         "memoryReadsTrend" : clusterRegions[i].memoryReadsTrend,
@@ -1322,6 +1323,7 @@ function updateDataViewDetails(clusterRegions) {
       "persistence" : "",
       "isEnableOffHeapMemory" : "",
       "compressionCodec" : "",
+      "isHDFSWriteOnly" : "",
       "memberNames" : "",
       "memoryWritesTrend" : "",
       "memoryReadsTrend" : "",

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-pulse/src/main/webapp/scripts/pulsescript/clusterDetail.js
----------------------------------------------------------------------
diff --git a/geode-pulse/src/main/webapp/scripts/pulsescript/clusterDetail.js b/geode-pulse/src/main/webapp/scripts/pulsescript/clusterDetail.js
index 78c9cda..6d14fd2 100644
--- a/geode-pulse/src/main/webapp/scripts/pulsescript/clusterDetail.js
+++ b/geode-pulse/src/main/webapp/scripts/pulsescript/clusterDetail.js
@@ -1321,7 +1321,7 @@ function createRegionsGridDefault() {
                      'Persistence', 'Entry Count', 'Empty Nodes', 'Data Usage',
                      'Total Data Usage', 'Memory Usage', 'Total Memory',
                      'Member Names', 'Writes', 'Reads','Off Heap Enabled',
-                     'Compression Codec' ],
+                     'Compression Codec','HDFS Write Only' ],
         colModel : [ {
           name : 'name',
           index : 'name',
@@ -1421,6 +1421,10 @@ function createRegionsGridDefault() {
           name : 'compressionCodec',
           index : 'compressionCodec',
           hidden : true
+        }, {
+          name : 'isHDFSWriteOnly',
+          index : 'isHDFSWriteOnly',
+          hidden : true
         }],
         userData : {
           "sortOrder" : "asc",
@@ -1850,6 +1854,7 @@ function buildRegionsTreeMapData(clusterRegions) {
       "persistence" : clusterRegions[i].persistence,
       "isEnableOffHeapMemory" : clusterRegions[i].isEnableOffHeapMemory,
       "compressionCodec" : clusterRegions[i].compressionCodec,
+      "isHDFSWriteOnly" : clusterRegions[i].isHDFSWriteOnly,
       "memberNames" : clusterRegions[i].memberNames,
       "memoryWritesTrend" : clusterRegions[i].memoryWritesTrend,
       "memoryReadsTrend" : clusterRegions[i].memoryReadsTrend,

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/controllers/PulseControllerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/controllers/PulseControllerJUnitTest.java b/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/controllers/PulseControllerJUnitTest.java
index 0dfc2fb..38bf9c4 100644
--- a/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/controllers/PulseControllerJUnitTest.java
+++ b/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/controllers/PulseControllerJUnitTest.java
@@ -345,6 +345,7 @@ public class PulseControllerJUnitTest {
         .andExpect(jsonPath("$.ClusterRegion.region[0].getsRate").value(27.99D))
         .andExpect(jsonPath("$.ClusterRegion.region[0].wanEnabled").value(false))
         .andExpect(jsonPath("$.ClusterRegion.region[0].memberCount").value(1))
+        .andExpect(jsonPath("$.ClusterRegion.region[0].isHDFSWriteOnly").value("NA"))
         .andExpect(jsonPath("$.ClusterRegion.region[0].memberNames[0].name").value(MEMBER_NAME))
         .andExpect(jsonPath("$.ClusterRegion.region[0].memberNames[0].id").value(MEMBER_ID))
         .andExpect(jsonPath("$.ClusterRegion.region[0].emptyNodes").value(0))
@@ -378,6 +379,7 @@ public class PulseControllerJUnitTest {
         .andExpect(jsonPath("$.ClusterRegions.regions[0].getsRate").value(27.99D))
         .andExpect(jsonPath("$.ClusterRegions.regions[0].wanEnabled").value(false))
         .andExpect(jsonPath("$.ClusterRegions.regions[0].memberCount").value(1))
+        .andExpect(jsonPath("$.ClusterRegions.regions[0].isHDFSWriteOnly").value("NA"))
         .andExpect(jsonPath("$.ClusterRegions.regions[0].memberNames[0].name").value(MEMBER_NAME))
         .andExpect(jsonPath("$.ClusterRegions.regions[0].memberNames[0].id").value(MEMBER_ID))
         .andExpect(jsonPath("$.ClusterRegions.regions[0].emptyNodes").value(0))
@@ -428,6 +430,7 @@ public class PulseControllerJUnitTest {
         .andExpect(jsonPath("$.ClusterSelectedRegion.selectedRegion.memoryUsage").value("0.0000"))
         .andExpect(jsonPath("$.ClusterSelectedRegion.selectedRegion.wanEnabled").value(false))
         .andExpect(jsonPath("$.ClusterSelectedRegion.selectedRegion.memberCount").value(1))
+        .andExpect(jsonPath("$.ClusterSelectedRegion.selectedRegion.isHDFSWriteOnly").value("NA"))
         .andExpect(jsonPath("$.ClusterSelectedRegion.selectedRegion.putsRate").value(12.31D))
         .andExpect(jsonPath("$.ClusterSelectedRegion.selectedRegion.totalMemory").value(0))
         .andExpect(jsonPath("$.ClusterSelectedRegion.selectedRegion.entryCount").value(0))

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/Region.java
----------------------------------------------------------------------
diff --git a/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/Region.java b/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/Region.java
index 1770dd5..70476f9 100644
--- a/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/Region.java
+++ b/geode-pulse/src/test/java/com/vmware/gemfire/tools/pulse/tests/Region.java
@@ -32,10 +32,10 @@ public class Region extends JMXBaseBean implements RegionMBean {
   private String name = null;
 
   private static String[] regAttItemNames = { "compressionCodec",
-    "enableOffHeapMemory", "scope", "diskStoreName",
+    "enableOffHeapMemory", "hdfsWriteOnly", "scope", "diskStoreName", 
     "diskSynchronous" };
   private static String[] regAttItemDescriptions = { "compressionCodec",
-    "enableOffHeapMemory", "scope", "diskStoreName",
+    "enableOffHeapMemory", "hdfsWriteOnly", "scope", "diskStoreName", 
     "diskSynchronous" };
   private static OpenType[] regAttItemTypes = { SimpleType.STRING,
     SimpleType.BOOLEAN, SimpleType.BOOLEAN, SimpleType.STRING, 
@@ -158,6 +158,11 @@ public class Region extends JMXBaseBean implements RegionMBean {
       itemValuesHM.put(regAttItemNames[1], Boolean.parseBoolean(itemValues[1]));
     }
 
+    // hdfsWriteOnly
+    if (null != itemValues[2]) {
+      itemValuesHM.put(regAttItemNames[2], Boolean.parseBoolean(itemValues[2]));
+    }
+
     // scope
     if (null != itemValues[3]) {
       itemValuesHM.put(regAttItemNames[3], itemValues[3]);
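
The three arrays in this test stub (item names, descriptions, and OpenTypes) must stay index-aligned, which is why "hdfsWriteOnly" is inserted at the same position in both string arrays and handled as itemValues[2] above. A standalone sketch of that alignment using javax.management.openmbean directly, with sample values borrowed from the test.properties diff below; the type name and class name here are illustrative, not the actual stub code:

    import javax.management.openmbean.CompositeDataSupport;
    import javax.management.openmbean.CompositeType;
    import javax.management.openmbean.OpenDataException;
    import javax.management.openmbean.OpenType;
    import javax.management.openmbean.SimpleType;

    // Illustrative only: one row of listRegionAttributes data built by hand.
    final class RegionAttributesCompositeSketch {
      static CompositeDataSupport build() throws OpenDataException {
        String[] names = { "compressionCodec", "enableOffHeapMemory", "hdfsWriteOnly",
            "scope", "diskStoreName", "diskSynchronous" };
        OpenType<?>[] types = { SimpleType.STRING, SimpleType.BOOLEAN, SimpleType.BOOLEAN,
            SimpleType.STRING, SimpleType.STRING, SimpleType.BOOLEAN };
        CompositeType rowType = new CompositeType("listRegionAttributes",
            "region attributes", names, /* descriptions */ names, types);
        Object[] values = { "comp-Codec", Boolean.TRUE, Boolean.TRUE,
            "local", "TestDiskStoreLcl", Boolean.FALSE };  // mirrors region.R1 below
        return new CompositeDataSupport(rowType, names, values);
      }
    }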

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-pulse/src/test/resources/test.properties
----------------------------------------------------------------------
diff --git a/geode-pulse/src/test/resources/test.properties b/geode-pulse/src/test/resources/test.properties
index 7952c0e..b779c16 100644
--- a/geode-pulse/src/test/resources/test.properties
+++ b/geode-pulse/src/test/resources/test.properties
@@ -182,7 +182,7 @@ region.R1.diskUsage=200200
 #region.R1.diskSynchronous=false
 # operations
 # listRegionAttributes operation should values for return 
-# String compressionCodec, boolean enableOffHeapMemory,
+# String compressionCodec, boolean enableOffHeapMemory, boolean hdfsWriteOnly,
 # String scope, String diskStoreName, boolean diskSynchronous
 region.R1.listRegionAttributes=comp-Codec,true,true,local,TestDiskStoreLcl,false
 
@@ -248,7 +248,7 @@ region.R2.diskUsage=200200
 #region.R2.diskSynchronous=true
 # operations
 # listRegionAttributes operation should values for return 
-# string compressionCodec, boolean enableOffHeapMemory,
+# string compressionCodec, boolean enableOffHeapMemory, boolean hdfsWriteOnly
 # String scope, String diskStoreName, boolean diskSynchronous
 region.R2.listRegionAttributes=comp-Codec,true,false,global,TestDiskStoreGbl,true
 
@@ -297,7 +297,7 @@ region.R3.diskUsage=200200
 #region.R3.diskSynchronous=false
 # operations
 # listRegionAttributes operation should values for return 
-# String compressionCodec, boolean enableOffHeapMemory,
+# String compressionCodec, boolean enableOffHeapMemory, boolean hdfsWriteOnly,
 # String scope, String diskStoreName, boolean diskSynchronous
 region.R3.listRegionAttributes=comp-Codec,true,true,local,TestDiskStoreLcl,false
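
As the updated comments note, each region.RN.listRegionAttributes value is positional, and the third token now supplies the hdfsWriteOnly boolean. A small parsing sketch, illustrative only and not part of the test harness:

    // Illustrative only: how the positional CSV above maps to the attribute list
    // named in the comments (compressionCodec, enableOffHeapMemory, hdfsWriteOnly,
    // scope, diskStoreName, diskSynchronous).
    final class ListRegionAttributesCsv {
      static void dump(String csv) {
        String[] v = csv.split(",");
        System.out.println("compressionCodec    = " + v[0]);
        System.out.println("enableOffHeapMemory = " + Boolean.parseBoolean(v[1]));
        System.out.println("hdfsWriteOnly       = " + Boolean.parseBoolean(v[2]));  // new column
        System.out.println("scope               = " + v[3]);
        System.out.println("diskStoreName       = " + v[4]);
        System.out.println("diskSynchronous     = " + Boolean.parseBoolean(v[5]));
      }

      public static void main(String[] args) {
        dump("comp-Codec,true,true,local,TestDiskStoreLcl,false");  // region.R1 / region.R3 value above
      }
    }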
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-rebalancer/build.gradle
----------------------------------------------------------------------
diff --git a/geode-rebalancer/build.gradle b/geode-rebalancer/build.gradle
index 00c43e4..b821590 100644
--- a/geode-rebalancer/build.gradle
+++ b/geode-rebalancer/build.gradle
@@ -25,4 +25,11 @@ dependencies {
   }
   compile ('org.springframework:spring-context:' + project.'springframework.version')
   testCompile project(':geode-junit')
+
+  // the following test dependencies are needed for mocking cache instance
+  testRuntime 'org.apache.hadoop:hadoop-common:' + project.'hadoop.version'
+  testRuntime 'org.apache.hadoop:hadoop-hdfs:' + project.'hadoop.version'
+  testRuntime ('org.apache.hbase:hbase:' + project.'hbase.version') {
+    transitive = false
+  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-wan/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderFactoryImpl.java
----------------------------------------------------------------------
diff --git a/geode-wan/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderFactoryImpl.java b/geode-wan/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderFactoryImpl.java
index e7ba187..0c10246 100644
--- a/geode-wan/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderFactoryImpl.java
+++ b/geode-wan/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderFactoryImpl.java
@@ -181,6 +181,10 @@ public class GatewaySenderFactoryImpl implements
     this.attrs.isBucketSorted = isBucketSorted;
     return this;
   }
+  public GatewaySenderFactory setIsHDFSQueue(boolean isHDFSQueue){
+    this.attrs.isHDFSQueue = isHDFSQueue;
+    return this;
+  }
   public GatewaySender create(String id, int remoteDSId) {
     int myDSId = InternalDistributedSystem.getAnyInstance()
         .getDistributionManager().getDistributedSystemId();
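
A hedged sketch of how calling code might use the new builder-style setter; obtaining the factory from the Cache and casting to the impl are assumptions here, since this hunk only shows the setter itself:

    import com.gemstone.gemfire.cache.Cache;
    import com.gemstone.gemfire.cache.wan.GatewaySender;
    import com.gemstone.gemfire.cache.wan.GatewaySenderFactory;
    import com.gemstone.gemfire.internal.cache.wan.GatewaySenderFactoryImpl;

    // Hypothetical caller, for illustration only.
    final class HdfsQueueSenderSketch {
      static GatewaySender createHdfsQueueSender(Cache cache, String id, int remoteDsId) {
        GatewaySenderFactory factory = cache.createGatewaySenderFactory();
        // The cast assumes setIsHDFSQueue is not exposed on the public interface.
        ((GatewaySenderFactoryImpl) factory).setIsHDFSQueue(true);
        return factory.create(id, remoteDsId);
      }
    }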

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/UpdateVersionDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/UpdateVersionDUnitTest.java b/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/UpdateVersionDUnitTest.java
index 0e7e8d8..18a89f8 100644
--- a/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/UpdateVersionDUnitTest.java
+++ b/geode-wan/src/test/java/com/gemstone/gemfire/internal/cache/UpdateVersionDUnitTest.java
@@ -179,7 +179,7 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
           public boolean done() {
             Entry<?,?> entry = null;
             try {
-              entry = region.getDataStore().getEntryLocally(0, key, false, false);
+              entry = region.getDataStore().getEntryLocally(0, key, false, false, false);
             } catch (EntryNotFoundException e) {
               // expected
             } catch (ForceReattemptException e) {
@@ -443,7 +443,7 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
           public boolean done() {
             Entry<?,?> entry = null;
             try {
-              entry = region.getDataStore().getEntryLocally(0, key, false, false);
+              entry = region.getDataStore().getEntryLocally(0, key, false, false, false);
             } catch (EntryNotFoundException e) {
               // expected
             } catch (ForceReattemptException e) {
@@ -582,7 +582,7 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
           public boolean done() {
             Entry<?,?> entry = null;
             try {
-              entry = region.getDataStore().getEntryLocally(0, key, false, false);
+              entry = region.getDataStore().getEntryLocally(0, key, false, false, false);
             } catch (EntryNotFoundException e) {
               // expected
             } catch (ForceReattemptException e) {



[17/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/RWSplitIterator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/RWSplitIterator.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/RWSplitIterator.java
new file mode 100644
index 0000000..23dd840
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/RWSplitIterator.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.SortedHDFSQueuePersistedEvent;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplog;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HFileSortedOplog;
+import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
+
+/**
+ * An iterator that iterates over a split in a read/write hoplog
+ */
+public class RWSplitIterator extends HDFSSplitIterator {
+
+  public RWSplitIterator(FileSystem fs, Path[] path, long[] start, long[] len, long startTime, long endTime) throws IOException {
+    super(fs, path, start, len, startTime, endTime);
+  }
+
+  @Override
+  protected AbstractHoplog getHoplog(FileSystem fs, Path path) throws IOException {
+    SchemaMetrics.configureGlobally(fs.getConf());
+    return HFileSortedOplog.getHoplogForLoner(fs, path); 
+  }
+
+  public PersistedEventImpl getDeserializedValue() throws ClassNotFoundException, IOException {
+    return SortedHDFSQueuePersistedEvent.fromBytes(iterator.getValue());
+  }
+}
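
A hedged construction sketch for the new iterator. The parameter meanings are inferred from their names, and the record cursor lives on the HDFSSplitIterator base class, which is not part of this hunk, so only construction and the getDeserializedValue() accessor added above are mentioned:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.RWSplitIterator;

    // Illustrative only.
    final class RWSplitIteratorSketch {
      static RWSplitIterator open(Path hoplogPath, long length) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        return new RWSplitIterator(fs,
            new Path[] { hoplogPath },   // files making up this split
            new long[] { 0L },           // presumed start offset within each file
            new long[] { length },       // presumed byte length assigned per file
            0L, Long.MAX_VALUE);         // presumed event-time window: accept everything
        // Once the base-class cursor (not shown here) is positioned on a record,
        // getDeserializedValue() deserializes it via SortedHDFSQueuePersistedEvent.fromBytes.
      }
    }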

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/StreamSplitIterator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/StreamSplitIterator.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/StreamSplitIterator.java
new file mode 100644
index 0000000..bfb2deb
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/StreamSplitIterator.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.UnsortedHDFSQueuePersistedEvent;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplog;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.SequenceFileHoplog;
+
+/**
+ * An iterator that iterates over a split in a sequential hoplog.
+ */
+public class StreamSplitIterator extends HDFSSplitIterator {
+
+  public StreamSplitIterator(FileSystem fs, Path[] path, long[] start, long[] len, long startTime, long endTime) throws IOException {
+    super(fs, path, start, len, startTime, endTime);
+  }
+
+  public PersistedEventImpl getDeserializedValue() throws ClassNotFoundException, IOException {
+    return UnsortedHDFSQueuePersistedEvent.fromBytes(iterator.getValue());
+  }
+
+  @Override
+  protected AbstractHoplog getHoplog(FileSystem fs, Path path) throws IOException {
+    return new SequenceFileHoplog(fs, path, null);
+  }
+}


[05/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizerJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizerJUnitTest.java
new file mode 100644
index 0000000..8746a0b
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizerJUnitTest.java
@@ -0,0 +1,1044 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.TreeSet;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.regex.Matcher;
+
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
+import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.SortedHoplogPersistedEvent;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer.HoplogComparator;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer.Compactor;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.TieredCompactionJUnitTest.TestHoplog;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.HoplogUtil;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.TrackedReference;
+import com.gemstone.gemfire.internal.util.BlobHelper;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+@Category({IntegrationTest.class, HoplogTest.class})
+public class HdfsSortedOplogOrganizerJUnitTest extends BaseHoplogTestCase {
+  /**
+   * Tests flush operation
+   */
+  public void testFlush() throws Exception {
+    int count = 10;
+    int bucketId = (int) System.nanoTime();
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
+
+    // flush and create hoplog
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    for (int i = 0; i < count; i++) {
+      items.add(new TestEvent(("key-" + i), ("value-" + System.nanoTime())));
+    }
+    organizer.flush(items.iterator(), count);
+
+    // check file existence in bucket directory
+    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/" + bucketId, 
+                      HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+
+    // only one hoplog should exists
+    assertEquals(1, hoplogs.length);
+    
+    assertEquals(count, organizer.sizeEstimate());
+    assertEquals(0, stats.getActiveReaderCount());
+  }
+
+  /**
+   * Tests reads from a set of hoplogs containing both valid and stale KVs
+   */
+  public void testReopen() throws Exception {
+    int bucketId = (int) System.nanoTime();
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
+    
+    // flush and create hoplog
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    for (int i = 0; i < 100; i++) {
+      items.add(new TestEvent("" + i, ("1-1")));
+    }
+    organizer.flush(items.iterator(), items.size());
+    
+    Hoplog hoplog = organizer.getSortedOplogs().iterator().next().get();
+    byte[] keyBytes1 = BlobHelper.serializeToBlob("1");
+    hoplog.close();
+    
+    for (int i = 0; i < 10; i++) {
+      Path path = new Path(testDataDir, getName() + "/" + bucketId + "/" + hoplog.getFileName());
+      HFileSortedOplog oplog = new HFileSortedOplog(hdfsStore, path, blockCache, stats, storeStats);
+      oplog.getReader().read(keyBytes1);
+      oplog.close(false);
+    }
+  }
+  
+  /**
+   * Tests reads from a set of hoplogs containing both valid and stale KVs
+   */
+  public void testRead() throws Exception {
+    doRead(regionManager);
+  }
+  
+//  public void testNewReaderWithNameNodeHA() throws Exception {
+//    deleteMiniClusterDir();
+//    int nn1port = AvailablePortHelper.getRandomAvailableTCPPort();
+//    int nn2port = AvailablePortHelper.getRandomAvailableTCPPort();
+//    
+//    MiniDFSCluster cluster = initMiniHACluster(nn1port, nn2port);
+//    initClientHAConf(nn1port, nn2port);
+//    
+//    HDFSStoreImpl store1 = (HDFSStoreImpl) hsf.create("Store-1");
+//    regionfactory.setHDFSStoreName(store1.getName());
+//    Region<Object, Object> region1 = regionfactory.create("region-1");
+//    HdfsRegionManager regionManager1 = ((LocalRegion)region1).getHdfsRegionManager();
+//    
+//    HoplogOrganizer<SortedHoplogPersistedEvent> organizer = doRead(regionManager1);
+//    organizer.close();
+//    
+//    dunit.DistributedTestCase.IgnoredException ex = DistributedTestCase.addExpectedException("java.io.EOFException");
+//    NameNode nnode2 = cluster.getNameNode(1);
+//    assertTrue(nnode2.isStandbyState());
+//    cluster.shutdownNameNode(0);
+//    cluster.transitionToActive(1);
+//    assertFalse(nnode2.isStandbyState());
+//    
+//    organizer = new HdfsSortedOplogOrganizer(regionManager1, 0);
+//    byte[] keyBytes1 = BlobHelper.serializeToBlob("1");
+//    byte[] keyBytes3 = BlobHelper.serializeToBlob("3");
+//    byte[] keyBytes4 = BlobHelper.serializeToBlob("4");
+//    assertEquals("2-1", organizer.read(keyBytes1).getValue());
+//    assertEquals("3-3", organizer.read(keyBytes3).getValue());
+//    assertEquals("1-4", organizer.read(keyBytes4).getValue());
+//    ex.remove();
+//
+//    region1.destroyRegion();
+//    store1.destroy();
+//    cluster.shutdown();
+//    FileUtils.deleteDirectory(new File("hdfs-test-cluster"));
+//  }
+  
+//  public void testActiveReaderWithNameNodeHA() throws Exception {
+//    deleteMiniClusterDir();
+//    int nn1port = AvailablePortHelper.getRandomAvailableTCPPort();
+//    int nn2port = AvailablePortHelper.getRandomAvailableTCPPort();
+//    
+//    MiniDFSCluster cluster = initMiniHACluster(nn1port, nn2port);
+//    initClientHAConf(nn1port, nn2port);
+//    
+//    HDFSStoreImpl store1 = (HDFSStoreImpl) hsf.create("Store-1");
+//    regionfactory.setHDFSStoreName(store1.getName());
+//    Region<Object, Object> region1 = regionfactory.create("region-1");
+//    HdfsRegionManager regionManager1 = ((LocalRegion)region1).getHdfsRegionManager();
+//    
+//    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager1, 0);
+//    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+//    for (int i = 100000; i < 101000; i++) {
+//      items.add(new TestEvent(("" + i), (i + " some string " + i)));
+//    }
+//    organizer.flush(items.iterator(), items.size());
+//    organizer.getSortedOplogs().get(0).get().getReader();
+//    
+//    dunit.DistributedTestCase.IgnoredException ex = DistributedTestCase.addExpectedException("java.io.EOFException");
+//    NameNode nnode2 = cluster.getNameNode(1);
+//    assertTrue(nnode2.isStandbyState());
+//    cluster.shutdownNameNode(0);
+//    cluster.transitionToActive(1);
+//    assertFalse(nnode2.isStandbyState());
+//    
+//    for (int i = 100000; i < 100500; i++) {
+//      byte[] keyBytes1 = BlobHelper.serializeToBlob("" + i);
+//      assertEquals(i + " some string " + i, organizer.read(keyBytes1).getValue());
+//    }
+//    ex.remove();
+//    region1.destroyRegion();
+//    store1.destroy();
+//    cluster.shutdown();
+//    FileUtils.deleteDirectory(new File("hdfs-test-cluster"));
+//  }
+  
+//  public void testFlushWithNameNodeHA() throws Exception {
+//    deleteMiniClusterDir();
+//    int nn1port = AvailablePortHelper.getRandomAvailableTCPPort();
+//    int nn2port = AvailablePortHelper.getRandomAvailableTCPPort();
+//    
+//    MiniDFSCluster cluster = initMiniHACluster(nn1port, nn2port);
+//    
+//    initClientHAConf(nn1port, nn2port);
+//    HDFSStoreImpl store1 = (HDFSStoreImpl) hsf.create("Store-1");
+//    
+//    regionfactory.setHDFSStoreName(store1.getName());
+//    Region<Object, Object> region1 = regionfactory.create("region-1");
+//    HdfsRegionManager regionManager1 = ((LocalRegion)region1).getHdfsRegionManager();
+//    
+//    HoplogOrganizer<SortedHoplogPersistedEvent> organizer = new HdfsSortedOplogOrganizer(regionManager1, 0);
+//    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+//    items.add(new TestEvent(("1"), ("1-1")));
+//    organizer.flush(items.iterator(), items.size());
+//
+//    dunit.DistributedTestCase.IgnoredException ex = DistributedTestCase.addExpectedException("java.io.EOFException");
+//    NameNode nnode2 = cluster.getNameNode(1);
+//    assertTrue(nnode2.isStandbyState());
+//    cluster.shutdownNameNode(0);
+//    cluster.transitionToActive(1);
+//    assertFalse(nnode2.isStandbyState());
+//    
+//    items.add(new TestEvent(("4"), ("1-4")));
+//    organizer.flush(items.iterator(), items.size());
+//    byte[] keyBytes1 = BlobHelper.serializeToBlob("1");
+//    byte[] keyBytes4 = BlobHelper.serializeToBlob("4");
+//    assertEquals("1-1", organizer.read(keyBytes1).getValue());
+//    assertEquals("1-4", organizer.read(keyBytes4).getValue());
+//    ex.remove();
+//    
+//    region1.destroyRegion();
+//    store1.destroy();
+//    cluster.shutdown();
+//    FileUtils.deleteDirectory(new File("hdfs-test-cluster"));
+//  }
+
+  public HoplogOrganizer<SortedHoplogPersistedEvent> doRead(HdfsRegionManager rm) throws Exception {
+    HoplogOrganizer<SortedHoplogPersistedEvent> organizer = new HdfsSortedOplogOrganizer(rm, 0);
+
+    // flush and create hoplog
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    items.add(new TestEvent(("1"), ("1-1")));
+    items.add(new TestEvent(("4"), ("1-4")));
+    organizer.flush(items.iterator(), items.size());
+
+    items.clear();
+    items.add(new TestEvent(("1"), ("2-1")));
+    items.add(new TestEvent(("3"), ("2-3")));
+    organizer.flush(items.iterator(), items.size());
+
+    items.clear();
+    items.add(new TestEvent(("3"), ("3-3")));
+    items.add(new TestEvent(("5"), ("3-5")));
+    organizer.flush(items.iterator(), items.size());
+
+    // check file existence in bucket directory
+    FileStatus[] hoplogs = getBucketHoplogs(rm.getStore().getFileSystem(),
+        rm.getRegionFolder() + "/" + 0,
+        HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+
+    // expect 3 files, one per flush
+    assertEquals(3, hoplogs.length);
+    byte[] keyBytes1 = BlobHelper.serializeToBlob("1");
+    byte[] keyBytes3 = BlobHelper.serializeToBlob("3");
+    byte[] keyBytes4 = BlobHelper.serializeToBlob("4");
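+    // Hoplogs are searched newest-first, so the value from the most recent
+    // flush containing a key is expected to shadow older, stale values.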
+    // expect key 1 from hoplog 2
+    assertEquals("2-1", organizer.read(keyBytes1).getValue());
+    // expect key 3 from hoplog 3
+    assertEquals("3-3", organizer.read(keyBytes3).getValue());
+    // expect key 4 from hoplog 1
+    assertEquals("1-4", organizer.read(keyBytes4).getValue());
+    return organizer;
+  }
+
+  /**
+   * Tests bucket organizer initialization during startup. Existing hoplogs should be identified
+   * and returned
+   */
+  public void testHoplogIdentification() throws Exception {
+    // create one empty file and one directory in the bucket directory
+    Path bucketPath = new Path(testDataDir, getName() + "/0");
+    FileSystem fs = hdfsStore.getFileSystem();
+    fs.createNewFile(new Path(bucketPath, "temp_file"));
+    fs.mkdirs(new Path(bucketPath, "temp_dir"));
+
+    // create 2 hoplog files each of flush, minor and major type
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+    String[] extensions = { HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION,
+        HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION,
+        HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION,
+        HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION,
+        HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION,
+        HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION};
+    for (String string : extensions) {
+      Hoplog oplog = organizer.getTmpSortedOplog(null, string);
+      createHoplog(0, oplog);
+      organizer.makeLegitimate(oplog);
+    }
+
+    // create a temp hoplog
+    Hoplog oplog = organizer.getTmpSortedOplog(null, HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION);
+    createHoplog(0, oplog);
+
+    // bucket directory should have 6 hoplogs, 1 temp log, 1 misc file and 1 directory
+    FileStatus[] results = fs.listStatus(bucketPath);
+    assertEquals(9, results.length);
+
+    // only the six legitimate hoplogs should be identified as sorted oplogs
+    List<Hoplog> list = organizer.identifyAndLoadSortedOplogs(true);
+    assertEquals(6, list.size());
+  }
+
+  public void testExpiryMarkerIdentification() throws Exception {
+    // expiry markers for two of the hoplogs created below should be identified
+    String[] files = {
+        "0-1-1231" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION,
+        "0-2-1232" + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION,
+        "0-3-1233" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
+        "0-4-1234" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
+        "0-5-1235" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION };
+    
+    Path bucketPath = new Path(testDataDir, getName() + "/0");
+    FileSystem fs = hdfsStore.getFileSystem();
+    for (String file : files) {
+      Hoplog oplog = new HFileSortedOplog(hdfsStore, new Path(bucketPath, file),
+          blockCache, stats, storeStats);
+      createHoplog(10, oplog);
+    }
+
+    String marker1 = "0-4-1234"
+        + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION
+        + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION;
+    fs.createNewFile(new Path(bucketPath, marker1));
+    String marker2 = "0-5-1235"
+        + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION
+        + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION;
+    fs.createNewFile(new Path(bucketPath, marker2));    
+    
+    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/0", "");
+    assertEquals(7, hoplogs.length);
+    
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(
+        regionManager, 0);
+    
+    FileStatus[] markers = organizer.getExpiryMarkers();
+    // the two expiry markers created above should be returned
+    assertEquals(2, markers.length);
+    for (FileStatus marker : markers) {
+      String name = marker.getPath().getName();
+      assertTrue(name.equals(marker1) || name.equals(marker2));
+    }
+    organizer.close();
+  }
+  
+  public void testExpiredHoplogCleanup() throws Exception {
+    // expired hoplogs from the list below should be deleted
+    String[] files = {
+        "0-1-0000" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION,
+        "0-1-1111" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION,
+        "0-1-1111" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION
+        + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION,
+        
+        "0-2-0000" + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION,
+        "0-2-2222" + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION,
+        
+        "0-3-0000" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
+        "0-3-3333" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
+        "0-3-3333" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION
+            + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION,
+        
+        "0-4-4444" + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION };
+    
+    Path bucketPath = new Path(testDataDir, getName() + "/0");
+    FileSystem fs = hdfsStore.getFileSystem();
+    for (String file : files) {
+      if (file.endsWith(AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION)) {
+        fs.createNewFile(new Path(bucketPath, file));
+        continue;
+      }
+      Hoplog oplog = new HFileSortedOplog(hdfsStore, new Path(bucketPath, file),
+          blockCache, stats, storeStats);
+      createHoplog(10, oplog);
+    }
+    
+    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/0", "");
+    assertEquals(9, hoplogs.length);
+
+    long target = System.currentTimeMillis();
+    TimeUnit.SECONDS.sleep(1);
+    
+    // files created below this point are newer than the target delete time and
+    // should not be deleted, except the minor compacted ones
+    files = new String[] { 
+        "0-4-4444" + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION
+            + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION,
+            
+        "0-5-5555" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION
+            + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION,
+        "0-5-5555" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
+        
+        "0-6-6666" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION
+    };
+    for (String file : files) {
+      if (file.endsWith(AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION)) {
+        fs.createNewFile(new Path(bucketPath, file));
+        continue;
+      }
+      Hoplog oplog = new HFileSortedOplog(hdfsStore, new Path(bucketPath, file),
+          blockCache, stats, storeStats);
+      createHoplog(10, oplog);
+    }
+    
+    hoplogs = getBucketHoplogs(getName() + "/0", "");
+    assertEquals(13, hoplogs.length);
+    int hopSize = 0;
+    for (FileStatus file : hoplogs) {
+      if(file.getLen() > hopSize) {
+        hopSize = (int) file.getLen();
+      }
+    }
+
+    final AtomicInteger behavior = new AtomicInteger(0);
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0) {
+      @Override
+      protected FileStatus[] getExpiryMarkers() throws IOException {
+        if (behavior.get() == 1) {
+          ArrayList<FileStatus> markers = new ArrayList<FileStatus>();
+          for (FileStatus marker : super.getExpiryMarkers()) {
+            markers.add(marker);
+          }
+          // inject a dummy old expiry marker for major compacted file
+          long age = 2 * HDFSStore.DEFAULT_MAJOR_COMPACTION_INTERVAL_MINS * 60 * 1000;
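+          // an age of twice the default major compaction interval should be
+          // old enough for this marker to qualify as a deletion target below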
+          String markerName = "0-2-2222" + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION + EXPIRED_HOPLOG_EXTENSION;
+          FileStatus marker = new FileStatus(0, false, 1, 1024, System.currentTimeMillis() - age, new Path(bucketPath, markerName));
+          markers.add(marker);
+          return markers.toArray(new FileStatus[markers.size()]);
+        }
+        return super.getExpiryMarkers();
+      }
+    };
+
+    List<FileStatus> list = organizer.getOptimizationTargets(target);
+    assertEquals(6, list.size());
+
+    behavior.set(1);
+    list = organizer.getOptimizationTargets(target);
+    assertEquals(8, list.size());
+    
+    assertEquals(9 * hopSize, stats.getStoreUsageBytes());
+    int count = organizer.deleteExpiredFiles(list);
+    assertEquals(8, count);
+    assertEquals(5 * hopSize, stats.getStoreUsageBytes());
+    
+    List<FileStatus> tmp = new ArrayList<FileStatus>(Arrays.asList(hoplogs));
+    for (Iterator<FileStatus> iter = tmp.iterator(); iter.hasNext();) {
+      hoplogs = getBucketHoplogs(getName() + "/0", "");
+      FileStatus file = iter.next();
+      for (FileStatus hoplog : hoplogs) {
+        if(hoplog.getPath().getName().startsWith("0-5-5555")) {
+          fail("this file should have been deleted" + hoplog.getPath().getName());
+        }
+
+        if (hoplog.getPath().getName().equals(file.getPath().getName())) {
+          iter.remove();
+          break;
+        }
+      }
+    }
+
+    assertEquals(7, tmp.size());
+    organizer.close();
+  }
+  
+  public void testAlterPurgeInterval() throws Exception {
+    // expired hoplogs from the list below should be deleted once the purge interval is lowered
+    String[] files = {
+        "0-1-0000" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION,
+        "0-1-1111" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION,
+        "0-2-2222" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION,
+        "0-4-4444" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION };
+    
+    Path bucketPath = new Path(testDataDir, getName() + "/0");
+    hdfsStore.getFileSystem();
+    for (String file : files) {
+      Hoplog oplog = new HFileSortedOplog(hdfsStore, new Path(bucketPath, file),
+          blockCache, stats, storeStats);
+      createHoplog(10, oplog);
+    }
+    
+    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/0", "");
+    int hopSize = 0;
+    for (FileStatus file : hoplogs) {
+      if(file.getLen() > hopSize) {
+        hopSize = (int) file.getLen();
+      }
+    }
+
+    final AtomicInteger behavior = new AtomicInteger(0);
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0) {
+      @Override
+      protected FileStatus[] getExpiryMarkers() throws IOException {
+        if (behavior.get() == 1) {
+          ArrayList<FileStatus> markers = new ArrayList<FileStatus>();
+          // inject dummy old expiry markers
+          long age = 120 * 1000; // 120 seconds old
+          String markerName = "0-2-2222" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION + EXPIRED_HOPLOG_EXTENSION;
+          FileStatus marker = new FileStatus(0, false, 1, 1024, System.currentTimeMillis() - age, new Path(bucketPath, markerName));
+          markers.add(marker);
+          markerName = "0-4-4444" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION + EXPIRED_HOPLOG_EXTENSION;
+          marker = new FileStatus(0, false, 1, 1024, System.currentTimeMillis() - age, new Path(bucketPath, markerName));
+          markers.add(marker);
+          return markers.toArray(new FileStatus[markers.size()]);
+        }
+        return super.getExpiryMarkers();
+      }
+    };
+
+    behavior.set(1);
+    int count = organizer.initiateCleanup();
+    assertEquals(0, count);
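+    // with the default purge interval the 120 second old markers are presumably
+    // too young to purge; lowering the interval to 1 (presumably minutes) should
+    // make the two marked hoplogs and their markers eligible for cleanup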
+    
+    HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
+    mutator.setPurgeInterval(1);
+    hdfsStore.alter(mutator);
+    count = organizer.initiateCleanup();
+    assertEquals(4, count);
+  }
+  
+  public void testInUseExpiredHoplogCleanup() throws Exception {
+    Path bucketPath = new Path(testDataDir, getName() + "/0");
+    FileSystem fs = hdfsStore.getFileSystem();
+    
+    String[] files = new String[] {
+        "0-1-1231" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION,
+        "0-2-1232" + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION,
+        "0-3-1233" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
+        "0-4-1234" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
+        "0-5-1235" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION };
+    
+    for (String file : files) {
+      Hoplog oplog = new HFileSortedOplog(hdfsStore, new Path(bucketPath, file),
+          blockCache, stats, storeStats);
+      createHoplog(10, oplog);
+    }
+    
+    final HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(
+        regionManager, 0);
+    List<TrackedReference<Hoplog>> hopRefs = organizer.getSortedOplogs();
+    assertEquals(files.length, hopRefs.size());
+    
+    // this is an expiry marker for one of the files that will be compacted below.
+    // While compaction is in progress, file deletion should not happen
+    files = new String[] { "0-5-1235"
+        + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION
+        + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION };
+    
+    for (String file : files) {
+      fs.createNewFile(new Path(bucketPath, file));
+    }
+    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/0", "");
+    assertEquals(hopRefs.size() + files.length, hoplogs.length);
+    
+    TimeUnit.MILLISECONDS.sleep(200);
+    long target = System.currentTimeMillis();
+    List<FileStatus> list = organizer.getOptimizationTargets(target);
+    assertEquals(2, list.size());
+    
+    for (TrackedReference<Hoplog> ref : hopRefs) {
+      ref.increment("test");
+    }
+
+    fs.delete(new Path(bucketPath, files[0]), false);
+    
+    TimeUnit.MILLISECONDS.sleep(50);
+    organizer.markSortedOplogForDeletion(hopRefs, false);
+    
+    list = organizer.getOptimizationTargets(target);
+    assertEquals(0, list.size());
+    organizer.close();
+  }
+  
+  /**
+   * Tests max sequence initialization when files already exist and the server starts
+   */
+  public void testSeqInitialization() throws Exception {
+    // create many hoplog files
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+    String[] extensions = { HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION,
+        HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION,
+        HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION,
+        HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION,
+        HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION};
+    for (String string : extensions) {
+      Hoplog oplog = organizer.getTmpSortedOplog(null, string);
+      createHoplog(1, oplog);
+      organizer.makeLegitimate(oplog);
+    }
+
+    // a new organizer should start creating files at sequence 6 as five files already existed
+    organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+    Hoplog oplog = organizer.getTmpSortedOplog(null, HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION);
+    createHoplog(1, oplog);
+    organizer.makeLegitimate(oplog);
+    assertEquals(6, HdfsSortedOplogOrganizer.getSequenceNumber(oplog));
+    organizer.close();
+  }
+
+  /**
+   * Tests temp file creation and making file legitimate
+   */
+  public void testMakeLegitimate() throws Exception {
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+
+    // create empty tmp hoplog
+    Hoplog oplog = organizer.getTmpSortedOplog(null, HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+    createHoplog(0, oplog);
+
+    Path hoplogPath = new Path(testDataDir, getName() + "/0/" + oplog.getFileName());
+    FileSystem fs = hdfsStore.getFileSystem();
+    FileStatus hoplogStatus = fs.getFileStatus(hoplogPath);
+    assertNotNull(hoplogStatus);
+
+    organizer.makeLegitimate(oplog);
+
+    try {
+      hoplogStatus = fs.getFileStatus(hoplogPath);
+      assertNull(hoplogStatus);
+    } catch (FileNotFoundException e) {
+      // the tmp file was renamed and hence should not exist; exception expected
+    }
+
+    assertTrue(oplog.getFileName().endsWith(HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION));
+    hoplogPath = new Path(testDataDir, getName() + "/0/" + oplog.getFileName());
+    hoplogStatus = fs.getFileStatus(hoplogPath);
+    assertNotNull(hoplogStatus);
+  }
+
+  /**
+   * Tests hoplog file name comparator
+   */
+  public void testHoplogFileComparator() throws IOException {
+    String name1 = "bucket1-10-3.hop";
+    String name2 = "bucket1-1-20.hop";
+    String name3 = "bucket1-30-201.hop";
+    String name4 = "bucket1-100-201.hop";
+
+    TreeSet<TrackedReference<Hoplog>> list = new TreeSet<TrackedReference<Hoplog>>(new HoplogComparator());
+    // insert soplogs into the list out of expected order
+    hdfsStore.getFileSystem();
+    list.add(new TrackedReference<Hoplog>(new HFileSortedOplog(hdfsStore, new Path(testDataDir, name2), blockCache, stats, storeStats)));
+    list.add(new TrackedReference<Hoplog>(new HFileSortedOplog(hdfsStore, new Path(testDataDir, name4), blockCache, stats, storeStats)));
+    list.add(new TrackedReference<Hoplog>(new HFileSortedOplog(hdfsStore, new Path(testDataDir, name1), blockCache, stats, storeStats)));
+    list.add(new TrackedReference<Hoplog>(new HFileSortedOplog(hdfsStore, new Path(testDataDir, name3), blockCache, stats, storeStats)));
+
+    Iterator<TrackedReference<Hoplog>> iter = list.iterator();
+    assertEquals(name4, iter.next().get().getFileName());
+    assertEquals(name3, iter.next().get().getFileName());
+    assertEquals(name2, iter.next().get().getFileName());
+    assertEquals(name1, iter.next().get().getFileName());
+  }
+  
+  /**
+   * Tests clear on a set of hoplogs.
+   */
+  public void testClear() throws Exception {
+    int bucketId = (int) System.nanoTime();
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
+
+    // flush and create hoplog
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    items.add(new TestEvent(("1"), ("1-1")));
+    items.add(new TestEvent(("4"), ("1-4")));
+    organizer.flush(items.iterator(), items.size());
+
+    items.clear();
+    items.add(new TestEvent(("1"), ("2-1")));
+    items.add(new TestEvent(("3"), ("2-3")));
+    organizer.flush(items.iterator(), items.size());
+
+    items.clear();
+    items.add(new TestEvent(("3"), ("3-3")));
+    items.add(new TestEvent(("5"), ("3-5")));
+    organizer.flush(items.iterator(), items.size());
+
+    // check file existence in bucket directory
+    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/" + bucketId, HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+
+    // expect 3 files, one per flush
+    assertEquals(3, hoplogs.length);
+    
+    organizer.clear();
+    
+    // check that all files are now expired
+    hoplogs = getBucketHoplogs(getName() + "/" + bucketId, HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+    FileStatus[] exs = getBucketHoplogs(getName() + "/" + bucketId, HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
+    FileStatus[] valids = HdfsSortedOplogOrganizer.filterValidHoplogs(hoplogs, exs);
+    assertEquals(Collections.EMPTY_LIST, Arrays.asList(valids));
+    
+    assertEquals(0, stats.getActiveFileCount());
+    assertEquals(0, stats.getInactiveFileCount());
+  }
+  
+  public void testFixedIntervalMajorCompaction() throws Exception {
+    final AtomicInteger majorCReqCount = new AtomicInteger(0);
+    
+    final Compactor compactor = new AbstractCompactor() {
+      @Override
+      public boolean compact(boolean isMajor, boolean isForced) throws IOException {
+        majorCReqCount.incrementAndGet();
+        return true;
+      }
+    };
+    
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0) {
+      @Override
+      public synchronized Compactor getCompactor() {
+        return compactor;
+      }
+    };
+    
+    regionManager.addOrganizer(0, organizer);
+    
+    System.setProperty(HoplogConfig.JANITOR_INTERVAL_SECS, "1");
+    HDFSRegionDirector.resetJanitor();
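+    // run the janitor every second so this test can observe compaction
+    // requests shortly after the major compaction interval elapses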
+    
+    alterMajorCompaction(hdfsStore, true);
+    
+    // create hoplog in the past, 90 seconds before current time
+    organizer.hoplogCreated(getName(), 0, new TestHoplog(hdfsStore, 100, System.currentTimeMillis() - 90000));
+    TimeUnit.MILLISECONDS.sleep(50);
+    organizer.hoplogCreated(getName(), 0, new TestHoplog(hdfsStore, 100, System.currentTimeMillis() - 90000));
+    
+    List<TrackedReference<Hoplog>> hoplogs = organizer.getSortedOplogs();
+    assertEquals(2, hoplogs.size());
+    
+    for (int i = 0; i < 3; i++) {
+      TimeUnit.SECONDS.sleep(1);
+      assertEquals(0, majorCReqCount.get());
+    }
+    HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
+    mutator.setMajorCompactionInterval(1);
+    hdfsStore.alter(mutator);
+    TimeUnit.SECONDS.sleep(5);
+    assertTrue(3 < majorCReqCount.get());
+  }
+  
+ 
+  public void testCorruptHfileBucketFail() throws Exception {
+    // create a corrupt hoplog file in each bucket directory
+    FileSystem fs = hdfsStore.getFileSystem();
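+    // 113 presumably matches the default total bucket count of the partitioned
+    // region, so every bucket directory receives one corrupt hoplog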
+    for (int i = 0; i < 113; i++) {
+      FSDataOutputStream opStream = fs.create(new Path(testDataDir.getName() + "/region-1/" + i + "/1-1-1.hop"));
+      opStream.writeBytes("Some random corrupt file");
+      opStream.close();
+    }
+      
+    // create region with store
+    regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
+    Region<Object, Object> region1 = regionfactory.create("region-1");
+    IgnoredException ex = IgnoredException.addIgnoredException("CorruptHFileException");
+    try {
+      region1.get("key");
+      fail("get should have failed with corrupt file error");
+    } catch (HDFSIOException e) {
+      // expected
+    } finally {
+      ex.remove();
+    }
+    
+    region1.destroyRegion();
+  }
+
+  public void testMaxOpenReaders() throws Exception {
+    System.setProperty("hoplog.bucket.max.open.files", "5");
+    HoplogOrganizer<? extends PersistedEventImpl> organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    for (int i = 0; i < 10; i++) {
+      items.clear();
+      items.add(new TestEvent("" + i, "" + i));
+      organizer.flush(items.iterator(), items.size());
+    }
+    
+    HdfsSortedOplogOrganizer bucket = (HdfsSortedOplogOrganizer) organizer;
+    List<TrackedReference<Hoplog>> hoplogs = bucket.getSortedOplogs();
+    int closedCount = 0 ;
+    for (TrackedReference<Hoplog> hoplog : hoplogs) {
+      HFileSortedOplog hfile = (HFileSortedOplog) hoplog.get();
+      if (hfile.isClosed()) { 
+        closedCount++;
+      }
+    }
+    assertEquals(10, closedCount);
+    assertEquals(10, stats.getActiveFileCount());
+    assertEquals(0, stats.getActiveReaderCount());
+    
+    byte[] keyBytes1 = BlobHelper.serializeToBlob("1");
+    organizer.read(keyBytes1).getValue();
+    
+    closedCount = 0 ;
+    for (TrackedReference<Hoplog> hoplog : hoplogs) {
+      HFileSortedOplog hfile = (HFileSortedOplog) hoplog.get();
+      if (hfile.isClosed()) { 
+        closedCount++;
+      }
+    }
+    assertEquals(5, closedCount);
+    assertEquals(10, stats.getActiveFileCount());
+    assertEquals(0, stats.getInactiveFileCount());
+    assertEquals(5, stats.getActiveReaderCount());
+    
+    organizer.getCompactor().compact(false, false);
+    assertEquals(1, stats.getActiveFileCount());
+    assertEquals(0, stats.getActiveReaderCount());
+    assertEquals(0, stats.getInactiveFileCount());
+  }
+
+  public void testConcurrentReadInactiveClose() throws Exception {
+    final HoplogOrganizer<? extends PersistedEventImpl> organizer = regionManager.create(0);
+    alterMinorCompaction(hdfsStore, true);
+
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    for (int i = 0; i < 4; i++) {
+      items.clear();
+      items.add(new TestEvent("" + i, "" + i));
+      organizer.flush(items.iterator(), items.size());
+    }
+    
+    final byte[] keyBytes1 = BlobHelper.serializeToBlob("1");
+    class ReadTask implements Runnable {
+      public void run() {
+        try {
+          organizer.read(keyBytes1);
+        } catch (IOException e) {
+          e.printStackTrace();
+        }
+      }
+    }
+    ScheduledExecutorService[] readers = new ScheduledExecutorService[10];
+    for (int i = 0; i < readers.length; i++) {
+      readers[i] = Executors.newSingleThreadScheduledExecutor();
+      readers[i].scheduleWithFixedDelay(new ReadTask(), 0, 1, TimeUnit.MILLISECONDS);
+    }
+    
+    for (int i = 0; i < 100; i++) {
+      items.clear();
+      items.add(new TestEvent("" + i, "" + i));
+      organizer.flush(items.iterator(), items.size());
+    }
+    
+    for (int i = 0; i < readers.length; i++) {
+      readers[i].shutdown();
+      readers[i].awaitTermination(1, TimeUnit.SECONDS);
+      TimeUnit.MILLISECONDS.sleep(50);
+    }
+    
+    for (int i = 0; i < 20; i++) {
+      if (stats.getActiveFileCount() < 4) {
+        break;
+      }
+      organizer.getCompactor().compact(false, false);
+    }
+
+    organizer.performMaintenance();
+    TimeUnit.SECONDS.sleep(1);
+    
+    assertTrue("" + stats.getActiveFileCount(), stats.getActiveFileCount() <= 4);
+    assertEquals(stats.getActiveReaderCount(), stats.getActiveReaderCount());
+    assertEquals(0, stats.getInactiveFileCount());
+  }
+  
+  public void testEmptyBucketCleanup() throws Exception {
+    HdfsSortedOplogOrganizer o = new HdfsSortedOplogOrganizer(regionManager, 0);
+    long target = System.currentTimeMillis();
+    o.getOptimizationTargets(target);
+    // making sure an empty bucket does not cause IO errors; no assertion
+    // needed for this test case
+  }
+  
+  public void testExpiredFilterAtStartup() throws Exception {
+    HdfsSortedOplogOrganizer bucket = new HdfsSortedOplogOrganizer(regionManager, 0);
+    
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    items.add(new TestEvent(("1"), ("1-1")));
+    items.add(new TestEvent(("4"), ("1-4")));
+    bucket.flush(items.iterator(), items.size());
+    
+    items.clear();
+    items.add(new TestEvent(("1"), ("2-1")));
+    items.add(new TestEvent(("3"), ("2-3")));
+    bucket.flush(items.iterator(), items.size());
+    
+    FileStatus[] files = getBucketHoplogs(getName() + "/" + 0,
+        HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+    assertEquals(2, files.length);
+    
+    files = getBucketHoplogs(getName() + "/" + 0,
+        HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
+    assertEquals(0, files.length);
+    
+    HdfsSortedOplogOrganizer bucket2 = new HdfsSortedOplogOrganizer(regionManager, 0);
+    List<TrackedReference<Hoplog>> hoplogs = bucket2.getSortedOplogs();
+    assertEquals(2, hoplogs.size());
+    
+    bucket.clear();
+    
+    files = getBucketHoplogs(getName() + "/" + 0,
+        HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+    assertEquals(2, files.length);
+    
+    files = getBucketHoplogs(getName() + "/" + 0,
+        HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
+    assertEquals(2, files.length);
+    
+    bucket2 = new HdfsSortedOplogOrganizer(regionManager, 0);
+    hoplogs = bucket2.getSortedOplogs();
+    assertEquals(0, hoplogs.size());
+    
+    items.clear();
+    items.add(new TestEvent(("1"), ("2-1")));
+    items.add(new TestEvent(("3"), ("2-3")));
+    bucket.flush(items.iterator(), items.size());
+    
+    bucket2 = new HdfsSortedOplogOrganizer(regionManager, 0);
+    hoplogs = bucket2.getSortedOplogs();
+    assertEquals(1, hoplogs.size());
+    bucket.close();
+    bucket2.close();
+  }
+
+  public void testExpireFilterRetartAfterClear() throws Exception {
+    HdfsSortedOplogOrganizer bucket = new HdfsSortedOplogOrganizer(regionManager, 0);
+    
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    items.add(new TestEvent(("1"), ("1-1")));
+    items.add(new TestEvent(("4"), ("1-4")));
+    bucket.flush(items.iterator(), items.size());
+
+    items.clear();
+    items.add(new TestEvent(("1"), ("2-1")));
+    items.add(new TestEvent(("3"), ("2-3")));
+    bucket.flush(items.iterator(), items.size());
+    
+    FileStatus[] files = getBucketHoplogs(getName() + "/" + 0,
+        HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+    assertEquals(2, files.length);
+    
+    files = getBucketHoplogs(getName() + "/" + 0,
+        HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
+    assertEquals(0, files.length);
+    
+    HdfsSortedOplogOrganizer bucket2 = new HdfsSortedOplogOrganizer(regionManager, 0);
+    List<TrackedReference<Hoplog>> hoplogs = bucket2.getSortedOplogs();
+    assertEquals(2, hoplogs.size());
+    
+    bucket.clear();
+    
+    files = getBucketHoplogs(getName() + "/" + 0,
+        HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+    assertEquals(2, files.length);
+    
+    files = getBucketHoplogs(getName() + "/" + 0,
+        HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
+    assertEquals(2, files.length);
+    
+    bucket2 = new HdfsSortedOplogOrganizer(regionManager, 0);
+    hoplogs = bucket2.getSortedOplogs();
+    assertEquals(0, hoplogs.size());
+    bucket.close();
+    bucket2.close();
+  }
+  
+  /**
+   * Tests that maintenance does not fail even if there are no hoplogs
+   */
+  public void testNoFileJanitor() throws Exception {
+    HoplogOrganizer<? extends PersistedEventImpl> organizer;
+    organizer = regionManager.create(0);
+    organizer.performMaintenance();
+  }
+  
+  public void testValidHoplogRegex() {
+    String[] valid = {"1-1-1.hop", "1-1-1.ihop", "1-1-1.chop"};
+    String[] invalid = {"1-1-1.khop", "1-1-1.hop.tmphop", "1-1-1.hop.ehop", "1-1-.hop", "-1-1.hop"};
+    
+    for (String string : valid) {
+      Matcher matcher = HdfsSortedOplogOrganizer.SORTED_HOPLOG_PATTERN.matcher(string);
+      assertTrue(matcher.matches());
+    }
+    
+    for (String string : invalid) {
+      Matcher matcher = HdfsSortedOplogOrganizer.SORTED_HOPLOG_PATTERN.matcher(string);
+      assertFalse(matcher.matches());
+    }
+  }
+  
+  public void testOneHoplogMajorCompaction() throws Exception {
+    HoplogOrganizer<? extends PersistedEventImpl> organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+    alterMajorCompaction(hdfsStore, true);
+    
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    items.add(new TestEvent(("1"), ("1-1")));
+    organizer.flush(items.iterator(),items.size());    
+    
+    
+    FileStatus[] files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+    assertEquals(1, files.length);    
+    
+    // Minor compaction will not run on a single .hop file
+    organizer.getCompactor().compact(false, false);
+    files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION);
+    assertEquals(0, files.length);
+    
+    // Major compaction will run on a single .hop file
+    organizer.getCompactor().compact(true, false);
+    files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION);     
+    assertEquals(1, files.length);
+    String hoplogName =files[0].getPath().getName();    
+    files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION);
+    assertEquals(0, files.length);
+    
+    organizer.getCompactor().compact(true, false);
+    files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION);
+    assertEquals(1, files.length);
+    assertEquals(hoplogName, files[0].getPath().getName());
+    
+    // Minor compaction does not convert a major compacted file
+    organizer.getCompactor().compact(false, false);
+    files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION);
+    assertEquals(0, files.length);
+    
+    files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION);
+    assertEquals(1, files.length);
+    assertEquals(hoplogName, files[0].getPath().getName());
+    
+    files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
+    assertEquals(1, files.length);
+    assertNotSame(hoplogName + HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION, files[0].getPath().getName() );
+  }
+
+  public void testExposeCleanupInterval() throws Exception {
+    FileSystem fs = hdfsStore.getFileSystem();
+    Path cleanUpIntervalPath = new Path(hdfsStore.getHomeDir(), HoplogConfig.CLEAN_UP_INTERVAL_FILE_NAME);
+    assertTrue(fs.exists(cleanUpIntervalPath));
+    long interval = HDFSStore.DEFAULT_OLD_FILE_CLEANUP_INTERVAL_MINS
+        *60 * 1000;
+    assertEquals(interval, HoplogUtil.readCleanUpIntervalMillis(fs,cleanUpIntervalPath));
+  }
+  
+  @Override
+  protected void setUp() throws Exception {
+    System.setProperty(HoplogConfig.JANITOR_INTERVAL_SECS, "" + HoplogConfig.JANITOR_INTERVAL_SECS_DEFAULT);
+    super.setUp();
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HfileSortedOplogJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HfileSortedOplogJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HfileSortedOplogJUnitTest.java
new file mode 100644
index 0000000..7420437
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HfileSortedOplogJUnitTest.java
@@ -0,0 +1,540 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NoSuchElementException;
+import java.util.TreeMap;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog.HoplogReader;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog.HoplogWriter;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetReader.HoplogIterator;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
+import org.junit.experimental.categories.Category;
+
+@Category({IntegrationTest.class, HoplogTest.class})
+public class HfileSortedOplogJUnitTest extends BaseHoplogTestCase {
+  ArrayList<Object> toBeCleaned = new ArrayList<>();
+  
+  /**
+   * Tests hoplog creation using a writer. If this test fails, all the tests will fail as hoplog
+   * creation is the first step
+   */
+  public void testHoplogWriter() throws Exception {
+    String hoplogName = getRandomHoplogName();
+    createHoplog(hoplogName, 1);
+    FileStatus hoplogStatus = hdfsStore.getFileSystem().getFileStatus(new Path(testDataDir, hoplogName));
+    assertNotNull(hoplogStatus);
+  }
+
+  /**
+   * Tests hoplog deletion.
+   */
+  public void testDeletion() throws Exception {
+    String hoplogName = getRandomHoplogName();
+    createHoplog(hoplogName, 1);
+    HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
+
+    testHoplog.delete();
+
+    try {
+      FileStatus hoplogStatus = hdfsStore.getFileSystem().getFileStatus(new Path(testDataDir, hoplogName));
+      // hoplog should not exist; fail if it does
+      assertNull("File deletion failed", hoplogStatus);
+    } catch (FileNotFoundException e) {
+      // exception expected after deletion
+    }
+  }
+
+  /**
+   * Tests hoplog reader creation and key based gets
+   */
+  public void testHoplogReader() throws Exception {
+    String hop1 = getRandomHoplogName();
+    Map<String, String> map = createHoplog(hop1, 10);
+
+    HFileSortedOplog testHoplog1 = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hop1), blockCache, stats, storeStats);
+    HoplogReader reader = testHoplog1.getReader();
+    // verify that each entry put in the hoplog is returned by reader
+    for (Entry<String, String> entry : map.entrySet()) {
+      byte[] value = reader.read(entry.getKey().getBytes());
+      assertNotNull(value);
+    }
+  }
+
+  /**
+   * Tests full iteration on a hoplog. Ensures all inserted keys are returned and no key is missing
+   */
+  public void testIterator() throws IOException {
+    int count = 10;
+    ByteArrayComparator bac = new ByteArrayComparator();
+
+    String hoplogName = getRandomHoplogName();
+    TreeMap<String, String> sortedMap = createHoplog(hoplogName, count);
+
+    HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
+    HoplogReader reader = testHoplog.getReader();
+
+    Iterator<Entry<String, String>> mapIter = sortedMap.entrySet().iterator();
+    HoplogIterator<byte[], byte[]> iter = reader.scan();
+    for (; iter.hasNext();) {
+      byte[] key = iter.next();
+      Entry<String, String> entry = mapIter.next();
+      assertEquals(0, bac.compare(key, iter.getKey()));
+      assertEquals(0, bac.compare(key, entry.getKey().getBytes()));
+      assertEquals(0, bac.compare(iter.getValue(), entry.getValue().getBytes()));
+      count--;
+    }
+    assertEquals(0, count);
+  }
+
+  /**
+   * Tests the hoplog iterator on a single-KV hoplog. After the first key is returned, hasNext
+   * should return false and subsequent next calls should throw NoSuchElementException
+   */
+  public void testSingleKVIterator() throws Exception {
+    String hoplogName = getRandomHoplogName();
+    TreeMap<String, String> map = createHoplog(hoplogName, 1);
+    HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
+    HoplogReader reader = testHoplog.getReader();
+
+    HoplogIterator<byte[], byte[]> iter = reader.scan();
+    assertNull(iter.getKey());
+    assertNull(iter.getValue());
+    assertTrue(iter.hasNext());
+    assertNull(iter.getKey());
+    assertNull(iter.getValue());
+
+    Entry<String, String> entry = map.firstEntry();
+    iter.next();
+    assertNotNull(iter.getKey());
+    assertEquals(entry.getKey(), new String(iter.getKey()));
+    assertNotNull(iter.getValue());
+    assertEquals(entry.getValue(), new String(iter.getValue()));
+
+    assertFalse(iter.hasNext());
+    try {
+      iter.next();
+      fail();
+    } catch (NoSuchElementException e) {
+    }
+  }
+
+  /**
+   * Tests iteration on a hoplog with no keys, using a scanner. The scanner should not return
+   * any value and hasNext should return false every time
+   */
+  public void testEmptyFileIterator() throws Exception {
+    String hoplogName = getRandomHoplogName();
+    createHoplog(hoplogName, 0);
+    HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
+    HoplogReader reader = testHoplog.getReader();
+    HoplogIterator<byte[], byte[]> iter = reader.scan();
+    assertNull(iter.getKey());
+    assertNull(iter.getValue());
+    assertFalse(iter.hasNext());
+    assertNull(iter.getKey());
+    assertNull(iter.getValue());
+    try {
+      iter.next();
+      fail();
+    } catch (NoSuchElementException e) {
+    }
+  }
+
+  /**
+   * Tests from exclusive iterator
+   */
+  public void testFromExclusiveIterator() throws Exception {
+    fromIterator(false);
+  }
+
+  /**
+   * Tests from inclusive iterator
+   */
+  public void testFromInclusiveIterator() throws Exception {
+    fromIterator(true);
+  }
+
+  /**
+   * Tests from-condition based iteration. Creates a hoplog with 10 KVs, then creates a scanner
+   * starting at a middle key and verifies the count of KVs iterated over
+   */
+  public void fromIterator(boolean includeFrom) throws Exception {
+    int count = 10;
+    ByteArrayComparator bac = new ByteArrayComparator();
+
+    String hoplogName = getRandomHoplogName();
+    // sorted map contains the keys inserted in the hoplog for testing
+    TreeMap<String, String> sortedMap = createHoplog(hoplogName, count);
+
+    HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
+    HoplogReader reader = testHoplog.getReader();
+
+    int middleKey = 4;
+    // remove top keys from the sorted map as the hoplog scanner should not
+    // return those
+    Iterator<Entry<String, String>> mapIter = sortedMap.entrySet().iterator();
+    for (int i = 0; i < middleKey; i++) {
+      mapIter.next();
+      count--;
+    }
+    if (!includeFrom) {
+      mapIter.next();
+      count--;
+    }
+
+    // keys are like key-X, for X=0 through X=9. Start the iterator at the fifth
+    // key, key-4. If the from key is excluded, start at the sixth key, key-5.
+    HoplogIterator<byte[], byte[]> iter = reader.scan(("key-" + middleKey).getBytes(), includeFrom,
+        null, true);
+
+    for (; iter.hasNext();) {
+      byte[] key = iter.next();
+      Entry<String, String> entry = mapIter.next();
+      // make sure the KV returned by iterator match the inserted KV
+      assertEquals(0, bac.compare(key, iter.getKey()));
+      assertEquals(0, bac.compare(key, entry.getKey().getBytes()));
+      assertEquals(0, bac.compare(iter.getValue(), entry.getValue().getBytes()));
+      count--;
+    }
+    assertEquals(0, count);
+  }
+
+  /**
+   * Tests to exclusive iterator
+   */
+  public void testToExclusiveIterator() throws Exception {
+    toIterator(false);
+  }
+
+  /**
+   * Tests to inclusive iterator
+   */
+  public void testToInclusiveIterator() throws Exception {
+    toIterator(true);
+  }
+
+  /**
+   * Tests to-condition based iteration. Creates a hoplog with 10 KVs, then creates a scanner
+   * ending at a middle key and verifies the count of KVs iterated over
+   */
+  public void toIterator(boolean includeTo) throws Exception {
+    int count = 10;
+    ByteArrayComparator bac = new ByteArrayComparator();
+    
+    String hoplogName = getRandomHoplogName();
+    // sorted map contains the keys inserted in the hoplog for testing
+    TreeMap<String, String> sortedMap = createHoplog(hoplogName, count);
+    Iterator<Entry<String, String>> mapIter = sortedMap.entrySet().iterator();
+    
+    HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
+    HoplogReader reader = testHoplog.getReader();
+    
+    int middleKey = 4;
+    // keys are like key-X, for X=0 through X=9. End the iterator at the fifth
+    // key, key-4. If the to key is excluded, end at the fourth key, key-3.
+    HoplogIterator<byte[], byte[]> iter = reader.scan(null, true, ("key-" + middleKey).getBytes(), includeTo);
+    
+    for (; iter.hasNext();) {
+      byte[] key = iter.next();
+      Entry<String, String> entry = mapIter.next();
+      // make sure the KV returned by iterator match the inserted KV
+      assertEquals(0, bac.compare(key, iter.getKey()));
+      assertEquals(0, bac.compare(key, entry.getKey().getBytes()));
+      assertEquals(0, bac.compare(iter.getValue(), entry.getValue().getBytes()));
+      
+      count --;
+    }
+    
+    if (includeTo) {
+      count++;
+    }
+
+    assertEquals(10, count + middleKey);
+  }
+  
+  /**
+   * Tests a scan bounded by both from and to keys on a hoplog containing only even-numbered keys
+   */
+  public void testFromToIterator() throws IOException {
+    ByteArrayComparator bac = new ByteArrayComparator();
+    String hoplogName = getRandomHoplogName();
+    HFileSortedOplog hoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
+    
+    int count = 5;
+    HoplogWriter writer = hoplog.createWriter(5);
+    for (int i = 0; i < count; i++) {
+      String value = "value-" + (i * 2);
+      // even keys key-[0 2 4 6 8]
+      writer.append(("key-" + (i * 2)).getBytes(), value.getBytes());
+    }
+    writer.close();
+    
+    HoplogReader reader = hoplog.getReader();
+    HoplogIterator<byte[], byte[]> iter = reader.scan("key-1".getBytes(), true, "key-7".getBytes(), true);
+
+    for (int i = 2; i < 7; i += 2) {
+      assertTrue(iter.hasNext());
+      iter.next();
+      assertEquals(0, bac.compare(("key-" + i).getBytes(), iter.getKey()));
+      assertEquals(0, bac.compare(("value-" + i).getBytes(), iter.getValue()));
+      System.out.println(new String(iter.getKey()));
+    }
+    assertFalse(iter.hasNext());
+  }
+  
+  /**
+   * Tests whether the sorted oplog supports duplicate keys, required when conflation is disabled
+   */
+  public void testDuplicateKeys() throws IOException {
+    String hoplogName = getRandomHoplogName();
+    HFileSortedOplog hoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
+
+    // write duplicate keys
+    int count = 2;
+    HoplogWriter writer = hoplog.createWriter(2);
+    List<String> values = new ArrayList<String>();
+    for(int i = 1; i <= count; i++) {
+      String value = "value" + i;
+      writer.append("key-1".getBytes(), value.getBytes());
+      values.add(value);
+    }
+    writer.close();
+
+    HoplogReader reader = hoplog.getReader();
+    HoplogIterator<byte[], byte[]> scanner = reader.scan();
+    for (byte[] key = null; scanner.hasNext();) {
+      key = scanner.next();
+      count--;
+      assertEquals(0, Bytes.compareTo(key, "key-1".getBytes()));
+      values.remove(new String(scanner.getValue()));
+    }
+    assertEquals(0, count);
+    assertEquals(0, values.size());
+  }
+  
+  public void testOffsetBasedScan() throws Exception {
+    // Each record is 43 bytes and each block is 256 bytes, so each block will
+    // hold about 6 records
+     
+    int blocksize = 1 << 8;
+    System.setProperty(HoplogConfig.HFILE_BLOCK_SIZE_CONF,
+        String.valueOf(blocksize));
+
+    int count = 50;
+    String hoplogName = getRandomHoplogName();
+    createHoplog(hoplogName, count);
+
+    HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(
+        testDataDir, hoplogName), blockCache, stats, storeStats);
+
+    HoplogReader reader = testHoplog.getReader();
+    
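+    // scan(offset, length) appears to return records from the HFile blocks that
+    // overlap the given byte range; the second range below covers the latter
+    // half of the first and should therefore end at the same key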
+    HoplogIterator<byte[], byte[]> scanner = reader.scan(blocksize * 1, blocksize * 2);
+    int range1Count = 0;
+    String range1EndKey = null;
+    for (byte[] key = null; scanner.hasNext();) {
+      key = scanner.next();
+      range1Count++;
+      range1EndKey = new String(key);
+    }
+    int range1EndKeyNum = Integer.valueOf(range1EndKey.substring("Key-".length()));
+
+    scanner = reader.scan(blocksize * 2, blocksize * 1);
+    int range2Count = 0;
+    String range2EndKey = null;
+    for (byte[] key = null; scanner.hasNext();) {
+      key = scanner.next();
+      range2Count++;
+      range2EndKey = new String(key);
+    }
+    
+    assertEquals(range2EndKey, range1EndKey);
+    assertEquals(2, range1Count/range2Count);
+    
+    scanner = reader.scan(blocksize * 3, blocksize * 1);
+    String range3FirstKey = new String(scanner.next());
+    
+    int range3FirstKeyNum = Integer.valueOf(range3FirstKey.substring("Key-"
+        .length()));
+    
+    // range 3 starts at the end of range 1. so the two keys must be consecutive
+    assertEquals(range1EndKeyNum + 1, range3FirstKeyNum);
+    
+    testHoplog.close();
+  }
+  
+  public void testOffsetScanBeyondFileSize() throws Exception {
+    // Each record is 43 bytes and each block is 256 bytes, so each block will
+    // hold about 6 records
+    
+    int blocksize = 1 << 8;
+    System.setProperty(HoplogConfig.HFILE_BLOCK_SIZE_CONF,
+        String.valueOf(blocksize));
+    
+    int count = 20;
+    String hoplogName = getRandomHoplogName();
+    createHoplog(hoplogName, count);
+    
+    HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(
+        testDataDir, hoplogName), blockCache, stats, storeStats);
+    
+    HoplogReader reader = testHoplog.getReader();
+    
+    HoplogIterator<byte[], byte[]> scanner = reader.scan(blocksize * 5, blocksize * 2);
+    assertFalse(scanner.hasNext());
+    
+    testHoplog.close();
+  }
+  
+  public void testZeroValueOffsetScan() throws Exception {
+    // Each record is 43 bytes and each block is 256 bytes, so each block will
+    // hold about 6 records
+    
+    int blocksize = 1 << 8;
+    System.setProperty(HoplogConfig.HFILE_BLOCK_SIZE_CONF,
+        String.valueOf(blocksize));
+    
+    int count = 20;
+    String hoplogName = getRandomHoplogName();
+    createHoplog(hoplogName, count);
+    
+    HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(
+        testDataDir, hoplogName), blockCache, stats, storeStats);
+    
+    HoplogReader reader = testHoplog.getReader();
+    
+    HoplogIterator<byte[], byte[]> scanner = reader.scan(0, blocksize * 2);
+    assertTrue(scanner.hasNext());
+    int keyNum = Integer.valueOf(new String(scanner.next()).substring("Key-"
+        .length()));
+    assertEquals(100000, keyNum);
+
+    testHoplog.close();
+  }
+  
+  /*
+   * Tests that the reader succeeds in reading data even if the FS client is recycled
+   * without this reader knowing
+   */
+  public void testReaderDetectAndUseRecycledFs() throws Exception {
+    HDFSStoreFactoryImpl storeFactory = getCloseableLocalHdfsStoreFactory();
+    HDFSStoreImpl store = (HDFSStoreImpl) storeFactory.create("Store-1");
+    toBeCleaned.add(store);
+
+    HFileSortedOplog hop = new HFileSortedOplog(store, new Path(getName() + "-1-1.hop"), blockCache, stats, storeStats);
+    toBeCleaned.add(hop);
+    TreeMap<String, String> map = createHoplog(10, hop);
+
+    HoplogReader reader = hop.getReader();
+    // verify that each entry put in the hoplog is returned by reader
+    for (Entry<String, String> entry : map.entrySet()) {
+      byte[] value = reader.read(entry.getKey().getBytes());
+      assertNotNull(value);
+    }
+
+    cache.getLogger().info("<ExpectedException action=add>java.io.IOException</ExpectedException>");
+    try {
+      store.getFileSystem().close();
+      store.checkAndClearFileSystem();
+      
+      for (Entry<String, String> entry : map.entrySet()) {
+        reader = hop.getReader();
+        byte[] value = reader.read(entry.getKey().getBytes());
+        assertNotNull(value);
+      }
+    } finally {
+      cache.getLogger().info("<ExpectedException action=remove>java.io.IOException</ExpectedException>");
+    }
+  }
+
+  public void testNewScannerDetechAndUseRecycledFs() throws Exception {
+    HDFSStoreFactoryImpl storeFactory = getCloseableLocalHdfsStoreFactory();
+    HDFSStoreImpl store = (HDFSStoreImpl) storeFactory.create("Store-1");
+    toBeCleaned.add(store);
+
+    HFileSortedOplog hop = new HFileSortedOplog(store, new Path(getName() + "-1-1.hop"), blockCache, stats, storeStats);
+    createHoplog(10, hop);
+
+    HoplogIterator<byte[], byte[]> scanner = hop.getReader().scan();
+    // verify that each entry put in the hoplog is returned by reader
+    int i = 0;
+    while (scanner.hasNext()) {
+      byte[] key = scanner.next();
+      assertNotNull(key);
+      i++;
+    }
+    assertEquals(10, i);
+    // flush block cache
+    hop.close(true);
+    hop.delete();
+    
+    hop = new HFileSortedOplog(store, new Path(getName() + "-1-1.hop"), blockCache, stats, storeStats);
+    createHoplog(10, hop);
+    toBeCleaned.add(hop);
+    hop.getReader();
+    
+    cache.getLogger().info("<ExpectedException action=add>java.io.IOException</ExpectedException>");
+    try {
+      store.getFileSystem().close();
+      store.checkAndClearFileSystem();
+      
+      scanner = hop.getReader().scan();
+      // verify that each entry put in the hoplog is returned by reader
+      i = 0;
+      while (scanner.hasNext()) {
+        byte[] key = scanner.next();
+        assertNotNull(key);
+        i++;
+      }
+      assertEquals(10, i);
+    } finally {
+      cache.getLogger().info("<ExpectedException action=remove>java.io.IOException</ExpectedException>");
+    }
+  }
+  
+  @Override
+  protected void tearDown() throws Exception {
+    for (Object obj : toBeCleaned) {
+      try {
+        if (HDFSStoreImpl.class.isInstance(obj)) {
+          ((HDFSStoreImpl) obj).clearFolder();
+        } else if (AbstractHoplog.class.isInstance(obj)) {
+          ((AbstractHoplog) obj).close();
+          ((AbstractHoplog) obj).delete();
+        }
+      } catch (Exception e) {
+        System.out.println(e);
+      }
+    }
+    super.tearDown();
+  }
+    
+  private TreeMap<String, String> createHoplog(String hoplogName, int numKeys) throws IOException {
+    HFileSortedOplog hoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
+    TreeMap<String, String> map = createHoplog(numKeys, hoplog);
+    return map;
+  }
+}
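
The recovery behaviour exercised by the two recycled-FS tests above boils down to re-fetching the reader (or scanner) from the hoplog after HDFSStoreImpl.checkAndClearFileSystem() has swapped in a fresh FileSystem client. A minimal sketch of that pattern, using only calls that already appear in the tests; the helper name and the single-retry policy are illustrative assumptions, not part of this patch:

    // Hypothetical helper: read a key, retrying once with a fresh reader if the
    // first attempt fails because the FS client underneath was closed/recycled.
    private byte[] readWithOneRetry(HDFSStoreImpl store, HFileSortedOplog hop, byte[] key) throws Exception {
      try {
        return hop.getReader().read(key);
      } catch (Exception e) {
        store.checkAndClearFileSystem();   // let the store replace the closed FileSystem
        return hop.getReader().read(key);  // the new reader binds to the recycled client
      }
    }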

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/SortedOplogListIterJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/SortedOplogListIterJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/SortedOplogListIterJUnitTest.java
new file mode 100644
index 0000000..13aa6a9
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/SortedOplogListIterJUnitTest.java
@@ -0,0 +1,178 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.SortedHoplogPersistedEvent;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.TrackedReference;
+import com.gemstone.gemfire.internal.util.BlobHelper;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+@Category({IntegrationTest.class, HoplogTest.class})
+public class SortedOplogListIterJUnitTest extends BaseHoplogTestCase {
+  public void testOneIterOneKey() throws Exception {
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    items.add(new TestEvent(("0"), ("0")));
+    organizer.flush(items.iterator(), items.size());
+
+    List<TrackedReference<Hoplog>> oplogs = organizer.getSortedOplogs();
+    HoplogSetIterator iter = new HoplogSetIterator(oplogs);
+    assertTrue(iter.hasNext());
+    int count = 0;
+    for (ByteBuffer keyBB = null; iter.hasNext();) {
+      keyBB = iter.next();
+      byte[] key = HFileSortedOplog.byteBufferToArray(keyBB);
+      assertEquals(String.valueOf(count), BlobHelper.deserializeBlob(key));
+      count++;
+    }
+    assertEquals(1, count);
+    organizer.close();
+  }
+  
+  public void testOneIterDuplicateKey() throws Exception {
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+    
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    items.add(new TestEvent(("0"), ("V2")));
+    items.add(new TestEvent(("0"), ("V1")));
+    items.add(new TestEvent(("1"), ("V2")));
+    items.add(new TestEvent(("1"), ("V1")));
+    organizer.flush(items.iterator(), items.size());
+    
+    List<TrackedReference<Hoplog>> oplogs = organizer.getSortedOplogs();
+    HoplogSetIterator iter = new HoplogSetIterator(oplogs);
+    assertTrue(iter.hasNext());
+    int count = 0;
+    for (ByteBuffer keyBB = null; iter.hasNext();) {
+      keyBB = iter.next();
+      byte[] key = HFileSortedOplog.byteBufferToArray(keyBB);
+      byte[] value = HFileSortedOplog.byteBufferToArray(iter.getValue());
+      assertEquals(String.valueOf(count), BlobHelper.deserializeBlob(key));
+      assertEquals("V2", ((PersistedEventImpl) SortedHoplogPersistedEvent.fromBytes(value)).getValue());
+      count++;
+    }
+    assertEquals(2, count);
+    organizer.close();
+  }
+  
+  public void testTwoIterSameKey() throws Exception {
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+    
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    items.add(new TestEvent(("0"), ("V1")));
+    organizer.flush(items.iterator(), items.size());
+    items.clear();
+    items.add(new TestEvent(("0"), ("V2")));
+    organizer.flush(items.iterator(), items.size());
+    
+    List<TrackedReference<Hoplog>> oplogs = organizer.getSortedOplogs();
+    HoplogSetIterator iter = new HoplogSetIterator(oplogs);
+    assertTrue(iter.hasNext());
+    int count = 0;
+    for (ByteBuffer keyBB = null; iter.hasNext();) {
+      keyBB = iter.next();
+      byte[] key = HFileSortedOplog.byteBufferToArray(keyBB);
+      byte[] value = HFileSortedOplog.byteBufferToArray(iter.getValue());
+      assertEquals(String.valueOf(count), BlobHelper.deserializeBlob(key));
+      assertEquals("V2", ((PersistedEventImpl) SortedHoplogPersistedEvent.fromBytes(value)).getValue());
+      count++;
+    }
+    assertEquals(1, count);
+    organizer.close();
+  }
+  
+  public void testTwoIterDiffKey() throws Exception {
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+    
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    items.add(new TestEvent(("0"), ("V1")));
+    organizer.flush(items.iterator(), items.size());
+    items.clear();
+    items.add(new TestEvent(("1"), ("V1")));
+    organizer.flush(items.iterator(), items.size());
+    
+    List<TrackedReference<Hoplog>> oplogs = organizer.getSortedOplogs();
+    HoplogSetIterator iter = new HoplogSetIterator(oplogs);
+    assertTrue(iter.hasNext());
+    int count = 0;
+    for (ByteBuffer keyBB = null; iter.hasNext();) {
+      keyBB = iter.next();
+      byte[] key = HFileSortedOplog.byteBufferToArray(keyBB);
+      byte[] value = HFileSortedOplog.byteBufferToArray(iter.getValue());
+      assertEquals(String.valueOf(count), BlobHelper.deserializeBlob(key));
+      assertEquals("V1", ((PersistedEventImpl) SortedHoplogPersistedEvent.fromBytes(value)).getValue());
+      count++;
+    }
+    assertEquals(2, count);
+    organizer.close();
+  }
+  
+  public void testMergedIterator() throws Exception {
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+
+    // #1
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    items.add(new TestEvent(("1"), ("1")));
+    items.add(new TestEvent(("2"), ("1")));
+    items.add(new TestEvent(("3"), ("1")));
+    items.add(new TestEvent(("4"), ("1")));
+    organizer.flush(items.iterator(), items.size());
+
+    // #2
+    items.clear();
+    items.add(new TestEvent(("2"), ("1")));
+    items.add(new TestEvent(("4"), ("1")));
+    items.add(new TestEvent(("6"), ("1")));
+    items.add(new TestEvent(("8"), ("1")));
+    organizer.flush(items.iterator(), items.size());
+
+    // #3
+    items.clear();
+    items.add(new TestEvent(("1"), ("1")));
+    items.add(new TestEvent(("3"), ("1")));
+    items.add(new TestEvent(("5"), ("1")));
+    items.add(new TestEvent(("7"), ("1")));
+    items.add(new TestEvent(("9"), ("1")));
+    organizer.flush(items.iterator(), items.size());
+
+    // #4
+    items.clear();
+    items.add(new TestEvent(("0"), ("1")));
+    items.add(new TestEvent(("1"), ("1")));
+    items.add(new TestEvent(("4"), ("1")));
+    items.add(new TestEvent(("5"), ("1")));
+    organizer.flush(items.iterator(), items.size());
+
+    List<TrackedReference<Hoplog>> oplogs = organizer.getSortedOplogs();
+    HoplogSetIterator iter = new HoplogSetIterator(oplogs);
+    // the merged iteration for this test should yield each key 0-9 exactly once; the flushed oplogs contain:
+    // 0 1 4 5 oplog #4
+    // 1 3 5 7 9 oplog #3
+    // 2 4 6 8 oplog #2
+    // 1 2 3 4 oplog #1
+    int count = 0;
+    for (ByteBuffer keyBB = null; iter.hasNext();) {
+      keyBB = iter.next();
+      byte[] key = HFileSortedOplog.byteBufferToArray(keyBB);
+      assertEquals(String.valueOf(count), BlobHelper.deserializeBlob(key));
+      count++;
+    }
+    assertEquals(10, count);
+    organizer.close();
+  }
+}
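
The iteration pattern asserted in testMergedIterator can be pictured as a set of sorted oplogs collapsing into one sorted, de-duplicated key sequence in which the newest flush wins. The sketch below only illustrates that contract; it is not the HoplogSetIterator implementation, which merges the hoplog streams lazily instead of materializing a map:

    import java.util.List;
    import java.util.SortedMap;
    import java.util.TreeMap;

    class MergedViewSketch {
      /** Oplogs are passed oldest first; later (newer) flushes overwrite older values. */
      static SortedMap<String, String> mergedView(List<TreeMap<String, String>> oplogsOldestFirst) {
        SortedMap<String, String> merged = new TreeMap<String, String>();
        for (TreeMap<String, String> oplog : oplogsOldestFirst) {
          merged.putAll(oplog); // a key flushed again later keeps the newer value, as asserted in testTwoIterSameKey
        }
        return merged; // keys come out sorted and unique, e.g. "0".."9" in testMergedIterator
      }
    }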


[13/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegion.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegion.java
index 328c196..c75286e 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegion.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegion.java
@@ -41,6 +41,7 @@ import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 import java.util.concurrent.FutureTask;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ThreadFactory;
@@ -92,12 +93,22 @@ import com.gemstone.gemfire.cache.TransactionDataNotColocatedException;
 import com.gemstone.gemfire.cache.TransactionDataRebalancedException;
 import com.gemstone.gemfire.cache.TransactionException;
 import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueImpl;
+import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueStats;
 import com.gemstone.gemfire.cache.execute.EmtpyRegionFunctionException;
 import com.gemstone.gemfire.cache.execute.Function;
 import com.gemstone.gemfire.cache.execute.FunctionContext;
 import com.gemstone.gemfire.cache.execute.FunctionException;
 import com.gemstone.gemfire.cache.execute.FunctionService;
 import com.gemstone.gemfire.cache.execute.ResultCollector;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.CompactionStatus;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSFlushQueueFunction;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSForceCompactionArgs;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSForceCompactionFunction;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSForceCompactionResultCollector;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSLastCompactionTimeFunction;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer;
 import com.gemstone.gemfire.cache.partition.PartitionListener;
 import com.gemstone.gemfire.cache.partition.PartitionNotAvailableException;
 import com.gemstone.gemfire.cache.query.FunctionDomainException;
@@ -213,6 +224,7 @@ import com.gemstone.gemfire.internal.cache.partitioned.PutAllPRMessage;
 import com.gemstone.gemfire.internal.cache.partitioned.PutMessage;
 import com.gemstone.gemfire.internal.cache.partitioned.PutMessage.PutResult;
 import com.gemstone.gemfire.internal.cache.partitioned.RegionAdvisor;
+import com.gemstone.gemfire.internal.cache.partitioned.RegionAdvisor.BucketVisitor;
 import com.gemstone.gemfire.internal.cache.partitioned.RegionAdvisor.PartitionProfile;
 import com.gemstone.gemfire.internal.cache.partitioned.RemoveAllPRMessage;
 import com.gemstone.gemfire.internal.cache.partitioned.RemoveIndexesMessage;
@@ -244,6 +256,7 @@ import com.gemstone.gemfire.internal.offheap.annotations.Released;
 import com.gemstone.gemfire.internal.offheap.annotations.Unretained;
 import com.gemstone.gemfire.internal.sequencelog.RegionLogger;
 import com.gemstone.gemfire.internal.util.TransformUtils;
+import com.gemstone.gemfire.internal.util.concurrent.FutureResult;
 import com.gemstone.gemfire.internal.util.concurrent.StoppableCountDownLatch;
 import com.gemstone.gemfire.i18n.StringId;
 
@@ -695,9 +708,17 @@ public class PartitionedRegion extends LocalRegion implements
   private final PartitionListener[] partitionListeners;
 
   private boolean isShadowPR = false;
-
+  private boolean isShadowPRForHDFS = false;
+  
   private AbstractGatewaySender parallelGatewaySender = null;
   
+  private final ThreadLocal<Boolean> queryHDFS = new ThreadLocal<Boolean>() {
+    @Override
+    protected Boolean initialValue() {
+      return false;
+    }
+  };
+  
   public PartitionedRegion(String regionname, RegionAttributes ra,
       LocalRegion parentRegion, GemFireCacheImpl cache,
       InternalRegionArguments internalRegionArgs) {
@@ -717,6 +738,12 @@ public class PartitionedRegion extends LocalRegion implements
     // (which prevents pridmap cleanup).
     cache.getDistributedSystem().addDisconnectListener(dsPRIdCleanUpListener);
     
+    // add an async queue for the region if the store name is not null. 
+    if (this.getHDFSStoreName() != null) {
+      String eventQueueName = getHDFSEventQueueName();
+      super.addAsyncEventQueueId(eventQueueName);
+    }
+
     // this.userScope = ra.getScope();
     this.partitionAttributes = ra.getPartitionAttributes();
     this.localMaxMemory = this.partitionAttributes.getLocalMaxMemory();
@@ -795,6 +822,8 @@ public class PartitionedRegion extends LocalRegion implements
     if (internalRegionArgs.isUsedForParallelGatewaySenderQueue()) {
       this.isShadowPR = true;
       this.parallelGatewaySender = internalRegionArgs.getParallelGatewaySender();
+      if (internalRegionArgs.isUsedForHDFSParallelGatewaySenderQueue())
+        this.isShadowPRForHDFS = true;
     }
     
     
@@ -838,10 +867,38 @@ public class PartitionedRegion extends LocalRegion implements
     });
   }
 
+  @Override
+  public final boolean isHDFSRegion() {
+    return this.getHDFSStoreName() != null;
+  }
+
+  @Override
+  public final boolean isHDFSReadWriteRegion() {
+    return isHDFSRegion() && !getHDFSWriteOnly();
+  }
+
+  @Override
+  protected final boolean isHDFSWriteOnly() {
+    return isHDFSRegion() && getHDFSWriteOnly();
+  }
+
+  public final void setQueryHDFS(boolean includeHDFS) {
+    queryHDFS.set(includeHDFS);
+  }
+
+  @Override
+  public final boolean includeHDFSResults() {
+    return queryHDFS.get();
+  }
+
   public final boolean isShadowPR() {
     return isShadowPR;
   }
 
+  public final boolean isShadowPRForHDFS() {
+    return isShadowPRForHDFS;
+  }
+  
   public AbstractGatewaySender getParallelGatewaySender() {
     return parallelGatewaySender;
   }
@@ -1607,7 +1664,7 @@ public class PartitionedRegion extends LocalRegion implements
       try {
         final boolean loc = (this.localMaxMemory > 0) && retryNode.equals(getMyId());
         if (loc) {
-          ret = this.dataStore.getEntryLocally(bucketId, key, access, allowTombstones);
+          ret = this.dataStore.getEntryLocally(bucketId, key, access, allowTombstones, true);
         } else {
           ret = getEntryRemotely(retryNode, bucketIdInt, key, access, allowTombstones);
           // TODO:Suranjan&Yogesh : there should be better way than this one
@@ -2066,7 +2123,8 @@ public class PartitionedRegion extends LocalRegion implements
           bucketStorageAssigned=false;
           // if this is a Delta update, then throw exception since the key doesn't
           // exist if there is no bucket for it yet
-          if (event.hasDelta()) {
+          // For HDFS region, we will recover key, so allow bucket creation
+          if (!this.dataPolicy.withHDFS() && event.hasDelta()) {
             throw new EntryNotFoundException(LocalizedStrings.
               PartitionedRegion_CANNOT_APPLY_A_DELTA_WITHOUT_EXISTING_ENTRY
                 .toLocalizedString());
@@ -3261,9 +3319,9 @@ public class PartitionedRegion extends LocalRegion implements
    */
    @Override
   public Object get(Object key, Object aCallbackArgument,
-                    boolean generateCallbacks, boolean disableCopyOnRead, boolean preferCD,
-                    ClientProxyMembershipID requestingClient,
-                    EntryEventImpl clientEvent, boolean returnTombstones) throws TimeoutException, CacheLoaderException
+      boolean generateCallbacks, boolean disableCopyOnRead, boolean preferCD,
+      ClientProxyMembershipID requestingClient,
+      EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) throws TimeoutException, CacheLoaderException
   {
     validateKey(key);
     validateCallbackArg(aCallbackArgument);
@@ -3277,7 +3335,7 @@ public class PartitionedRegion extends LocalRegion implements
       // if scope is local and there is no loader, then
       // don't go further to try and get value
       Object value = getDataView().findObject(getKeyInfo(key, aCallbackArgument), this, true/*isCreate*/, generateCallbacks,
-                                      null /*no local value*/, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones);
+                                      null /*no local value*/, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, allowReadFromHDFS);
       if (value != null && !Token.isInvalid(value)) {
         miss = false;
       }
@@ -3323,7 +3381,7 @@ public class PartitionedRegion extends LocalRegion implements
     if (primary == null) {
       return null;
     }
-    if (isTX()) {
+    if (isTX() || this.hdfsStoreName != null) {
       return getNodeForBucketWrite(bucketId, null);
     }
     InternalDistributedMember result =  getRegionAdvisor().getPreferredNode(bucketId);
@@ -3337,7 +3395,7 @@ public class PartitionedRegion extends LocalRegion implements
    */
   private InternalDistributedMember getNodeForBucketReadOrLoad(int bucketId) {
     InternalDistributedMember targetNode;
-    if (!this.haveCacheLoader) {
+    if (!this.haveCacheLoader && (this.hdfsStoreName == null)) {
       targetNode = getNodeForBucketRead(bucketId);
     }
     else {
@@ -3470,16 +3528,9 @@ public class PartitionedRegion extends LocalRegion implements
   }
 
   @Override
-  protected Object findObjectInSystem(KeyInfo keyInfo,
-                                      boolean isCreate,
-                                      TXStateInterface tx,
-                                      boolean generateCallbacks,
-                                      Object localValue,
-                                      boolean disableCopyOnRead,
-                                      boolean preferCD,
-                                      ClientProxyMembershipID requestingClient,
-                                      EntryEventImpl clientEvent,
-                                      boolean returnTombstones)
+  protected Object findObjectInSystem(KeyInfo keyInfo, boolean isCreate,
+      TXStateInterface tx, boolean generateCallbacks, Object localValue, boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient,
+      EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS)
       throws CacheLoaderException, TimeoutException
   {
     Object obj = null;
@@ -3515,7 +3566,7 @@ public class PartitionedRegion extends LocalRegion implements
         return null;
       }
       
-      obj = getFromBucket(targetNode, bucketId, key, aCallbackArgument, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, allowRetry);
+      obj = getFromBucket(targetNode, bucketId, key, aCallbackArgument, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, allowRetry, allowReadFromHDFS);
     }
     finally {
       this.prStats.endGet(startTime);
@@ -4098,22 +4149,15 @@ public class PartitionedRegion extends LocalRegion implements
 
   /**
    * no docs
-   * @param preferCD
+   * @param preferCD 
    * @param requestingClient the client requesting the object, or null if not from a client
    * @param clientEvent TODO
    * @param returnTombstones TODO
    * @param allowRetry if false then do not retry
    */
   private Object getFromBucket(final InternalDistributedMember targetNode,
-                               int bucketId,
-                               final Object key,
-                               final Object aCallbackArgument,
-                               boolean disableCopyOnRead,
-                               boolean preferCD,
-                               ClientProxyMembershipID requestingClient,
-                               EntryEventImpl clientEvent,
-                               boolean returnTombstones,
-                               boolean allowRetry) {
+      int bucketId, final Object key, final Object aCallbackArgument,
+      boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowRetry, boolean allowReadFromHDFS) {
     final boolean isDebugEnabled = logger.isDebugEnabled();
     
     final int retryAttempts = calcRetry();
@@ -4143,7 +4187,7 @@ public class PartitionedRegion extends LocalRegion implements
       try {
         if (isLocal) {
           obj = this.dataStore.getLocally(bucketId, key, aCallbackArgument,
-              disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, false);
+              disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, false, allowReadFromHDFS);
         }
         else {
             if (localCacheEnabled && null != (obj = localCacheGet(key))) { // OFFHEAP: copy into heap cd; TODO optimize for preferCD case
@@ -4152,14 +4196,14 @@ public class PartitionedRegion extends LocalRegion implements
               }
               return obj;
             }
-            else if (this.haveCacheLoader) {
+            else if (this.haveCacheLoader || this.hdfsStoreName != null) {
               // If the region has a cache loader, 
               // the target node is the primary server of the bucket. But, if the 
               // value can be found in a local bucket, we should first try there. 
 
               /* MergeGemXDHDFSToGFE -readoing from local bucket was disabled in GemXD*/
 			  if (null != ( obj = getFromLocalBucket(bucketId, key, aCallbackArgument,
-                  disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones))) {
+                  disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, allowReadFromHDFS))) {
                 return obj;
               } 
             }
@@ -4167,7 +4211,7 @@ public class PartitionedRegion extends LocalRegion implements
           //  Test hook
           if (((LocalRegion)this).isTest())
             ((LocalRegion)this).incCountNotFoundInLocal();
-          obj = getRemotely(retryNode, bucketId, key, aCallbackArgument, preferCD, requestingClient, clientEvent, returnTombstones);
+          obj = getRemotely(retryNode, bucketId, key, aCallbackArgument, preferCD, requestingClient, clientEvent, returnTombstones, allowReadFromHDFS);
  
           // TODO:Suranjan&Yogesh : there should be better way than this one
           String name = Thread.currentThread().getName();
@@ -4265,9 +4309,9 @@ public class PartitionedRegion extends LocalRegion implements
    *   
    */
   public Object getFromLocalBucket(int bucketId, final Object key,
-                                   final Object aCallbackArgument, boolean disableCopyOnRead,
-                                   boolean preferCD, ClientProxyMembershipID requestingClient,
-                                   EntryEventImpl clientEvent, boolean returnTombstones)
+		final Object aCallbackArgument, boolean disableCopyOnRead,
+		boolean preferCD, ClientProxyMembershipID requestingClient,
+		EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS)
 		throws ForceReattemptException, PRLocallyDestroyedException {
     Object obj;
     // try reading locally. 
@@ -4276,7 +4320,7 @@ public class PartitionedRegion extends LocalRegion implements
       return null; // fixes 51657
     }
     if (readNode.equals(getMyId()) && null != ( obj = this.dataStore.getLocally(bucketId, key, aCallbackArgument,
-      disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, true))) {
+      disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, true, allowReadFromHDFS))) {
 	  if (logger.isTraceEnabled()) {
             logger.trace("getFromBucket: Getting key {} ({}) locally - success", key, key.hashCode());
 	  }
@@ -5072,13 +5116,7 @@ public class PartitionedRegion extends LocalRegion implements
    *                 if the peer is no longer available
    */
   public Object getRemotely(InternalDistributedMember targetNode,
-                            int bucketId,
-                            final Object key,
-                            final Object aCallbackArgument,
-                            boolean preferCD,
-                            ClientProxyMembershipID requestingClient,
-                            EntryEventImpl clientEvent,
-                            boolean returnTombstones) throws PrimaryBucketException,
+      int bucketId, final Object key, final Object aCallbackArgument, boolean preferCD, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) throws PrimaryBucketException,
       ForceReattemptException {
     Object value;
     if (logger.isDebugEnabled()) {
@@ -5086,7 +5124,7 @@ public class PartitionedRegion extends LocalRegion implements
           getPRId(), BUCKET_ID_SEPARATOR, bucketId, key);
     }
     GetResponse response = GetMessage.send(targetNode, this, key,
-        aCallbackArgument, requestingClient, returnTombstones);
+        aCallbackArgument, requestingClient, returnTombstones, allowReadFromHDFS);
     this.prStats.incPartitionMessagesSent();
     value = response.waitForResponse(preferCD);
     if (clientEvent != null) {
@@ -7040,6 +7078,9 @@ public class PartitionedRegion extends LocalRegion implements
   public int entryCount(Set<Integer> buckets,
       boolean estimate) {
     Map<Integer, SizeEntry> bucketSizes = null;
+    if (isHDFSReadWriteRegion() && (includeHDFSResults() || estimate)) {
+      bucketSizes = getSizeForHDFS(buckets, estimate);
+    } else {
     if (buckets != null) {
       if (this.dataStore != null) {
         List<Integer> list = new ArrayList<Integer>();	
@@ -7071,6 +7112,7 @@ public class PartitionedRegion extends LocalRegion implements
         }
       }
     }
+    }
 
     int size = 0;
     if (bucketSizes != null) {
@@ -7093,7 +7135,81 @@ public class PartitionedRegion extends LocalRegion implements
       return 0;
     }
   }
+  private Map<Integer, SizeEntry> getSizeForHDFS(final Set<Integer> buckets, boolean estimate) {
+    // figure out which buckets to include
+    Map<Integer, SizeEntry> bucketSizes = new HashMap<Integer, SizeEntry>();
+    getRegionAdvisor().accept(new BucketVisitor<Map<Integer, SizeEntry>>() {
+      @Override
+      public boolean visit(RegionAdvisor advisor, ProxyBucketRegion pbr,
+          Map<Integer, SizeEntry> map) {
+        if (buckets == null || buckets.contains(pbr.getBucketId())) {
+          map.put(pbr.getBucketId(), null);
+          // ensure that the bucket has been created
+          pbr.getPartitionedRegion().getOrCreateNodeForBucketWrite(pbr.getBucketId(), null);
+        }
+        return true;
+      }
+    }, bucketSizes);
 
+    RetryTimeKeeper retry = new RetryTimeKeeper(retryTimeout);
+
+    while (true) {
+      // get the size from local buckets
+      if (dataStore != null) {
+        Map<Integer, SizeEntry> localSizes;
+        if (estimate) {
+          localSizes = dataStore.getSizeEstimateForLocalPrimaryBuckets();
+        } else {
+          localSizes = dataStore.getSizeForLocalPrimaryBuckets();
+        }
+        for (Map.Entry<Integer, SizeEntry> me : localSizes.entrySet()) {
+          if (bucketSizes.containsKey(me.getKey())) {
+            bucketSizes.put(me.getKey(), me.getValue());
+          }
+        }
+      }
+      // all done
+      int count = 0;
+      Iterator it = bucketSizes.values().iterator();
+      while (it.hasNext()) {
+        if (it.next() != null) count++;
+      }
+      if (bucketSizes.size() == count) {
+        return bucketSizes;
+      }
+      
+      Set<InternalDistributedMember> remotes = getRegionAdvisor().adviseDataStore(true);
+      remotes.remove(getMyId());
+      
+      // collect remote sizes
+      if (!remotes.isEmpty()) {
+        Map<Integer, SizeEntry> remoteSizes = new HashMap<Integer, PartitionedRegion.SizeEntry>();
+        try {
+          remoteSizes = getSizeRemotely(remotes, estimate);
+        } catch (ReplyException e) {
+          // Remote member will never throw ForceReattemptException or
+          // PrimaryBucketException, so any exception on the remote member
+          // should be re-thrown
+          e.handleAsUnexpected();
+        }
+        for (Map.Entry<Integer, SizeEntry> me : remoteSizes.entrySet()) {
+          Integer k = me.getKey();
+          if (bucketSizes.containsKey(k) && me.getValue().isPrimary()) {
+            bucketSizes.put(k, me.getValue());
+          }
+        }
+      }
+      
+      if (retry.overMaximum()) {
+        checkReadiness();
+        PRHARedundancyProvider.timedOut(this, null, null, "calculate size", retry.getRetryTime());
+      }
+      
+      // throttle subsequent attempts
+      retry.waitForBucketsRecovery();
+    }
+  }
+  
   /**
    * This method gets a PartitionServerSocketConnection to targetNode and sends
    * size request to the node. It returns size of all the buckets "primarily"
@@ -7491,7 +7607,9 @@ public class PartitionedRegion extends LocalRegion implements
       .append("; isClosed=").append(this.isClosed)
       .append("; retryTimeout=").append(this.retryTimeout)
       .append("; serialNumber=").append(getSerialNumber())
-
+      .append("; hdfsStoreName=").append(getHDFSStoreName())
+      .append("; hdfsWriteOnly=").append(getHDFSWriteOnly())
+      
       .append("; partition attributes=").append(getPartitionAttributes().toString())
       .append("; on VM ").append(getMyId())
       .append("]")
@@ -7634,6 +7752,18 @@ public class PartitionedRegion extends LocalRegion implements
   @Override
   public void destroyRegion(Object aCallbackArgument)
       throws CacheWriterException, TimeoutException {
+    //For HDFS regions, we need a data store
+    //to do the global destroy so that it can delete
+    //the data from HDFS as well.
+    if(!isDataStore() && this.dataPolicy.withHDFS()) {
+      if(destroyOnDataStore(aCallbackArgument)) {
+        //If we were able to find a data store to do the destroy,
+        //stop here.
+        //otherwise go ahead and destroy the region from this member
+        return;
+      }
+    }
+
     checkForColocatedChildren();
     getDataView().checkSupportsRegionDestroy();
     checkForLimitedOrNoAccess();
@@ -7681,6 +7811,7 @@ public class PartitionedRegion extends LocalRegion implements
 
     boolean keepWaiting = true;
 
+    AsyncEventQueueImpl hdfsQueue = getHDFSEventQueue();
     while(true) {
       List<String> pausedSenders = new ArrayList<String>();
       List<ConcurrentParallelGatewaySenderQueue> parallelQueues = new ArrayList<ConcurrentParallelGatewaySenderQueue>();
@@ -7798,6 +7929,11 @@ public class PartitionedRegion extends LocalRegion implements
         }
       }
     }
+    
+    if(hdfsQueue != null) {
+      hdfsQueue.destroy();
+      cache.removeAsyncEventQueue(hdfsQueue);
+    }
   }
         
   @Override
@@ -7978,6 +8114,9 @@ public class PartitionedRegion extends LocalRegion implements
     final boolean isClose = event.getOperation().isClose();
     destroyPartitionedRegionLocally(!isClose);
     destroyCleanUp(event, serials);
+    if (!isClose) {
+      destroyHDFSData();
+    }
     return true;
   }
 
@@ -8270,6 +8409,8 @@ public class PartitionedRegion extends LocalRegion implements
       }
     }
     
+    HDFSRegionDirector.getInstance().clear(getFullPath());
+    
     RegionLogger.logDestroy(getName(), cache.getMyId(), null, op.isClose());
   }
 
@@ -10914,6 +11055,11 @@ public class PartitionedRegion extends LocalRegion implements
         }
       }
       
+      //hoplogs - pause HDFS dispatcher while we 
+      //clear the buckets to avoid missing some files
+      //during the clear
+      pauseHDFSDispatcher();
+
       try {
         // now clear the bucket regions; we go through the primary bucket
         // regions so there is distribution for every bucket but that
@@ -10929,6 +11075,7 @@ public class PartitionedRegion extends LocalRegion implements
           }
         }
       } finally {
+        resumeHDFSDispatcher();
         // release the bucket locks
         for (BucketRegion br : lockedRegions) {
           try {
@@ -10944,6 +11091,247 @@ public class PartitionedRegion extends LocalRegion implements
     }
     
   }
+  
+  /** Destroys all data in HDFS, if this region is using HDFS persistence. */
+  private void destroyHDFSData() {
+    if(getHDFSStoreName() == null) {
+      return;
+    }
+    
+    try {
+      hdfsManager.destroyData();
+    } catch (IOException e) {
+      logger.warn(LocalizedStrings.HOPLOG_UNABLE_TO_DELETE_HDFS_DATA, e);
+    }
+  }
+
+  private void pauseHDFSDispatcher() {
+    if(!isHDFSRegion()) {
+      return;
+    }
+    AbstractGatewaySenderEventProcessor eventProcessor = getHDFSEventProcessor();
+    if (eventProcessor == null) return;
+    eventProcessor.pauseDispatching();
+    eventProcessor.waitForDispatcherToPause();
+  }
+  
+  /**
+   * Get the statistics for the HDFS event queue associated with this region,
+   * if any
+   */
+  public AsyncEventQueueStats getHDFSEventQueueStats() {
+    AsyncEventQueueImpl asyncQ = getHDFSEventQueue();
+    if(asyncQ == null) {
+      return null;
+    }
+    return asyncQ.getStatistics();
+  }
+  
+  protected AbstractGatewaySenderEventProcessor getHDFSEventProcessor() {
+    final AsyncEventQueueImpl asyncQ = getHDFSEventQueue();
+    final AbstractGatewaySender gatewaySender = (AbstractGatewaySender)asyncQ.getSender();
+    AbstractGatewaySenderEventProcessor eventProcessor = gatewaySender.getEventProcessor();
+    return eventProcessor;
+  }
+
+  public AsyncEventQueueImpl getHDFSEventQueue() {
+    String asyncQId = getHDFSEventQueueName();
+    if(asyncQId == null) {
+      return null;
+    }
+    final AsyncEventQueueImpl asyncQ =  (AsyncEventQueueImpl)this.getCache().getAsyncEventQueue(asyncQId);
+    return asyncQ;
+  }
+  
+  private void resumeHDFSDispatcher() {
+    if(!isHDFSRegion()) {
+      return;
+    }
+    AbstractGatewaySenderEventProcessor eventProcessor = getHDFSEventProcessor();
+    if (eventProcessor == null) return;
+    eventProcessor.resumeDispatching();
+  }
+
+  protected String getHDFSEventQueueName() {
+    if (!this.getDataPolicy().withHDFS()) return null;
+    String colocatedWith = this.getPartitionAttributes().getColocatedWith();
+    String eventQueueName;
+    if (colocatedWith != null) {
+      PartitionedRegion leader = ColocationHelper.getLeaderRegionName(this);
+      eventQueueName = HDFSStoreFactoryImpl.getEventQueueName(leader
+          .getFullPath());
+    }
+    else {
+      eventQueueName = HDFSStoreFactoryImpl.getEventQueueName(getFullPath());
+    }
+    return eventQueueName;
+  }
+
+  /**
+   * schedules compaction on all members where this region is hosted.
+   * 
+   * @param isMajor
+   *          true for major compaction
+   * @param maxWaitTime
+   *          time to wait for the operation to complete, 0 will wait forever
+   */
+  @Override
+  public void forceHDFSCompaction(boolean isMajor, Integer maxWaitTime) {
+    if (!this.isHDFSReadWriteRegion()) {
+      if (this.isHDFSRegion()) {
+        throw new UnsupportedOperationException(
+            LocalizedStrings.HOPLOG_CONFIGURED_AS_WRITEONLY
+                .toLocalizedString(getName()));
+      }
+      throw new UnsupportedOperationException(
+          LocalizedStrings.HOPLOG_DOES_NOT_USE_HDFSSTORE
+              .toLocalizedString(getName()));
+    }
+    // send request to remote data stores
+    long start = System.currentTimeMillis();
+    int waitTime = maxWaitTime * 1000;
+    HDFSForceCompactionArgs args = new HDFSForceCompactionArgs(getRegionAdvisor().getBucketSet(), isMajor, waitTime);
+    HDFSForceCompactionResultCollector rc = new HDFSForceCompactionResultCollector();
+    AbstractExecution execution = (AbstractExecution) FunctionService.onRegion(this).withArgs(args).withCollector(rc);
+    execution.setWaitOnExceptionFlag(true); // wait for all exceptions
+    if (logger.isDebugEnabled()) {
+      logger.debug("HDFS: ForceCompat invoking function with arguments "+args);
+    }
+    execution.execute(HDFSForceCompactionFunction.ID);
+    List<CompactionStatus> result = rc.getResult();
+    Set<Integer> successfulBuckets = rc.getSuccessfulBucketIds();
+    if (rc.shouldRetry()) {
+      int retries = 0;
+      while (retries < HDFSForceCompactionFunction.FORCE_COMPACTION_MAX_RETRIES) {
+        waitTime -= System.currentTimeMillis() - start;
+        if (maxWaitTime > 0 && waitTime < 0) {
+          break;
+        }
+        start = System.currentTimeMillis();
+        retries++;
+        Set<Integer> retryBuckets = new HashSet<Integer>(getRegionAdvisor().getBucketSet());
+        retryBuckets.removeAll(successfulBuckets);
+        
+        for (int bucketId : retryBuckets) {
+          getNodeForBucketWrite(bucketId, new PartitionedRegion.RetryTimeKeeper(waitTime));
+          long now = System.currentTimeMillis();
+          waitTime -= now - start;
+          start = now;
+        }
+        
+        args = new HDFSForceCompactionArgs(retryBuckets, isMajor, waitTime);
+        rc = new HDFSForceCompactionResultCollector();
+        execution = (AbstractExecution) FunctionService.onRegion(this).withArgs(args).withCollector(rc);
+        execution.setWaitOnExceptionFlag(true); // wait for all exceptions
+        if (logger.isDebugEnabled()) {
+          logger.debug("HDFS: ForceCompat re-invoking function with arguments "+args+" filter:"+retryBuckets);
+        }
+        execution.execute(HDFSForceCompactionFunction.ID);
+        result = rc.getResult();
+        successfulBuckets.addAll(rc.getSuccessfulBucketIds());
+      }
+    }
+    if (successfulBuckets.size() != getRegionAdvisor().getBucketSet().size()) {
+      checkReadiness();
+      Set<Integer> uncessfulBuckets = new HashSet<Integer>(getRegionAdvisor().getBucketSet());
+      uncessfulBuckets.removeAll(successfulBuckets);
+      throw new FunctionException("Could not run compaction on following buckets:"+uncessfulBuckets);
+    }
+  }
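
  // Hedged usage sketch for forceHDFSCompaction above (not part of this patch;
  // the cache handle and the "orders" region name are assumptions). Write-only
  // or non-HDFS regions take the UnsupportedOperationException path coded above.
  private static void forceCompactionExample(GemFireCacheImpl cache) {
    PartitionedRegion orders = (PartitionedRegion) cache.getRegion("orders");
    orders.forceHDFSCompaction(true /* major */, 60 /* wait up to 60 seconds; 0 waits forever */);
  }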
+
+  /**
+   * Schedules compaction on local buckets
+   * @param buckets the set of buckets to compact
+   * @param isMajor true for major compaction
+   * @param time TODO use this
+   * @return a list of futures for the scheduled compaction tasks
+   */
+  public List<Future<CompactionStatus>> forceLocalHDFSCompaction(Set<Integer> buckets, boolean isMajor, long time) {
+    List<Future<CompactionStatus>> futures = new ArrayList<Future<CompactionStatus>>();
+    if (!isDataStore() || hdfsManager == null || buckets == null || buckets.isEmpty()) {
+      if (logger.isDebugEnabled()) {
+        logger.debug(
+            "HDFS: did not schedule local " + (isMajor ? "Major" : "Minor") + " compaction");
+      }
+      // nothing to do
+      return futures;
+    }
+    if (logger.isDebugEnabled()) {
+      logger.debug(
+          "HDFS: scheduling local " + (isMajor ? "Major" : "Minor") + " compaction for buckets:"+buckets);
+    }
+    Collection<HoplogOrganizer> organizers = hdfsManager.getBucketOrganizers(buckets);
+    
+    for (HoplogOrganizer hoplogOrganizer : organizers) {
+      Future<CompactionStatus> f = hoplogOrganizer.forceCompaction(isMajor);
+      futures.add(f);
+    }
+    return futures;
+  }
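
  // Hedged usage sketch for forceLocalHDFSCompaction above (not part of this
  // patch; "pr" and "buckets" are assumptions). Each returned future completes
  // when the corresponding bucket's compaction task finishes.
  private static void localCompactionExample(PartitionedRegion pr, Set<Integer> buckets) throws Exception {
    List<Future<CompactionStatus>> futures = pr.forceLocalHDFSCompaction(buckets, false /* minor */, 0);
    for (Future<CompactionStatus> f : futures) {
      CompactionStatus status = f.get(); // blocks until that bucket's compaction completes
      // status could be inspected or logged here
    }
  }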
+  
+  @Override
+  public void flushHDFSQueue(int maxWaitTime) {
+    if (!this.isHDFSRegion()) {
+      throw new UnsupportedOperationException(
+          LocalizedStrings.HOPLOG_DOES_NOT_USE_HDFSSTORE
+              .toLocalizedString(getName()));
+    }
+    HDFSFlushQueueFunction.flushQueue(this, maxWaitTime);
+  }
+  
+  @Override
+  public long lastMajorHDFSCompaction() {
+    if (!this.isHDFSReadWriteRegion()) {
+      if (this.isHDFSRegion()) {
+        throw new UnsupportedOperationException(
+            LocalizedStrings.HOPLOG_CONFIGURED_AS_WRITEONLY
+                .toLocalizedString(getName()));
+      }
+      throw new UnsupportedOperationException(
+          LocalizedStrings.HOPLOG_DOES_NOT_USE_HDFSSTORE
+              .toLocalizedString(getName()));
+    }
+    List<Long> result = (List<Long>) FunctionService.onRegion(this)
+        .execute(HDFSLastCompactionTimeFunction.ID)
+        .getResult();
+    if (logger.isDebugEnabled()) {
+      logger.debug("HDFS: Result of LastCompactionTimeFunction "+result);
+    }
+    long min = Long.MAX_VALUE;
+    for (long ts : result) {
+      if (ts !=0 && ts < min) {
+        min = ts;
+      }
+    }
+    min = min == Long.MAX_VALUE ? 0 : min;
+    return min;
+  }
+
+  public long lastLocalMajorHDFSCompaction() {
+    if (!isDataStore() || hdfsManager == null) {
+      // nothing to do
+      return 0;
+    }
+    if (logger.isDebugEnabled()) {
+      logger.debug(
+          "HDFS: getting local Major compaction time");
+    }
+    Collection<HoplogOrganizer> organizers = hdfsManager.getBucketOrganizers();
+    long minTS = Long.MAX_VALUE;
+    for (HoplogOrganizer hoplogOrganizer : organizers) {
+      long ts = hoplogOrganizer.getLastMajorCompactionTimestamp();
+      if (ts !=0 && ts < minTS) {
+        minTS = ts;
+      }
+    }
+    minTS = minTS == Long.MAX_VALUE ? 0 : minTS;
+    if (logger.isDebugEnabled()) {
+      logger.debug(
+          "HDFS: local Major compaction time: "+minTS);
+    }
+    return minTS;
+  }
+
 
   public void shadowPRWaitForBucketRecovery() {
     assert this.isShadowPR();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDataStore.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDataStore.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDataStore.java
index bda68e3..57b1e71 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDataStore.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDataStore.java
@@ -64,6 +64,7 @@ import com.gemstone.gemfire.cache.execute.Function;
 import com.gemstone.gemfire.cache.execute.FunctionException;
 import com.gemstone.gemfire.cache.execute.ResultSender;
 import com.gemstone.gemfire.cache.query.QueryInvalidException;
+import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
 import com.gemstone.gemfire.cache.query.internal.IndexUpdater;
 import com.gemstone.gemfire.cache.query.internal.QCompiler;
 import com.gemstone.gemfire.cache.query.internal.index.IndexCreationData;
@@ -2058,13 +2059,13 @@ public class PartitionedRegionDataStore implements HasCachePerfStats
       ForceReattemptException, PRLocallyDestroyedException
   {
 	  return getLocally(bucketId, key,aCallbackArgument, disableCopyOnRead, preferCD, requestingClient, 
-			  clientEvent, returnTombstones, false);
+			  clientEvent, returnTombstones, false, false);
   }
   /**
    * Returns value corresponding to this key.
    * @param key
    *          the key to look for
-   * @param preferCD
+   * @param preferCD 
    * @param requestingClient the client making the request, or null
    * @param clientEvent client's event (for returning version tag)
    * @param returnTombstones whether tombstones should be returned
@@ -2075,28 +2076,21 @@ public class PartitionedRegionDataStore implements HasCachePerfStats
    * @throws PrimaryBucketException if the locally managed bucket is not primary
    * @throws PRLocallyDestroyedException if the PartitionRegion is locally destroyed
    */
-  public Object getLocally(int bucketId,
-                           final Object key,
-                           final Object aCallbackArgument,
-                           boolean disableCopyOnRead,
-                           boolean preferCD,
-                           ClientProxyMembershipID requestingClient,
-                           EntryEventImpl clientEvent,
-                           boolean returnTombstones,
-                           boolean opScopeIsLocal) throws PrimaryBucketException,
+  public Object getLocally(int bucketId, final Object key,
+      final Object aCallbackArgument, boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, 
+      boolean returnTombstones, boolean opScopeIsLocal, boolean allowReadFromHDFS) throws PrimaryBucketException,
       ForceReattemptException, PRLocallyDestroyedException
   {
     final BucketRegion bucketRegion = getInitializedBucketForId(key, Integer.valueOf(bucketId));
     //  check for primary (when a loader is present) done deeper in the BucketRegion
     Object ret=null;
     if (logger.isDebugEnabled()) {
-      logger.debug("getLocally:  key {}) bucketId={}{}{} region {} returnTombstones {} ", key,
-          this.partitionedRegion.getPRId(), PartitionedRegion.BUCKET_ID_SEPARATOR, bucketId, bucketRegion.getName(), returnTombstones);
+      logger.debug("getLocally:  key {}) bucketId={}{}{} region {} returnTombstones {} allowReadFromHDFS {}", key,
+          this.partitionedRegion.getPRId(), PartitionedRegion.BUCKET_ID_SEPARATOR, bucketId, bucketRegion.getName(), returnTombstones, allowReadFromHDFS);
     }
     invokeBucketReadHook();
     try {
-      ret = bucketRegion.get(key, aCallbackArgument, true, disableCopyOnRead , preferCD, requestingClient, clientEvent, returnTombstones, opScopeIsLocal,
-        false);
+      ret = bucketRegion.get(key, aCallbackArgument, true, disableCopyOnRead , preferCD, requestingClient, clientEvent, returnTombstones, opScopeIsLocal, allowReadFromHDFS, false);
       checkIfBucketMoved(bucketRegion);
     }
     catch (RegionDestroyedException rde) {
@@ -2128,11 +2122,7 @@ public class PartitionedRegionDataStore implements HasCachePerfStats
    * @throws PrimaryBucketException if the locally managed bucket is not primary
    * @see #getLocally(int, Object, Object, boolean, boolean, ClientProxyMembershipID, EntryEventImpl, boolean)
    */
-  public RawValue getSerializedLocally(KeyInfo keyInfo,
-                                       boolean doNotLockEntry,
-                                       ClientProxyMembershipID requestingClient,
-                                       EntryEventImpl clientEvent,
-                                       boolean returnTombstones) throws PrimaryBucketException,
+  public RawValue getSerializedLocally(KeyInfo keyInfo, boolean doNotLockEntry, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) throws PrimaryBucketException,
       ForceReattemptException {
     final BucketRegion bucketRegion = getInitializedBucketForId(keyInfo.getKey(), keyInfo.getBucketId());
     //  check for primary (when loader is present) done deeper in the BucketRegion
@@ -2143,7 +2133,7 @@ public class PartitionedRegionDataStore implements HasCachePerfStats
     invokeBucketReadHook();
 
     try {
-      RawValue result = bucketRegion.getSerialized(keyInfo, true, doNotLockEntry, requestingClient, clientEvent, returnTombstones);
+      RawValue result = bucketRegion.getSerialized(keyInfo, true, doNotLockEntry, requestingClient, clientEvent, returnTombstones, allowReadFromHDFS);
       checkIfBucketMoved(bucketRegion);
       return result;
     } catch (RegionDestroyedException rde) {
@@ -2167,7 +2157,7 @@ public class PartitionedRegionDataStore implements HasCachePerfStats
    * @param access
    *          true if caller wants last accessed time updated
    * @param allowTombstones whether a tombstoned entry can be returned
-   *
+   * 
    * @throws ForceReattemptException
    *           if bucket region is not present in this process
    * @return a RegionEntry for the given key, which will be null if the key is
@@ -2178,7 +2168,7 @@ public class PartitionedRegionDataStore implements HasCachePerfStats
    *           if the PartitionRegion is locally destroyed
    */
   public EntrySnapshot getEntryLocally(int bucketId, final Object key,
-                                       boolean access, boolean allowTombstones)
+      boolean access, boolean allowTombstones, boolean allowReadFromHDFS)
       throws EntryNotFoundException, PrimaryBucketException,
       ForceReattemptException, PRLocallyDestroyedException
   {
@@ -2191,7 +2181,12 @@ public class PartitionedRegionDataStore implements HasCachePerfStats
     EntrySnapshot res = null;
     RegionEntry ent = null;
     try {
-      ent = bucketRegion.entries.getEntry(key);
+      if (allowReadFromHDFS) {
+        ent = bucketRegion.entries.getEntry(key);
+      }
+      else {
+        ent = bucketRegion.entries.getOperationalEntryInVM(key);
+      }
 
       if (ent == null) {
         this.getPartitionedRegion().checkReadiness();
@@ -2301,8 +2296,14 @@ public class PartitionedRegionDataStore implements HasCachePerfStats
     try{
       if (r != null) {
         Set keys = r.keySet(allowTombstones);
+        if (getPartitionedRegion().isHDFSReadWriteRegion()) {
+          // hdfs regions can't copy all keys into memory
+          ret = keys;
+
+        } else {
         // A copy is made so that the bucket is free to move
         ret = new HashSet(r.keySet(allowTombstones));
+        }
         checkIfBucketMoved(r);
       }
     }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDataView.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDataView.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDataView.java
index de1f7d8..f083268 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDataView.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDataView.java
@@ -65,19 +65,12 @@ public class PartitionedRegionDataView extends LocalRegionDataView {
   }
 
   @Override
-  public Object findObject(KeyInfo key,
-                           LocalRegion r,
-                           boolean isCreate,
-                           boolean generateCallbacks,
-                           Object value,
-                           boolean disableCopyOnRead,
-                           boolean preferCD,
-                           ClientProxyMembershipID requestingClient,
-                           EntryEventImpl clientEvent,
-                           boolean returnTombstones) {
+  public Object findObject(KeyInfo key, LocalRegion r, boolean isCreate,
+      boolean generateCallbacks, Object value, boolean disableCopyOnRead,
+      boolean preferCD, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) {
     TXStateProxy tx = r.cache.getTXMgr().internalSuspend();
     try {
-      return r.findObjectInSystem(key, isCreate, tx, generateCallbacks, value, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones);
+      return r.findObjectInSystem(key, isCreate, tx, generateCallbacks, value, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, allowReadFromHDFS);
     } finally {
       r.cache.getTXMgr().resume(tx);
     }
@@ -89,14 +82,10 @@ public class PartitionedRegionDataView extends LocalRegionDataView {
     return pr.nonTXContainsKey(keyInfo);
   }
   @Override
-  public Object getSerializedValue(LocalRegion localRegion,
-                                   KeyInfo keyInfo,
-                                   boolean doNotLockEntry,
-                                   ClientProxyMembershipID requestingClient,
-                                   EntryEventImpl clientEvent,
-                                   boolean returnTombstones) throws DataLocationException {
+  public Object getSerializedValue(LocalRegion localRegion, KeyInfo keyInfo, boolean doNotLockEntry, ClientProxyMembershipID requestingClient,
+  EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) throws DataLocationException {
     PartitionedRegion pr = (PartitionedRegion)localRegion;
-    return pr.getDataStore().getSerializedLocally(keyInfo, doNotLockEntry, requestingClient, clientEvent, returnTombstones);
+    return pr.getDataStore().getSerializedLocally(keyInfo, doNotLockEntry, requestingClient, clientEvent, returnTombstones, allowReadFromHDFS);
   }
   @Override
   public boolean putEntryOnRemote(EntryEventImpl event, boolean ifNew,
@@ -129,7 +118,7 @@ public class PartitionedRegionDataView extends LocalRegionDataView {
       boolean allowTombstones) throws DataLocationException {
     PartitionedRegion pr = (PartitionedRegion)localRegion;
     return pr.getDataStore().getEntryLocally(keyInfo.getBucketId(),
-        keyInfo.getKey(), false, allowTombstones);
+        keyInfo.getKey(), false, allowTombstones, true);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHelper.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHelper.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHelper.java
index 6ce783a..a3ed32a 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHelper.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHelper.java
@@ -122,6 +122,8 @@ public class PartitionedRegionHelper
     Set policies = new HashSet();
     policies.add(DEFAULT_DATA_POLICY);
     policies.add(DataPolicy.PERSISTENT_PARTITION);
+    policies.add(DataPolicy.HDFS_PARTITION);
+    policies.add(DataPolicy.HDFS_PERSISTENT_PARTITION);
 //    policies.add(DataPolicy.NORMAL);
     ALLOWED_DATA_POLICIES = Collections.unmodifiableSet(policies);
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ProxyRegionMap.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ProxyRegionMap.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ProxyRegionMap.java
index 74c134b..f0a6543 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ProxyRegionMap.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ProxyRegionMap.java
@@ -626,6 +626,27 @@ final class ProxyRegionMap implements RegionMap {
     }
 
     @Override
+    public boolean isMarkedForEviction() {
+      throw new UnsupportedOperationException(LocalizedStrings
+          .ProxyRegionMap_NO_ENTRY_SUPPORT_ON_REGIONS_WITH_DATAPOLICY_0
+              .toLocalizedString(DataPolicy.EMPTY));
+    }
+
+    @Override
+    public void setMarkedForEviction() {
+      throw new UnsupportedOperationException(LocalizedStrings
+          .ProxyRegionMap_NO_ENTRY_SUPPORT_ON_REGIONS_WITH_DATAPOLICY_0
+              .toLocalizedString(DataPolicy.EMPTY));
+    }
+
+    @Override
+    public void clearMarkedForEviction() {
+      throw new UnsupportedOperationException(LocalizedStrings
+          .ProxyRegionMap_NO_ENTRY_SUPPORT_ON_REGIONS_WITH_DATAPOLICY_0
+              .toLocalizedString(DataPolicy.EMPTY));
+    }
+
+    @Override
     public boolean isValueNull() {
       throw new UnsupportedOperationException(LocalizedStrings.ProxyRegionMap_NO_ENTRY_SUPPORT_ON_REGIONS_WITH_DATAPOLICY_0.toLocalizedString(DataPolicy.EMPTY));
     }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionEntry.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionEntry.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionEntry.java
index bedbf81..5838ead 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionEntry.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionEntry.java
@@ -35,6 +35,7 @@ import com.gemstone.gemfire.internal.offheap.StoredObject;
 import com.gemstone.gemfire.internal.offheap.annotations.Released;
 import com.gemstone.gemfire.internal.offheap.annotations.Retained;
 import com.gemstone.gemfire.internal.offheap.annotations.Unretained;
+import com.gemstone.gemfire.cache.EvictionCriteria;
 
 /**
  * Internal interface for a region entry.
@@ -414,6 +415,25 @@ public interface RegionEntry {
   public void setUpdateInProgress(final boolean underUpdate);
 
   /**
+   * Returns true if this entry has been marked for eviction by custom eviction
+   * via {@link EvictionCriteria}.
+   */
+  public boolean isMarkedForEviction();
+
+  /**
+   * Marks this entry for eviction by custom eviction via
+   * {@link EvictionCriteria}.
+   */
+  public void setMarkedForEviction();
+
+  /**
+   * Clears the eviction mark on this entry, either after custom eviction via
+   * {@link EvictionCriteria} has acted on it or when an update is done after
+   * it was marked for eviction.
+   */
+  public void clearMarkedForEviction();
+
+  /**
    * Event containing this RegionEntry is being passed through
    * dispatchListenerEvent for CacheListeners under RegionEntry lock. This is
    * used during deserialization for a VMCacheSerializable value contained by
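
As a rough illustration of the contract added above, the sketch below tracks the custom-eviction mark with a single volatile flag. The class and field names are hypothetical and not part of this patch; concrete RegionEntry implementations typically fold such state into existing bit flags.

  // Hypothetical sketch of the marked-for-eviction contract; not patch code.
  abstract class MarkedForEvictionSketch implements RegionEntry {
    private volatile boolean markedForEviction;

    @Override
    public boolean isMarkedForEviction() {
      return this.markedForEviction;
    }

    @Override
    public void setMarkedForEviction() {
      this.markedForEviction = true;
    }

    @Override
    public void clearMarkedForEviction() {
      // Cleared by custom eviction via EvictionCriteria, or when an update
      // arrives after the entry was marked.
      this.markedForEviction = false;
    }
  }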

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionMapFactory.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionMapFactory.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionMapFactory.java
index 7a97408..2a7f0c4 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionMapFactory.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionMapFactory.java
@@ -39,6 +39,12 @@ class RegionMapFactory {
     //.getDataPolicy().withPartitioning());
     if (owner.isProxy() /*|| owner instanceof PartitionedRegion*/) { // TODO enabling this causes eviction tests to fail
       return new ProxyRegionMap(owner, attrs, internalRegionArgs);
+    } else if (internalRegionArgs.isReadWriteHDFSRegion()) {
+      if (owner.getEvictionController() == null) {
+        return new HDFSRegionMapImpl(owner, attrs, internalRegionArgs);
+      }
+      return new HDFSLRURegionMap(owner, attrs, internalRegionArgs);
+    //else if (owner.getEvictionController() != null && isNotPartitionedRegion) {
     } else if (owner.getEvictionController() != null ) {
       return new VMLRURegionMap(owner, attrs,internalRegionArgs);
     } else {
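
Read as a decision list, the hunk above selects the region map roughly as sketched below. The helper method is hypothetical and only summarizes the branch order, with the final branch standing in for whatever default map the factory returns.

  // Hypothetical summary of the dispatch order above; returns a label only.
  static String regionMapKindFor(boolean isProxy, boolean isReadWriteHDFSRegion,
      boolean hasEvictionController) {
    if (isProxy) {
      return "ProxyRegionMap";
    } else if (isReadWriteHDFSRegion) {
      // HDFS read/write regions get an HDFS-aware map; the LRU variant is used
      // only when an eviction controller is configured.
      return hasEvictionController ? "HDFSLRURegionMap" : "HDFSRegionMapImpl";
    } else if (hasEvictionController) {
      return "VMLRURegionMap";
    } else {
      return "default RegionMap";
    }
  }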

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RemoteGetMessage.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RemoteGetMessage.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RemoteGetMessage.java
index b565a2c..c754339 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RemoteGetMessage.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/RemoteGetMessage.java
@@ -122,7 +122,7 @@ public final class RemoteGetMessage extends RemoteOperationMessageWithDirectRepl
           ((KeyWithRegionContext)this.key).setRegionContext(r);
         }
         KeyInfo keyInfo = r.getKeyInfo(key, cbArg);
-        val = r.getDataView().getSerializedValue(r, keyInfo, false, this.context, null, false /*for replicate regions*/);
+        val = r.getDataView().getSerializedValue(r, keyInfo, false, this.context, null, false, false/*for replicate regions*/);
         valueBytes = val instanceof RawValue ? (RawValue)val : new RawValue(val);
 
         if (logger.isTraceEnabled(LogMarker.DM)) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXEntry.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXEntry.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXEntry.java
index 2906ff6..983f928 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXEntry.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXEntry.java
@@ -113,8 +113,7 @@ public class TXEntry implements Region.Entry
   {
     checkTX();
 //    Object value = this.localRegion.getDeserialized(this.key, false, this.myTX, this.rememberReads);
-    @Unretained Object value = this.myTX.getDeserializedValue(keyInfo, this.localRegion, false, false, false, null, false,
-      false);
+    @Unretained Object value = this.myTX.getDeserializedValue(keyInfo, this.localRegion, false, false, false, null, false, false, false);
     if (value == null) {
       throw new EntryDestroyedException(this.keyInfo.getKey().toString());
     }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXState.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXState.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXState.java
index 617873c..a67d3cc 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXState.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXState.java
@@ -1407,14 +1407,7 @@ public class TXState implements TXStateInterface {
   /* (non-Javadoc)
    * @see com.gemstone.gemfire.internal.cache.TXStateInterface#getDeserializedValue(java.lang.Object, com.gemstone.gemfire.internal.cache.LocalRegion, boolean)
    */
-  public Object getDeserializedValue(KeyInfo keyInfo,
-                                     LocalRegion localRegion,
-                                     boolean updateStats,
-                                     boolean disableCopyOnRead,
-                                     boolean preferCD,
-                                     EntryEventImpl clientEvent,
-                                     boolean returnTombstones,
-                                     boolean retainResult) {
+  public Object getDeserializedValue(KeyInfo keyInfo, LocalRegion localRegion, boolean updateStats, boolean disableCopyOnRead, boolean preferCD, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS, boolean retainResult) {
     TXEntryState tx = txReadEntry(keyInfo, localRegion, true, true/*create txEntry is absent*/);
     if (tx != null) {
       Object v = tx.getValue(keyInfo, localRegion, preferCD);
@@ -1423,8 +1416,7 @@ public class TXState implements TXStateInterface {
       }
       return v;
     } else {
-      return localRegion.getDeserializedValue(null, keyInfo, updateStats, disableCopyOnRead, preferCD, clientEvent, returnTombstones,
-        retainResult);
+      return localRegion.getDeserializedValue(null, keyInfo, updateStats, disableCopyOnRead, preferCD, clientEvent, returnTombstones, allowReadFromHDFS, retainResult);
     }
   }
 
@@ -1433,19 +1425,15 @@ public class TXState implements TXStateInterface {
    * @see com.gemstone.gemfire.internal.cache.InternalDataView#getSerializedValue(com.gemstone.gemfire.internal.cache.LocalRegion, java.lang.Object, java.lang.Object)
    */
   @Retained
-  public Object getSerializedValue(LocalRegion localRegion,
-                                   KeyInfo keyInfo,
-                                   boolean doNotLockEntry,
-                                   ClientProxyMembershipID requestingClient,
-                                   EntryEventImpl clientEvent,
-                                   boolean returnTombstones) throws DataLocationException {
+  public Object getSerializedValue(LocalRegion localRegion, KeyInfo keyInfo, boolean doNotLockEntry, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, 
+      boolean returnTombstones, boolean allowReadFromHDFS) throws DataLocationException {
     final Object key = keyInfo.getKey();
     TXEntryState tx = txReadEntry(keyInfo, localRegion, true,true/*create txEntry is absent*/);
     if (tx != null) {
       Object val = tx.getPendingValue();
       if(val==null || Token.isInvalidOrRemoved(val)) {
         val = findObject(keyInfo,localRegion, val!=Token.INVALID,
-            true, val, false, false, requestingClient, clientEvent, false);
+            true, val, false, false, requestingClient, clientEvent, false, allowReadFromHDFS);
       }
       return val;
     } else {
@@ -1453,7 +1441,7 @@ public class TXState implements TXStateInterface {
       // so we should never come here
       assert localRegion instanceof PartitionedRegion;
       PartitionedRegion pr = (PartitionedRegion)localRegion;
-      return pr.getDataStore().getSerializedLocally(keyInfo, doNotLockEntry, null, null, returnTombstones);
+      return pr.getDataStore().getSerializedLocally(keyInfo, doNotLockEntry, null, null, returnTombstones, allowReadFromHDFS);
     }
   }
 
@@ -1531,17 +1519,9 @@ public class TXState implements TXStateInterface {
   /* (non-Javadoc)
    * @see com.gemstone.gemfire.internal.cache.TXStateInterface#findObject(com.gemstone.gemfire.internal.cache.LocalRegion, java.lang.Object, java.lang.Object, boolean, boolean, java.lang.Object)
    */
-  public Object findObject(KeyInfo key,
-                           LocalRegion r,
-                           boolean isCreate,
-                           boolean generateCallbacks,
-                           Object value,
-                           boolean disableCopyOnRead,
-                           boolean preferCD,
-                           ClientProxyMembershipID requestingClient,
-                           EntryEventImpl clientEvent,
-                           boolean returnTombstones) {
-    return r.findObjectInSystem(key, isCreate, this, generateCallbacks, value, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones);
+  public Object findObject(KeyInfo key, LocalRegion r, boolean isCreate,
+      boolean generateCallbacks, Object value, boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) {
+    return r.findObjectInSystem(key, isCreate, this, generateCallbacks, value, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, allowReadFromHDFS);
   }
 
   private boolean readEntryAndCheckIfDestroyed(KeyInfo keyInfo, LocalRegion localRegion,

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateInterface.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateInterface.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateInterface.java
index 3fa9351..5da20d8 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateInterface.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateInterface.java
@@ -123,14 +123,8 @@ public interface TXStateInterface extends Synchronization, InternalDataView {
    * @param localRegion
    * @param updateStats TODO
    */
-  public Object getDeserializedValue(KeyInfo keyInfo,
-                                     LocalRegion localRegion,
-                                     boolean updateStats,
-                                     boolean disableCopyOnRead,
-                                     boolean preferCD,
-                                     EntryEventImpl clientEvent,
-                                     boolean returnTombstones,
-                                     boolean retainResult);
+  public Object getDeserializedValue(KeyInfo keyInfo, LocalRegion localRegion,
+      boolean updateStats, boolean disableCopyOnRead, boolean preferCD, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadsFromHDFS, boolean retainResult);
 
   public TXEvent getEvent();
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateProxyImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateProxyImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateProxyImpl.java
index 0939ab0..e66302e 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateProxyImpl.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateProxyImpl.java
@@ -341,16 +341,9 @@ public class TXStateProxyImpl implements TXStateProxy {
   /* (non-Javadoc)
    * @see com.gemstone.gemfire.internal.cache.TXStateInterface#getDeserializedValue(java.lang.Object, com.gemstone.gemfire.internal.cache.LocalRegion, boolean)
    */
-  public Object getDeserializedValue(KeyInfo keyInfo,
-                                     LocalRegion localRegion,
-                                     boolean updateStats,
-                                     boolean disableCopyOnRead,
-                                     boolean preferCD,
-                                     EntryEventImpl clientEvent,
-                                     boolean returnTombstones,
-                                     boolean retainResult) {
-    Object val = getRealDeal(keyInfo, localRegion).getDeserializedValue(keyInfo, localRegion, updateStats, disableCopyOnRead, preferCD, null, false,
-      retainResult);
+  public Object getDeserializedValue(KeyInfo keyInfo, LocalRegion localRegion,
+      boolean updateStats, boolean disableCopyOnRead, boolean preferCD, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS, boolean retainResult) {
+    Object val = getRealDeal(keyInfo, localRegion).getDeserializedValue(keyInfo, localRegion, updateStats, disableCopyOnRead, preferCD, null, false, allowReadFromHDFS, retainResult);
     if (val != null) {
       // fixes bug 51057: TXStateStub  on client always returns null, so do not increment
       // the operation count it will be incremented in findObject()
@@ -606,13 +599,13 @@ public class TXStateProxyImpl implements TXStateProxy {
    * @see com.gemstone.gemfire.internal.cache.InternalDataView#findObject(com.gemstone.gemfire.internal.cache.LocalRegion, java.lang.Object, java.lang.Object, boolean, boolean, java.lang.Object)
    */
   public Object findObject(KeyInfo key, LocalRegion r, boolean isCreate,
-                           boolean generateCallbacks, Object value, boolean disableCopyOnRead,
-                           boolean preferCD, ClientProxyMembershipID requestingClient,
-                           EntryEventImpl clientEvent, boolean returnTombstones) {
+      boolean generateCallbacks, Object value, boolean disableCopyOnRead,
+      boolean preferCD, ClientProxyMembershipID requestingClient,
+      EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) {
     try {
       this.operationCount++;
       Object retVal = getRealDeal(key, r).findObject(key, r, isCreate, generateCallbacks,
-          value, disableCopyOnRead, preferCD, requestingClient, clientEvent, false);
+          value, disableCopyOnRead, preferCD, requestingClient, clientEvent, false, allowReadFromHDFS);
       trackBucketForTx(key);
       return retVal;
     } catch (TransactionDataRebalancedException | PrimaryBucketException re) {
@@ -727,14 +720,9 @@ public class TXStateProxyImpl implements TXStateProxy {
    * (non-Javadoc)
    * @see com.gemstone.gemfire.internal.cache.InternalDataView#getSerializedValue(com.gemstone.gemfire.internal.cache.LocalRegion, java.lang.Object, java.lang.Object)
    */
-  public Object getSerializedValue(LocalRegion localRegion,
-                                   KeyInfo key,
-                                   boolean doNotLockEntry,
-                                   ClientProxyMembershipID requestingClient,
-                                   EntryEventImpl clientEvent,
-                                   boolean returnTombstones) throws DataLocationException {
+  public Object getSerializedValue(LocalRegion localRegion, KeyInfo key, boolean doNotLockEntry, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) throws DataLocationException {
     this.operationCount++;
-    return getRealDeal(key, localRegion).getSerializedValue(localRegion, key, doNotLockEntry, requestingClient, clientEvent, returnTombstones);
+    return getRealDeal(key, localRegion).getSerializedValue(localRegion, key, doNotLockEntry, requestingClient, clientEvent, returnTombstones, allowReadFromHDFS);
   }
 
   /* (non-Javadoc)

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateStub.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateStub.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateStub.java
index 0b226e0..ac35425 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateStub.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/TXStateStub.java
@@ -184,14 +184,8 @@ public abstract class TXStateStub implements TXStateInterface {
   /* (non-Javadoc)
    * @see com.gemstone.gemfire.internal.cache.TXStateInterface#getDeserializedValue(java.lang.Object, com.gemstone.gemfire.internal.cache.LocalRegion, boolean)
    */
-  public Object getDeserializedValue(KeyInfo keyInfo,
-                                     LocalRegion localRegion,
-                                     boolean updateStats,
-                                     boolean disableCopyOnRead,
-                                     boolean preferCD,
-                                     EntryEventImpl clientEvent,
-                                     boolean returnTombstones,
-                                     boolean retainResult) {
+  public Object getDeserializedValue(KeyInfo keyInfo, LocalRegion localRegion,
+      boolean updateStats, boolean disableCopyOnRead, boolean preferCD, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS, boolean retainResult) {
     // We never have a local value if we are a stub...
     return null;
   }
@@ -379,17 +373,10 @@ public abstract class TXStateStub implements TXStateInterface {
   /* (non-Javadoc)
    * @see com.gemstone.gemfire.internal.cache.InternalDataView#findObject(com.gemstone.gemfire.internal.cache.LocalRegion, java.lang.Object, java.lang.Object, boolean, boolean, java.lang.Object)
    */
-  public Object findObject(KeyInfo keyInfo,
-                           LocalRegion r,
-                           boolean isCreate,
-                           boolean generateCallbacks,
-                           Object value,
-                           boolean disableCopyOnRead,
-                           boolean preferCD,
-                           ClientProxyMembershipID requestingClient,
-                           EntryEventImpl clientEvent,
-                           boolean returnTombstones) {
-    return getTXRegionStub(r).findObject(keyInfo,isCreate,generateCallbacks,value, preferCD, requestingClient, clientEvent);
+  public Object findObject(KeyInfo keyInfo, LocalRegion r, boolean isCreate,
+      boolean generateCallbacks, Object value, boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient,
+      EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) {
+    return getTXRegionStub(r).findObject(keyInfo,isCreate,generateCallbacks,value, preferCD, requestingClient, clientEvent, allowReadFromHDFS);
   }
 
   /* (non-Javadoc)
@@ -445,12 +432,7 @@ public abstract class TXStateStub implements TXStateInterface {
    * (non-Javadoc)
    * @see com.gemstone.gemfire.internal.cache.InternalDataView#getSerializedValue(com.gemstone.gemfire.internal.cache.LocalRegion, java.lang.Object, java.lang.Object)
    */
-  public Object getSerializedValue(LocalRegion localRegion,
-                                   KeyInfo key,
-                                   boolean doNotLockEntry,
-                                   ClientProxyMembershipID requestingClient,
-                                   EntryEventImpl clientEvent,
-                                   boolean returnTombstones) {
+  public Object getSerializedValue(LocalRegion localRegion, KeyInfo key, boolean doNotLockEntry, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) {
     throw new UnsupportedOperationException();
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/UserSpecifiedRegionAttributes.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/UserSpecifiedRegionAttributes.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/UserSpecifiedRegionAttributes.java
index 269f891..a17650c 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/UserSpecifiedRegionAttributes.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/UserSpecifiedRegionAttributes.java
@@ -114,6 +114,10 @@ public abstract class UserSpecifiedRegionAttributes<K,V> implements RegionAttrib
    */
   private boolean hasCloningEnabled = false;
   
+  private boolean hasHDFSStoreName = false;
+  
+  private boolean hasHDFSWriteOnly = false;
+  
 /**
    * Whether this region has entry value compression.
    * 
@@ -522,7 +526,7 @@ public abstract class UserSpecifiedRegionAttributes<K,V> implements RegionAttrib
   {
     this.hasDiskSynchronous = val;
   }
-  private static final int HAS_COUNT = 41;
+  private static final int HAS_COUNT = 43;
   
   public void initHasFields(UserSpecifiedRegionAttributes<K,V> other)
   {
@@ -598,4 +602,22 @@ public abstract class UserSpecifiedRegionAttributes<K,V> implements RegionAttrib
   public List getIndexes() {
     return this.indexes;
   }
+
+  public boolean hasHDFSStoreName()
+  {
+    return this.hasHDFSStoreName;
+  }
+  public void setHasHDFSStoreName(boolean val)
+  {
+    this.hasHDFSStoreName = val;
+  }
+  
+  public void setHasHDFSWriteOnly(boolean val)
+  {
+    this.hasHDFSWriteOnly = val;
+  }
+  public boolean hasHDFSWriteOnly()
+  {
+    return this.hasHDFSWriteOnly;
+  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ValidatingDiskRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ValidatingDiskRegion.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ValidatingDiskRegion.java
index 54133cc..f587e39 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ValidatingDiskRegion.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ValidatingDiskRegion.java
@@ -408,6 +408,19 @@ public class ValidatingDiskRegion extends DiskRegion implements DiskRecoveryStor
       // TODO Auto-generated method stub
     }
     @Override
+    public boolean isMarkedForEviction() {
+      // TODO Auto-generated method stub
+      return false;
+    }
+    @Override
+    public void setMarkedForEviction() {
+      // TODO Auto-generated method stub
+    }
+    @Override
+    public void clearMarkedForEviction() {
+      // TODO Auto-generated method stub
+    }
+    @Override
     public boolean isInvalid() {
       // TODO Auto-generated method stub
       return false;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/FetchBulkEntriesMessage.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/FetchBulkEntriesMessage.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/FetchBulkEntriesMessage.java
index ea47e91..d3078a9 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/FetchBulkEntriesMessage.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/FetchBulkEntriesMessage.java
@@ -299,7 +299,7 @@ public final class FetchBulkEntriesMessage extends PartitionMessage
             Object key = it.next();
             VersionTagHolder clientEvent = new VersionTagHolder();
             Object value = map.get(key, null, true, true, true, null,
-                clientEvent, allowTombstones);
+                clientEvent, allowTombstones, false);
 
             if (needToWriteBucketInfo) {
               DataSerializer.writePrimitiveInt(map.getId(), mos);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/GetMessage.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/GetMessage.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/GetMessage.java
index 3fef790..d7e50f1 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/GetMessage.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/GetMessage.java
@@ -93,9 +93,11 @@ public final class GetMessage extends PartitionMessageWithDirectReply
   
   private boolean returnTombstones;
 
+  private boolean allowReadFromHDFS;
   // reuse some flags
   protected static final int HAS_LOADER = NOTIFICATION_ONLY;
   protected static final int CAN_START_TX = IF_NEW;
+  protected static final int READ_FROM_HDFS = IF_OLD;
 
   /**
    * Empty constructor to satisfy {@link DataSerializer} requirements
@@ -104,14 +106,15 @@ public final class GetMessage extends PartitionMessageWithDirectReply
   }
   
   private GetMessage(InternalDistributedMember recipient, int regionId,
-                     DirectReplyProcessor processor,
-                     final Object key, final Object aCallbackArgument, ClientProxyMembershipID context,
-                     boolean returnTombstones) {
+      DirectReplyProcessor processor,
+      final Object key, final Object aCallbackArgument, ClientProxyMembershipID context,
+      boolean returnTombstones, boolean allowReadFromHDFS) {
     super(recipient, regionId, processor);
     this.key = key;
     this.cbArg = aCallbackArgument;
     this.context = context;
     this.returnTombstones = returnTombstones;
+    this.allowReadFromHDFS = allowReadFromHDFS;
   }
 
   private static final boolean ORDER_PR_GETS = Boolean.getBoolean("gemfire.order-pr-gets");
@@ -188,7 +191,7 @@ public final class GetMessage extends PartitionMessageWithDirectReply
         KeyInfo keyInfo = r.getKeyInfo(key, cbArg);
         boolean lockEntry = forceUseOfPRExecutor || isDirectAck();
         
-        val = r.getDataView().getSerializedValue(r, keyInfo, !lockEntry, this.context, event, returnTombstones);
+        val = r.getDataView().getSerializedValue(r, keyInfo, !lockEntry, this.context, event, returnTombstones, allowReadFromHDFS);
         
         if(val == BucketRegion.REQUIRES_ENTRY_LOCK) {
           Assert.assertTrue(!lockEntry);
@@ -269,12 +272,14 @@ public final class GetMessage extends PartitionMessageWithDirectReply
   @Override
   protected short computeCompressedShort(short s) {
     s = super.computeCompressedShort(s);
+    if (this.allowReadFromHDFS) s |= READ_FROM_HDFS;
     return s;
   }
 
   @Override
   protected void setBooleans(short s, DataInput in) throws ClassNotFoundException, IOException {
     super.setBooleans(s, in);
+    if ((s & READ_FROM_HDFS) != 0) this.allowReadFromHDFS = true;
   }
 
   public void setKey(Object key)
@@ -298,18 +303,15 @@ public final class GetMessage extends PartitionMessageWithDirectReply
    * @throws ForceReattemptException if the peer is no longer available
    */
   public static GetResponse send(InternalDistributedMember recipient,
-                                 PartitionedRegion r,
-                                 final Object key,
-                                 final Object aCallbackArgument,
-                                 ClientProxyMembershipID requestingClient,
-                                 boolean returnTombstones)
+      PartitionedRegion r, final Object key, final Object aCallbackArgument,
+      ClientProxyMembershipID requestingClient, boolean returnTombstones, boolean allowReadFromHDFS)
       throws ForceReattemptException
   {
     Assert.assertTrue(recipient != null,
         "PRDistribuedGetReplyMessage NULL reply message");
     GetResponse p = new GetResponse(r.getSystem(), Collections.singleton(recipient), key);
     GetMessage m = new GetMessage(recipient, r.getPRId(), p,
-        key, aCallbackArgument, requestingClient, returnTombstones);
+        key, aCallbackArgument, requestingClient, returnTombstones, allowReadFromHDFS);
     Set failures = r.getDistributionManager().putOutgoing(m);
     if (failures != null && failures.size() > 0) {
       throw new ForceReattemptException(LocalizedStrings.GetMessage_FAILED_SENDING_0.toLocalizedString(m));
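
The allowReadFromHDFS boolean above travels inside the message's compressed flag short, reusing the IF_OLD bit as READ_FROM_HDFS. A self-contained sketch of that round trip follows; the constant value is illustrative only, not the real bit assigned to IF_OLD.

  // Illustrative pack/unpack of a boolean carried in a compressed flag short.
  class FlagRoundTripSketch {
    static final short READ_FROM_HDFS = 0x20;  // stand-in for the reused IF_OLD bit

    static short pack(short s, boolean allowReadFromHDFS) {
      if (allowReadFromHDFS) s |= READ_FROM_HDFS;   // computeCompressedShort side
      return s;
    }

    static boolean unpack(short s) {
      return (s & READ_FROM_HDFS) != 0;             // setBooleans side
    }
  }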

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PutAllPRMessage.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PutAllPRMessage.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PutAllPRMessage.java
index 8aaf587..a88f96f 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PutAllPRMessage.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PutAllPRMessage.java
@@ -101,8 +101,9 @@ public final class PutAllPRMessage extends PartitionMessageWithDirectReply
 
   protected static final short HAS_BRIDGE_CONTEXT = UNRESERVED_FLAGS_START;
   protected static final short SKIP_CALLBACKS = (HAS_BRIDGE_CONTEXT << 1);
+  protected static final short FETCH_FROM_HDFS = (SKIP_CALLBACKS << 1);
   //using the left most bit for IS_PUT_DML, the last available bit
-  protected static final short IS_PUT_DML = (short) (SKIP_CALLBACKS << 1);
+  protected static final short IS_PUT_DML = (short) (FETCH_FROM_HDFS << 1);
 
   private transient InternalDistributedSystem internalDs;
 
@@ -117,6 +118,9 @@ public final class PutAllPRMessage extends PartitionMessageWithDirectReply
   
   transient VersionedObjectList versions = null;
 
+  /** whether this operation should fetch oldValue from HDFS */
+  private boolean fetchFromHDFS;
+  
   private boolean isPutDML;
   /**
    * Empty constructor to satisfy {@link DataSerializer}requirements
@@ -125,7 +129,7 @@ public final class PutAllPRMessage extends PartitionMessageWithDirectReply
   }
 
   public PutAllPRMessage(int bucketId, int size, boolean notificationOnly,
-      boolean posDup, boolean skipCallbacks, Object callbackArg, boolean isPutDML) {
+      boolean posDup, boolean skipCallbacks, Object callbackArg, boolean fetchFromHDFS, boolean isPutDML) {
     this.bucketId = Integer.valueOf(bucketId);
     putAllPRData = new PutAllEntryData[size];
     this.notificationOnly = notificationOnly;
@@ -133,7 +137,8 @@ public final class PutAllPRMessage extends PartitionMessageWithDirectReply
     this.skipCallbacks = skipCallbacks;
     this.callbackArg = callbackArg;
     initTxMemberId();
-    this.isPutDML = isPutDML;
+    this.fetchFromHDFS = fetchFromHDFS;
+    this.isPutDML = isPutDML; 
   }
 
   public void addEntry(PutAllEntryData entry) {
@@ -302,6 +307,7 @@ public final class PutAllPRMessage extends PartitionMessageWithDirectReply
     s = super.computeCompressedShort(s);
     if (this.bridgeContext != null) s |= HAS_BRIDGE_CONTEXT;
     if (this.skipCallbacks) s |= SKIP_CALLBACKS;
+    if (this.fetchFromHDFS) s |= FETCH_FROM_HDFS;
     if (this.isPutDML) s |= IS_PUT_DML;
     return s;
   }
@@ -311,6 +317,7 @@ public final class PutAllPRMessage extends PartitionMessageWithDirectReply
       ClassNotFoundException {
     super.setBooleans(s, in);
     this.skipCallbacks = ((s & SKIP_CALLBACKS) != 0);
+    this.fetchFromHDFS = ((s & FETCH_FROM_HDFS) != 0);
     this.isPutDML = ((s & IS_PUT_DML) != 0);
   }
 
@@ -488,6 +495,9 @@ public final class PutAllPRMessage extends PartitionMessageWithDirectReply
 
             ev.setPutAllOperation(dpao);
 
+            // set the fetchFromHDFS flag
+            ev.setFetchFromHDFS(this.fetchFromHDFS);
+            
             // make sure a local update inserts a cache de-serializable
             ev.makeSerializedNewValue();
             

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PutMessage.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PutMessage.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PutMessage.java
index a6a39dc..d5abaa1 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PutMessage.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PutMessage.java
@@ -182,6 +182,9 @@ public final class PutMessage extends PartitionMessageWithDirectReply implements
 
   private VersionTag versionTag;
 
+  /** whether this operation should fetch oldValue from HDFS*/
+  private transient boolean fetchFromHDFS;
+
   private transient boolean isPutDML;
   
   // additional bitmask flags used for serialization/deserialization
@@ -205,6 +208,7 @@ public final class PutMessage extends PartitionMessageWithDirectReply implements
   // masks there are taken
   // also switching the masks will impact backwards compatibility. Need to
   // verify if it is ok to break backwards compatibility
+  protected static final int FETCH_FROM_HDFS = getNextByteMask(HAS_CALLBACKARG);  
 
   /*
   private byte[] oldValBytes;
@@ -604,6 +608,9 @@ public final class PutMessage extends PartitionMessageWithDirectReply implements
       this.originalSender = (InternalDistributedMember)DataSerializer
         .readObject(in);
     }
+    if ((extraFlags & FETCH_FROM_HDFS) != 0) {
+      this.fetchFromHDFS = true;
+    }
     this.eventId = new EventID();
     InternalDataSerializer.invokeFromData(this.eventId, in);
     
@@ -690,6 +697,7 @@ public final class PutMessage extends PartitionMessageWithDirectReply implements
       extraFlags |= HAS_DELTA_WITH_FULL_VALUE;
     }
     if (this.originalSender != null) extraFlags |= HAS_ORIGINAL_SENDER;
+    if (this.event.isFetchFromHDFS()) extraFlags |= FETCH_FROM_HDFS;
     out.writeByte(extraFlags);
 
     DataSerializer.writeObject(getKey(), out);
@@ -814,6 +822,7 @@ public final class PutMessage extends PartitionMessageWithDirectReply implements
     ev.setCausedByMessage(this);
     ev.setInvokePRCallbacks(!notificationOnly);
     ev.setPossibleDuplicate(this.posDup);
+    ev.setFetchFromHDFS(this.fetchFromHDFS);
     ev.setPutDML(this.isPutDML);
     /*if (this.hasOldValue) {
       if (this.oldValueIsSerialized) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/ByteComparator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/ByteComparator.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/ByteComparator.java
new file mode 100644
index 0000000..5c199ae
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/ByteComparator.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.internal.cache.persistence.soplog;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedReader.SerializedComparator;
+
+/**
+ * Compares objects byte-by-byte.  This is fast and sufficient for cases when
+ * lexicographic ordering is not important or the serialization is order-
+ * preserving. 
+ * 
+ */
+public class ByteComparator implements SerializedComparator {
+  @Override
+  public int compare(byte[] rhs, byte[] lhs) {
+    return compare(rhs, 0, rhs.length, lhs, 0, lhs.length);
+  }
+
+  @Override
+  public int compare(byte[] r, int rOff, int rLen, byte[] l, int lOff, int lLen) {
+    return compareBytes(r, rOff, rLen, l, lOff, lLen);
+  }
+  
+  /**
+   * Compares two byte arrays element-by-element.
+   * 
+   * @param r the right array
+   * @param rOff the offset of r
+   * @param rLen the length of r to compare
+   * @param l the left array
+   * @param lOff the offset of l
+   * @param lLen the length of l to compare
+   * @return a negative value if r < l; 0 if r == l; a positive value if r > l
+   */
+  public static int compareBytes(byte[] r, int rOff, int rLen, byte[] l, int lOff, int lLen) {
+    return Bytes.compareTo(r, rOff, rLen, l, lOff, lLen);
+  }
+}
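
ByteComparator delegates to HBase's Bytes.compareTo, which compares lexicographically while treating each byte as unsigned, with the shorter array ordering first on a common prefix. A dependency-free sketch of that comparison, written here only to make the ordering explicit, is shown below.

  // Dependency-free sketch of unsigned lexicographic byte comparison.
  static int compareBytesSketch(byte[] r, int rOff, int rLen, byte[] l, int lOff, int lLen) {
    int n = Math.min(rLen, lLen);
    for (int i = 0; i < n; i++) {
      int a = r[rOff + i] & 0xff;   // treat each byte as unsigned 0..255
      int b = l[lOff + i] & 0xff;
      if (a != b) {
        return a - b;
      }
    }
    return rLen - lLen;             // equal prefix: the shorter range sorts first
  }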


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/domain/RegionAttributesInfo.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/domain/RegionAttributesInfo.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/domain/RegionAttributesInfo.java
index 5a51b62..c4588f6 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/domain/RegionAttributesInfo.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/domain/RegionAttributesInfo.java
@@ -83,7 +83,11 @@ public class RegionAttributesInfo implements Serializable{
 	private String regionIdleTimeoutAction = ExpirationAction.INVALIDATE.toString();
 	
 	private boolean offHeap;
-
+	private String hdfsStoreName;
+	private Boolean hdfsWriteOnly;
+
 	/***
 	 * Non-default-attribute map in the constructor
 	 */
@@ -179,6 +183,8 @@ public class RegionAttributesInfo implements Serializable{
 		
 		}
 		this.offHeap = ra.getOffHeap();
+		this.hdfsStoreName = ra.getHDFSStoreName();
+		this.hdfsWriteOnly = ra.getHDFSWriteOnly();
 	}
 	
 	
@@ -308,6 +314,15 @@ public class RegionAttributesInfo implements Serializable{
 	  return this.offHeap;
 	}
 	
+	public String getHdfsStoreName() {
+		return hdfsStoreName;
+	}
+
+
+	public Boolean getHdfsWriteOnly() {
+		return hdfsWriteOnly;
+	}
+	
 	@Override
 	public boolean equals(Object arg0) {
 		return super.equals(arg0);
@@ -467,6 +482,10 @@ public class RegionAttributesInfo implements Serializable{
             if (this.offHeap != RegionAttributesDefault.OFF_HEAP) {
                 nonDefaultAttributes.put(RegionAttributesNames.OFF_HEAP, Boolean.toString(this.offHeap));
              }            
+            if (this.hdfsStoreName != null ) {
+                nonDefaultAttributes.put(RegionAttributesNames.HDFSSTORE, this.hdfsStoreName);
+                nonDefaultAttributes.put(RegionAttributesNames.HDFS_WRITEONLY, Boolean.toString(this.hdfsWriteOnly));
+             }
 		}
 		return this.nonDefaultAttributes;
 	}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunction.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunction.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunction.java
new file mode 100644
index 0000000..b5b5341
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunction.java
@@ -0,0 +1,228 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.management.internal.cli.functions;
+
+import java.io.Serializable;
+
+import org.apache.logging.log4j.Logger;
+
+import com.gemstone.gemfire.SystemFailure;
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheClosedException;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.execute.FunctionAdapter;
+import com.gemstone.gemfire.cache.execute.FunctionContext;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreMutatorImpl;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.internal.InternalEntity;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.InternalCache;
+import com.gemstone.gemfire.internal.cache.xmlcache.CacheXml;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
+
+/**
+ * Function used by the 'alter hdfs-store' gfsh command to alter an HDFS store
+ * on each member.
+ * 
+ * @author Namrata Thanvi
+ */
+
+public class AlterHDFSStoreFunction extends FunctionAdapter implements InternalEntity {
+  private static final Logger logger = LogService.getLogger();
+
+  private static final String ID = AlterHDFSStoreFunction.class.getName();
+
+  private static final long serialVersionUID = 1L;
+
+  @Override
+  public void execute(FunctionContext context) {
+    String memberId = "";
+
+    try {
+      final AlterHDFSStoreAttributes alterAttributes = (AlterHDFSStoreAttributes)context.getArguments();      
+      GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
+      DistributedMember member = getDistributedMember(cache);
+
+      memberId = member.getId();
+      // If they set a name use it instead
+      if (!member.getName().equals("")) {
+        memberId = member.getName();
+      }      
+      HDFSStore hdfsStore = cache.findHDFSStore(alterAttributes.getHdfsUniqueName());      
+      CliFunctionResult result;
+      if (hdfsStore != null) {
+        // TODO - Need to verify which attributes need to be persisted in
+        // cache.xml
+        XmlEntity xmlEntity = getXMLEntity(hdfsStore.getName());
+        alterHdfsStore(hdfsStore, alterAttributes);
+        result = new CliFunctionResult(memberId, xmlEntity, "Success");
+      }
+      else {
+        result = new CliFunctionResult(memberId, false, "Hdfs store not found on this member");
+      }
+      context.getResultSender().lastResult(result);
+
+    } catch (CacheClosedException cce) {
+      CliFunctionResult result = new CliFunctionResult(memberId, false, null);
+      context.getResultSender().lastResult(result);
+
+    } catch (VirtualMachineError e) {
+      SystemFailure.initiateFailure(e);
+      throw e;
+
+    } catch (Throwable th) {
+      SystemFailure.checkFailure();
+      logger.error("Could not alter hdfs store: {}", th.getMessage(), th);
+
+      CliFunctionResult result = new CliFunctionResult(memberId, th, null);
+      context.getResultSender().lastResult(result);
+    }
+
+  }
+
+  @Override
+  public String getId() {
+    return ID;
+  }
+
+  /**
+   * Alter HDFSStore with given configuration.
+   * 
+   * @param hdfsStore
+   * @param alterAttributes
+   * @return HDFSStore
+   */
+
+  protected HDFSStore alterHdfsStore(HDFSStore hdfsStore, AlterHDFSStoreAttributes alterAttributes) {
+    HDFSStoreMutator storeMutator = new HDFSStoreMutatorImpl(hdfsStore);
+  
+		if (alterAttributes.getFileRolloverInterval() != null)
+			storeMutator.setWriteOnlyFileRolloverInterval(alterAttributes
+					.getFileRolloverInterval());
+
+		if (alterAttributes.getMaxWriteonlyFileSize() != null)
+			storeMutator.setWriteOnlyFileRolloverSize(alterAttributes.getMaxWriteonlyFileSize());
+
+		if (alterAttributes.getMinorCompact() != null)
+			storeMutator.setMinorCompaction(alterAttributes.getMinorCompact());
+
+		if (alterAttributes.getMajorCompact() != null)
+		  storeMutator.setMajorCompaction(alterAttributes.getMajorCompact());
+
+		if (alterAttributes.getMajorCompactionInterval() != null)
+		  storeMutator.setMajorCompactionInterval(alterAttributes.getMajorCompactionInterval());
+
+		if (alterAttributes.getMajorCompactionThreads() != null)
+		  storeMutator.setMajorCompactionThreads(alterAttributes.getMajorCompactionThreads());
+
+		if (alterAttributes.getMinorCompactionThreads() != null)
+		  storeMutator.setMinorCompactionThreads(alterAttributes.getMinorCompactionThreads());
+
+		if (alterAttributes.getPurgeInterval() != null)
+			storeMutator.setPurgeInterval(alterAttributes.getPurgeInterval());
+
+		if (alterAttributes.getBatchSize() != null)
+		  storeMutator.setBatchSize(alterAttributes.getBatchSize());
+
+		if (alterAttributes.getBatchInterval() != null)
+		  storeMutator.setBatchInterval(alterAttributes.getBatchInterval());
+
+		hdfsStore.alter(storeMutator);
+		return hdfsStore;
+  }
+  
+  
+  public static class AlterHDFSStoreAttributes implements Serializable {
+	private static final long serialVersionUID = 1L;
+	String hdfsUniqueName;
+	Integer batchSize, batchInterval;
+	Boolean minorCompact, majorCompact;
+	Integer minorCompactionThreads, majorCompactionInterval, majorCompactionThreads, purgeInterval;
+	Integer fileRolloverInterval, maxWriteonlyFileSize;
+      
+	public AlterHDFSStoreAttributes(String hdfsUniqueName, Integer batchSize,
+			Integer batchInterval, Boolean minorCompact, Boolean majorCompact,
+			Integer minorCompactionThreads, Integer majorCompactionInterval,
+			Integer majorCompactionThreads, Integer purgeInterval,
+			Integer fileRolloverInterval, Integer maxWriteonlyFileSize) {
+		this.hdfsUniqueName = hdfsUniqueName;
+		this.batchSize = batchSize;
+		this.batchInterval = batchInterval;
+		this.minorCompact = minorCompact;
+		this.majorCompact = majorCompact;
+		this.minorCompactionThreads = minorCompactionThreads;
+		this.majorCompactionInterval = majorCompactionInterval;
+		this.majorCompactionThreads = majorCompactionThreads;
+		this.purgeInterval = purgeInterval;
+		this.fileRolloverInterval = fileRolloverInterval;
+		this.maxWriteonlyFileSize = maxWriteonlyFileSize;
+	}
+
+	public String getHdfsUniqueName() {
+		return hdfsUniqueName;
+	}
+
+	public Integer getBatchSize() {
+		return batchSize;
+	}
+
+	public Integer getBatchInterval() {
+		return batchInterval;
+	}
+
+	public Boolean getMinorCompact() {
+		return minorCompact;
+	}
+
+	public Boolean getMajorCompact() {
+		return majorCompact;
+	}
+
+	public Integer getMinorCompactionThreads() {
+		return minorCompactionThreads;
+	}
+
+	public Integer getMajorCompactionInterval() {
+		return majorCompactionInterval;
+	}
+
+	public Integer getMajorCompactionThreads() {
+		return majorCompactionThreads;
+	}
+
+	public Integer getPurgeInterval() {
+		return purgeInterval;
+	}
+
+	public Integer getFileRolloverInterval() {
+		return fileRolloverInterval;
+	}
+
+	public Integer getMaxWriteonlyFileSize() {
+		return maxWriteonlyFileSize;
+	}
+	  
+	
+  }
+  
+  
+  protected Cache getCache() {
+    return CacheFactory.getAnyInstance();
+  }
+  
+  protected DistributedMember getDistributedMember(Cache cache){
+    return ((InternalCache)cache).getMyId();
+  }
+  
+  protected XmlEntity getXMLEntity(String storeName){
+    return new XmlEntity(CacheXml.HDFS_STORE, "name", storeName);
+  }
+}
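
The alter path above applies only the attributes the caller actually supplied, through an HDFSStoreMutator built from the existing store. A minimal usage sketch of that pattern follows, using only types and calls that appear in this patch; the store name and new values are hypothetical, and it assumes, as the null checks above suggest, that attributes never set on the mutator keep their current values.

  // Minimal sketch of the mutator pattern from alterHdfsStore; names are examples.
  void alterBatchSettings(GemFireCacheImpl cache) {
    HDFSStore store = cache.findHDFSStore("myHdfsStore");   // hypothetical store name
    if (store != null) {
      HDFSStoreMutator mutator = new HDFSStoreMutatorImpl(store);
      mutator.setBatchSize(64);          // change only what needs to change
      mutator.setMinorCompaction(true);
      store.alter(mutator);              // apply the accumulated changes
    }
  }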

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunction.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunction.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunction.java
new file mode 100644
index 0000000..b4e5033
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunction.java
@@ -0,0 +1,124 @@
+package com.gemstone.gemfire.management.internal.cli.functions;
+
+import org.apache.logging.log4j.Logger;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.SystemFailure;
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheClosedException;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.execute.FunctionAdapter;
+import com.gemstone.gemfire.cache.execute.FunctionContext;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.internal.InternalEntity;
+import com.gemstone.gemfire.internal.cache.InternalCache;
+import com.gemstone.gemfire.internal.cache.xmlcache.CacheXml;
+
+import com.gemstone.gemfire.management.internal.cli.CliUtil;
+import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
+
+
+/**
+ * Function used by the 'create hdfs-store' gfsh command to create an HDFS
+ * store on each member.
+ * 
+ * @author Namrata Thanvi
+ */
+
+public class CreateHDFSStoreFunction extends FunctionAdapter implements InternalEntity {
+  
+  private static final long serialVersionUID = 1L;
+
+  private static final Logger logger = LogService.getLogger();
+
+  public static final CreateHDFSStoreFunction INSTANCE = new CreateHDFSStoreFunction();
+
+  private static final String ID = CreateHDFSStoreFunction.class.getName();
+
+  @Override
+  public void execute(FunctionContext context) {
+    String memberId = "";
+    try {
+      Cache cache = getCache();      
+      DistributedMember member = getDistributedMember(cache);
+      
+      memberId = member.getId();
+      if (!member.getName().equals("")) {
+        memberId = member.getName();
+      }
+      HDFSStoreConfigHolder configHolder = (HDFSStoreConfigHolder)context.getArguments();
+     
+      HDFSStore hdfsStore = createHdfsStore(cache, configHolder);
+      // TODO - Need to verify which attributes need to be persisted in
+      // cache.xml
+      XmlEntity xmlEntity = getXMLEntity(hdfsStore.getName());
+      context.getResultSender().lastResult(new CliFunctionResult(memberId, xmlEntity, "Success"));
+
+    } catch (CacheClosedException cce) {
+      context.getResultSender().lastResult(new CliFunctionResult(memberId, false, null));
+
+    } catch (VirtualMachineError e) {
+      SystemFailure.initiateFailure(e);
+      throw e;
+
+    } catch (Throwable th) {
+      SystemFailure.checkFailure();
+      logger.error("Could not create hdfs store: {}", CliUtil.stackTraceAsString(th), th);
+      context.getResultSender().lastResult(new CliFunctionResult(memberId, th, th.getMessage()));
+    }
+  }
+
+  @Override
+  public String getId() {
+    return ID;
+  } 
+  
+  /**
+   * Creates the HDFSStore with given configuration.
+   * 
+   * @param cache
+   * @param configHolder
+   * @return HDFSStore
+   */
+
+  protected HDFSStore createHdfsStore(Cache cache, HDFSStoreConfigHolder configHolder) {    
+    HDFSStoreFactory hdfsStoreFactory = cache.createHDFSStoreFactory();
+    hdfsStoreFactory.setName(configHolder.getName());
+    hdfsStoreFactory.setNameNodeURL(configHolder.getNameNodeURL());
+    hdfsStoreFactory.setBlockCacheSize(configHolder.getBlockCacheSize());
+    hdfsStoreFactory.setWriteOnlyFileRolloverInterval(configHolder.getWriteOnlyFileRolloverInterval());
+    hdfsStoreFactory.setHomeDir(configHolder.getHomeDir());
+    hdfsStoreFactory.setHDFSClientConfigFile(configHolder.getHDFSClientConfigFile());
+    hdfsStoreFactory.setWriteOnlyFileRolloverSize(configHolder.getWriteOnlyFileRolloverSize());
+    hdfsStoreFactory.setMajorCompaction(configHolder.getMajorCompaction());
+    hdfsStoreFactory.setMajorCompactionInterval(configHolder.getMajorCompactionInterval());
+    hdfsStoreFactory.setMajorCompactionThreads(configHolder.getMajorCompactionThreads());
+    hdfsStoreFactory.setMinorCompaction(configHolder.getMinorCompaction());
+    hdfsStoreFactory.setMaxMemory(configHolder.getMaxMemory());
+    hdfsStoreFactory.setBatchSize(configHolder.getBatchSize());
+    hdfsStoreFactory.setBatchInterval(configHolder.getBatchInterval());
+    hdfsStoreFactory.setDiskStoreName(configHolder.getDiskStoreName());
+    hdfsStoreFactory.setDispatcherThreads(configHolder.getDispatcherThreads());
+    hdfsStoreFactory.setMinorCompactionThreads(configHolder.getMinorCompactionThreads());
+    hdfsStoreFactory.setPurgeInterval(configHolder.getPurgeInterval());
+    hdfsStoreFactory.setSynchronousDiskWrite(configHolder.getSynchronousDiskWrite());
+    hdfsStoreFactory.setBufferPersistent(configHolder.getBufferPersistent());
+    
+    return hdfsStoreFactory.create(configHolder.getName());   
+  }
+  
+  protected Cache getCache() {
+    return CacheFactory.getAnyInstance();
+  }
+  
+  protected DistributedMember getDistributedMember(Cache cache){
+    return ((InternalCache)cache).getMyId();
+  }
+  
+  protected XmlEntity getXMLEntity(String storeName){
+    return new XmlEntity(CacheXml.HDFS_STORE, "name", storeName);
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunction.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunction.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunction.java
new file mode 100644
index 0000000..e6828bc
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunction.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.functions;
+
+import org.apache.logging.log4j.Logger;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.execute.FunctionAdapter;
+import com.gemstone.gemfire.cache.execute.FunctionContext;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.internal.InternalEntity;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.InternalCache;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.management.internal.cli.util.HDFSStoreNotFoundException;
+
+/**
+ * Function used by the 'describe hdfs-store' gfsh command to collect configuration
+ * details for a named HDFS store on a specific GemFire distributed system member.
+ */
+public class DescribeHDFSStoreFunction extends FunctionAdapter implements InternalEntity {
+  private static final long serialVersionUID = 1L;
+
+  private static final Logger logger = LogService.getLogger();
+
+  public static final DescribeHDFSStoreFunction INSTANCE = new DescribeHDFSStoreFunction();
+
+  private static final String ID = DescribeHDFSStoreFunction.class.getName();
+  
+  protected Cache getCache() {
+    return CacheFactory.getAnyInstance();
+  }
+  
+  protected DistributedMember getDistributedMemberId(Cache cache){
+    return ((InternalCache)cache).getMyId();
+  }
+  
+  public void execute(final FunctionContext context) {
+    try {
+      Cache cache = getCache();
+      final DistributedMember member = getDistributedMemberId(cache);      
+      if (cache instanceof GemFireCacheImpl) {
+        GemFireCacheImpl cacheImpl = (GemFireCacheImpl)cache;
+        final String hdfsStoreName = (String)context.getArguments();
+        final String memberName = member.getName();
+        HDFSStoreImpl hdfsStore = cacheImpl.findHDFSStore(hdfsStoreName);        
+        if (hdfsStore != null) {
+          HDFSStoreConfigHolder configHolder = new HDFSStoreConfigHolder (hdfsStore);
+          context.getResultSender().lastResult(configHolder);
+        }
+        else {
+          context.getResultSender().sendException(
+              new HDFSStoreNotFoundException(
+                  String.format("An HDFS store with name (%1$s) was not found on member (%2$s).",
+                  hdfsStoreName, memberName)));
+        }
+      }  
+    } catch (Exception e) {
+      logger.error("Error occurred while executing 'describe hdfs-store': {}", e.getMessage(), e);
+      context.getResultSender().sendException(e);
+    }
+  }
+
+  @Override
+  public String getId() {
+    return ID;
+  }	
+}

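In normal use this function is wired up by the 'describe hdfs-store' command class rather than called directly. As a rough, hypothetical sketch of the invocation pattern, assuming the standard GemFire function-execution API (FunctionService/Execution, which is not part of this patch) and a known target member:

    import java.util.List;
    import com.gemstone.gemfire.cache.execute.Execution;
    import com.gemstone.gemfire.cache.execute.FunctionService;
    import com.gemstone.gemfire.cache.execute.ResultCollector;
    import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
    import com.gemstone.gemfire.distributed.DistributedMember;
    import com.gemstone.gemfire.management.internal.cli.functions.DescribeHDFSStoreFunction;

    public class DescribeHdfsStoreSketch {
      public static HDFSStoreConfigHolder describe(DistributedMember member, String storeName) {
        // Target the single member that hosts the store and pass the store name as the argument.
        Execution execution = FunctionService.onMember(member).withArgs(storeName);
        ResultCollector<?, ?> collector = execution.execute(DescribeHDFSStoreFunction.INSTANCE);
        List<?> results = (List<?>) collector.getResult();  // one HDFSStoreConfigHolder expected
        return (HDFSStoreConfigHolder) results.get(0);
      }
    }
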
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunction.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunction.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunction.java
new file mode 100644
index 0000000..83f6740
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunction.java
@@ -0,0 +1,100 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.management.internal.cli.functions;
+
+import org.apache.logging.log4j.Logger;
+
+import com.gemstone.gemfire.SystemFailure;
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheClosedException;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.execute.FunctionAdapter;
+import com.gemstone.gemfire.cache.execute.FunctionContext;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.internal.InternalEntity;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.InternalCache;
+import com.gemstone.gemfire.internal.cache.xmlcache.CacheXml;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
+
+/**
+ * Function used by the 'destroy hdfs-store' gfsh command to destroy an HDFS
+ * store on each member.
+ *
+ * @author Namrata Thanvi
+ */
+
+public class DestroyHDFSStoreFunction extends FunctionAdapter implements InternalEntity {
+  private static final Logger logger = LogService.getLogger();
+
+  private static final String ID = DestroyHDFSStoreFunction.class.getName();
+
+  private static final long serialVersionUID = 1L;
+
+  @Override
+  public void execute(FunctionContext context) {
+    String memberId = "";
+    try {
+      final String hdfsStoreName = (String)context.getArguments();
+      GemFireCacheImpl cache = (GemFireCacheImpl)getCache();      
+      DistributedMember member = getDistributedMember(cache);     
+      CliFunctionResult result;
+      
+      memberId = member.getId();
+      if (!member.getName().equals("")) {
+        memberId = member.getName();
+      }
+      
+      HDFSStoreImpl hdfsStore = cache.findHDFSStore(hdfsStoreName);
+      
+      if (hdfsStore != null) {
+        hdfsStore.destroy();
+        // TODO - Need to verify which attributes need to be persisted in cache.xml, and how.
+        XmlEntity xmlEntity = getXMLEntity(hdfsStoreName); 
+        result = new CliFunctionResult(memberId, xmlEntity, "Success");
+      }
+      else {
+        result = new CliFunctionResult(memberId, false, "HDFS store not found on this member");
+      }
+      context.getResultSender().lastResult(result);   
+
+    } catch (CacheClosedException cce) {
+      CliFunctionResult result = new CliFunctionResult(memberId, false, null);
+      context.getResultSender().lastResult(result);
+
+    } catch (VirtualMachineError e) {
+      SystemFailure.initiateFailure(e);
+      throw e;
+
+    } catch (Throwable th) {
+      SystemFailure.checkFailure();
+      logger.error("Could not destroy hdfs store: {}", th.getMessage(), th);
+      CliFunctionResult result = new CliFunctionResult(memberId, th, null);
+      context.getResultSender().lastResult(result);
+    }
+  }
+
+  @Override
+  public String getId() {
+    return ID;
+  }
+  
+  protected Cache getCache() {
+    return CacheFactory.getAnyInstance();
+  }
+  
+  protected DistributedMember getDistributedMember(Cache cache){
+    return ((InternalCache)cache).getMyId();
+  }
+  
+  protected XmlEntity getXMLEntity(String storeName){
+    return new XmlEntity(CacheXml.HDFS_STORE, "name", storeName);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunction.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunction.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunction.java
new file mode 100644
index 0000000..fb947ae
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunction.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ */
+
+package com.gemstone.gemfire.management.internal.cli.functions;
+
+import java.io.Serializable;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.logging.log4j.Logger;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.execute.FunctionAdapter;
+import com.gemstone.gemfire.cache.execute.FunctionContext;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.internal.InternalEntity;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.InternalCache;
+import com.gemstone.gemfire.internal.logging.LogService;
+
+/**
+ * Function used by the 'list hdfs-stores' gfsh command to collect the HDFS stores
+ * defined on each member of the GemFire distributed system.
+ *
+ * @author Namrata Thanvi
+ */
+
+public class ListHDFSStoresFunction extends FunctionAdapter implements InternalEntity {
+
+  private static final long serialVersionUID = 1L;
+
+  private static final String ID = ListHDFSStoresFunction.class.getName();
+
+  private static final Logger logger = LogService.getLogger();
+
+  protected Cache getCache() {
+    return CacheFactory.getAnyInstance();
+  }
+  
+  protected DistributedMember getDistributedMemberId(Cache cache){
+    return ((InternalCache)cache).getMyId();
+  }
+  
+  public void execute(final FunctionContext context) {
+    Set<HdfsStoreDetails>  hdfsStores = new HashSet<HdfsStoreDetails>();
+    try {
+      final Cache cache = getCache();     
+      if (cache instanceof GemFireCacheImpl) {    
+        final GemFireCacheImpl gemfireCache = (GemFireCacheImpl)cache;
+        final DistributedMember member = getDistributedMemberId(cache);        
+        for (final HDFSStore store : gemfireCache.getHDFSStores()) {  
+          hdfsStores.add(new HdfsStoreDetails(store.getName(), member.getId(), member.getName()));
+        }             
+      }
+      context.getResultSender().lastResult(hdfsStores);
+    } catch (Exception e) {
+      context.getResultSender().sendException(e);
+    }
+  } 
+  
+  @Override
+  public String getId() {
+    return ID;
+  }
+
+  
+  public static class HdfsStoreDetails implements Serializable {
+    private static final long serialVersionUID = 1L;
+    private String storeName;
+    private String memberId, memberName;
+    
+    public HdfsStoreDetails(String storeName, String memberId, String memberName) {
+      super();
+      this.storeName = storeName;
+      this.memberId = memberId;
+      this.memberName = memberName;
+    }
+    
+    public String getStoreName() {
+      return storeName;
+    }
+   
+    public String getMemberId() {
+      return memberId;
+    }
+   
+    public String getMemberName() {
+      return memberName;
+    }
+
+  }
+}
+
+

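Each member replies with a Set of HdfsStoreDetails, and the calling command flattens those per-member sets into a single listing. A small illustrative sketch of consuming one member's reply; the println is a placeholder for the tabular output the command actually builds:

    import java.util.Set;
    import com.gemstone.gemfire.management.internal.cli.functions.ListHDFSStoresFunction.HdfsStoreDetails;

    public class ListHdfsStoresSketch {
      public static void print(Set<HdfsStoreDetails> detailsFromOneMember) {
        for (HdfsStoreDetails details : detailsFromOneMember) {
          // member name, member id, then the store defined on that member
          System.out.println(details.getMemberName() + " (" + details.getMemberId() + "): "
              + details.getStoreName());
        }
      }
    }
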
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionCreateFunction.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionCreateFunction.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionCreateFunction.java
index fd5db59..0e952ae 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionCreateFunction.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionCreateFunction.java
@@ -321,6 +321,14 @@ public class RegionCreateFunction extends FunctionAdapter implements InternalEnt
     
     String regionName = regionPathData.getName();
     
+    final String hdfsStoreName = regionCreateArgs.getHDFSStoreName();
+    if (hdfsStoreName != null && !hdfsStoreName.isEmpty()) {
+      factory.setHDFSStoreName(hdfsStoreName);
+    }
+    if (regionCreateArgs.isSetHDFSWriteOnly()) {
+      factory.setHDFSWriteOnly(regionCreateArgs.getHDFSWriteOnly());
+    }
+
     if (parentRegion != null) {
       createdRegion = factory.createSubregion(parentRegion, regionName);
     } else {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs.java
index 98bde7e..083f612 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs.java
@@ -78,9 +78,48 @@ public class RegionFunctionArgs implements Serializable {
   private final boolean isSetCompressor;
   private Boolean offHeap;
   private final boolean isSetOffHeap;
+  private String hdfsStoreName;
+  private Boolean isSetHdfsWriteOnly = false;
+  private Boolean hdfsWriteOnly;
+
   private RegionAttributes<?, ?> regionAttributes;
 
   public RegionFunctionArgs(String regionPath,
+      RegionShortcut regionShortcut, String useAttributesFrom,
+      boolean skipIfExists, String keyConstraint, String valueConstraint,
+      Boolean statisticsEnabled,
+      RegionFunctionArgs.ExpirationAttrs entryExpirationIdleTime,
+      RegionFunctionArgs.ExpirationAttrs entryExpirationTTL,
+      RegionFunctionArgs.ExpirationAttrs regionExpirationIdleTime,
+      RegionFunctionArgs.ExpirationAttrs regionExpirationTTL, String diskStore,
+      Boolean diskSynchronous, Boolean enableAsyncConflation,
+      Boolean enableSubscriptionConflation, String[] cacheListeners,
+      String cacheLoader, String cacheWriter, String[] asyncEventQueueIds,
+      String[] gatewaySenderIds, Boolean concurrencyChecksEnabled,
+      Boolean cloningEnabled, Integer concurrencyLevel, String prColocatedWith,
+      Integer prLocalMaxMemory, Long prRecoveryDelay,
+      Integer prRedundantCopies, Long prStartupRecoveryDelay,
+      Long prTotalMaxMemory, Integer prTotalNumBuckets, Integer evictionMax,
+      String compressor, Boolean offHeap, Boolean mcastEnabled,
+      String hdfsStoreName, Boolean hdfsWriteOnly) {
+    this(regionPath, regionShortcut, useAttributesFrom, skipIfExists,
+        keyConstraint, valueConstraint, statisticsEnabled,
+        entryExpirationIdleTime, entryExpirationTTL,
+        regionExpirationIdleTime, regionExpirationTTL, diskStore,
+        diskSynchronous, enableAsyncConflation,
+        enableSubscriptionConflation, cacheListeners, cacheLoader,
+        cacheWriter, asyncEventQueueIds, gatewaySenderIds,
+        concurrencyChecksEnabled, cloningEnabled, concurrencyLevel,
+        prColocatedWith, prLocalMaxMemory, prRecoveryDelay,
+        prRedundantCopies, prStartupRecoveryDelay, prTotalMaxMemory,
+        prTotalNumBuckets, evictionMax, compressor, offHeap, mcastEnabled);
+    this.isSetHdfsWriteOnly = hdfsWriteOnly != null;
+    if (this.isSetHdfsWriteOnly) {
+      this.hdfsWriteOnly = hdfsWriteOnly;
+    }
+    if (hdfsStoreName != null) {
+      this.hdfsStoreName = hdfsStoreName;
+    }
+  }
+
+  public RegionFunctionArgs(String regionPath,
       RegionShortcut regionShortcut, String useAttributesFrom,
       boolean skipIfExists, String keyConstraint, String valueConstraint,
       Boolean statisticsEnabled, 
@@ -192,8 +231,8 @@ public class RegionFunctionArgs implements Serializable {
       Integer prLocalMaxMemory, Long prRecoveryDelay,
       Integer prRedundantCopies, Long prStartupRecoveryDelay,
       Long prTotalMaxMemory, Integer prTotalNumBuckets, 
-      Boolean offHeap,
-      Boolean mcastEnabled, RegionAttributes<?, ?> regionAttributes) {   
+      Boolean offHeap, Boolean mcastEnabled, String hdfsStoreName, Boolean hdfsWriteOnly,
+      RegionAttributes<?, ?> regionAttributes) {
     this(regionPath, null, useAttributesFrom, skipIfExists, keyConstraint,
         valueConstraint, statisticsEnabled, entryExpirationIdleTime,
         entryExpirationTTL, regionExpirationIdleTime, regionExpirationTTL,
@@ -203,7 +242,7 @@ public class RegionFunctionArgs implements Serializable {
         concurrencyChecksEnabled, cloningEnabled, concurrencyLevel, 
         prColocatedWith, prLocalMaxMemory, prRecoveryDelay,
         prRedundantCopies, prStartupRecoveryDelay,
-        prTotalMaxMemory, prTotalNumBuckets, null, null, offHeap , mcastEnabled);
+        prTotalMaxMemory, prTotalNumBuckets, null, null, offHeap, mcastEnabled, hdfsStoreName, hdfsWriteOnly);
     this.regionAttributes = regionAttributes;
   }
 
@@ -250,6 +289,28 @@ public class RegionFunctionArgs implements Serializable {
   }  
 
   /**
+   * @return the hdfsStoreName
+   */
+  public String getHDFSStoreName() {
+    return this.hdfsStoreName;
+  }  
+
+  /**
+   * @return the hdfsWriteOnly
+   */
+  public Boolean getHDFSWriteOnly() {
+    return this.hdfsWriteOnly;
+  }
+  
+  /**
+   * @return whether hdfs-write-only was explicitly specified
+   */
+  public Boolean isSetHDFSWriteOnly() {
+    return this.isSetHdfsWriteOnly;
+  }
+  
+  
+  /**
    * @return the valueConstraint
    */
   public String getValueConstraint() {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/i18n/CliStrings.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/i18n/CliStrings.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/i18n/CliStrings.java
index 9bb573b..99a7929 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/i18n/CliStrings.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/i18n/CliStrings.java
@@ -19,7 +19,10 @@ package com.gemstone.gemfire.management.internal.cli.i18n;
 import java.text.MessageFormat;
 
 import com.gemstone.gemfire.cache.PartitionAttributesFactory;
+import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueueFactory;
 import com.gemstone.gemfire.cache.server.CacheServer;
+import com.gemstone.gemfire.cache.wan.GatewayEventFilter;
+import com.gemstone.gemfire.cache.wan.GatewayEventSubstitutionFilter;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.SharedConfiguration;
 import com.gemstone.gemfire.internal.cache.xmlcache.CacheXml;
@@ -109,6 +112,8 @@ public class CliStrings {
   public static final String TOPIC_SHARED_CONFIGURATION = "Cluster Configuration";
   public static final String TOPIC_SHARED_CONFIGURATION_HELP = "Configuration for cluster and various groups. It consists of cache.xml, gemfire properties and deployed jars.\nChanges due to gfshs command are persisted to the locator hosting the cluster configuration service.";
   public static final String TOPIC_CHANGELOGLEVEL = "User can change the log-level for a  member run time and generate log contents as per the need";
+  public static final String TOPIC_GEMFIRE_HDFSSTORE = "Hdfs Store";
+  public static final String TOPIC_GEMFIRE_HDFSSTORE__DESC = "Hdfs stores are used to persist data to the Hadoop Distributed File System as a backup of your in-memory copy or as overflow storage when eviction criteria are specified.";
 
   /*-*************************************************************************
    * ********* String Constants other than command name, options & help ******
@@ -685,7 +690,114 @@ public class CliStrings {
 
   public static final String CREATE_REGION__OFF_HEAP = "off-heap";
   public static final String CREATE_REGION__OFF_HEAP__HELP = "Causes the values of the region to be stored in off-heap memory. The default is on heap.";
+  public static final String CREATE_REGION__HDFSSTORE_NAME = "hdfs-store";
+  public static final String CREATE_REGION__HDFSSTORE_NAME__HELP = "HDFS store to be used by this region. \"list hdfs-stores\" can be used to display existing HDFS stores.";
+  public static final String CREATE_REGION__HDFSSTORE_WRITEONLY = "hdfs-write-only";
+  public static final String CREATE_REGION__HDFSSTORE_WRITEONLY__HELP = "HDFS write-only mode will be used. All data will be persisted in the HDFS store, and users can access the stored data only through the MapReduce API.";
+  /* hdfsstore commands  */  
+  public static final String CREATE_HDFS_STORE ="create hdfs-store";
+  public static final String CREATE_HDFS_STORE__HELP = "Create an HDFS store to persist region data on the specified Hadoop cluster.";
+  public static final String CREATE_HDFS_STORE__NAME = "name";
+  public static final String CREATE_HDFS_STORE__NAME__HELP = "Name of the store.";
+  public static final String CREATE_HDFS_STORE__NAMENODE = "namenode";
+  public static final String CREATE_HDFS_STORE__NAMENODE__HELP = "The URL of the Hadoop NameNode for your HDFS cluster. HDFSStore persists data on an HDFS cluster identified by the cluster's NameNode URL or NameNode service URL. The NameNode URL can also be provided via hdfs-site.xml.";
+  public static final String CREATE_HDFS_STORE__HOMEDIR = "home-dir";
+  public static final String CREATE_HDFS_STORE__HOMEDIR__HELP ="The HDFS directory path in which HDFSStore stores files. The value must not contain the NameNode URL.";
+  public static final String CREATE_HDFS_STORE__READCACHESIZE= "read-cache-size";
+  public static final String CREATE_HDFS_STORE__READCACHESIZE__HELP ="The maximum amount of memory in megabytes used by HDFSStore read cache.";  
+  public static final String CREATE_HDFS_STORE__BATCHSIZE = "batch-size";
+  public static final String CREATE_HDFS_STORE__BATCHSIZE__HELP ="HDFSStore buffer data is persisted on HDFS in batches, and the BatchSize defines the maximum size (in megabytes) of each batch that is written to HDFS.";
+  public static final String CREATE_HDFS_STORE__BATCHINTERVAL = "batch-interval";
+  public static final String CREATE_HDFS_STORE__BATCHINTERVAL__HELP ="The maximum time that can elapse between writing batches to HDFS.";
+  public static final String CREATE_HDFS_STORE__MAXMEMORY = "max-memory";
+  public static final String CREATE_HDFS_STORE__MAXMEMORY__HELP ="The maximum amount of memory in megabytes used by HDFSStore.";
+  public static final String CREATE_HDFS_STORE__DISPATCHERTHREADS = "dispatcher-threads";
+  public static final String CREATE_HDFS_STORE__DISPATCHERTHREADS__HELP ="The maximum number of threads (per region) used to write batches to HDFS.";
+  public static final String CREATE_HDFS_STORE__BUFFERPERSISTENT = "buffer-persistent";
+  public static final String CREATE_HDFS_STORE__BUFFERPERSISTENT__HELP ="Configure whether HDFSStore in-memory buffer data that has not yet been persisted to HDFS should be persisted to a local disk store to prevent data loss.";
+  public static final String CREATE_HDFS_STORE__SYNCDISKWRITE = "synchronous-disk-write";
+  public static final String CREATE_HDFS_STORE__SYNCDISKWRITE__HELP ="Enable or disable synchronous writes to the local DiskStore.";
+  public static final String CREATE_HDFS_STORE__DISKSTORENAME = "disk-store-name";
+  public static final String CREATE_HDFS_STORE__DISKSTORENAME__HELP ="The named DiskStore to use for any local disk persistence needs of HDFSStore.";
+  public static final String CREATE_HDFS_STORE__MINORCOMPACT= "minor-compact";
+  public static final String CREATE_HDFS_STORE__MINORCOMPACT__HELP ="Minor compaction reorganizes data in files to optimize read performance and reduce the number of files created on HDFS.";
+  
+  public static final String CREATE_HDFS_STORE__MINORCOMPACTIONTHREADS = "minor-compaction-threads";
+  public static final String CREATE_HDFS_STORE__MINORCOMPACTIONTHREADS__HELP ="The maximum number of threads that GemFire uses to perform minor compaction in this HDFS store.";
+  public static final String CREATE_HDFS_STORE__MAJORCOMPACT= "major-compact";
+  public static final String CREATE_HDFS_STORE__MAJORCOMPACT__HELP ="Major compaction removes old values of a key and deleted records from the HDFS files.";
+  public static final String CREATE_HDFS_STORE__MAJORCOMPACTINTERVAL= "major-compaction-interval";
+  public static final String CREATE_HDFS_STORE__MAJORCOMPACTINTERVAL__HELP ="The interval between two major compactions.";
+  public static final String CREATE_HDFS_STORE__MAJORCOMPACTIONTHREADS = "major-compaction-threads";
+  public static final String CREATE_HDFS_STORE__MAJORCOMPACTIONTHREADS__HELP ="The maximum number of threads that GemFire uses to perform major compaction in this HDFS store.";
+  public static final String CREATE_HDFS_STORE__PURGEINTERVAL = "purge-interval";
+  public static final String CREATE_HDFS_STORE__PURGEINTERVAL__HELP ="PurgeInterval defines the amount of time old files remain available for MapReduce jobs. After this interval has passed, old files are deleted.";
+  public static final String CREATE_HDFS_STORE__WRITEONLYFILESIZE = "max-write-only-file-size";
+  public static final String CREATE_HDFS_STORE__WRITEONLYFILESIZE__HELP ="For HDFS write-only regions, this defines the maximum size (in megabytes) that an HDFS log file can reach before HDFSStore closes the file and begins writing to a new file.";
+  public static final String CREATE_HDFS_STORE__FILEROLLOVERINTERVAL = "write-only-file-rollover-interval";
+  public static final String CREATE_HDFS_STORE__FILEROLLOVERINTERVAL__HELP ="For HDFS write-only regions, this defines the maximum time that can elapse before HDFSStore closes an HDFS file and begins writing to a new file.";  
+  public static final String CREATE_HDFS_STORE__CLIENTCONFIGFILE = "client-config-files";
+  public static final String CREATE_HDFS_STORE__CLIENTCONFIGFILE__HELP ="The full path to the HDFS client configuration files, e.g. hdfs-site.xml and core-site.xml. These files must be accessible to any node where an instance of this HDFSStore will be created.";
+  public static final String CREATE_HDFS_STORE__ERROR_WHILE_CREATING_REASON_0 = "An error occurred while creating the hdfs store: \"{0}\"";
+  public static final String CREATE_HDFS_STORE__GROUP = "group";
+  public static final String CREATE_HDFS_STORE__GROUP__HELP = "Group(s) of members on which the hdfs store will be created. If no group is specified the hdfs store will be created on all members.";
+    
+  /*HDFS describe command*/
+  public static final String DESCRIBE_HDFS_STORE = "describe hdfs-store";
+  public static final String DESCRIBE_HDFS_STORE__HELP = "Display information about an HDFS store.";
+  public static final String DESCRIBE_HDFS_STORE__NAME = "name";
+  public static final String DESCRIBE_HDFS_STORE__NAME__HELP = "Name of the HDFS store to be described.";
+  public static final String DESCRIBE_HDFS_STORE__MEMBER = "member";
+  public static final String DESCRIBE_HDFS_STORE__MEMBER__HELP = "Name/Id of the member with the hdfs store to be described.";
+  public static final String DESCRIBE_HDFS_STORE__ERROR_MESSAGE = "An error occurred while getting information about the hdfs store: \"{0}\"";
+  
+  /*HDFS list command*/
+  public static final String LIST_HDFS_STORE = "list hdfs-stores";
+  public static final String LIST_HDFS_STORE__HELP = "Display HDFS stores for all members.";
+  public static final String LIST_HDFS_STORE__NAME__HELP = "Name of the HDFS store.";
+  public static final String LIST_HDFS_STORE__ERROR_MESSAGE = "An error occurred while collecting Hdfs Store information for all members across the GemFire cluster: %1$s";
+  public static final String LIST_HDFS_STORE__HDFS_STORES_NOT_FOUND_MESSAGE = "No Hdfs Stores Found";
+  
+  
+  /* 'destroy hdfs-store' command */
+  public static final String DESTROY_HDFS_STORE = "destroy hdfs-store";
+  public static final String DESTROY_HDFS_STORE__HELP = "Destroy an HDFS store.";
+  public static final String DESTROY_HDFS_STORE__NAME = "name";
+  public static final String DESTROY_HDFS_STORE__NAME__HELP = "Name of the HDFS store that will be destroyed.";
+  public static final String DESTROY_HDFS_STORE__GROUP = "group";
+  public static final String DESTROY_HDFS_STORE__GROUP__HELP = "Group(s) of members on which the hdfs store will be destroyed. If no group is specified the hdfs store will be destroyed on all members.";
+  public static final String DESTROY_HDFS_STORE__ERROR_WHILE_DESTROYING_REASON_0 = "An error occurred while destroying the hdfs store: \"{0}\"";
 
+  
+  /* 'alter hdfs-store' command */
+  public static final String ALTER_HDFS_STORE = "alter hdfs-store";
+  public static final String ALTER_HDFS_STORE__HELP = "Alter an HDFS store.";
+  public static final String ALTER_HDFS_STORE__NAME = "name";
+  public static final String ALTER_HDFS_STORE__NAME__HELP = "Name of the HDFS store that will be altered.";
+  public static final String ALTER_HDFS_STORE__GROUP = "group";  
+  public static final String ALTER_HDFS_STORE__GROUP__HELP = "Group(s) of members on which the hdfs store will be altered. If no group is specified the hdfs store will be altered on all members.";
+  public static final String ALTER_HDFS_STORE__ERROR_WHILE_ALTERING_REASON_0 = "An error occurred while altering the hdfs store: \"{0}\"";
+  public static final String ALTER_HDFS_STORE__BATCHSIZE = "batch-size";
+  public static final String ALTER_HDFS_STORE__BATCHSIZE__HELP ="HDFSStore buffer data is persisted on HDFS in batches, and the BatchSize defines the maximum size (in megabytes) of each batch that is written to HDFS.";
+  public static final String ALTER_HDFS_STORE__BATCHINTERVAL = "batch-interval";
+  public static final String ALTER_HDFS_STORE__BATCHINTERVAL__HELP ="The maximum time that can elapse between writing batches to HDFS.";
+  public static final String ALTER_HDFS_STORE__MINORCOMPACT= "minor-compact";
+  public static final String ALTER_HDFS_STORE__MINORCOMPACT__HELP ="Minor compaction reorganizes data in files to optimize read performance and reduce the number of files created on HDFS.";
+  public static final String ALTER_HDFS_STORE__MINORCOMPACTIONTHREADS = "minor-compaction-threads";
+  public static final String ALTER_HDFS_STORE__MINORCOMPACTIONTHREADS__HELP ="The maximum number of threads that GemFire uses to perform minor compaction in this HDFS store.";
+  public static final String ALTER_HDFS_STORE__MAJORCOMPACT= "major-compact";
+  public static final String ALTER_HDFS_STORE__MAJORCOMPACT__HELP ="Major compaction removes old values of a key and deleted records from the HDFS files.";
+  public static final String ALTER_HDFS_STORE__MAJORCOMPACTINTERVAL= "major-compaction-interval";
+  public static final String ALTER_HDFS_STORE__MAJORCOMPACTINTERVAL__HELP ="The interval between two major compactions.";
+  public static final String ALTER_HDFS_STORE__MAJORCOMPACTIONTHREADS = "major-compaction-threads";
+  public static final String ALTER_HDFS_STORE__MAJORCOMPACTIONTHREADS__HELP ="The maximum number of threads that GemFire uses to perform major compaction in this HDFS store.";
+  public static final String ALTER_HDFS_STORE__PURGEINTERVAL = "purge-interval";
+  public static final String ALTER_HDFS_STORE__PURGEINTERVAL__HELP ="PurgeInterval defines the amount of time old files remain available for MapReduce jobs. After this interval has passed, old files are deleted.";
+  public static final String ALTER_HDFS_STORE__FILEROLLOVERINTERVAL = "write-only-file-rollover-interval";
+  public static final String ALTER_HDFS_STORE__FILEROLLOVERINTERVAL__HELP = "For HDFS write-only regions, this defines the maximum time that can elapse before HDFSStore closes an HDFS file and begins writing to a new file.";  
+  public static final String ALTER_HDFS_STORE__WRITEONLYFILESIZE = "max-write-only-file-size";
+  public static final String ALTER_HDFS_STORE__WRITEONLYFILESIZE__HELP ="For HDFS write-only regions, this defines the maximum size (in megabytes) that an HDFS log file can reach before HDFSStore closes the file and begins writing to a new file.";
+  
   /* debug command */
   public static final String DEBUG = "debug";
   public static final String DEBUG__HELP = "Enable/Disable debugging output in GFSH.";

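The option names defined above are the same constants the REST controller later in this patch feeds into CommandStringBuilder. A short sketch of how a 'create hdfs-store' command string could be assembled from these constants; the store name, NameNode URL and home directory are placeholders, and the comment shows roughly what gfsh ends up parsing:

    import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
    import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;

    public class CreateHdfsStoreCommandSketch {
      public static String buildCommand() {
        CommandStringBuilder command = new CommandStringBuilder(CliStrings.CREATE_HDFS_STORE);
        command.addOption(CliStrings.CREATE_HDFS_STORE__NAME, "orderStore");
        command.addOption(CliStrings.CREATE_HDFS_STORE__NAMENODE, "hdfs://namenode:8020");
        command.addOption(CliStrings.CREATE_HDFS_STORE__HOMEDIR, "gemfire-data");
        // Roughly: create hdfs-store --name=orderStore --namenode=hdfs://namenode:8020 --home-dir=gemfire-data
        return command.toString();
      }
    }
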
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/util/HDFSStoreNotFoundException.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/util/HDFSStoreNotFoundException.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/util/HDFSStoreNotFoundException.java
new file mode 100644
index 0000000..ad569f0
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/util/HDFSStoreNotFoundException.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.management.internal.cli.util;
+
+import com.gemstone.gemfire.GemFireException;
+
+/**
+ * The HDFSStoreNotFoundException is a GemFireException indicating that an HDFS store with the
+ * specified name could not be found on the member specified by name.
+ * <p>
+ * @see com.gemstone.gemfire.GemFireException
+ */
+// TODO this GemFireException should be moved to a more appropriate package!
+@SuppressWarnings("unused")
+public class HDFSStoreNotFoundException extends GemFireException {
+
+  public HDFSStoreNotFoundException() {
+  }
+
+  public HDFSStoreNotFoundException(final String message) {
+    super(message);
+  }
+
+  public HDFSStoreNotFoundException(final Throwable cause) {
+    super(cause);
+  }
+
+  public HDFSStoreNotFoundException(final String message, final Throwable cause) {
+    super(message, cause);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/util/RegionAttributesNames.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/util/RegionAttributesNames.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/util/RegionAttributesNames.java
index e0db821..e842bee 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/util/RegionAttributesNames.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/util/RegionAttributesNames.java
@@ -46,7 +46,9 @@ public class RegionAttributesNames {
 	public static final String POOL_NAME =  "pool-name";
 	public static final String COMPRESSOR = "compressor";
     public static final String OFF_HEAP = "off-heap";
-
+    public static final String HDFSSTORE = "hdfs-store";
+    public static final String HDFS_WRITEONLY = "hdfs-write-only";
+	
 	//Partition attributes
 	public static final String LOCAL_MAX_MEMORY =  "local-max-memory";
 	public static final String REDUNDANT_COPIES =  "redundant-copies";

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/HDFSStoreCommandsController.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/HDFSStoreCommandsController.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/HDFSStoreCommandsController.java
new file mode 100644
index 0000000..c182edd
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/HDFSStoreCommandsController.java
@@ -0,0 +1,229 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.management.internal.web.controllers;
+
+import org.springframework.shell.core.annotation.CliOption;
+import org.springframework.stereotype.Controller;
+import org.springframework.web.bind.annotation.PathVariable;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.RequestMethod;
+import org.springframework.web.bind.annotation.RequestParam;
+import org.springframework.web.bind.annotation.ResponseBody;
+
+import com.gemstone.gemfire.internal.lang.StringUtils;
+import com.gemstone.gemfire.management.cli.CliMetaData;
+import com.gemstone.gemfire.management.cli.ConverterHint;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
+
+/**
+ * The HDFSStoreCommandsController class implements GemFire Management REST API web service endpoints for the
+ * gfsh HDFS store commands.
+ * <p/>
+ * @author Namrata Thanvi
+ * @see com.gemstone.gemfire.management.internal.cli.commands.HDFSStoreCommands
+ * @see com.gemstone.gemfire.management.internal.web.controllers.AbstractCommandsController
+ * @see org.springframework.stereotype.Controller
+ * @see org.springframework.web.bind.annotation.PathVariable
+ * @see org.springframework.web.bind.annotation.RequestMapping
+ * @see org.springframework.web.bind.annotation.RequestMethod
+ * @see org.springframework.web.bind.annotation.RequestParam
+ * @see org.springframework.web.bind.annotation.ResponseBody
+ * @since 9.0
+ */
+@Controller("hdfsStoreController")
+@RequestMapping(AbstractCommandsController.REST_API_VERSION)
+@SuppressWarnings("unused")
+public class HDFSStoreCommandsController extends AbstractCommandsController {
+  @RequestMapping(method = RequestMethod.GET, value = "/hdfsstores")
+  @ResponseBody
+  public String listHDFSStores() {
+    return processCommand(CliStrings.LIST_HDFS_STORE);
+  }
+  
+  @RequestMapping(method = RequestMethod.POST, value = "/hdfsstores")
+  @ResponseBody
+  public String createHdfsStore(
+		  @RequestParam(CliStrings.CREATE_HDFS_STORE__NAME) final String storeName,		  
+		  @RequestParam(value = CliStrings.CREATE_HDFS_STORE__NAMENODE, required=false) final String  namenode,
+		  @RequestParam(value = CliStrings.CREATE_HDFS_STORE__HOMEDIR, required=false) final String  homedir,
+		  @RequestParam(value = CliStrings.CREATE_HDFS_STORE__BATCHSIZE,required=false) final Integer batchSize,                    
+		  @RequestParam(value = CliStrings.CREATE_HDFS_STORE__BATCHINTERVAL, required=false) final Integer batchInterval,          
+          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__READCACHESIZE, required=false) final Float readCachesize,
+          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__DISPATCHERTHREADS, required=false) final Integer dispatcherThreads,
+          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__MAXMEMORY, required=false) final Integer maxMemory,
+          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__BUFFERPERSISTENT, required=false) final Boolean persistence,
+          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__SYNCDISKWRITE, required=false) final Boolean  synchronousDiskWrite,                    
+          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__DISKSTORENAME, required=false) final String diskStoreName,
+          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__MINORCOMPACT, required=false) final Boolean minorCompaction,
+          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__MINORCOMPACTIONTHREADS, required=false) final Integer minorCompactionThreads,
+          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACT, required=false) final Boolean majorCompact,
+          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTINTERVAL, required=false) final Integer majorCompactionInterval,
+          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTIONTHREADS, required=false) final Integer majorCompactionThreads,
+          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__PURGEINTERVAL, required=false) final Integer purgeInterval,
+          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__WRITEONLYFILESIZE, required=false) final Integer writeOnlyFileSize,
+          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__FILEROLLOVERINTERVAL, required=false) final Integer fileRolloverInterval,
+          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__CLIENTCONFIGFILE, required=false) final String clientConfigFile,
+          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__GROUP, required = false) final String[] groups)
+  {
+		CommandStringBuilder command = new CommandStringBuilder(CliStrings.CREATE_HDFS_STORE);
+
+		command.addOption(CliStrings.CREATE_HDFS_STORE__NAME, storeName);
+		
+		if (hasValue(namenode))
+			command.addOption(CliStrings.CREATE_HDFS_STORE__NAMENODE, namenode);
+		
+		if (hasValue(homedir))
+			command.addOption(CliStrings.CREATE_HDFS_STORE__HOMEDIR, homedir);
+		
+		if (hasValue(batchSize))
+			command.addOption(CliStrings.CREATE_HDFS_STORE__BATCHSIZE, String.valueOf(batchSize));
+		
+		if (hasValue(batchInterval))
+			command.addOption(CliStrings.CREATE_HDFS_STORE__BATCHINTERVAL, String.valueOf(batchInterval));
+		
+		if (hasValue(readCachesize))
+			command.addOption(CliStrings.CREATE_HDFS_STORE__READCACHESIZE, String.valueOf(readCachesize));
+		
+		if (hasValue(dispatcherThreads))
+			command.addOption(CliStrings.CREATE_HDFS_STORE__DISPATCHERTHREADS, String.valueOf(dispatcherThreads));
+		
+		if (hasValue(maxMemory))
+			command.addOption(CliStrings.CREATE_HDFS_STORE__MAXMEMORY,String.valueOf(maxMemory));
+		
+		if (hasValue(persistence))
+			command.addOption(CliStrings.CREATE_HDFS_STORE__BUFFERPERSISTENT,String.valueOf(Boolean.TRUE.equals(persistence)));
+		
+		if (hasValue(synchronousDiskWrite))
+			command.addOption(CliStrings.CREATE_HDFS_STORE__SYNCDISKWRITE,String.valueOf(Boolean.TRUE.equals(synchronousDiskWrite)));
+		
+		if (hasValue(diskStoreName))
+			command.addOption(CliStrings.CREATE_HDFS_STORE__DISKSTORENAME,String.valueOf(diskStoreName));
+		
+		if (hasValue(minorCompaction))
+			command.addOption(CliStrings.CREATE_HDFS_STORE__MINORCOMPACT,String.valueOf(Boolean.TRUE.equals(minorCompaction)));
+		
+		if (hasValue(minorCompactionThreads))
+			command.addOption(CliStrings.CREATE_HDFS_STORE__MINORCOMPACTIONTHREADS,String.valueOf(minorCompactionThreads));
+		
+		if (hasValue(majorCompact))
+			command.addOption(CliStrings.CREATE_HDFS_STORE__MAJORCOMPACT,String.valueOf(Boolean.TRUE.equals(majorCompact)));
+		
+		if (hasValue(majorCompactionInterval))
+			command.addOption(CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTINTERVAL,String.valueOf(majorCompactionInterval));
+		
+		if (hasValue(majorCompactionThreads))
+			command.addOption(CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTIONTHREADS,String.valueOf(majorCompactionThreads));
+		
+		if (hasValue(purgeInterval))
+			command.addOption(CliStrings.CREATE_HDFS_STORE__PURGEINTERVAL,String.valueOf(purgeInterval));
+		
+		if (hasValue(writeOnlyFileSize))
+			command.addOption(CliStrings.CREATE_HDFS_STORE__WRITEONLYFILESIZE,String.valueOf(writeOnlyFileSize));
+		
+		if (hasValue(fileRolloverInterval))
+			command.addOption(CliStrings.CREATE_HDFS_STORE__FILEROLLOVERINTERVAL,String.valueOf(fileRolloverInterval));
+		
+		if (hasValue(clientConfigFile))
+			command.addOption(CliStrings.CREATE_HDFS_STORE__CLIENTCONFIGFILE,String.valueOf(clientConfigFile));		
+
+		if (hasValue(groups)) {
+			command.addOption(CliStrings.CREATE_HDFS_STORE__GROUP,StringUtils.concat(groups, StringUtils.COMMA_DELIMITER));
+		}
+
+		return processCommand(command.toString());
+  }  
+  
+  @RequestMapping(method = RequestMethod.GET, value = "/hdfsstores/{name}")
+  @ResponseBody
+  public String describeHDFSStore(
+		  @PathVariable("name") final String hdfsStoreName,
+          @RequestParam(CliStrings.DESCRIBE_HDFS_STORE__MEMBER) final String memberNameId)
+  {	  
+    CommandStringBuilder command = new CommandStringBuilder(CliStrings.DESCRIBE_HDFS_STORE);
+    command.addOption(CliStrings.DESCRIBE_HDFS_STORE__NAME, decode(hdfsStoreName));
+    command.addOption(CliStrings.DESCRIBE_HDFS_STORE__MEMBER, memberNameId);    
+    return processCommand(command.toString());
+  }  
+  
+  @RequestMapping(method = RequestMethod.PUT, value = "/hdfsstores/{name}")
+  @ResponseBody
+  public String alterHdfsStore(
+		  @PathVariable("name") final String hdfsStoreName,	  								
+		  @RequestParam(value = CliStrings.ALTER_HDFS_STORE__BATCHSIZE, required=false) final Integer batchSize,                                    				                                
+		  @RequestParam(value = CliStrings.ALTER_HDFS_STORE__BATCHINTERVAL, required=false) final Integer batchInterval,
+          @RequestParam(value = CliStrings.ALTER_HDFS_STORE__MINORCOMPACT, required=false) final Boolean minorCompaction,
+          @RequestParam(value = CliStrings.ALTER_HDFS_STORE__MINORCOMPACTIONTHREADS, required=false) final Integer minorCompactionThreads,
+          @RequestParam(value = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACT, required=false) final Boolean majorCompact,
+          @RequestParam(value = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTINTERVAL, required=false) final Integer majorCompactionInterval,
+          @RequestParam(value = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTIONTHREADS, required=false) final Integer majorCompactionThreads,
+          @RequestParam(value = CliStrings.ALTER_HDFS_STORE__PURGEINTERVAL, required=false) final Integer purgeInterval,
+          @RequestParam(value = CliStrings.ALTER_HDFS_STORE__WRITEONLYFILESIZE, required=false) final Integer writeOnlyFileSize,
+          @RequestParam(value = CliStrings.ALTER_HDFS_STORE__FILEROLLOVERINTERVAL, required=false) final Integer fileRolloverInterval,
+          @RequestParam(value = CliStrings.ALTER_HDFS_STORE__GROUP, required = false) final String[] groups)
+  {
+	  CommandStringBuilder command = new CommandStringBuilder(CliStrings.ALTER_HDFS_STORE);
+
+		command.addOption(CliStrings.ALTER_HDFS_STORE__NAME, hdfsStoreName);
+		
+		
+		if (hasValue(batchSize))
+			command.addOption(CliStrings.ALTER_HDFS_STORE__BATCHSIZE, String.valueOf(batchSize));
+		
+		if (hasValue(batchInterval))
+			command.addOption(CliStrings.ALTER_HDFS_STORE__BATCHINTERVAL, String.valueOf(batchInterval));	
+		
+		if (hasValue(minorCompaction))
+			command.addOption(CliStrings.ALTER_HDFS_STORE__MINORCOMPACT,String.valueOf(Boolean.TRUE.equals(minorCompaction)));
+		
+		if (hasValue(minorCompactionThreads))
+			command.addOption(CliStrings.ALTER_HDFS_STORE__MINORCOMPACTIONTHREADS,String.valueOf(minorCompactionThreads));
+		
+		if (hasValue(majorCompact))
+			command.addOption(CliStrings.ALTER_HDFS_STORE__MAJORCOMPACT,String.valueOf(Boolean.TRUE.equals(majorCompact)));
+		
+		if (hasValue(majorCompactionInterval))
+			command.addOption(CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTINTERVAL,String.valueOf(majorCompactionInterval));
+		
+		if (hasValue(majorCompactionThreads))
+			command.addOption(CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTIONTHREADS,String.valueOf(majorCompactionThreads));
+		
+		if (hasValue(purgeInterval))
+			command.addOption(CliStrings.ALTER_HDFS_STORE__PURGEINTERVAL,String.valueOf(purgeInterval));
+		
+		if (hasValue(writeOnlyFileSize))
+			command.addOption(CliStrings.ALTER_HDFS_STORE__WRITEONLYFILESIZE,String.valueOf(writeOnlyFileSize));
+		
+		if (hasValue(fileRolloverInterval))
+			command.addOption(CliStrings.ALTER_HDFS_STORE__FILEROLLOVERINTERVAL,String.valueOf(fileRolloverInterval));
+		
+		if (hasValue(groups)) {
+			command.addOption(CliStrings.ALTER_HDFS_STORE__GROUP,StringUtils.concat(groups, StringUtils.COMMA_DELIMITER));
+		}
+
+		return processCommand(command.toString());
+  }
+  
+  @RequestMapping(method = RequestMethod.DELETE, value = "/hdfsstores/{name}")
+  @ResponseBody
+  public String destroyHDFSStore(
+		  @PathVariable("name") final String hdfsStoreName,
+          @RequestParam(value = CliStrings.DESTROY_HDFS_STORE__GROUP, required = false) final String[] groups)
+  {
+    CommandStringBuilder command = new CommandStringBuilder(CliStrings.DESTROY_HDFS_STORE);
+    command.addOption(CliStrings.DESTROY_HDFS_STORE__NAME, decode(hdfsStoreName));
+
+    if (hasValue(groups)) {
+      command.addOption(CliStrings.DESTROY_HDFS_STORE__GROUP, StringUtils.concat(groups, StringUtils.COMMA_DELIMITER));
+    }
+    return processCommand(command.toString());
+    
+  }  
+}

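Since the controller maps the hdfs-store commands onto plain HTTP endpoints, they can be exercised with any HTTP client. A hedged sketch using Spring's RestTemplate follows; the base URI is an assumption (the real prefix comes from AbstractCommandsController.REST_API_VERSION and the manager's HTTP bind address and port), and the store name is a placeholder:

    import org.springframework.web.client.RestTemplate;

    public class HdfsStoreRestSketch {
      public static void main(String[] args) {
        String baseUri = "http://locator-host:7070/gemfire/v1";  // assumed management REST base URI
        RestTemplate template = new RestTemplate();

        // GET /hdfsstores maps to 'list hdfs-stores'
        String allStores = template.getForObject(baseUri + "/hdfsstores", String.class);
        System.out.println(allStores);

        // DELETE /hdfsstores/{name} maps to 'destroy hdfs-store'
        template.delete(baseUri + "/hdfsstores/{name}", "orderStore");
      }
    }
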
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/ShellCommandsController.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/ShellCommandsController.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/ShellCommandsController.java
index c41f747..9dcd94f 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/ShellCommandsController.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/ShellCommandsController.java
@@ -18,22 +18,11 @@ package com.gemstone.gemfire.management.internal.web.controllers;
 
 import java.io.IOException;
 import java.util.Set;
-
 import javax.management.AttributeNotFoundException;
 import javax.management.InstanceNotFoundException;
 import javax.management.MalformedObjectNameException;
 import javax.management.ObjectName;
 
-import org.springframework.http.HttpStatus;
-import org.springframework.http.MediaType;
-import org.springframework.http.ResponseEntity;
-import org.springframework.stereotype.Controller;
-import org.springframework.web.bind.annotation.RequestBody;
-import org.springframework.web.bind.annotation.RequestMapping;
-import org.springframework.web.bind.annotation.RequestMethod;
-import org.springframework.web.bind.annotation.RequestParam;
-import org.springframework.web.bind.annotation.ResponseBody;
-
 import com.gemstone.gemfire.internal.GemFireVersion;
 import com.gemstone.gemfire.internal.lang.ObjectUtils;
 import com.gemstone.gemfire.internal.lang.StringUtils;
@@ -44,6 +33,16 @@ import com.gemstone.gemfire.management.internal.web.domain.LinkIndex;
 import com.gemstone.gemfire.management.internal.web.domain.QueryParameterSource;
 import com.gemstone.gemfire.management.internal.web.http.HttpMethod;
 
+import org.springframework.http.HttpStatus;
+import org.springframework.http.MediaType;
+import org.springframework.http.ResponseEntity;
+import org.springframework.stereotype.Controller;
+import org.springframework.web.bind.annotation.RequestBody;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.RequestMethod;
+import org.springframework.web.bind.annotation.RequestParam;
+import org.springframework.web.bind.annotation.ResponseBody;
+
 /**
  * The ShellCommandsController class implements GemFire REST API calls for Gfsh Shell Commands.
  * 
@@ -250,19 +249,11 @@ public class ShellCommandsController extends AbstractCommandsController {
       .add(new Link(PING_LINK_RELATION, toUri("/ping", scheme), HttpMethod.GET))
       .add(new Link(CliStrings.VERSION, toUri("/version", scheme)))
       // WAN Gateway Commands
-      .add(new Link(CliStrings.LIST_GATEWAY, toUri("/gateways", scheme)))
-      .add(new Link(CliStrings.CREATE_GATEWAYRECEIVER, toUri("/gateways/receivers", scheme), HttpMethod.POST))
-      .add(new Link(CliStrings.CREATE_GATEWAYSENDER, toUri("/gateways/senders", scheme), HttpMethod.POST))
-      .add(new Link(CliStrings.LOAD_BALANCE_GATEWAYSENDER, toUri("/gateways/senders/{id}?op=load-balance", scheme), HttpMethod.POST))
-      .add(new Link(CliStrings.PAUSE_GATEWAYSENDER, toUri("/gateways/senders/{id}?op=pause", scheme), HttpMethod.POST))
-      .add(new Link(CliStrings.RESUME_GATEWAYSENDER, toUri("/gateways/senders/{id}?op=resume", scheme), HttpMethod.POST))
-      .add(new Link(CliStrings.START_GATEWAYRECEIVER, toUri("/gateways/receivers?op=start", scheme), HttpMethod.POST))
-      .add(new Link(CliStrings.START_GATEWAYSENDER, toUri("/gateways/senders?op=start", scheme), HttpMethod.POST))
-      .add(new Link(CliStrings.STATUS_GATEWAYRECEIVER, toUri("/gateways/receivers", scheme)))
-      .add(new Link(CliStrings.STATUS_GATEWAYSENDER, toUri("/gateways/senders/{id}", scheme)))
-      .add(new Link(CliStrings.STOP_GATEWAYRECEIVER, toUri("/gateways/receivers?op=stop", scheme), HttpMethod.POST))
-      .add(new Link(CliStrings.STOP_GATEWAYSENDER, toUri("/gateways/senders/{id}?op=stop", scheme), HttpMethod.POST))
-        ;
+       .add(new Link(CliStrings.LIST_HDFS_STORE, toUri("/hdfsstores",scheme), HttpMethod.GET))
+       .add(new Link(CliStrings.DESCRIBE_HDFS_STORE, toUri("/hdfsstores/{name}",scheme), HttpMethod.GET))
+       .add(new Link(CliStrings.CREATE_HDFS_STORE, toUri("/hdfsstores",scheme), HttpMethod.POST))
+       .add(new Link(CliStrings.DESTROY_HDFS_STORE, toUri("/hdfsstores/{name}",scheme), HttpMethod.DELETE))
+       .add(new Link(CliStrings.ALTER_HDFS_STORE,   toUri("/hdfsstores/{name}",scheme), HttpMethod.PUT));
   }
 
   @RequestMapping(method = { RequestMethod.GET, RequestMethod.HEAD }, value = "/ping")

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/MemberMXBeanAdapter.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/MemberMXBeanAdapter.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/MemberMXBeanAdapter.java
index a3d4cd0..f1e0d7a 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/MemberMXBeanAdapter.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/support/MemberMXBeanAdapter.java
@@ -243,6 +243,11 @@ public class MemberMXBeanAdapter implements MemberMXBean {
   }
 
   @Override
+  public String[] getHDFSStores() {
+    throw new UnsupportedOperationException("Not Implemented!");
+  }  
+  
+  @Override
   public String[] getRootRegionNames() {
     throw new UnsupportedOperationException("Not Implemented!");
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/resources/META-INF/schemas/geode.apache.org/schema/cache/cache-1.0.xsd
----------------------------------------------------------------------
diff --git a/geode-core/src/main/resources/META-INF/schemas/geode.apache.org/schema/cache/cache-1.0.xsd b/geode-core/src/main/resources/META-INF/schemas/geode.apache.org/schema/cache/cache-1.0.xsd
index cc6d189..5ecd67d 100755
--- a/geode-core/src/main/resources/META-INF/schemas/geode.apache.org/schema/cache/cache-1.0.xsd
+++ b/geode-core/src/main/resources/META-INF/schemas/geode.apache.org/schema/cache/cache-1.0.xsd
@@ -274,6 +274,7 @@ declarative caching XML file elements unless indicated otherwise.
         </xsd:element>
         <xsd:element maxOccurs="unbounded" minOccurs="0" name="pool" type="gf:pool-type" />
         <xsd:element maxOccurs="unbounded" minOccurs="0" name="disk-store" type="gf:disk-store-type" />
+        <xsd:element maxOccurs="unbounded" minOccurs="0" name="hdfs-store" type="gf:hdfs-store-type" />
         <xsd:element maxOccurs="1" minOccurs="0" name="pdx" type="gf:pdx-type" />
         <xsd:element maxOccurs="unbounded" minOccurs="0" name="region-attributes" type="gf:region-attributes-type" />
         <xsd:choice maxOccurs="unbounded" minOccurs="0">
@@ -825,6 +826,8 @@ As of 6.5 disk-dirs is deprecated on region-attributes. Use disk-store-name inst
     <xsd:attribute name="pool-name" type="xsd:string" use="optional" />
     <xsd:attribute name="disk-store-name" type="xsd:string" use="optional" />
     <xsd:attribute name="disk-synchronous" type="xsd:boolean" use="optional" />
+    <xsd:attribute name="hdfs-store-name" type="xsd:string" use="optional" />
+    <xsd:attribute name="hdfs-write-only" type="xsd:boolean" use="optional" />
     <xsd:attribute name="publisher" type="xsd:boolean" use="optional" />
     <xsd:attribute name="refid" type="xsd:string" use="optional" />
     <xsd:attribute name="scope" type="gf:region-attributesScope" use="optional" />
@@ -1130,6 +1133,34 @@ As of 6.5 disk-dirs is deprecated on region-attributes. Use disk-store-name inst
     <xsd:attribute name="disk-usage-critical-percentage" type="xsd:string" use="optional" />
   </xsd:complexType>
 
+  <xsd:complexType mixed="true" name="hdfs-store-type">
+    <xsd:annotation>
+      <xsd:documentation>
+        A "hdfs-store" element specifies a HdfsStore for persistence.
+      </xsd:documentation>
+    </xsd:annotation>
+    <xsd:attribute name="name" type="xsd:string" use="required" />
+    <xsd:attribute name="namenode-url" type="xsd:string" use="optional" />
+    <xsd:attribute name="home-dir" type="xsd:string" use="optional" />
+    <xsd:attribute name="max-memory" type="xsd:string" use="optional" />
+    <xsd:attribute name="read-cache-size" type="xsd:string" use="optional" />
+    <xsd:attribute name="batch-size" type="xsd:string" use="optional" />
+    <xsd:attribute name="batch-interval" type="xsd:string" use="optional" />
+    <xsd:attribute name="dispatcher-threads" type="xsd:string" use="optional" />
+    <xsd:attribute name="buffer-persistent" type="xsd:boolean" use="optional" />
+    <xsd:attribute name="disk-store" type="xsd:string" use="optional" />
+    <xsd:attribute name="synchronous-disk-write" type="xsd:string" use="optional" />
+    <xsd:attribute name="hdfs-client-config-file" type="xsd:string" use="optional" />
+    <xsd:attribute name="purge-interval" type="xsd:string" use="optional" />
+    <xsd:attribute name="major-compaction" type="xsd:string" use="optional" />
+    <xsd:attribute name="major-compaction-interval" type="xsd:string" use="optional" />
+    <xsd:attribute name="major-compaction-threads" type="xsd:integer" use="optional" />
+    <xsd:attribute name="minor-compaction" type="xsd:string" use="optional" />
+    <xsd:attribute name="minor-compaction-threads" type="xsd:integer" use="optional" />
+    <xsd:attribute name="max-write-only-file-size" type="xsd:integer" use="optional" />
+    <xsd:attribute name="write-only-file-rollover-interval" type="xsd:string" use="optional" />    
+  </xsd:complexType>
+
   <xsd:complexType name="pdx-type">
     <xsd:annotation>
       <xsd:documentation>
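
To make the new schema elements concrete, here is a hypothetical cache.xml fragment that the additions above are meant to validate. Only the element and attribute names are taken from the schema; the store name, NameNode URL, and directory values are illustrative placeholders.

<!-- inside <cache>, after any disk-store elements (per the sequence change above) -->
<hdfs-store name="exampleStore"
            namenode-url="hdfs://namenode-host:8020"
            home-dir="/geode/example-store"
            buffer-persistent="true" />

<region name="exampleRegion">
  <region-attributes refid="PARTITION_HDFS"
                     hdfs-store-name="exampleStore"
                     hdfs-write-only="false" />
</region>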

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/ColocatedRegionWithHDFSDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/ColocatedRegionWithHDFSDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/ColocatedRegionWithHDFSDUnitTest.java
new file mode 100644
index 0000000..aa40368
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/ColocatedRegionWithHDFSDUnitTest.java
@@ -0,0 +1,188 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+
+import com.gemstone.gemfire.cache.AttributesFactory;
+import com.gemstone.gemfire.cache.DataPolicy;
+import com.gemstone.gemfire.cache.EvictionAction;
+import com.gemstone.gemfire.cache.EvictionAttributes;
+import com.gemstone.gemfire.cache.PartitionAttributesFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
+import com.gemstone.gemfire.internal.cache.LocalRegion;
+import com.gemstone.gemfire.test.dunit.AsyncInvocation;
+import com.gemstone.gemfire.test.dunit.SerializableCallable;
+import com.gemstone.gemfire.test.dunit.VM;
+
+/**
+ * A class for testing the basic HDFS functionality
+ * 
+ * @author Hemant Bhanawat
+ */
+@SuppressWarnings({"serial", "rawtypes", "unchecked", "deprecation"})
+public class ColocatedRegionWithHDFSDUnitTest extends RegionWithHDFSTestBase {
+
+  public ColocatedRegionWithHDFSDUnitTest(String name) {
+    super(name);
+  }
+
+  @Override
+  protected SerializableCallable getCreateRegionCallable(
+      final int totalnumOfBuckets, final int batchSizeMB,
+      final int maximumEntries, final String folderPath,
+      final String uniqueName, final int batchInterval,
+      final boolean queuePersistent, final boolean writeonly,
+      final long timeForRollover, final long maxFileSize) {
+    SerializableCallable createRegion = new SerializableCallable() {
+      public Object call() throws Exception {
+        HDFSStoreFactory hsf = getCache().createHDFSStoreFactory();
+        hsf.setBatchSize(batchSizeMB);
+        hsf.setBufferPersistent(queuePersistent);
+        hsf.setMaxMemory(3);
+        hsf.setBatchInterval(batchInterval);
+        hsf.setHomeDir(tmpDir + "/" + folderPath);
+        homeDir = new File(tmpDir + "/" + folderPath).getCanonicalPath();
+        hsf.setHomeDir(homeDir);
+        hsf.create(uniqueName);
+
+        AttributesFactory af = new AttributesFactory();
+        af.setDataPolicy(DataPolicy.HDFS_PARTITION);
+        PartitionAttributesFactory paf = new PartitionAttributesFactory();
+        paf.setTotalNumBuckets(totalnumOfBuckets);
+        paf.setRedundantCopies(1);
+
+        af.setHDFSStoreName(uniqueName);
+        af.setPartitionAttributes(paf.create());
+        af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(
+            maximumEntries, EvictionAction.LOCAL_DESTROY));
+
+        af.setHDFSWriteOnly(writeonly);
+        Region r1 = createRootRegion(uniqueName + "-r1", af.create());
+
+        paf.setColocatedWith(uniqueName + "-r1");
+        af.setPartitionAttributes(paf.create());
+        af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(
+            maximumEntries, EvictionAction.LOCAL_DESTROY));
+        Region r2 = createRootRegion(uniqueName + "-r2", af.create());
+
+        ((LocalRegion) r1).setIsTest();
+        ((LocalRegion) r2).setIsTest();
+
+        return 0;
+      }
+    };
+    return createRegion;
+  }
+
+  @Override
+  protected void doPuts(String uniqueName, int start, int end) {
+    Region r1 = getRootRegion(uniqueName + "-r1");
+    Region r2 = getRootRegion(uniqueName + "-r2");
+
+    for (int i = start; i < end; i++) {
+      r1.put("K" + i, "V" + i);
+      r2.put("K" + i, "V" + i);
+    }
+  }
+
+  protected AsyncInvocation doAsyncPuts(VM vm, final String regionName,
+                                        final int start, final int end, final String suffix) throws Exception {
+    return vm.invokeAsync(new SerializableCallable() {
+      public Object call() throws Exception {
+        Region r1 = getRootRegion(regionName + "-r1");
+        Region r2 = getRootRegion(regionName + "-r2");
+
+        getCache().getLogger().info("Putting entries ");
+        for (int i = start; i < end; i++) {
+          r1.put("K" + i, "V" + i + suffix);
+          r2.put("K" + i, "V" + i + suffix);
+        }
+        return null;
+      }
+
+    });
+  }
+
+  protected void doPutAll(final String uniqueName, Map map) {
+    Region r1 = getRootRegion(uniqueName + "-r1");
+    Region r2 = getRootRegion(uniqueName + "-r2");
+    r1.putAll(map);
+    r2.putAll(map);
+  }
+
+  @Override
+  protected void doDestroys(String uniqueName, int start, int end) {
+    Region r1 = getRootRegion(uniqueName + "-r1");
+    Region r2 = getRootRegion(uniqueName + "-r2");
+
+    for (int i = start; i < end; i++) {
+      r1.destroy("K" + i);
+      r2.destroy("K" + i);
+    }
+  }
+
+  @Override
+  protected void checkWithGet(String uniqueName, int start, int end,
+      boolean expectValue) {
+    Region r1 = getRootRegion(uniqueName + "-r1");
+    Region r2 = getRootRegion(uniqueName + "-r2");
+    for (int i = start; i < end; i++) {
+      String expected = expectValue ? "V" + i : null;
+      assertEquals("Mismatch on key " + i, expected, r1.get("K" + i));
+      assertEquals("Mismatch on key " + i, expected, r2.get("K" + i));
+    }
+  }
+
+  protected void checkWithGetAll(String uniqueName, ArrayList arrayl) {
+    Region r1 = getRootRegion(uniqueName + "-r1");
+    Region r2 = getRootRegion(uniqueName + "-r2");
+    Map map1 = r1.getAll(arrayl);
+    Map map2 = r2.getAll(arrayl);
+    for (Object e : map1.keySet()) {
+      String v = e.toString().replaceFirst("K", "V");
+      assertTrue("Reading entries failed for key " + e + " where value = "
+          + map1.get(e), v.equals(map1.get(e)));
+      assertTrue("Reading entries failed for key " + e + " where value = "
+          + map2.get(e), v.equals(map2.get(e)));
+    }
+  }
+
+  @Override
+  protected void verifyHDFSData(VM vm, String uniqueName) throws Exception {
+    HashMap<String, HashMap<String, String>> filesToEntriesMap = createFilesAndEntriesMap(
+        vm, uniqueName, uniqueName + "-r1");
+    HashMap<String, String> entriesMap = new HashMap<String, String>();
+    for (Map.Entry<String, HashMap<String, String>> e : filesToEntriesMap
+        .entrySet()) {
+      entriesMap.putAll(e.getValue());
+    }
+
+    verifyInEntriesMap(entriesMap, 1, 50, "vm0");
+    verifyInEntriesMap(entriesMap, 40, 100, "vm1");
+    verifyInEntriesMap(entriesMap, 40, 100, "vm2");
+    verifyInEntriesMap(entriesMap, 90, 150, "vm3");
+
+    filesToEntriesMap = createFilesAndEntriesMap(vm, uniqueName, uniqueName
+        + "-r2");
+    entriesMap = new HashMap<String, String>();
+    for (Map.Entry<String, HashMap<String, String>> e : filesToEntriesMap
+        .entrySet()) {
+      entriesMap.putAll(e.getValue());
+    }
+
+    verifyInEntriesMap(entriesMap, 1, 50, "vm0");
+    verifyInEntriesMap(entriesMap, 40, 100, "vm1");
+    verifyInEntriesMap(entriesMap, 40, 100, "vm2");
+    verifyInEntriesMap(entriesMap, 90, 150, "vm3");
+  }
+}


[14/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
index c477466..db14e57 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
@@ -126,6 +126,16 @@ import com.gemstone.gemfire.cache.client.internal.ClientMetadataService;
 import com.gemstone.gemfire.cache.client.internal.ClientRegionFactoryImpl;
 import com.gemstone.gemfire.cache.client.internal.PoolImpl;
 import com.gemstone.gemfire.cache.execute.FunctionService;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSIntegrationUtil;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreCreation;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSFlushQueueFunction;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSForceCompactionFunction;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSLastCompactionTimeFunction;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSStoreDirector;
 import com.gemstone.gemfire.cache.query.QueryService;
 import com.gemstone.gemfire.cache.query.internal.DefaultQuery;
 import com.gemstone.gemfire.cache.query.internal.DefaultQueryService;
@@ -922,6 +932,9 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
         HARegionQueue.setMessageSyncInterval(HARegionQueue.DEFAULT_MESSAGE_SYNC_INTERVAL);
       }
       FunctionService.registerFunction(new PRContainsValueFunction());
+      FunctionService.registerFunction(new HDFSLastCompactionTimeFunction());
+      FunctionService.registerFunction(new HDFSForceCompactionFunction());
+      FunctionService.registerFunction(new HDFSFlushQueueFunction());
       this.expirationScheduler = new ExpirationScheduler(this.system);
 
       // uncomment following line when debugging CacheExistsException
@@ -2172,6 +2185,8 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
           closeDiskStores();
           diskMonitor.close();
           
+          closeHDFSStores();
+          
           // Close the CqService Handle.
           try {
             if (isDebugEnabled) {
@@ -2257,6 +2272,7 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
         } catch (CancelException e) {
           // make sure the disk stores get closed
           closeDiskStores();
+          closeHDFSStores();
           // NO DISTRIBUTED MESSAGING CAN BE DONE HERE!
 
           // okay, we're taking too long to do this stuff, so let's
@@ -3103,6 +3119,8 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
             future = (Future) this.reinitializingRegions.get(fullPath);
           }
           if (future == null) {
+            HDFSIntegrationUtil.createAndAddAsyncQueue(regionPath, attrs, this);
+            attrs = setEvictionAttributesForLargeRegion(attrs);
             if (internalRegionArgs.getInternalMetaRegion() != null) {
               rgn = internalRegionArgs.getInternalMetaRegion();
             } else if (isPartitionedRegion) {
@@ -3227,6 +3245,54 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
     }
   }
 
+  /**
+   * turn on eviction by default for HDFS regions
+   */
+  @SuppressWarnings("deprecation")
+  public <K, V> RegionAttributes<K, V> setEvictionAttributesForLargeRegion(
+      RegionAttributes<K, V> attrs) {
+    RegionAttributes<K, V> ra = attrs;
+    if (DISABLE_AUTO_EVICTION) {
+      return ra;
+    }
+    if (attrs.getDataPolicy().withHDFS()
+        || attrs.getHDFSStoreName() != null) {
+      // make the region overflow by default
+      EvictionAttributes evictionAttributes = attrs.getEvictionAttributes();
+      boolean hasNoEvictionAttrs = evictionAttributes == null
+          || evictionAttributes.getAlgorithm().isNone();
+      AttributesFactory<K, V> af = new AttributesFactory<K, V>(attrs);
+      String diskStoreName = attrs.getDiskStoreName();
+      // set the local persistent directory to be the same as that for
+      // HDFS store
+      if (attrs.getHDFSStoreName() != null) {
+        HDFSStoreImpl hdfsStore = findHDFSStore(attrs.getHDFSStoreName());
+        if (attrs.getPartitionAttributes().getLocalMaxMemory() != 0 && hdfsStore == null) {
+          // HDFS store expected to be found at this point
+          throw new IllegalStateException(
+              LocalizedStrings.HOPLOG_HDFS_STORE_NOT_FOUND
+                  .toLocalizedString(attrs.getHDFSStoreName()));
+        }
+        // if there is no disk store, use the one configured for hdfs queue
+        if (attrs.getPartitionAttributes().getLocalMaxMemory() != 0 && diskStoreName == null) {
+          diskStoreName = hdfsStore.getDiskStoreName();
+        }
+      }
+      // set LRU heap eviction with overflow to disk for HDFS stores with
+      // local Oplog persistence
+      // set eviction attributes only if not set
+      if (hasNoEvictionAttrs) {
+        if (diskStoreName != null) {
+          af.setDiskStoreName(diskStoreName);
+        }
+        af.setEvictionAttributes(EvictionAttributes.createLRUHeapAttributes(
+            ObjectSizer.DEFAULT, EvictionAction.OVERFLOW_TO_DISK));
+      }
+      ra = af.create();
+    }
+    return ra;
+  }
+
   public final Region getRegion(String path) {
     return getRegion(path, false);
   }
@@ -4944,6 +5010,48 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
         c.setRegionAttributes(pra.toString(), af.create());
         break;
       }
+      case PARTITION_HDFS: {
+        AttributesFactory af = new AttributesFactory();
+        af.setDataPolicy(DataPolicy.HDFS_PARTITION);
+        PartitionAttributesFactory paf = new PartitionAttributesFactory();
+        af.setPartitionAttributes(paf.create());
+        af.setEvictionAttributes(EvictionAttributes.createLRUHeapAttributes(null, EvictionAction.OVERFLOW_TO_DISK));
+        af.setHDFSWriteOnly(false);
+        c.setRegionAttributes(pra.toString(), af.create());
+        break;
+      }
+      case PARTITION_REDUNDANT_HDFS: {
+        AttributesFactory af = new AttributesFactory();
+        af.setDataPolicy(DataPolicy.HDFS_PARTITION);
+        PartitionAttributesFactory paf = new PartitionAttributesFactory();
+        paf.setRedundantCopies(1);
+        af.setPartitionAttributes(paf.create());
+        af.setEvictionAttributes(EvictionAttributes.createLRUHeapAttributes(null, EvictionAction.OVERFLOW_TO_DISK));
+        af.setHDFSWriteOnly(false);
+        c.setRegionAttributes(pra.toString(), af.create());
+        break;
+      }
+      case PARTITION_WRITEONLY_HDFS_STORE: {
+        AttributesFactory af = new AttributesFactory();
+        af.setDataPolicy(DataPolicy.HDFS_PARTITION);
+        PartitionAttributesFactory paf = new PartitionAttributesFactory();
+        af.setPartitionAttributes(paf.create());
+        af.setEvictionAttributes(EvictionAttributes.createLRUHeapAttributes(null, EvictionAction.OVERFLOW_TO_DISK));
+        af.setHDFSWriteOnly(true);
+        c.setRegionAttributes(pra.toString(), af.create());
+        break;
+      }
+      case PARTITION_REDUNDANT_WRITEONLY_HDFS_STORE: {
+        AttributesFactory af = new AttributesFactory();
+        af.setDataPolicy(DataPolicy.HDFS_PARTITION);
+        PartitionAttributesFactory paf = new PartitionAttributesFactory();
+        paf.setRedundantCopies(1);
+        af.setPartitionAttributes(paf.create());
+        af.setEvictionAttributes(EvictionAttributes.createLRUHeapAttributes(null, EvictionAction.OVERFLOW_TO_DISK));
+        af.setHDFSWriteOnly(true);
+        c.setRegionAttributes(pra.toString(), af.create());
+        break;
+      }
       default:
         throw new IllegalStateException("unhandled enum " + pra);
       }
@@ -5337,6 +5445,45 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
     }
   }
   
+  @Override
+  public HDFSStoreFactory createHDFSStoreFactory() {
+    return new HDFSStoreFactoryImpl(this);
+  }
+  
+  public HDFSStoreFactory createHDFSStoreFactory(HDFSStoreCreation creation) {
+    return new HDFSStoreFactoryImpl(this, creation);
+  }
+  public void addHDFSStore(HDFSStoreImpl hsi) {
+    HDFSStoreDirector.getInstance().addHDFSStore(hsi);
+    //TODO:HDFS Add a resource event for hdfs store creation as well 
+    // like the following disk store event
+    //system.handleResourceEvent(ResourceEvent.DISKSTORE_CREATE, dsi);
+  }
+
+  public void removeHDFSStore(HDFSStoreImpl hsi) {
+    //hsi.destroy();
+    HDFSStoreDirector.getInstance().removeHDFSStore(hsi.getName());
+    //TODO:HDFS Add a resource event for hdfs store as well 
+    // like the following disk store event
+    //system.handleResourceEvent(ResourceEvent.DISKSTORE_REMOVE, dsi);
+  }
+
+  public void closeHDFSStores() {
+    HDFSRegionDirector.reset();
+    HDFSStoreDirector.getInstance().closeHDFSStores();
+  }
+
+  
+  public HDFSStoreImpl findHDFSStore(String name) {
+    return HDFSStoreDirector.getInstance().getHDFSStore(name);
+  }
+  
+  public Collection<HDFSStoreImpl> getHDFSStores() {
+    return HDFSStoreDirector.getInstance().getAllHDFSStores();
+  }
+  
+  
   public TemporaryResultSetFactory getResultSetFactory() {
     return this.resultSetFactory;
   }
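
A rough sketch of how the new HDFS store plumbing and setEvictionAttributesForLargeRegion above fit together. The Geode calls are the ones visible in this commit; the store name, region name, and home directory are placeholders, and the snippet is illustrative rather than a tested usage.

import com.gemstone.gemfire.cache.AttributesFactory;
import com.gemstone.gemfire.cache.DataPolicy;
import com.gemstone.gemfire.cache.PartitionAttributesFactory;
import com.gemstone.gemfire.cache.Region;
import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;

public class HdfsRegionDefaultsSketch {

  @SuppressWarnings({"rawtypes", "unchecked", "deprecation"})
  public static Region createHdfsRegion(GemFireCacheImpl cache) throws Exception {
    // Create and register an HDFS store; name and home directory are placeholders.
    HDFSStoreFactory hsf = cache.createHDFSStoreFactory();
    hsf.setHomeDir("/tmp/example-hdfs-home");
    hsf.create("exampleStore");

    // Region attributes that name the HDFS store but set no eviction attributes.
    AttributesFactory af = new AttributesFactory();
    af.setDataPolicy(DataPolicy.HDFS_PARTITION);
    af.setHDFSStoreName("exampleStore");
    af.setPartitionAttributes(new PartitionAttributesFactory().create());

    // Region creation funnels these attributes through
    // setEvictionAttributesForLargeRegion (see the hunk above); unless
    // DISABLE_AUTO_EVICTION is set, the region comes back with LRU heap
    // eviction and OVERFLOW_TO_DISK, reusing the HDFS store's disk store
    // when the region itself names none.
    return cache.createRegion("exampleRegion", af.create());
  }
}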

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HARegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HARegion.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HARegion.java
index c924be5..3896800 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HARegion.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HARegion.java
@@ -373,20 +373,13 @@ public final class HARegion extends DistributedRegion
   
   /**
    * @return the deserialized value
-   * @see LocalRegion#findObjectInSystem(KeyInfo, boolean, TXStateInterface, boolean, Object, boolean, boolean, ClientProxyMembershipID, EntryEventImpl, boolean)
+   * @see DistributedRegion#findObjectInSystem(KeyInfo, boolean, TXStateInterface, boolean, Object, boolean, boolean, ClientProxyMembershipID, EntryEventImpl, boolean, boolean)
    *      
    */
   @Override
-  protected Object findObjectInSystem(KeyInfo keyInfo,
-                                      boolean isCreate,
-                                      TXStateInterface txState,
-                                      boolean generateCallbacks,
-                                      Object localValue,
-                                      boolean disableCopyOnRead,
-                                      boolean preferCD,
-                                      ClientProxyMembershipID requestingClient,
-                                      EntryEventImpl clientEvent,
-                                      boolean returnTombstones)
+  protected Object findObjectInSystem(KeyInfo keyInfo, boolean isCreate,
+      TXStateInterface txState, boolean generateCallbacks, Object localValue, boolean disableCopyOnRead,
+      boolean preferCD, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS)
     throws CacheLoaderException, TimeoutException  {
 
     Object value = null;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSLRURegionMap.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSLRURegionMap.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSLRURegionMap.java
new file mode 100644
index 0000000..f6c6aa7
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSLRURegionMap.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.internal.cache;
+
+import java.util.Collection;
+
+import org.apache.logging.log4j.Logger;
+
+import com.gemstone.gemfire.cache.CacheWriterException;
+import com.gemstone.gemfire.cache.TimeoutException;
+import com.gemstone.gemfire.i18n.LogWriterI18n;
+import com.gemstone.gemfire.internal.cache.lru.EnableLRU;
+import com.gemstone.gemfire.internal.cache.lru.LRUEntry;
+import com.gemstone.gemfire.internal.cache.lru.NewLRUClockHand;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.internal.logging.LogService;
+
+/**
+ * Implementation of RegionMap that reads data from HDFS and adds LRU behavior
+ * 
+ */
+public class HDFSLRURegionMap extends AbstractLRURegionMap implements HDFSRegionMap {
+
+  private static final Logger logger = LogService.getLogger();
+
+  private final HDFSRegionMapDelegate delegate;
+
+  /**
+   *  A tool from the eviction controller for sizing entries and
+   *  expressing limits.
+   */
+  private EnableLRU ccHelper;
+
+  /**  The list of nodes in LRU order */
+  private NewLRUClockHand lruList;
+
+  private static final boolean DEBUG = Boolean.getBoolean("hdfsRegionMap.DEBUG");
+
+  public HDFSLRURegionMap(LocalRegion owner, Attributes attrs,
+      InternalRegionArguments internalRegionArgs) {
+    super(internalRegionArgs);
+    assert owner instanceof BucketRegion;
+    initialize(owner, attrs, internalRegionArgs);
+    this.delegate = new HDFSRegionMapDelegate(owner, attrs, internalRegionArgs, this);
+  }
+
+  @Override
+  public RegionEntry getEntry(Object key) {
+    return delegate.getEntry(key, null);
+  }
+
+  @Override
+  protected RegionEntry getEntry(EntryEventImpl event) {
+    return delegate.getEntry(event);
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public Collection<RegionEntry> regionEntries() {
+    return delegate.regionEntries();
+  }
+    
+  @Override
+  public int size() {
+    return delegate.size();
+  }
+    
+  @Override
+  public boolean isEmpty() {
+    return delegate.isEmpty();
+  }
+
+  @Override
+  protected void _setCCHelper(EnableLRU ccHelper) {
+    this.ccHelper = ccHelper;
+  }
+
+  @Override
+  protected EnableLRU _getCCHelper() {
+    return this.ccHelper;
+  }
+
+  @Override
+  protected void _setLruList(NewLRUClockHand lruList) {
+    this.lruList = lruList;
+  }
+
+  @Override
+  protected NewLRUClockHand _getLruList() {
+    return this.lruList;
+  }
+
+  @Override
+  public HDFSRegionMapDelegate getDelegate() {
+    return this.delegate;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMap.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMap.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMap.java
new file mode 100644
index 0000000..2a7baef
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMap.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.internal.cache;
+
+/**
+ * Interface implemented by RegionMap implementations that
+ * read from HDFS.
+ * 
+ *
+ */
+public interface HDFSRegionMap {
+
+  /**
+   * @return the {@link HDFSRegionMapDelegate} that does
+   * all the work
+   */
+  public HDFSRegionMapDelegate getDelegate();
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMapDelegate.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMapDelegate.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMapDelegate.java
new file mode 100644
index 0000000..a2ef653
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMapDelegate.java
@@ -0,0 +1,540 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.internal.cache;
+
+import java.io.IOException;
+import java.lang.ref.Reference;
+import java.lang.ref.ReferenceQueue;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.logging.log4j.Logger;
+
+import com.gemstone.gemfire.cache.EvictionAction;
+import com.gemstone.gemfire.cache.CustomEvictionAttributes;
+import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueImpl;
+import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSEntriesSet;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSEntriesSet.HDFSIterator;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSGatewayEventImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSParallelGatewaySenderQueue;
+import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.SortedHoplogPersistedEvent;
+import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
+import com.gemstone.gemfire.i18n.LogWriterI18n;
+import com.gemstone.gemfire.internal.cache.LocalRegion.IteratorType;
+import com.gemstone.gemfire.internal.cache.RegionMap.Attributes;
+import com.gemstone.gemfire.internal.cache.lru.EnableLRU;
+import com.gemstone.gemfire.internal.cache.lru.LRUEntry;
+import com.gemstone.gemfire.internal.cache.versions.RegionVersionVector;
+import com.gemstone.gemfire.internal.cache.versions.VersionStamp;
+import com.gemstone.gemfire.internal.cache.versions.VersionTag;
+import com.gemstone.gemfire.internal.cache.wan.AbstractGatewaySender;
+import com.gemstone.gemfire.internal.cache.wan.AbstractGatewaySenderEventProcessor;
+import com.gemstone.gemfire.internal.cache.wan.parallel.ConcurrentParallelGatewaySenderQueue;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
+import com.gemstone.gemfire.internal.util.concurrent.FutureResult;
+
+/**
+ * This class encapsulates all the functionality of HDFSRegionMap so that
+ * it can be shared by HDFSRegionMapImpl and HDFSLRURegionMap.
+ * 
+ */
+public class HDFSRegionMapDelegate {
+
+  private static final Logger logger = LogService.getLogger();
+
+  private final BucketRegion owner;
+
+  private ConcurrentParallelGatewaySenderQueue hdfsQueue;
+
+  private final RegionMap backingRM;
+
+  /** queue of dead iterators */
+  private final ReferenceQueue<HDFSIterator> refs;
+  
+  private static final boolean DEBUG = Boolean.getBoolean("hdfsRegionMap.DEBUG");
+  
+  /**
+   * used for serializing fetches from HDFS
+   */
+  private ConcurrentMap<Object, FutureResult> futures = new ConcurrentHashMap<Object, FutureResult>();
+
+  public HDFSRegionMapDelegate(LocalRegion owner, Attributes attrs,
+      InternalRegionArguments internalRegionArgs, RegionMap backingRM) {
+    assert owner instanceof BucketRegion;
+    this.owner = (BucketRegion) owner;
+    this.backingRM = backingRM;
+    refs = new ReferenceQueue<HDFSEntriesSet.HDFSIterator>();
+  }
+
+  public RegionEntry getEntry(Object key, EntryEventImpl event) {
+    
+    RegionEntry re = getEntry(key, event, true);
+    // get from tx should put the entry back in map
+    // it should be evicted once tx completes
+    /**MergeGemXDHDFSToGFE txstate does not apply for this*/
+    /* if (re != null && getTXState(event) != null) {
+    if (re != null) {
+      // put the region entry in backing CHM of AbstractRegionMap so that
+      // it can be locked in basicPut/destroy
+      RegionEntry oldRe = backingRM.putEntryIfAbsent(key, re);
+      if (oldRe != null) {
+        if (re instanceof OffHeapRegionEntry && !oldRe.equals(re)) {
+          ((OffHeapRegionEntry)re).release();
+        }
+        return oldRe;
+      }
+      re.setMarkedForEviction();
+      owner.updateSizeOnCreate(key,
+          owner.calculateRegionEntryValueSize(re));
+      ((AbstractRegionMap)backingRM).incEntryCount(1);
+      ((AbstractRegionMap)backingRM).lruEntryCreate(re);
+    }*/
+    return re;
+  }
+
+  /*
+  private TXStateInterface getTXState(EntryEventImpl event) {
+    return event != null ? event.getTXState(this.owner) : this.owner
+        .getTXState();
+  }*/
+
+  /**
+   * 
+   * @param key
+   * @param event
+   * @param forceOnHeap if true will return heap version of off-heap region entries
+   */
+  private RegionEntry getEntry(Object key, EntryEventImpl event, boolean forceOnHeap) {
+    closeDeadIterators();
+    
+    RegionEntry re = backingRM.getEntryInVM(key);
+    if (logger.isTraceEnabled() || DEBUG) {
+      logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "HDFS: Found the key in CHM: " + key
+          + " ,value=" + (re == null? "null" : "[" + re._getValue() + " or (" + re.getValueAsToken() + ")]")));
+    }
+    if ((re == null || (re.isRemoved() && !re.isTombstone()))
+        && owner.getBucketAdvisor().isPrimary() && allowReadFromHDFS()) {
+      if (logger.isTraceEnabled() || DEBUG) {
+        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "HDFS: fetching from hdfs key:" + key));
+      }
+      try {
+        this.owner.getPartitionedRegion().hdfsCalled(key);
+        re = getEntryFromFuture(key);
+        if (re != null) {
+          return re;
+        }
+
+        assert this.owner.getPartitionedRegion().getDataPolicy()
+            .withHDFS();
+        byte[] k = EntryEventImpl.serialize(key);
+      
+        // for destroy ops we will retain the entry in the region map so
+        // tombstones can be tracked
+        //final boolean forceOnHeap = (event==null || !event.getOperation().isDestroy());
+        
+        // get from queue
+        re = getFromHDFSQueue(key, k, forceOnHeap);
+        if (re == null) {
+          // get from HDFS
+          re = getFromHDFS(key, k, forceOnHeap);
+        }
+        if (re != null && re.isTombstone()) {
+          RegionVersionVector vector = this.owner.getVersionVector();
+//          if (vector == null) {
+//            this.owner.getLogWriterI18n().info(LocalizedStrings.DEBUG,
+//            "found a tombstone in a region w/o a version vector: " + re + "; region: " + this.owner);
+//          }
+          if (vector == null
+              || vector.isTombstoneTooOld(re.getVersionStamp().getMemberID(),
+                                    re.getVersionStamp().getRegionVersion())) {
+            re = null;
+          }
+        }
+        if (logger.isTraceEnabled() || DEBUG) {
+          logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "HDFS: returning from hdfs re:" + re));
+        }
+      } catch (ForceReattemptException e) {
+        throw new PrimaryBucketException(e.getLocalizedMessage(), e);
+      } catch (IOException e) {
+        throw new HDFSIOException("Error reading from HDFS", e);
+      } finally {
+        notifyFuture(key, re);
+        // If we mark it here, the table scan may miss it causing updates/delete using table
+        // scan to fail.
+//        if (re != null) {
+//          re.setMarkedForEviction();
+//        }
+        if(re != null && event != null && !re.isTombstone()) {
+          if (logger.isTraceEnabled() || DEBUG) {
+            logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "HDFS: loaded from hdfs re:" + re));
+          }
+          BucketRegion br = (BucketRegion)owner;
+          //CustomEvictionAttributes csAttr = br.getCustomEvictionAttributes();
+          //if(csAttr!=null)
+          event.setLoadedFromHDFS(true);
+        }
+      }
+    }
+    if(re!=null && re.isMarkedForEviction() && !re.isTombstone()) {
+      if(event!=null) {
+        event.setLoadedFromHDFS(true);
+      }
+    }
+
+    return re;
+  }
+
+  /**
+   * This method returns true if the RegionEntry should be read from HDFS.
+ * Fixes #49101 by not allowing reads from HDFS for persistent regions
+ * that do not define an eviction criterion.
+   * 
+   * @return true if RegionEntry should be read from HDFS
+   */
+  private boolean allowReadFromHDFS() {
+    if (!owner.getDataPolicy().withPersistence()
+        || owner.getCustomEvictionAttributes() != null
+        || isEvictionActionLocalDestroy()){
+        /**MergeGemXDHDFSToGFE this is used for global index. Hence not required here*/ 
+        //|| owner.isUsedForIndex()) {
+      // when region does not have persistence, we have to read from HDFS (even
+      // though there is no eviction criteria) for constraint checks
+      return true;
+    }
+    return false;
+  }
+
+  private boolean isEvictionActionLocalDestroy() {
+    PartitionedRegion pr = owner.getPartitionedRegion();
+    if (pr.getEvictionAttributes() != null) {
+      return pr.getEvictionAttributes().getAction() == EvictionAction.LOCAL_DESTROY;
+    }
+    return false;
+  }
+
+  protected RegionEntry getEntry(EntryEventImpl event) {
+    RegionEntry re = getEntry(event.getKey(), event, false);
+    if (re != null && event.isLoadedFromHDFS()) {
+      // put the region entry in backing CHM of AbstractRegionMap so that
+      // it can be locked in basicPut/destroy
+      RegionEntry oldRe = backingRM.putEntryIfAbsent(event.getKey(), re);
+      if (oldRe != null) {
+        if (re instanceof OffHeapRegionEntry && !oldRe.equals(re)) {
+          ((OffHeapRegionEntry) re).release();
+        }
+        return oldRe;
+      }
+      // since the entry is faulted in from HDFS, it must have
+      // satisfied the eviction criteria in the past, so mark it for eviction
+      re.setMarkedForEviction();
+
+      owner.updateSizeOnCreate(event.getKey(), owner.calculateRegionEntryValueSize(re));
+      ((AbstractRegionMap) backingRM).incEntryCount(1);
+      ((AbstractRegionMap) backingRM).lruEntryCreate(re);
+    }
+    return re;
+  }
+
+  @SuppressWarnings("unchecked")
+  public Collection<RegionEntry> regionEntries() {
+    closeDeadIterators();
+    if (!owner.getPartitionedRegion().includeHDFSResults()) {
+      if (logger.isDebugEnabled() || DEBUG) {
+        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Ignoring HDFS results for #regionEntries"));
+      }
+      return backingRM.regionEntriesInVM();
+    }
+
+    try {
+      return createEntriesSet(IteratorType.ENTRIES);
+    } catch (ForceReattemptException e) {
+      throw new PrimaryBucketException(e.getLocalizedMessage(), e);
+    }
+  }
+    
+  public int size() {
+    closeDeadIterators();
+    if (!owner.getPartitionedRegion().includeHDFSResults()) {
+      if (logger.isDebugEnabled() || DEBUG) {
+        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Ignoring HDFS results for #size"));
+      }
+      return backingRM.sizeInVM();
+    }
+
+    try {
+      return createEntriesSet(IteratorType.KEYS).size();
+    } catch (ForceReattemptException e) {
+      throw new PrimaryBucketException(e.getLocalizedMessage(), e);
+    }
+  }
+    
+  public boolean isEmpty() {
+    closeDeadIterators();
+    if (!owner.getPartitionedRegion().includeHDFSResults()) {
+      if (logger.isDebugEnabled() || DEBUG) {
+        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Ignoring HDFS results for #isEmpty"));
+      }
+      return backingRM.sizeInVM() == 0;
+    }
+
+    try {
+      return createEntriesSet(IteratorType.KEYS).isEmpty();
+    } catch (ForceReattemptException e) {
+      throw new PrimaryBucketException(e.getLocalizedMessage(), e);
+    }
+  }
+  
+  private void notifyFuture(Object key, RegionEntry re) {
+    FutureResult future = this.futures.remove(key);
+    if (future != null) {
+      future.set(re);
+    }
+  }
+
+  private RegionEntry getEntryFromFuture(Object key) {
+    FutureResult future = new FutureResult(this.owner.getCancelCriterion());
+    FutureResult old = this.futures.putIfAbsent(key, future);
+    if (old != null) {
+      if (logger.isTraceEnabled() || DEBUG) {
+        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "HDFS: waiting for concurrent fetch to complete for key:" + key));
+      }
+      try {
+        return (RegionEntry) old.get();
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+        this.owner.getCache().getCancelCriterion().checkCancelInProgress(null);
+      }
+    }
+    return null;
+  }
+
+  private RegionEntry getFromHDFS(Object key, byte[] k, boolean forceOnHeap) throws IOException, ForceReattemptException {
+    SortedHoplogPersistedEvent ev;
+    try {
+      ev = (SortedHoplogPersistedEvent) owner.getHoplogOrganizer().read(k);
+    } catch (IOException e) {
+      owner.checkForPrimary();
+      throw e;
+    }
+    if (ev != null) {
+      if (logger.isTraceEnabled() || DEBUG) {
+        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "HDFS: got from hdfs ev:" + ev));
+      }
+      return getEntryFromEvent(key, ev, forceOnHeap, false);
+    }
+    return null;
+  }
+
+  /**
+   * set the versionTag on the newly faulted-in entry
+   */
+  private void setVersionTag(RegionEntry re, VersionTag versionTag) {
+    if (owner.concurrencyChecksEnabled) {
+      versionTag.setMemberID(
+            owner.getVersionVector().getCanonicalId(versionTag.getMemberID()));
+      VersionStamp versionedRe = (VersionStamp) re;
+      versionedRe.setVersions(versionTag);
+    }
+  }
+
+  private RegionEntry getFromHDFSQueue(Object key, byte[] k, boolean forceOnHeap) throws ForceReattemptException {
+    ConcurrentParallelGatewaySenderQueue q = getHDFSQueue();
+    if (q == null) return null;
+    HDFSGatewayEventImpl hdfsGatewayEvent = (HDFSGatewayEventImpl) q.get(owner.getPartitionedRegion(), k, owner.getId());
+    if (hdfsGatewayEvent != null) {
+      if (logger.isTraceEnabled() || DEBUG) {
+        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "HDFS: got from hdfs queue: " + hdfsGatewayEvent));
+      }
+      return getEntryFromEvent(key, hdfsGatewayEvent, forceOnHeap, false);
+    }
+    return null;
+  }
+
+  private ConcurrentParallelGatewaySenderQueue getHDFSQueue()
+      throws ForceReattemptException {
+    if (this.hdfsQueue == null) {
+      String asyncQId = this.owner.getPartitionedRegion().getHDFSEventQueueName();
+      final AsyncEventQueueImpl asyncQ =  (AsyncEventQueueImpl)this.owner.getCache().getAsyncEventQueue(asyncQId);
+      final AbstractGatewaySender gatewaySender = (AbstractGatewaySender)asyncQ.getSender();
+      AbstractGatewaySenderEventProcessor ep = gatewaySender.getEventProcessor();
+      if (ep == null) return null;
+      hdfsQueue = (ConcurrentParallelGatewaySenderQueue)ep.getQueue();
+    }
+    
+    // Check whether the queue has become primary here.
+    // There could be some time between bucket becoming a primary
+    // and underlying queue becoming a primary, so isPrimaryWithWait()
+    // waits for some time for the queue to become a primary on this member
+    final HDFSBucketRegionQueue brq = hdfsQueue.getBucketRegionQueue(
+        this.owner.getPartitionedRegion(), this.owner.getId());
+    if (brq != null) {
+      if (owner.getBucketAdvisor().isPrimary()
+          && !brq.getBucketAdvisor().isPrimaryWithWait()) {
+        InternalDistributedMember primaryHolder = brq.getBucketAdvisor()
+            .basicGetPrimaryMember();
+        throw new PrimaryBucketException("Bucket " + brq.getName()
+            + " is not primary. Current primary holder is " + primaryHolder);
+      }
+    }
+      
+    return hdfsQueue;
+  }
+
+  public RegionEntry getEntryFromEvent(Object key, HDFSGatewayEventImpl event, boolean forceOnHeap, boolean forUpdate) {
+    Object val;
+    if (event.getOperation().isDestroy()) {
+      val = Token.TOMBSTONE;
+    } else if (event.getOperation().isInvalidate()) {
+      val = Token.INVALID;
+    } else {
+      val = event.getValue();
+    }
+    RegionEntry re = null;
+    final TXStateInterface tx = owner.getTXState();
+    if (tx == null) {
+      re = createRegionEntry(key, val, event.getVersionTag(), forceOnHeap);
+      return re;
+    }
+    else
+    if (val != null) {
+      if (((re = this.backingRM.getEntryInVM(key)) == null)
+          || (re.isRemoved() && !re.isTombstone())) {
+        boolean shouldCreateOnHeapEntry = !(owner.getOffHeap() && forUpdate); 
+        re = createRegionEntry(key, val, event.getVersionTag(), shouldCreateOnHeapEntry);
+        if (forUpdate) {
+          if (re != null && tx != null) {
+            // put the region entry in backing CHM of AbstractRegionMap so that
+            // it can be locked in basicPut/destroy
+            RegionEntry oldRe = backingRM.putEntryIfAbsent(key, re);
+            if (oldRe != null) {
+              if (re instanceof OffHeapRegionEntry && !oldRe.equals(re)) {
+                ((OffHeapRegionEntry)re).release();
+              }
+              return oldRe;
+            }
+            re.setMarkedForEviction();
+            owner.updateSizeOnCreate(key,
+                owner.calculateRegionEntryValueSize(re));
+            ((AbstractRegionMap)backingRM).incEntryCount(1);
+            ((AbstractRegionMap)backingRM).lruEntryCreate(re);
+          }
+        }
+      }
+    }
+    return re;
+  }
+
+  public RegionEntry getEntryFromEvent(Object key, SortedHoplogPersistedEvent event, boolean forceOnHeap, boolean forUpdate) {
+    Object val = getValueFromEvent(event);
+    RegionEntry re = null;
+    final TXStateInterface tx = owner.getTXState();
+    if (tx == null) {
+      re = createRegionEntry(key, val, event.getVersionTag(), forceOnHeap);
+      return re;
+    }
+    else // FOR TX case, we need to create off heap entry if required
+    if (val != null) {
+      if (((re = this.backingRM.getEntryInVM(key)) == null)
+          || (re.isRemoved() && !re.isTombstone())) {
+        boolean shouldCreateOnHeapEntry = !(owner.getOffHeap() && forUpdate); 
+        re = createRegionEntry(key, val, event.getVersionTag(), shouldCreateOnHeapEntry);
+        if(forUpdate) {
+          if (re != null && tx != null) {
+            // put the region entry in backing CHM of AbstractRegionMap so that
+            // it can be locked in basicPut/destroy
+            RegionEntry oldRe = backingRM.putEntryIfAbsent(key, re);
+            if (oldRe != null) {
+              if (re instanceof OffHeapRegionEntry && !oldRe.equals(re)) {
+                ((OffHeapRegionEntry)re).release();
+              }
+              return oldRe;
+            }
+            re.setMarkedForEviction();
+            owner.updateSizeOnCreate(key,
+                owner.calculateRegionEntryValueSize(re));
+            ((AbstractRegionMap)backingRM).incEntryCount(1);
+            ((AbstractRegionMap)backingRM).lruEntryCreate(re);
+          }
+        }
+      }
+    }
+    return re;
+  }
+
+  private RegionEntry createRegionEntry(Object key, Object value, VersionTag tag, boolean forceOnHeap) {
+    RegionEntryFactory ref = backingRM.getEntryFactory();
+    if (forceOnHeap) {
+      ref = ref.makeOnHeap();
+    }
+    value = getValueDuringGII(key, value);
+    RegionEntry re = ref.createEntry(this.owner, key, value);
+    setVersionTag(re, tag);
+    if (re instanceof LRUEntry) {
+      assert backingRM instanceof AbstractLRURegionMap;
+      EnableLRU ccHelper = ((AbstractLRURegionMap)backingRM)._getCCHelper();
+      ((LRUEntry)re).updateEntrySize(ccHelper);
+    }
+    return re;
+  }
+
+  private Object getValueDuringGII(Object key, Object value) {
+    if (owner.getIndexUpdater() != null && !owner.isInitialized()) {
+      return AbstractRegionMap.listOfDeltasCreator.newValue(key, owner, value,
+          null);
+    }
+    return value;
+  }
+
+  private Set createEntriesSet(IteratorType type)
+      throws ForceReattemptException {
+    ConcurrentParallelGatewaySenderQueue q = getHDFSQueue();
+    if (q == null) return Collections.emptySet();
+    HDFSBucketRegionQueue brq = q.getBucketRegionQueue(this.owner.getPartitionedRegion(), owner.getId());
+    return new HDFSEntriesSet(owner, brq, owner.getHoplogOrganizer(), type, refs);
+  }
+
+  private void closeDeadIterators() {
+    Reference<? extends HDFSIterator> weak;
+    while ((weak = refs.poll()) != null) {
+      if (logger.isTraceEnabled() || DEBUG) {
+        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Closing weak ref for iterator "
+            + weak.get()));
+      }
+      weak.get().close();
+    }
+  }
+
+  /**
+   * gets the value from event, deserializing if necessary.
+   */
+  private Object getValueFromEvent(PersistedEventImpl ev) {
+    if (ev.getOperation().isDestroy()) {
+      return Token.TOMBSTONE;
+    } else if (ev.getOperation().isInvalidate()) {
+      return Token.INVALID;
+    }
+    return ev.getValue();
+  }
+}
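
The delegate's read path is easy to lose in the detail above, so here is a deliberately simplified, self-contained analogue of the lookup order it implements: in-VM region map first, then the not-yet-flushed async queue events, then the persisted hoplog data. The types below are stand-ins invented for illustration, not Geode classes.

import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;

public class LayeredLookupSketch {
  interface PersistentStore { String read(String key); }

  private final Map<String, String> inMemory = new ConcurrentHashMap<>();
  private final Queue<String[]> unflushed = new ConcurrentLinkedQueue<>(); // [key, value] pairs
  private final PersistentStore store;

  LayeredLookupSketch(PersistentStore store) { this.store = store; }

  String get(String key) {
    // 1. in-VM region map (backingRM.getEntryInVM in the delegate)
    String v = inMemory.get(key);
    if (v != null) return v;
    // 2. events still sitting in the async queue (getFromHDFSQueue)
    for (String[] pair : unflushed) {
      if (pair[0].equals(key)) return pair[1];
    }
    // 3. fall back to the persisted files (getFromHDFS / hoplog read)
    // The real delegate also checks bucket primariness and allowReadFromHDFS()
    // before steps 2 and 3, and discards tombstones that are too old.
    return store.read(key);
  }
}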

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMapImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMapImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMapImpl.java
new file mode 100644
index 0000000..9336ed7
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/HDFSRegionMapImpl.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.internal.cache;
+
+import java.util.Collection;
+
+import com.gemstone.gemfire.cache.CacheWriterException;
+import com.gemstone.gemfire.cache.TimeoutException;
+import com.gemstone.gemfire.internal.size.SingleObjectSizer;
+
+/**
+ * Implementation of RegionMap that reads data from HDFS.
+ * 
+ */
+public class HDFSRegionMapImpl extends AbstractRegionMap implements HDFSRegionMap {
+
+  private final HDFSRegionMapDelegate delegate;
+
+  private static final boolean DEBUG = Boolean.getBoolean("hdfsRegionMap.DEBUG");
+
+  public HDFSRegionMapImpl(LocalRegion owner, Attributes attrs,
+      InternalRegionArguments internalRegionArgs) {
+    super(internalRegionArgs);
+    assert owner instanceof BucketRegion;
+    initialize(owner, attrs, internalRegionArgs, false);
+    this.delegate = new HDFSRegionMapDelegate(owner, attrs, internalRegionArgs, this);
+  }
+
+  @Override
+  public RegionEntry getEntry(Object key) {
+    return delegate.getEntry(key, null);
+  }
+
+  @Override
+  protected RegionEntry getEntry(EntryEventImpl event) {
+    return delegate.getEntry(event);
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public Collection<RegionEntry> regionEntries() {
+    return delegate.regionEntries();
+  }
+    
+  @Override
+  public int size() {
+    return delegate.size();
+  }
+    
+  @Override
+  public boolean isEmpty() {
+    return delegate.isEmpty();
+  }
+
+  @Override
+  public HDFSRegionMapDelegate getDelegate() {
+    return this.delegate;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalCache.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalCache.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalCache.java
index bda5a27..36eee80 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalCache.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalCache.java
@@ -20,6 +20,8 @@ package com.gemstone.gemfire.internal.cache;
 import java.util.Collection;
 
 import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSStoreDirector;
 import com.gemstone.gemfire.cache.query.internal.cq.CqService;
 import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.internal.cache.extension.Extensible;
@@ -43,5 +45,7 @@ public interface InternalCache extends Cache, Extensible<Cache> {
 
   public CqService getCqService();
   
+  public Collection<HDFSStoreImpl> getHDFSStores() ;
+  
   public <T extends CacheService> T getService(Class<T> clazz);
 }
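
A small, hypothetical usage sketch for the accessor added to InternalCache. The cast assumes the Cache instance is the GemFireCacheImpl shown earlier in this commit, and getName() is the accessor already used by removeHDFSStore above.

import com.gemstone.gemfire.cache.Cache;
import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
import com.gemstone.gemfire.internal.cache.InternalCache;

public class ListHdfsStoresSketch {
  /** Print the name of every HDFS store known to the cache. */
  public static void printStoreNames(Cache cache) {
    // GemFireCacheImpl implements InternalCache, so this cast is expected to
    // hold for a server-side cache; it is an assumption of this sketch.
    InternalCache internalCache = (InternalCache) cache;
    for (HDFSStoreImpl store : internalCache.getHDFSStores()) {
      System.out.println(store.getName());
    }
  }
}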

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalDataView.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalDataView.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalDataView.java
index 0885477..e506f2e 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalDataView.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalDataView.java
@@ -39,22 +39,17 @@ public interface InternalDataView {
    * @param keyInfo
    * @param localRegion
    * @param updateStats
-   * @param disableCopyOnRead
-   * @param preferCD
+   * @param disableCopyOnRead 
+   * @param preferCD 
    * @param clientEvent TODO
    * @param returnTombstones TODO
    * @param retainResult if true then the result may be a retained off-heap reference
    * @return the object associated with the key
    */
   @Retained
-  Object getDeserializedValue(KeyInfo keyInfo,
-                              LocalRegion localRegion,
-                              boolean updateStats,
-                              boolean disableCopyOnRead,
-                              boolean preferCD,
-                              EntryEventImpl clientEvent,
-                              boolean returnTombstones,
-                              boolean retainResult);
+  Object getDeserializedValue(KeyInfo keyInfo, LocalRegion localRegion,
+      boolean updateStats, boolean disableCopyOnRead, boolean preferCD, EntryEventImpl clientEvent, 
+      boolean returnTombstones, boolean allowReadFromHDFS, boolean retainResult);
 
   /**
    * @param event
@@ -187,8 +182,8 @@ public interface InternalDataView {
    * @return the Object associated with the key
    */
   Object findObject(KeyInfo key, LocalRegion r, boolean isCreate, boolean generateCallbacks,
-                    Object value, boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient,
-                    EntryEventImpl clientEvent, boolean returnTombstones);
+      Object value, boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient,
+      EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS);
 
 
   /**
@@ -229,18 +224,13 @@ public interface InternalDataView {
    * 
    * @param localRegion
    * @param key
-   * @param doNotLockEntry
+   * @param doNotLockEntry 
    * @param requestingClient the client that made the request, or null if not from a client
    * @param clientEvent the client event, if any
    * @param returnTombstones TODO
    * @return the serialized value from the cache
    */
-  Object getSerializedValue(LocalRegion localRegion,
-                            KeyInfo key,
-                            boolean doNotLockEntry,
-                            ClientProxyMembershipID requestingClient,
-                            EntryEventImpl clientEvent,
-                            boolean returnTombstones) throws DataLocationException;
+  Object getSerializedValue(LocalRegion localRegion, KeyInfo key, boolean doNotLockEntry, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) throws DataLocationException;
 
   abstract void checkSupportsRegionDestroy() throws UnsupportedOperationInTransactionException;
   abstract void checkSupportsRegionInvalidate() throws UnsupportedOperationInTransactionException;
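
The widened signatures thread a new allowReadFromHDFS flag through the read path: when it is false the lookup stays on the operational (in-memory) data, and only when it is true may the read fall through to HDFS. A minimal, self-contained sketch of that decision, with hypothetical types standing in for the real region map:

// Hedged sketch of the branch the allowReadFromHDFS flag controls; in the patch
// the real decision lives in LocalRegion.getDeserializedValue(...). Types and
// method names here are invented for illustration.
import java.util.HashMap;
import java.util.Map;

class ReadPathSketch {
  private final Map<String, String> operational = new HashMap<>(); // in-memory entries
  private final Map<String, String> hdfs = new HashMap<>();        // stand-in for HDFS data

  String read(String key, boolean allowReadFromHDFS) {
    String value = operational.get(key);
    if (value == null && allowReadFromHDFS) {
      // Only fault in from the slower tier when the caller permits it.
      value = hdfs.get(key);
    }
    return value;
  }

  public static void main(String[] args) {
    ReadPathSketch r = new ReadPathSketch();
    r.hdfs.put("k", "cold-value");
    System.out.println(r.read("k", false)); // null: operational data only
    System.out.println(r.read("k", true));  // cold-value: HDFS read allowed
  }
}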

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalRegionArguments.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalRegionArguments.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalRegionArguments.java
index f7d46fe..41e763d 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalRegionArguments.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/InternalRegionArguments.java
@@ -37,6 +37,7 @@ public final class InternalRegionArguments
   private boolean isUsedForPartitionedRegionAdmin;
   private boolean isUsedForSerialGatewaySenderQueue;
   private boolean isUsedForParallelGatewaySenderQueue;
+  private boolean isUsedForHDFSParallelGatewaySenderQueue = false;
   private int bucketRedundancy;
   private boolean isUsedForPartitionedRegionBucket;
   private RegionAdvisor partitionedRegionAdvisor;
@@ -272,11 +273,26 @@ public final class InternalRegionArguments
     this.isUsedForParallelGatewaySenderQueue = queueFlag;
     return this;
   }
+  public InternalRegionArguments setIsUsedForHDFSParallelGatewaySenderQueue(
+      boolean queueFlag) {
+    this.isUsedForHDFSParallelGatewaySenderQueue = queueFlag;
+    return this;
+  }
 
   public boolean isUsedForParallelGatewaySenderQueue() {
     return this.isUsedForParallelGatewaySenderQueue;
   }
   
+  public boolean isUsedForHDFSParallelGatewaySenderQueue() {
+    return this.isUsedForHDFSParallelGatewaySenderQueue;
+  }
+  
+  public boolean isReadWriteHDFSRegion() {
+    return isUsedForPartitionedRegionBucket()
+        && getPartitionedRegion().getHDFSStoreName() != null
+        && !getPartitionedRegion().getHDFSWriteOnly();
+  }
+
   public InternalRegionArguments setParallelGatewaySender(
       AbstractGatewaySender pgSender) {
     this.parallelGatewaySender = pgSender;
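
Because the new setter returns this like the existing ones, it chains in the usual fluent style. A small sketch, assuming the default InternalRegionArguments constructor; the combination of flags shown is only illustrative, not taken from the patch:

import com.gemstone.gemfire.internal.cache.InternalRegionArguments;

class RegionArgsSketch {
  // Builds arguments for an HDFS shadow-queue region; a sketch, not the patch's own usage.
  static InternalRegionArguments hdfsQueueArgs() {
    return new InternalRegionArguments()
        .setIsUsedForParallelGatewaySenderQueue(true)
        .setIsUsedForHDFSParallelGatewaySenderQueue(true);
  }
}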

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java
index 3ad294c..b3de9b7 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java
@@ -116,6 +116,11 @@ import com.gemstone.gemfire.cache.client.internal.ServerRegionProxy;
 import com.gemstone.gemfire.cache.control.ResourceManager;
 import com.gemstone.gemfire.cache.execute.Function;
 import com.gemstone.gemfire.cache.execute.ResultCollector;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSIntegrationUtil;
+import com.gemstone.gemfire.cache.hdfs.internal.HoplogListenerForRegion;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
 import com.gemstone.gemfire.cache.partition.PartitionRegionHelper;
 import com.gemstone.gemfire.cache.query.FunctionDomainException;
 import com.gemstone.gemfire.cache.query.Index;
@@ -460,6 +465,10 @@ public class LocalRegion extends AbstractRegion
   // Lock for updating PR MetaData on client side 
   public final Lock clientMetaDataLock = new ReentrantLock();
   
+  
+  protected HdfsRegionManager hdfsManager;
+  protected HoplogListenerForRegion hoplogListener;
+
   /**
    * There seem to be cases where a region can be created and yet the
    * distributed system is not yet in place...
@@ -632,6 +641,7 @@ public class LocalRegion extends AbstractRegion
       }
     }
 
+    this.hdfsManager = initHDFSManager();
     this.dsi = findDiskStore(attrs, internalRegionArgs);
     this.diskRegion = createDiskRegion(internalRegionArgs);
     this.entries = createRegionMap(internalRegionArgs);
@@ -686,8 +696,22 @@ public class LocalRegion extends AbstractRegion
     
   }
 
+  private HdfsRegionManager initHDFSManager() {
+    HdfsRegionManager hdfsMgr = null;
+    if (this.getHDFSStoreName() != null) {
+      this.hoplogListener = new HoplogListenerForRegion();
+      HDFSRegionDirector.getInstance().setCache(cache);
+      hdfsMgr = HDFSRegionDirector.getInstance().manageRegion(this, 
+          this.getHDFSStoreName(), hoplogListener);
+    }
+    return hdfsMgr;
+  }
+
   private RegionMap createRegionMap(InternalRegionArguments internalRegionArgs) {
     RegionMap result = null;
+    if (internalRegionArgs.isReadWriteHDFSRegion() && this.diskRegion != null) {
+      this.diskRegion.setEntriesMapIncompatible(true);
+    }
     if (this.diskRegion != null) {
       result = this.diskRegion.useExistingRegionMap(this);
     }
@@ -953,6 +977,11 @@ public class LocalRegion extends AbstractRegion
           existing = (LocalRegion)this.subregions.get(subregionName);
 
           if (existing == null) {
+            // create the async queue for HDFS if required. 
+            HDFSIntegrationUtil.createAndAddAsyncQueue(regionPath,
+                regionAttributes, this.cache);
+            regionAttributes = cache.setEvictionAttributesForLargeRegion(
+                regionAttributes);
             if (regionAttributes.getScope().isDistributed()
                 && internalRegionArgs.isUsedForPartitionedRegionBucket()) {
               final PartitionedRegion pr = internalRegionArgs
@@ -962,8 +991,15 @@ public class LocalRegion extends AbstractRegion
               internalRegionArgs.setKeyRequiresRegionContext(pr
                   .keyRequiresRegionContext());
               if (pr.isShadowPR()) {
-                newRegion = new BucketRegionQueue(subregionName, regionAttributes,
-                  this, this.cache, internalRegionArgs);
+                if (!pr.isShadowPRForHDFS()) {
+                    newRegion = new BucketRegionQueue(subregionName, regionAttributes,
+                      this, this.cache, internalRegionArgs);
+                }
+                else {
+                   newRegion = new HDFSBucketRegionQueue(subregionName, regionAttributes,
+                      this, this.cache, internalRegionArgs);
+                }
+                
               } else {
                 newRegion = new BucketRegion(subregionName, regionAttributes,
                     this, this.cache, internalRegionArgs);  
@@ -1098,6 +1134,7 @@ public class LocalRegion extends AbstractRegion
       if (event.getEventId() == null && generateEventID()) {
         event.setNewEventId(cache.getDistributedSystem());
       }
+      assert event.isFetchFromHDFS() : "validatedPut() should have been called";
       // Fix for 42448 - Only make create with null a local invalidate for
       // normal regions. Otherwise, it will become a distributed invalidate.
       if (getDataPolicy() == DataPolicy.NORMAL) {
@@ -1224,20 +1261,18 @@ public class LocalRegion extends AbstractRegion
    * @param retainResult if true then the result may be a retained off-heap reference
    * @return the value for the given key
    */
-  public final Object getDeserializedValue(RegionEntry re,
-                                           final KeyInfo keyInfo,
-                                           final boolean updateStats,
-                                           boolean disableCopyOnRead,
-                                           boolean preferCD,
-                                           EntryEventImpl clientEvent,
-                                           boolean returnTombstones,
-                                           boolean retainResult) {
+  public final Object getDeserializedValue(RegionEntry re, final KeyInfo keyInfo, final boolean updateStats, boolean disableCopyOnRead, 
+  boolean preferCD, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS, boolean retainResult) {
     if (this.diskRegion != null) {
       this.diskRegion.setClearCountReference();
     }
     try {
       if (re == null) {
-        re = this.entries.getEntry(keyInfo.getKey());
+        if (allowReadFromHDFS) {
+          re = this.entries.getEntry(keyInfo.getKey());
+        } else {
+          re = this.entries.getOperationalEntryInVM(keyInfo.getKey());
+        }
       }
       //skip updating the stats if the value is null
       // TODO - We need to clean up the callers of the this class so that we can
@@ -1347,7 +1382,7 @@ public class LocalRegion extends AbstractRegion
   public Object get(Object key, Object aCallbackArgument,
       boolean generateCallbacks, EntryEventImpl clientEvent) throws TimeoutException, CacheLoaderException
   {
-    Object result = get(key, aCallbackArgument, generateCallbacks, false, false, null, clientEvent, false);
+    Object result = get(key, aCallbackArgument, generateCallbacks, false, false, null, clientEvent, false, true/*allowReadFromHDFS*/);
     if (Token.isInvalid(result)) {
       result = null;
     }
@@ -1357,16 +1392,11 @@ public class LocalRegion extends AbstractRegion
   /*
    * @see BucketRegion#getSerialized(KeyInfo, boolean, boolean)
    */
-  public Object get(Object key,
-                    Object aCallbackArgument,
-                    boolean generateCallbacks,
-                    boolean disableCopyOnRead,
-                    boolean preferCD,
-                    ClientProxyMembershipID requestingClient,
-                    EntryEventImpl clientEvent,
-                    boolean returnTombstones) throws TimeoutException, CacheLoaderException {
+  public Object get(Object key, Object aCallbackArgument,
+      boolean generateCallbacks, boolean disableCopyOnRead, boolean preferCD,
+      ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) throws TimeoutException, CacheLoaderException {
 	  return get(key, aCallbackArgument,
-		      generateCallbacks, disableCopyOnRead, preferCD,requestingClient, clientEvent, returnTombstones, false, false);
+		      generateCallbacks, disableCopyOnRead, preferCD,requestingClient, clientEvent, returnTombstones, false, allowReadFromHDFS, false);
   }
   
   /**
@@ -1388,17 +1418,16 @@ public class LocalRegion extends AbstractRegion
   public Object getRetained(Object key, Object aCallbackArgument,
       boolean generateCallbacks, boolean disableCopyOnRead,
       ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean opScopeIsLocal) throws TimeoutException, CacheLoaderException {
-    return get(key, aCallbackArgument, generateCallbacks, disableCopyOnRead, true, requestingClient, clientEvent, returnTombstones, opScopeIsLocal,
-      false /* see GEODE-1291*/);
+    return get(key, aCallbackArgument, generateCallbacks, disableCopyOnRead, true, requestingClient, clientEvent, returnTombstones, opScopeIsLocal, true, false/* see GEODE-1291*/);
   }
   /**
    * @param opScopeIsLocal if true then just check local storage for a value; if false then try to find the value if it is not local
    * @param retainResult if true then the result may be a retained off-heap reference.
    */
   public Object get(Object key, Object aCallbackArgument,
-                    boolean generateCallbacks, boolean disableCopyOnRead, boolean preferCD,
-                    ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones,
-                    boolean opScopeIsLocal, boolean retainResult) throws TimeoutException, CacheLoaderException
+      boolean generateCallbacks, boolean disableCopyOnRead, boolean preferCD,
+      ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, 
+      boolean opScopeIsLocal, boolean allowReadFromHDFS, boolean retainResult) throws TimeoutException, CacheLoaderException
   {
     assert !retainResult || preferCD;
     validateKey(key);
@@ -1411,8 +1440,7 @@ public class LocalRegion extends AbstractRegion
     boolean isMiss = true;
     try {
       KeyInfo keyInfo = getKeyInfo(key, aCallbackArgument);
-      Object value = getDataView().getDeserializedValue(keyInfo, this, true, disableCopyOnRead, preferCD, clientEvent, returnTombstones,
-        retainResult);
+      Object value = getDataView().getDeserializedValue(keyInfo, this, true, disableCopyOnRead, preferCD, clientEvent, returnTombstones, allowReadFromHDFS, retainResult);
       final boolean isCreate = value == null;
       isMiss = value == null || Token.isInvalid(value)
           || (!returnTombstones && value == Token.TOMBSTONE);
@@ -1425,13 +1453,13 @@ public class LocalRegion extends AbstractRegion
         // if scope is local and there is no loader, then
         // don't go further to try and get value
         if (!opScopeIsLocal
-            && ((getScope().isDistributed())
+            && ((getScope().isDistributed() && !isHDFSRegion())
                 || hasServerProxy()
                 || basicGetLoader() != null)) { 
           // serialize search/load threads if not in txn
           value = getDataView().findObject(keyInfo,
               this, isCreate, generateCallbacks, value, disableCopyOnRead,
-              preferCD, requestingClient, clientEvent, returnTombstones);
+              preferCD, requestingClient, clientEvent, returnTombstones, false/*allowReadFromHDFS*/);
           if (!returnTombstones && value == Token.TOMBSTONE) {
             value = null;
           }
@@ -1457,7 +1485,7 @@ public class LocalRegion extends AbstractRegion
    */
   final public void recordMiss(final RegionEntry re, Object key) {
     final RegionEntry e;
-    if (re == null && !isTX()) {
+    if (re == null && !isTX() && !isHDFSRegion()) {
       e = basicGetEntry(key);
     } else {
       e = re;
@@ -1466,30 +1494,60 @@ public class LocalRegion extends AbstractRegion
   }
 
   /**
+   * @return true if this region has been configured for HDFS persistence
+   */
+  public boolean isHDFSRegion() {
+    return false;
+  }
+
+  /**
+   * @return true if this region is configured to read and write data from HDFS
+   */
+  public boolean isHDFSReadWriteRegion() {
+    return false;
+  }
+
+  /**
+   * @return true if this region is configured to only write to HDFS
+   */
+  protected boolean isHDFSWriteOnly() {
+    return false;
+  }
+
+  /**
+   * FOR TESTING ONLY
+   */
+  public HoplogListenerForRegion getHoplogListener() {
+    return hoplogListener;
+  }
+  
+  /**
+   * FOR TESTING ONLY
+   */
+  public HdfsRegionManager getHdfsRegionManager() {
+    return hdfsManager;
+  }
+  
+  /**
    * optimized to only allow one thread to do a search/load, other threads wait
    * on a future
-   *  @param keyInfo
+   *
+   * @param keyInfo
    * @param p_isCreate
    *                true if call found no entry; false if updating an existing
    *                entry
    * @param generateCallbacks
    * @param p_localValue
-*                the value retrieved from the region for this object.
+   *                the value retrieved from the region for this object.
    * @param disableCopyOnRead if true then do not make a copy
    * @param preferCD true if the preferred result form is CachedDeserializable
    * @param clientEvent the client event, if any
    * @param returnTombstones whether to return tombstones
    */
   @Retained
-  Object nonTxnFindObject(KeyInfo keyInfo,
-                          boolean p_isCreate,
-                          boolean generateCallbacks,
-                          Object p_localValue,
-                          boolean disableCopyOnRead,
-                          boolean preferCD,
-                          ClientProxyMembershipID requestingClient,
-                          EntryEventImpl clientEvent,
-                          boolean returnTombstones)
+  Object nonTxnFindObject(KeyInfo keyInfo, boolean p_isCreate,
+      boolean generateCallbacks, Object p_localValue, boolean disableCopyOnRead, boolean preferCD,
+      ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) 
       throws TimeoutException, CacheLoaderException
   {
     final Object key = keyInfo.getKey();
@@ -1548,8 +1606,7 @@ public class LocalRegion extends AbstractRegion
     try {
       boolean partitioned = this.getDataPolicy().withPartitioning();
       if (!partitioned) {
-        localValue = getDeserializedValue(null, keyInfo, isCreate, disableCopyOnRead, preferCD, clientEvent, false,
-          false);
+        localValue = getDeserializedValue(null, keyInfo, isCreate, disableCopyOnRead, preferCD, clientEvent, false, false/*allowReadFromHDFS*/, false);
 
         // stats have now been updated
         if (localValue != null && !Token.isInvalid(localValue)) {
@@ -1558,7 +1615,7 @@ public class LocalRegion extends AbstractRegion
         }
         isCreate = localValue == null;
         result = findObjectInSystem(keyInfo, isCreate, null, generateCallbacks,
-            localValue, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones);
+            localValue, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, false/*allowReadFromHDFS*/);
 
       } else {
         
@@ -1566,7 +1623,7 @@ public class LocalRegion extends AbstractRegion
         // For PRs we don't want to deserialize the value and we can't use findObjectInSystem because
         // it can invoke code that is transactional.
         result = getSharedDataView().findObject(keyInfo, this, true/*isCreate*/, generateCallbacks,
-            localValue, disableCopyOnRead, preferCD, null, null, false);
+            localValue, disableCopyOnRead, preferCD, null, null, false, allowReadFromHDFS);
         // TODO why are we not passing the client event or returnTombstones in the above invocation?
       }
 
@@ -1749,6 +1806,7 @@ public class LocalRegion extends AbstractRegion
   public final EntryEventImpl newPutEntryEvent(Object key, Object value,
       Object aCallbackArgument) {
     EntryEventImpl ev = newUpdateEntryEvent(key, value, aCallbackArgument);
+    ev.setFetchFromHDFS(false);
     ev.setPutDML(true);
     return ev;
   }
@@ -1880,11 +1938,23 @@ public class LocalRegion extends AbstractRegion
     }
   }
 
+  protected boolean includeHDFSResults() {
+    return isUsedForPartitionedRegionBucket() 
+        && isHDFSReadWriteRegion() 
+        && getPartitionedRegion().includeHDFSResults();
+  }
+  
+
   /** a fast estimate of total number of entries locally in the region */
   public long getEstimatedLocalSize() {
     RegionMap rm;
     if (!this.isDestroyed) {
       long size;
+      if (isHDFSReadWriteRegion() && this.initialized) {
+        // this size is not used by HDFS region iterators
+        // fixes bug 49239
+        return 0;
+      }
       // if region has not been initialized yet, then get the estimate from
       // disk region's recovery map if available
       if (!this.initialized && this.diskRegion != null
@@ -2196,6 +2266,9 @@ public class LocalRegion extends AbstractRegion
       if (this.imageState.isClient() && !this.concurrencyChecksEnabled) {
         return result - this.imageState.getDestroyedEntriesCount();
       }
+      if (includeHDFSResults()) {
+        return result;
+      }
       return result - this.tombstoneCount.get();
     }
   }
@@ -2931,18 +3004,11 @@ public class LocalRegion extends AbstractRegion
    * @param clientEvent the client's event, if any.  If not null, we set the version tag
    * @param returnTombstones TODO
    * @return the deserialized value
-   * @see LocalRegion#findObjectInSystem(KeyInfo, boolean, TXStateInterface, boolean, Object, boolean, boolean, ClientProxyMembershipID, EntryEventImpl, boolean)
-   */
-  protected Object findObjectInSystem(KeyInfo keyInfo,
-                                      boolean isCreate,
-                                      TXStateInterface tx,
-                                      boolean generateCallbacks,
-                                      Object localValue,
-                                      boolean disableCopyOnRead,
-                                      boolean preferCD,
-                                      ClientProxyMembershipID requestingClient,
-                                      EntryEventImpl clientEvent,
-                                      boolean returnTombstones)
+   * @see DistributedRegion#findObjectInSystem(KeyInfo, boolean, TXStateInterface, boolean, Object, boolean, boolean, ClientProxyMembershipID, EntryEventImpl, boolean, boolean )
+   */
+  protected Object findObjectInSystem(KeyInfo keyInfo, boolean isCreate,
+      TXStateInterface tx, boolean generateCallbacks, Object localValue, boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient,
+      EntryEventImpl clientEvent, boolean returnTombstones,  boolean allowReadFromHDFS)
       throws CacheLoaderException, TimeoutException
   {
     final Object key = keyInfo.getKey();
@@ -5317,6 +5383,9 @@ public class LocalRegion extends AbstractRegion
     // Notify bridge clients (if this is a BridgeServer)
     event.setEventType(eventType);
     notifyBridgeClients(event);
+    if (this.hdfsStoreName != null) {
+      notifyGatewaySender(eventType, event);
+    }
     if(callDispatchListenerEvent){
       dispatchListenerEvent(eventType, event);
     }
@@ -7202,8 +7271,24 @@ public class LocalRegion extends AbstractRegion
     if (generateEventID()) {
       event.setNewEventId(cache.getDistributedSystem());
     }
+    event.setFetchFromHDFS(false);
+    return event;
+  }
+  
+  @Retained
+  protected EntryEventImpl generateCustomEvictDestroyEvent(final Object key) {
+    @Retained EntryEventImpl event =  EntryEventImpl.create(
+        this, Operation.CUSTOM_EVICT_DESTROY, key, null/* newValue */,
+        null, false, getMyId());
+    
+    // Fix for bug#36963
+    if (generateEventID()) {
+      event.setNewEventId(cache.getDistributedSystem());
+    }
+    event.setFetchFromHDFS(false);
     return event;
   }
+  
   /**
    * @return true if the evict destroy was done; false if it was not needed
    */
@@ -9856,6 +9941,8 @@ public class LocalRegion extends AbstractRegion
       }
     }
     
+    clearHDFSData();
+    
     if (!isProxy()) {
       // Now we need to recreate all the indexes.
       //If the indexManager is null we don't have to worry
@@ -9894,6 +9981,11 @@ public class LocalRegion extends AbstractRegion
     }
   }
 
+  /**Clear HDFS data, if present */
+  protected void clearHDFSData() {
+    //do nothing, clear is implemented for subclasses like BucketRegion.
+  }
+
   @Override
   void basicLocalClear(RegionEventImpl rEvent)
   {
@@ -10670,6 +10762,7 @@ public class LocalRegion extends AbstractRegion
   }
     public final DistributedPutAllOperation newPutAllForPUTDmlOperation(Map<?, ?> map, Object callbackArg) {
     DistributedPutAllOperation dpao = newPutAllOperation(map, callbackArg);
+    dpao.getEvent().setFetchFromHDFS(false);
     dpao.getEvent().setPutDML(true);
     return dpao;
   }
@@ -10725,6 +10818,7 @@ public class LocalRegion extends AbstractRegion
         putallOp, this, Operation.PUTALL_CREATE, key, value);
 
     try {
+    event.setFetchFromHDFS(putallOp.getEvent().isFetchFromHDFS());
     event.setPutDML(putallOp.getEvent().isPutDML());
     
     if (tagHolder != null) {
@@ -12827,6 +12921,22 @@ public class LocalRegion extends AbstractRegion
   public Integer getCountNotFoundInLocal() {
     return countNotFoundInLocal.get();
   }
+  /// End of Variables and methods for test Hook for HDFS ///////
+  public void forceHDFSCompaction(boolean isMajor, Integer maxWaitTime) {
+    throw new UnsupportedOperationException(
+        LocalizedStrings.HOPLOG_DOES_NOT_USE_HDFSSTORE
+            .toLocalizedString(getName()));
+  }
+
+  public void flushHDFSQueue(int maxWaitTime) {
+    throw new UnsupportedOperationException(
+        LocalizedStrings.HOPLOG_DOES_NOT_USE_HDFSSTORE
+            .toLocalizedString(getName()));
+  }
+  
+  public long lastMajorHDFSCompaction() {
+    throw new UnsupportedOperationException();
+  }
 
   public static void simulateClearForTests(boolean flag) {
     simulateClearForTests = flag;
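
A recurring pattern in this file is that the narrower get(...) overloads now delegate to the widest one and default allowReadFromHDFS to true, so existing callers keep their behavior while HDFS-aware callers can opt out. A tiny standalone sketch of that overload pattern (names invented for illustration):

// Hedged sketch of the overload pattern used for LocalRegion.get(...):
// the short form keeps its old contract by defaulting allowReadFromHDFS to true.
class OverloadSketch {
  String get(String key) {
    return get(key, true /* allowReadFromHDFS */);
  }

  String get(String key, boolean allowReadFromHDFS) {
    // The real code consults the data view here; this stub just reports the flag.
    return key + " (hdfsRead=" + allowReadFromHDFS + ")";
  }

  public static void main(String[] args) {
    OverloadSketch s = new OverloadSketch();
    System.out.println(s.get("k"));        // k (hdfsRead=true)
    System.out.println(s.get("k", false)); // k (hdfsRead=false)
  }
}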

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegionDataView.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegionDataView.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegionDataView.java
index c26ff10..5193a17 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegionDataView.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegionDataView.java
@@ -17,6 +17,7 @@
 package com.gemstone.gemfire.internal.cache;
 
 import java.util.Collection;
+import java.util.Iterator;
 import java.util.Set;
 
 import com.gemstone.gemfire.cache.EntryNotFoundException;
@@ -35,16 +36,9 @@ public class LocalRegionDataView implements InternalDataView {
   /* (non-Javadoc)
    * @see com.gemstone.gemfire.internal.cache.InternalDataView#getDeserializedValue(java.lang.Object, com.gemstone.gemfire.internal.cache.LocalRegion, boolean)
    */
-  public Object getDeserializedValue(KeyInfo keyInfo,
-                                     LocalRegion localRegion,
-                                     boolean updateStats,
-                                     boolean disableCopyOnRead,
-                                     boolean preferCD,
-                                     EntryEventImpl clientEvent,
-                                     boolean returnTombstones,
-                                     boolean retainResult) {
-    return localRegion.getDeserializedValue(null, keyInfo, updateStats, disableCopyOnRead, preferCD, clientEvent, returnTombstones,
-      retainResult);
+  public Object getDeserializedValue(KeyInfo keyInfo, LocalRegion localRegion,
+      boolean updateStats, boolean disableCopyOnRead, boolean preferCD, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadsFromHDFS, boolean retainResult) {
+    return localRegion.getDeserializedValue(null, keyInfo, updateStats, disableCopyOnRead, preferCD, clientEvent, returnTombstones, allowReadsFromHDFS, retainResult);
   }
 
   /* (non-Javadoc)
@@ -142,17 +136,9 @@ public class LocalRegionDataView implements InternalDataView {
   /* (non-Javadoc)
    * @see com.gemstone.gemfire.internal.cache.InternalDataView#findObject(com.gemstone.gemfire.internal.cache.LocalRegion, java.lang.Object, java.lang.Object, boolean, boolean, java.lang.Object)
    */
-  public Object findObject(KeyInfo keyInfo,
-                           LocalRegion r,
-                           boolean isCreate,
-                           boolean generateCallbacks,
-                           Object value,
-                           boolean disableCopyOnRead,
-                           boolean preferCD,
-                           ClientProxyMembershipID requestingClient,
-                           EntryEventImpl clientEvent,
-                           boolean returnTombstones) {
-   return r.nonTxnFindObject(keyInfo, isCreate, generateCallbacks, value, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones);
+  public Object findObject(KeyInfo keyInfo, LocalRegion r, boolean isCreate,
+      boolean generateCallbacks, Object value, boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) {
+   return r.nonTxnFindObject(keyInfo, isCreate, generateCallbacks, value, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, allowReadFromHDFS);
   }
 
   /* (non-Javadoc)
@@ -194,12 +180,7 @@ public class LocalRegionDataView implements InternalDataView {
    * (non-Javadoc)
    * @see com.gemstone.gemfire.internal.cache.InternalDataView#getSerializedValue(com.gemstone.gemfire.internal.cache.BucketRegion, java.lang.Object, java.lang.Object)
    */
-  public Object getSerializedValue(LocalRegion localRegion,
-                                   KeyInfo key,
-                                   boolean doNotLockEntry,
-                                   ClientProxyMembershipID requestingClient,
-                                   EntryEventImpl clientEvent,
-                                   boolean returnTombstones) throws DataLocationException {
+  public Object getSerializedValue(LocalRegion localRegion, KeyInfo key, boolean doNotLockEntry, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) throws DataLocationException {
     throw new IllegalStateException();
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/NonLocalRegionEntry.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/NonLocalRegionEntry.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/NonLocalRegionEntry.java
index 4c1fa7f..bb83383 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/NonLocalRegionEntry.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/NonLocalRegionEntry.java
@@ -461,6 +461,26 @@ public class NonLocalRegionEntry implements RegionEntry, VersionStamp {
   }
 
   @Override
+  public boolean isMarkedForEviction() {
+    throw new UnsupportedOperationException(LocalizedStrings
+        .PartitionedRegion_NOT_APPROPRIATE_FOR_PARTITIONEDREGIONNONLOCALREGIONENTRY
+            .toLocalizedString());
+  }
+  @Override
+  public void setMarkedForEviction() {
+    throw new UnsupportedOperationException(LocalizedStrings
+        .PartitionedRegion_NOT_APPROPRIATE_FOR_PARTITIONEDREGIONNONLOCALREGIONENTRY
+            .toLocalizedString());
+  }
+
+  @Override
+  public void clearMarkedForEviction() {
+    throw new UnsupportedOperationException(LocalizedStrings
+        .PartitionedRegion_NOT_APPROPRIATE_FOR_PARTITIONEDREGIONNONLOCALREGIONENTRY
+            .toLocalizedString());
+  }
+
+  @Override
   public boolean isValueNull() {
     return (null == getValueAsToken());
   }
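
The three new methods here (and the stubs added to Oplog below) only satisfy the widened RegionEntry contract; entries that actually support eviction typically keep the mark as a single bit in a state field. A hedged sketch of such a flag, not the Geode implementation:

// Illustrative only: a mark-for-eviction flag kept as one bit in an int state field.
// Geode's real region entries use their own state encoding and synchronization.
class EvictableEntrySketch {
  private static final int MARKED_FOR_EVICTION = 0x1;
  private int state;

  boolean isMarkedForEviction() { return (state & MARKED_FOR_EVICTION) != 0; }
  void setMarkedForEviction()   { state |= MARKED_FOR_EVICTION; }
  void clearMarkedForEviction() { state &= ~MARKED_FOR_EVICTION; }

  public static void main(String[] args) {
    EvictableEntrySketch e = new EvictableEntrySketch();
    e.setMarkedForEviction();
    System.out.println(e.isMarkedForEviction()); // true
    e.clearMarkedForEviction();
    System.out.println(e.isMarkedForEviction()); // false
  }
}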

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/Oplog.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/Oplog.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/Oplog.java
index 4728594..fe8813e 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/Oplog.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/Oplog.java
@@ -7384,6 +7384,19 @@ public final class Oplog implements CompactableOplog, Flushable {
       // TODO Auto-generated method stub
     }
     @Override
+    public boolean isMarkedForEviction() {
+      // TODO Auto-generated method stub
+      return false;
+    }
+    @Override
+    public void setMarkedForEviction() {
+      // TODO Auto-generated method stub
+    }
+    @Override
+    public void clearMarkedForEviction() {
+      // TODO Auto-generated method stub
+    }
+    @Override
     public boolean isInvalid() {
       // TODO Auto-generated method stub
       return false;


[04/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/TieredCompactionJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/TieredCompactionJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/TieredCompactionJUnitTest.java
new file mode 100644
index 0000000..7b45952
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/TieredCompactionJUnitTest.java
@@ -0,0 +1,904 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.Operation;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.QueuedPersistentEvent;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HdfsSortedOplogOrganizer.HoplogCompactor;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer.Compactor;
+import com.gemstone.gemfire.internal.cache.ForceReattemptException;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.TrackedReference;
+import com.gemstone.gemfire.internal.util.BlobHelper;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+@Category({IntegrationTest.class, HoplogTest.class})
+public class TieredCompactionJUnitTest extends BaseHoplogTestCase {
+  static long ONE_MB = 1024 * 1024;
+  static long TEN_MB = 10 * ONE_MB;
+  
+  @Override
+  protected void configureHdfsStoreFactory() throws Exception {
+    super.configureHdfsStoreFactory();
+    
+    hsf.setInputFileCountMin(3);
+    hsf.setMinorCompaction(false);
+    hsf.setMajorCompaction(false);
+  }
+  
+  public void testMinorCompaction() throws Exception {
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+
+    // #1
+    ArrayList<QueuedPersistentEvent> items = new ArrayList<QueuedPersistentEvent>();
+    items.add(new TestEvent("1", "1"));
+    items.add(new TestEvent("2", "1"));
+    items.add(new TestEvent("3", "1"));
+    items.add(new TestEvent("4", "1"));
+    organizer.flush(items.iterator(), items.size());
+
+    // #2
+    items.clear();
+    items.add(new TestEvent("2", "1"));
+    items.add(new TestEvent("4", "1"));
+    items.add(new TestEvent("6", "1"));
+    items.add(new TestEvent("8", "1"));
+    organizer.flush(items.iterator(), items.size());
+
+    // #3
+    items.clear();
+    items.add(new TestEvent("1", "1"));
+    items.add(new TestEvent("3", "1"));
+    items.add(new TestEvent("5", "1"));
+    items.add(new TestEvent("7", "1"));
+    items.add(new TestEvent("9", "1"));
+    organizer.flush(items.iterator(), items.size());
+
+    // #4
+    items.clear();
+    items.add(new TestEvent("0", "1"));
+    items.add(new TestEvent("1", "1"));
+    items.add(new TestEvent("4", "1"));
+    items.add(new TestEvent("5", "1"));
+    organizer.flush(items.iterator(), items.size());
+
+    // check file existence in bucket directory, expect 4 hoplogs
+    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+    assertEquals(4, hoplogs.length);
+
+    // After compaction expect 1 hoplog only. It should have the same sequence number as that of the
+    // youngest file compacted, which should be 4 in this case
+    organizer.getCompactor().compact(false, false);
+    hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION);
+    assertEquals(1, hoplogs.length);
+    assertEquals(1, organizer.getSortedOplogs().size());
+    Hoplog hoplog = new HFileSortedOplog(hdfsStore, hoplogs[0].getPath(), blockCache, stats, storeStats);
+    assertEquals(4, HdfsSortedOplogOrganizer.getSequenceNumber(hoplog));
+
+    // iterate on oplogs to validate data in files
+    HoplogSetIterator iter = new HoplogSetIterator(organizer.getSortedOplogs());
+    // the iteration pattern for this test should be 0-9:
+    // 0 1 4 5 oplog #4
+    // 1 3 5 7 9 oplog #3
+    // 2 4 6 8 oplog #2
+    // 1 2 3 4 oplog #1
+    int count = 0;
+    for (ByteBuffer keyBB = null; iter.hasNext();) {
+      keyBB = iter.next();
+      byte[] key = HFileSortedOplog.byteBufferToArray(keyBB);
+      assertEquals(String.valueOf(count), BlobHelper.deserializeBlob(key));
+      count++;
+    }
+    assertEquals(10, count);
+
+    // there must be 4 expired hoplogs now
+    hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
+    assertEquals(4, hoplogs.length);
+    organizer.close();
+  }
+  
+  public void testIterativeMinorCompaction() throws Exception {
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+
+    // #1
+    ArrayList<QueuedPersistentEvent> items = new ArrayList<QueuedPersistentEvent>();
+    items.add(new TestEvent("1", "1"));
+    items.add(new TestEvent("2", "1"));
+    organizer.flush(items.iterator(), items.size());
+
+    items.clear();
+    items.add(new TestEvent("1", "2"));
+    items.add(new TestEvent("3", "2"));
+    organizer.flush(items.iterator(), items.size());
+
+    items.clear();
+    items.add(new TestEvent("4", "3"));
+    items.add(new TestEvent("5", "3"));
+    organizer.flush(items.iterator(), items.size());
+    
+    // check file existence in bucket directory
+    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+    assertEquals(3, hoplogs.length);
+
+    organizer.getCompactor().compact(false, false);
+    
+    FileStatus[] expired = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
+    assertEquals(3, expired.length);
+    FileStatus[] valids = HdfsSortedOplogOrganizer.filterValidHoplogs(hoplogs, expired);
+    assertEquals(0, valids.length);
+    // After compaction expect 1 hoplog only.
+    hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION);
+    assertEquals(1, hoplogs.length);
+    
+    items.clear();
+    items.add(new TestEvent("4", "4"));
+    items.add(new TestEvent("6", "4"));
+    organizer.flush(items.iterator(), items.size());
+
+    items.clear();
+    items.add(new TestEvent("7", "5"));
+    items.add(new TestEvent("8", "5"));
+    organizer.flush(items.iterator(), items.size());
+    
+    hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+    assertEquals(5, hoplogs.length);
+    
+    organizer.getCompactor().compact(false, false);
+    expired = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
+    assertEquals(6, expired.length);
+    valids = HdfsSortedOplogOrganizer.filterValidHoplogs(hoplogs, expired);
+    assertEquals(0, valids.length);    
+    hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION);
+    assertEquals(2, hoplogs.length);
+    valids = HdfsSortedOplogOrganizer.filterValidHoplogs(hoplogs, expired);
+    assertEquals(1, valids.length);
+    
+    assertEquals("2", organizer.read(BlobHelper.serializeToBlob("1")).getValue());
+    assertEquals("1", organizer.read(BlobHelper.serializeToBlob("2")).getValue());
+    assertEquals("2", organizer.read(BlobHelper.serializeToBlob("3")).getValue());
+    assertEquals("4", organizer.read(BlobHelper.serializeToBlob("4")).getValue());
+    assertEquals("3", organizer.read(BlobHelper.serializeToBlob("5")).getValue());
+    assertEquals("4", organizer.read(BlobHelper.serializeToBlob("6")).getValue());
+    assertEquals("5", organizer.read(BlobHelper.serializeToBlob("7")).getValue());
+    organizer.close();
+  }
+
+  public void testMajorCompactionWithDelete() throws Exception {
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+
+    // #1
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    items.add(new TestEvent("1", "1"));
+    items.add(new TestEvent("2", "1"));
+    items.add(new TestEvent("3", "1"));
+    items.add(new TestEvent("4", "1"));
+    items.add(new TestEvent("4", "10", Operation.DESTROY));
+    organizer.flush(items.iterator(), items.size());
+
+    // #2
+    items.clear();
+    items.add(new TestEvent("2", "1", Operation.DESTROY));
+    items.add(new TestEvent("4", "1", Operation.DESTROY));
+    items.add(new TestEvent("6", "1", Operation.INVALIDATE));
+    items.add(new TestEvent("8", "1"));
+    organizer.flush(items.iterator(), items.size());
+
+    // #3
+    items.clear();
+    items.add(new TestEvent("1", "1"));
+    items.add(new TestEvent("3", "1"));
+    items.add(new TestEvent("5", "1"));
+    items.add(new TestEvent("7", "1"));
+    items.add(new TestEvent("9", "1", Operation.DESTROY));
+    organizer.flush(items.iterator(), items.size());
+
+    // #4
+    items.clear();
+    items.add(new TestEvent("0", "1", Operation.DESTROY));
+    items.add(new TestEvent("1", "1"));
+    items.add(new TestEvent("4", "1"));
+    items.add(new TestEvent("5", "1"));
+    organizer.flush(items.iterator(), items.size());
+
+    // check file existence in bucket directory, expect 4 hoplogs
+    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+    assertEquals(4, hoplogs.length);
+
+    // After compaction expect 1 hoplog only. It should have the same sequence number as that of the
+    // youngest file compacted, which should be 4 in this case
+    organizer.getCompactor().compact(true, false);
+    hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION);
+    assertEquals(1, hoplogs.length);
+    assertEquals(1, organizer.getSortedOplogs().size());
+    Hoplog hoplog = new HFileSortedOplog(hdfsStore, hoplogs[0].getPath(), blockCache, stats, storeStats);
+    assertEquals(4, HdfsSortedOplogOrganizer.getSequenceNumber(hoplog));
+
+    // iterate on oplogs to validate data in files
+    HoplogSetIterator iter = new HoplogSetIterator(organizer.getSortedOplogs());
+    int count = 0;
+
+    // entries in () are destroyed or invalidated
+    // 1, 2, 3, 4, (11)
+    // (2), (4), (6), 8
+    // 1, 3, 5, 7, (9)
+    // (0), 1, 4, 5
+    String[] expectedValues = { "1", "3", "4", "5", "7", "8" };
+    for (ByteBuffer keyBB = null; iter.hasNext();) {
+      keyBB = iter.next();
+      byte[] key = HFileSortedOplog.byteBufferToArray(keyBB);
+      assertEquals(expectedValues[count], BlobHelper.deserializeBlob(key));
+      count++;
+    }
+    assertEquals(6, count);
+
+    // there must be 4 expired hoplogs now
+    hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
+    assertEquals(4, hoplogs.length);
+    organizer.close();
+  }
+  
+  public void testGainComputation() throws Exception {
+    HoplogOrganizer<? extends PersistedEventImpl> organizer = regionManager.create(0);
+    HdfsSortedOplogOrganizer bucket = (HdfsSortedOplogOrganizer) organizer;
+    ArrayList<TrackedReference<Hoplog>> targets = new ArrayList<TrackedReference<Hoplog>>();
+    for (int i = 0; i < 10; i++) {
+      targets.add(new TrackedReference<Hoplog>(new TestHoplog(hdfsStore, i * TEN_MB)));
+    }    
+
+    // each file read costs 3, so reading the four target files costs 3 * 4; then
+    // subtract the read cost of the single file produced by compaction
+    float expect = (float) ((3 * 4.0 - 3) / (20 + 30 + 40 + 50));
+    float result = bucket.computeGain(2, 5, targets);
+    assertTrue(Math.abs(expect - result) < (expect/1000));
+    
+    // each read has cost 3 except 10MB file with read cost 2. 9 files read cost
+    // is 3 * 9. Reduce read cost of file after compaction.
+    expect = (float) ((3 * 9 - 3 - 1.0) / (10 + 20 + 30 + 40 + 50 + 60 + 70 + 80 + 90));
+    result = bucket.computeGain(0, 9, targets);
+    assertTrue(Math.abs(expect - result) < (expect/1000));
+  }
+
+  public void testGainComputeSmallFile() throws Exception {
+    HoplogOrganizer<? extends PersistedEventImpl> organizer = regionManager.create(0);
+    HdfsSortedOplogOrganizer bucket = (HdfsSortedOplogOrganizer) organizer;
+    
+    ArrayList<TrackedReference<Hoplog>> targets = new ArrayList<TrackedReference<Hoplog>>();
+    for (int i = 0; i < 10; i++) {
+      targets.add(new TrackedReference<Hoplog>(new TestHoplog(hdfsStore, i * TEN_MB / 1024)));
+    }
+
+    float result = bucket.computeGain(2, 5, targets);
+    assertTrue(Math.abs(8.0 - result) < (1.0/1000));
+  }
+  
+  public void testGainComputeMixedFiles() throws Exception {
+    HoplogOrganizer<? extends PersistedEventImpl> organizer = regionManager.create(0);
+    HdfsSortedOplogOrganizer bucket = (HdfsSortedOplogOrganizer) organizer;
+    
+    ArrayList<TrackedReference<Hoplog>> targets = new ArrayList<TrackedReference<Hoplog>>();
+    for (int i = 0; i < 10; i++) {
+      targets.add(new TrackedReference<Hoplog>(new TestHoplog(hdfsStore, i * TEN_MB / 1024)));
+    }
+    TestHoplog midHop = (TestHoplog) targets.get(4).get();
+    // one more than other files
+    midHop.size = 5  * TEN_MB;
+    
+    float expect = (float) ((4 * 2 - 3 + 1.0) / 50);
+    float result = bucket.computeGain(2, 5, targets);
+    System.out.println(expect);
+    System.out.println(result);
+    assertTrue(Math.abs(expect - result) < (expect/1000));
+  }
+  
+  public void testGainComputeBadRatio() throws Exception {
+    HoplogOrganizer<? extends PersistedEventImpl> organizer = regionManager.create(0);
+    HdfsSortedOplogOrganizer bucket = (HdfsSortedOplogOrganizer) organizer;
+    ArrayList<TrackedReference<Hoplog>> targets = new ArrayList<TrackedReference<Hoplog>>();
+    for (int i = 0; i < 10; i++) {
+      targets.add(new TrackedReference<Hoplog>(new TestHoplog(hdfsStore, i * TEN_MB)));
+    }
+
+    TestHoplog firstHop = (TestHoplog) targets.get(2).get();
+    // one more than other files
+    firstHop.size = (1 + 30 + 40 + 50)  * TEN_MB;
+    Float result = bucket.computeGain(2, 5, targets);
+    assertNull(result);
+  }
+  
+  public void testMinorCompactionTargetMaxSize() throws Exception {
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+    HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
+
+    ArrayList<TrackedReference<TestHoplog>> targets = new ArrayList<TrackedReference<TestHoplog>>();
+    for (int i = 0; i < 5; i++) {
+      TrackedReference<TestHoplog> hop = new TrackedReference<TestHoplog>(new TestHoplog(hdfsStore, TEN_MB + i));
+      hop.increment();
+      targets.add(hop);
+    }
+    TrackedReference<TestHoplog> oldestHop = targets.get(targets.size() - 1);
+    TestHoplog thirdHop = (TestHoplog) targets.get(2).get();
+
+    // the oldest hoplog exceeds the max input file size, so it is ignored
+    oldestHop.get().size = HDFSStore.DEFAULT_INPUT_FILE_SIZE_MAX_MB * ONE_MB + 100;
+    List<TrackedReference<Hoplog>> list = (List<TrackedReference<Hoplog>>) targets.clone();
+    compactor.getMinorCompactionTargets(list, -1);
+    assertEquals(4, list.size());
+    for (TrackedReference<Hoplog> ref : list) {
+      assertTrue(((TestHoplog)ref.get()).size - TEN_MB < 5 );
+    }
+    
+    // third is more than max size but is not ignored
+    thirdHop.size = HDFSStore.DEFAULT_INPUT_FILE_SIZE_MAX_MB * ONE_MB + 100;
+    oldestHop.increment();
+    list = (List<TrackedReference<Hoplog>>) targets.clone();
+    compactor.getMinorCompactionTargets(list, -1);
+    assertEquals(4, list.size());
+    int i = 0;
+    for (TrackedReference<Hoplog> ref : list) {
+      if (i != 2) {
+        assertTrue(((TestHoplog) ref.get()).size - TEN_MB < 5);
+      } else {
+        assertTrue(((TestHoplog) ref.get()).size > HDFSStore.DEFAULT_INPUT_FILE_SIZE_MAX_MB * ONE_MB);
+      }
+      i++;
+    }
+  }
+  
+  public void testAlterMaxInputFileSize() throws Exception {
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+    HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
+
+    assertTrue(TEN_MB * 2 < hdfsStore.getInputFileSizeMax() * ONE_MB);
+    
+    ArrayList<TrackedReference<TestHoplog>> targets = new ArrayList<TrackedReference<TestHoplog>>();
+    for (int i = 0; i < 5; i++) {
+      TrackedReference<TestHoplog> hop = new TrackedReference<TestHoplog>(new TestHoplog(hdfsStore, TEN_MB + i));
+      hop.increment();
+      targets.add(hop);
+    }
+    
+    List<TrackedReference<Hoplog>> list = (List<TrackedReference<Hoplog>>) targets.clone();
+    compactor.getMinorCompactionTargets(list, -1);
+    assertEquals(targets.size(), list.size());
+    
+    HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
+    mutator.setInputFileSizeMax(1);
+    hdfsStore.alter(mutator);
+    
+    compactor.getMinorCompactionTargets(list, -1);
+    assertEquals(0, list.size());
+  }
+  
+  public void testAlterInputFileCount() throws Exception {
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+    HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
+    
+    assertTrue(2 < hdfsStore.getInputFileCountMax());
+    
+    ArrayList<TrackedReference<TestHoplog>> targets = new ArrayList<TrackedReference<TestHoplog>>();
+    for (int i = 0; i < 5; i++) {
+      TrackedReference<TestHoplog> hop = new TrackedReference<TestHoplog>(new TestHoplog(hdfsStore, TEN_MB + i));
+      hop.increment();
+      targets.add(hop);
+    }
+    
+    List<TrackedReference<Hoplog>> list = (List<TrackedReference<Hoplog>>) targets.clone();
+    compactor.getMinorCompactionTargets(list, -1);
+    assertEquals(targets.size(), list.size());
+    
+    HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
+    mutator.setInputFileCountMax(2);
+    mutator.setInputFileCountMin(2);
+    hdfsStore.alter(mutator);
+    
+    compactor.getMinorCompactionTargets(list, -1);
+    assertEquals(2, list.size());
+  }
+  
+  public void testAlterMajorCompactionInterval() throws Exception {
+    final AtomicInteger majorCReqCount = new AtomicInteger(0);
+    
+    final Compactor compactor = new AbstractCompactor() {
+      @Override
+      public boolean compact(boolean isMajor, boolean isForced) throws IOException {
+        majorCReqCount.incrementAndGet();
+        return true;
+      }
+    };
+    
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0) {
+      @Override
+      public synchronized Compactor getCompactor() {
+        return compactor;
+      }
+    };
+
+    // create hoplog in the past, 90 seconds before current time
+    organizer.hoplogCreated(getName(), 0, new TestHoplog(hdfsStore, ONE_MB, System.currentTimeMillis() - 90000));
+    TimeUnit.MILLISECONDS.sleep(50);
+    organizer.hoplogCreated(getName(), 0, new TestHoplog(hdfsStore, ONE_MB, System.currentTimeMillis() - 90000));
+    
+    alterMajorCompaction(hdfsStore, true);
+    
+    List<TrackedReference<Hoplog>> hoplogs = organizer.getSortedOplogs();
+    assertEquals(2, hoplogs.size());
+    
+    organizer.performMaintenance();
+    TimeUnit.MILLISECONDS.sleep(100);
+    assertEquals(0, majorCReqCount.get());
+    
+    HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
+    mutator.setMajorCompactionInterval(1);
+    hdfsStore.alter(mutator);
+    
+    organizer.performMaintenance();
+    TimeUnit.MILLISECONDS.sleep(100);
+    assertEquals(1, majorCReqCount.get());
+  }
+
+  public void testMinorCompactionTargetMinCount() throws Exception {
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+    HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
+    
+    ArrayList<TrackedReference<Hoplog>> targets = new ArrayList<TrackedReference<Hoplog>>();
+    for (int i = 0; i < 2; i++) {
+      TrackedReference<Hoplog> hop = new TrackedReference<Hoplog>(new TestHoplog(hdfsStore, TEN_MB + i));
+      hop.increment();
+      targets.add(hop);
+    }
+    compactor.getMinorCompactionTargets(targets, -1);
+    assertEquals(0, targets.size());
+  }
+  
+  public void testMinorCompactionLessTargetsStatsUpdate() throws Exception {
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    items.add(new TestEvent("1", "1"));
+    organizer.flush(items.iterator(), items.size());
+
+    items.clear();
+    items.add(new TestEvent("2", "2", Operation.DESTROY));
+    organizer.flush(items.iterator(), items.size());
+    
+    TimeUnit.SECONDS.sleep(1);
+    List<TrackedReference<Hoplog>> hoplogs = organizer.getSortedOplogs();
+    assertEquals(2, hoplogs.size());
+    
+    organizer.performMaintenance();
+    hoplogs = organizer.getSortedOplogs();
+    assertEquals(2, hoplogs.size());
+  }
+  
+  public void testMinorCompactionTargetsOptimizer() throws Exception {
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+    HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
+
+    ArrayList<TrackedReference<Hoplog>> targets = new ArrayList<TrackedReference<Hoplog>>();
+    for (int i = 0; i < 6; i++) {
+      TrackedReference<Hoplog> hop = new TrackedReference<Hoplog>(new TestHoplog(hdfsStore, TEN_MB + i));
+      hop.increment();
+      targets.add(hop);
+    }
+    List<TrackedReference<Hoplog>> list = (List<TrackedReference<Hoplog>>) targets.clone();
+    compactor.getMinorCompactionTargets(list, -1);
+    assertEquals(6, list.size());
+    
+    TestHoplog fifthHop = (TestHoplog) targets.get(4).get();
+    // the fifth hoplog needs an additional index block read because it holds more keys than the max per index block
+    fifthHop.size = (HdfsSortedOplogOrganizer.AVG_NUM_KEYS_PER_INDEX_BLOCK * 5 + 1) * 64 * 1024;
+    list = (List<TrackedReference<Hoplog>>) targets.clone();
+    compactor.getMinorCompactionTargets(list, -1);
+    assertEquals(4, list.size());
+    for (TrackedReference<Hoplog> ref : list) {
+      assertTrue(((TestHoplog)ref.get()).size - TEN_MB < 4 );
+    }
+  }
+  
+  public void testTargetsReleasedBadRatio() throws Exception {
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+    HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
+
+    ArrayList<TrackedReference<Hoplog>> targets = new ArrayList<TrackedReference<Hoplog>>();
+    for (int i = 0; i < 3; i++) {
+      TrackedReference<Hoplog> hop = new TrackedReference<Hoplog>(new TestHoplog(hdfsStore, TEN_MB + i));
+      hop.increment();
+      targets.add(hop);
+    }
+    TestHoplog oldestHop = (TestHoplog) targets.get(2).get();
+    oldestHop.size = (1 + 30)  * TEN_MB;
+    
+    List<TrackedReference<Hoplog>> list = (List<TrackedReference<Hoplog>>) targets.clone();
+    compactor.getMinorCompactionTargets(list, -1);
+    assertEquals(0, list.size());
+    assertEquals(3, targets.size());
+    for (TrackedReference<Hoplog> ref : targets) {
+      assertEquals(0, ref.uses());
+    }
+  }
+  
+  public void testMinorCTargetsIgnoreMajorC() throws Exception {
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    for (int i = 0; i < 7; i++) {
+      items.clear();
+      items.add(new TestEvent("1" + i, "1" + i));
+      organizer.flush(items.iterator(), items.size());
+    }
+    
+    HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
+    List<TrackedReference<Hoplog>> targets = organizer.getSortedOplogs();
+    compactor.getMinorCompactionTargets(targets, -1);
+    assertEquals(7, targets.size());
+    
+    targets = organizer.getSortedOplogs();
+    for (TrackedReference<Hoplog> ref : targets) {
+      ref.increment();
+    }
+    compactor.getMinorCompactionTargets(targets, 2);
+    assertEquals((7 - 2), targets.size());
+    targets = organizer.getSortedOplogs();
+    for (int i = 0; i < targets.size(); i++) {
+      if (i + 1 <= (7 - 2)) {
+        assertEquals(1, targets.get(i).uses());
+      } else {
+        assertEquals(0, targets.get(i).uses());
+      }
+    }
+    
+    targets = organizer.getSortedOplogs();
+    for (TrackedReference<Hoplog> ref : targets) {
+      if (ref.uses() == 0) {
+        ref.increment();
+      }
+      assertEquals(1, ref.uses());
+    }
+    compactor.getMinorCompactionTargets(targets, 7);
+    assertEquals(0, targets.size());
+    
+    targets = organizer.getSortedOplogs();
+    for (int i = 0; i < targets.size(); i++) {
+      assertEquals(0, targets.get(i).uses());
+    }
+  }
+  
+  public void testTargetOverlap() throws Exception {
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    for (int i = 0; i < 7; i++) {
+      items.clear();
+      items.add(new TestEvent("1" + i, "1" + i));
+      organizer.flush(items.iterator(), items.size());
+    }
+    
+    HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
+    List<TrackedReference<Hoplog>> targets = organizer.getSortedOplogs();
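+    // seven flushes create hoplogs with sequence numbers 1 through 7; overlap is expected whenever the major compaction sequence number is at or above the lowest of these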
+    assertTrue(compactor.isMinorMajorOverlap(targets, 8));
+    assertTrue(compactor.isMinorMajorOverlap(targets, 7));
+    assertTrue(compactor.isMinorMajorOverlap(targets, 6));
+    assertTrue(compactor.isMinorMajorOverlap(targets, 1));
+    assertFalse(compactor.isMinorMajorOverlap(targets, 0));
+    assertFalse(compactor.isMinorMajorOverlap(targets, -1));
+    
+    targets.remove(targets.size() -1); // remove the last one 
+    targets.remove(targets.size() -1); // remove the last one again
+    assertFalse(compactor.isMinorMajorOverlap(targets, 1));
+    assertFalse(compactor.isMinorMajorOverlap(targets, 2));
+    assertTrue(compactor.isMinorMajorOverlap(targets, 3));
+    
+    targets.remove(3); // remove from the middle, seq num 4
+    assertTrue(compactor.isMinorMajorOverlap(targets, 4));
+    assertTrue(compactor.isMinorMajorOverlap(targets, 3));
+  }
+  
+  public void testSuspendMinorByMajor() throws Exception {
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    for (int i = 0; i < 5; i++) {
+      items.clear();
+      items.add(new TestEvent("1" + i, "1" + i));
+      organizer.flush(items.iterator(), items.size());
+    }
+    
+    HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
+
+    Hoplog hoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir + "/"
+        + getName() + "-" + System.currentTimeMillis() + "-1.ihop.tmp"), blockCache, stats, storeStats);
+    compactor.fillCompactionHoplog(false, organizer.getSortedOplogs(), hoplog, -1);
+    
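+    // once a major compaction sequence number is set, filling a minor compaction hoplog over overlapping targets should be interrupted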
+    cache.getLogger().info("<ExpectedException action=add>java.lang.InterruptedException</ExpectedException>");
+    try {
+      compactor.maxMajorCSeqNum.set(3);
+      compactor.fillCompactionHoplog(false, organizer.getSortedOplogs(), hoplog, -1);
+      fail();
+    } catch (InterruptedException e) {
+      // expected
+    }
+    cache.getLogger().info("<ExpectedException action=remove>java.lang.InterruptedException</ExpectedException>");
+    organizer.close();
+  }
+  
+  public void testMajorCompactionSetsSeqNum() throws Exception {
+    final CountDownLatch compactionStartedLatch = new CountDownLatch(1);
+    final CountDownLatch waitLatch = new CountDownLatch(1);
+    class MyOrganizer extends HdfsSortedOplogOrganizer {
+      final HoplogCompactor compactor = new MyCompactor();
+      public MyOrganizer(HdfsRegionManager region, int bucketId) throws IOException {
+        super(region, bucketId);
+      }
+      public synchronized Compactor getCompactor() {
+        return compactor;
+      }
+      class MyCompactor extends HoplogCompactor {
+        @Override
+        public long fillCompactionHoplog(boolean isMajor,
+            List<TrackedReference<Hoplog>> targets, Hoplog output,
+            int majorCSeqNum) throws IOException, InterruptedException {
+          compactionStartedLatch.countDown();
+          waitLatch.await();
+          long byteCount = 0;
+          try {
+            byteCount = super.fillCompactionHoplog(isMajor, targets, output, majorCSeqNum);
+          } catch (ForceReattemptException e) {
+            // we do not expect this in a unit test. 
+          }
+          return byteCount;
+        }
+      }
+    }
+    
+    final HdfsSortedOplogOrganizer organizer = new MyOrganizer(regionManager, 0);
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    for (int i = 0; i < 3; i++) {
+      items.clear();
+      items.add(new TestEvent("1" + i, "1" + i));
+      organizer.flush(items.iterator(), items.size());
+    }
+    
+    Thread t = new Thread(new Runnable() {
+      public void run() {
+        try {
+          organizer.getCompactor().compact(true, false);
+        } catch (IOException e) {
+          e.printStackTrace();
+        }
+      }
+    });
+    t.start();
+    compactionStartedLatch.await();
+    assertEquals(3, ((HoplogCompactor)organizer.getCompactor()).maxMajorCSeqNum.get());
+    waitLatch.countDown();
+    t.join();
+  }
+  
+  public void testMinorWatchesMajorsSeqNum() throws Exception {
+    final CountDownLatch majorCStartedLatch = new CountDownLatch(1);
+    final CountDownLatch majorCWaitLatch = new CountDownLatch(1);
+    
+    final CountDownLatch minorCStartedLatch = new CountDownLatch(1);
+    final List<TrackedReference<Hoplog>> minorTargets = new ArrayList<TrackedReference<Hoplog>>();
+    
+    class MyOrganizer extends HdfsSortedOplogOrganizer {
+      final HoplogCompactor compactor = new MyCompactor();
+      public MyOrganizer(HdfsRegionManager region, int bucketId) throws IOException {
+        super(region, bucketId);
+      }
+      public synchronized Compactor getCompactor() {
+        return compactor;
+      }
+      class MyCompactor extends HoplogCompactor {
+        @Override
+        public long fillCompactionHoplog(boolean isMajor,
+            List<TrackedReference<Hoplog>> targets, Hoplog output,
+            int majorCSeqNum) throws IOException, InterruptedException {
+          if (isMajor) {
+            majorCStartedLatch.countDown();
+            majorCWaitLatch.await();
+          } else {
+            minorCStartedLatch.countDown();
+            minorTargets.addAll(targets);
+          }
+          long byteCount =0;
+          try {
+            byteCount = super.fillCompactionHoplog(isMajor, targets, output, majorCSeqNum);
+          } catch (ForceReattemptException e) {
+            // we do not expect this in a unit test. 
+          }
+          return byteCount;
+        }
+      }
+    }
+    
+    final HdfsSortedOplogOrganizer organizer = new MyOrganizer(regionManager, 0);
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    for (int i = 0; i < 3; i++) {
+      items.clear();
+      items.add(new TestEvent("1" + i, "1" + i));
+      organizer.flush(items.iterator(), items.size());
+    }
+    
+    Thread majorCThread = new Thread(new Runnable() {
+      public void run() {
+        try {
+          organizer.getCompactor().compact(true, false);
+        } catch (IOException e) {
+          e.printStackTrace();
+        }
+      }
+    });
+    majorCThread.start();
+    majorCStartedLatch.await();
+    assertEquals(3, ((HoplogCompactor)organizer.getCompactor()).maxMajorCSeqNum.get());
+
+    // create more files for minor C
+    for (int i = 0; i < 4; i++) {
+      items.clear();
+      items.add(new TestEvent("1" + i, "1" + i));
+      organizer.flush(items.iterator(), items.size());
+    }
+    
+    Thread minorCThread = new Thread(new Runnable() {
+      public void run() {
+        try {
+          organizer.getCompactor().compact(false, false);
+        } catch (IOException e) {
+          e.printStackTrace();
+        }
+      }
+    });
+    minorCThread.start();
+    minorCThread.join();
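+    // minor compaction should only pick hoplogs created after the major compaction recorded its sequence number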
+    assertEquals(4, minorTargets.size());
+    for (TrackedReference<Hoplog> ref : minorTargets) {
+      assertTrue(organizer.getSequenceNumber(ref.get()) >= 4);
+    }
+    
+    majorCWaitLatch.countDown();
+    majorCThread.join();
+  }
+  
+  public void testTimeBoundedSuspend() throws Exception {
+    final AtomicBoolean barrier = new AtomicBoolean(true);
+    
+    class MyOrganizer extends HdfsSortedOplogOrganizer {
+      public MyOrganizer(HdfsRegionManager region, int bucketId) throws IOException {
+        super(region, bucketId);
+      }
+      public synchronized Compactor getCompactor() {
+        return new MyCompactor();
+      }
+      class MyCompactor extends HoplogCompactor {
+        public long fillCompactionHoplog(boolean isMajor, List<TrackedReference<Hoplog>> targets, Hoplog output)
+            throws IOException, InterruptedException {
+          barrier.set(false);
+          TimeUnit.SECONDS.sleep(5 * HoplogConfig.SUSPEND_MAX_WAIT_MS_DEFAULT);
+          long byteCount =0;
+          try {
+            byteCount = super.fillCompactionHoplog(isMajor, targets, output, -1);
+          } catch (ForceReattemptException e) {
+            // we do not expect this in a unit test. 
+          }
+          return byteCount;
+        }
+      }
+    }
+    
+    HdfsSortedOplogOrganizer organizer = new MyOrganizer(regionManager, 0);
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    for (int i = 0; i < 4; i++) {
+      items.clear();
+      items.add(new TestEvent("1" + i, "1" + i));
+      organizer.flush(items.iterator(), items.size());
+    }
+
+    final HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
+    ExecutorService service = Executors.newCachedThreadPool();
+    service.execute(new Runnable() {
+      public void run() {
+        try {
+          compactor.compact(false, false);
+        } catch (Exception e) {
+        }
+      }
+    });
+    
+    final AtomicLong start = new AtomicLong(0);
+    final AtomicLong end = new AtomicLong(0);
+    service.execute(new Runnable() {
+      public void run() {
+        while (barrier.get()) {
+          try {
+            TimeUnit.MILLISECONDS.sleep(50);
+          } catch (InterruptedException e) {
+            e.printStackTrace();
+          }
+        }
+        
+        start.set(System.currentTimeMillis());
+        compactor.suspend();
+        end.set(System.currentTimeMillis());
+      }
+    });
+    
+    for (long i = 0; i < 5; i++) {
+      if (end.get() == 0) {
+        TimeUnit.MILLISECONDS.sleep(HoplogConfig.SUSPEND_MAX_WAIT_MS_DEFAULT / 2);
+      } else {
+        break;
+      }
+    }
+    
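+    // suspend() should return within roughly SUSPEND_MAX_WAIT_MS_DEFAULT even though the compaction thread is still sleeping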
+    assertTrue(end.get() - start.get() < 100 + HoplogConfig.SUSPEND_MAX_WAIT_MS_DEFAULT);
+  }
+  
+  public static class TestHoplog extends AbstractHoplog {
+    long size;
+    long creationTime;
+    TestHoplog(HDFSStoreImpl store, long size) throws IOException {
+      this(store, size, 0);
+    }
+    
+    TestHoplog(HDFSStoreImpl store, long size, long creationTime) throws IOException {
+      super(store, new Path("1-" + creationTime + "-1.hop"), null);
+      this.size = size;
+      this.creationTime = creationTime;
+    }
+    
+    @Override
+    public long getSize() {
+      return size;
+    }
+    @Override
+    public long getModificationTimeStamp() {
+      if (creationTime > 0) {
+        return creationTime;
+      }
+      return super.getModificationTimeStamp();
+    }
+    @Override
+    public String toString() {
+      long name = size -  TEN_MB;
+      if (name < 0) name = size - (TEN_MB / 1024);
+      return name + "";
+    }
+    public boolean isClosed() {
+      return false;
+    }
+    public void close() throws IOException {
+    }
+    public HoplogReader getReader() throws IOException {
+      return null;
+    }
+    public HoplogWriter createWriter(int keys) throws IOException {
+      return null;
+    }
+    public void close(boolean clearCache) throws IOException {
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFKeyJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFKeyJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFKeyJUnitTest.java
new file mode 100644
index 0000000..fe15305
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFKeyJUnitTest.java
@@ -0,0 +1,50 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+
+import junit.framework.TestCase;
+
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+@Category({IntegrationTest.class, HoplogTest.class})
+public class GFKeyJUnitTest extends TestCase {
+  public void testSerde() throws Exception {
+    String str = "str";
+    GFKey key = new GFKey();
+    key.setKey(str);
+    
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    DataOutputStream dos = new DataOutputStream(baos);
+    key.write(dos);
+    
+    ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
+    DataInputStream dis = new DataInputStream(bais);
+    key.readFields(dis);
+    
+    assertEquals(str, key.getKey());
+  }
+  
+  public void testCompare() {
+    GFKey keya = new GFKey();
+    keya.setKey("a");
+    
+    GFKey keyb = new GFKey();
+    keyb.setKey("b");
+    
+    assertEquals(-1, keya.compareTo(keyb));
+    assertEquals(1, keyb.compareTo(keya));
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HDFSSplitIteratorJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HDFSSplitIteratorJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HDFSSplitIteratorJUnitTest.java
new file mode 100644
index 0000000..5ebb00e
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HDFSSplitIteratorJUnitTest.java
@@ -0,0 +1,265 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
+import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.BaseHoplogTestCase;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HFileSortedOplog;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+@Category({IntegrationTest.class, HoplogTest.class})
+public class HDFSSplitIteratorJUnitTest extends BaseHoplogTestCase {
+  public void test1Hop1BlockIter() throws Exception {
+    Path path = new Path(testDataDir, "region/0/1-1-1.hop");
+    Hoplog oplog = new HFileSortedOplog(hdfsStore, path, blockCache, stats,
+        storeStats);
+    createHoplog(10, oplog);
+
+    Path[] paths = {path};
+    long[] starts = {0};
+    long[] lengths = {oplog.getSize()};
+    HDFSSplitIterator iter = HDFSSplitIterator.newInstance(
+        hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
+    
+    int count = 0;
+    while (iter.hasNext()) {
+      boolean success = iter.next();
+      assertTrue(success);
+      assertEquals("key-" + count, new String((byte[])iter.getKey()));
+      count++;
+    }
+    assertEquals(10, count);
+  }
+  
+  public void test1HopNBlockIter() throws Exception {
+    Path path = new Path(testDataDir, "region/0/1-1-1.hop");
+    Hoplog oplog = new HFileSortedOplog(hdfsStore, path,
+        blockCache, stats, storeStats);
+    createHoplog(2000, oplog);
+    
+    FileSystem fs = hdfsStore.getFileSystem();
+    Reader reader = HFile.createReader(fs, path, new CacheConfig(fs.getConf()));
+    BlockIndexReader bir = reader.getDataBlockIndexReader();
+    int blockCount = bir.getRootBlockCount();
+    reader.close();
+    
+    // make sure there is more than one HFile block in the hoplog
+    assertTrue(1 < blockCount);
+
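+    // first, iterate over a split covering only the first half of the hoplog; it should return some but not all of the keys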
+    Path[] paths = {path};
+    long half = oplog.getSize()/2;
+    long[] starts = {0};
+    long[] lengths = {half};
+    HDFSSplitIterator iter = HDFSSplitIterator.newInstance(
+        hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
+    
+    int count = 0;
+    while (iter.hasNext()) {
+      boolean success = iter.next();
+      assertTrue(success);
+      assertEquals("key-" + (count + 100000), new String((byte[])iter.getKey()));
+      count++;
+    }
+    // the number of iterations should be less than the number of keys
+    // inserted in the hoplog
+    assertTrue(count < 2000 && count > 0);
+
+    paths = new Path[] {path};
+    starts = new long[] {half + 1};
+    lengths = new long[] {oplog.getSize()};
+    iter = HDFSSplitIterator.newInstance(
+        hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
+    
+    while (iter.hasNext()) {
+      boolean success = iter.next();
+      assertTrue(success);
+      assertEquals("key-" + (count + 100000), new String((byte[])iter.getKey()));
+      count++;
+    }
+    assertEquals(2000, count);
+
+    paths = new Path[] {path, path};
+    starts = new long[] {0, half + 1};
+    lengths = new long[] {half, oplog.getSize()};
+    iter = HDFSSplitIterator.newInstance(
+        hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
+    
+    count = 0;
+    while (iter.hasNext()) {
+      boolean success = iter.next();
+      assertTrue(success);
+      assertEquals("key-" + (count + 100000), new String((byte[])iter.getKey()));
+      count++;
+    }
+    assertEquals(2000, count);
+  }
+
+  /*
+   * This test iterates over 3 hoplog files. The three files have the same
+   * content; duplicate keys should not be discarded.
+   */
+  public void testNHoplogNBlockIter() throws Exception {
+    Path path1 = new Path(testDataDir, "region/0/1-1-1.hop");
+    Hoplog oplog = new HFileSortedOplog(hdfsStore, path1,
+        blockCache, stats, storeStats);
+    createHoplog(2000, oplog);
+    
+    FileSystem fs = hdfsStore.getFileSystem();
+    Reader reader = HFile.createReader(fs, path1, new CacheConfig(fs.getConf()));
+    BlockIndexReader bir = reader.getDataBlockIndexReader();
+    int blockCount = bir.getRootBlockCount();
+    reader.close();
+    
+    // make sure there is more than one HFile block in the hoplog
+    assertTrue(1 < blockCount);
+    
+    Path path2 = new Path(testDataDir, "region/0/1-2-1.hop");
+    oplog = new HFileSortedOplog(hdfsStore, path2,
+        blockCache, stats, storeStats);
+    createHoplog(2000, oplog);
+
+    Path path3 = new Path(testDataDir, "region/0/1-3-1.hop");
+    oplog = new HFileSortedOplog(hdfsStore, path3,
+        blockCache, stats, storeStats);
+    createHoplog(2000, oplog);
+    
+    Path[] paths = {path1, path2, path3, path1, path2, path3};
+    long half = oplog.getSize()/2;
+    long[] starts = {0, 0, 0, half + 1, half + 1, half + 1};
+    long[] lengths = {half, half, half, oplog.getSize(), oplog.getSize(), oplog.getSize()};
+    HDFSSplitIterator iter = HDFSSplitIterator.newInstance(
+        hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
+    
+    int[] keyCounts = new int[2000];
+    while (iter.hasNext()) {
+      boolean success = iter.next();
+      assertTrue(success);
+      String key = new String((byte[])iter.getKey()).substring("key-".length());
+      keyCounts[Integer.valueOf(key) - 100000] ++;
+    }
+    
+    for (int i : keyCounts) {
+      assertEquals(3, i);
+    }
+  }
+  
+  public void testMRLikeNHopIter() throws Exception {
+    Path path1 = new Path(testDataDir, "region/0/1-1-1.hop");
+    Hoplog oplog = new HFileSortedOplog(hdfsStore, path1,
+        blockCache, stats, storeStats);
+    createHoplog(10, oplog);
+    
+    Path path2 = new Path(testDataDir, "region/0/1-2-1.hop");
+    oplog = new HFileSortedOplog(hdfsStore, path2,
+        blockCache, stats, storeStats);
+    createHoplog(10, oplog);
+    
+    Path path3 = new Path(testDataDir, "region/0/1-3-1.hop");
+    oplog = new HFileSortedOplog(hdfsStore, path3,
+        blockCache, stats, storeStats);
+    createHoplog(10, oplog);
+    
+    Path[] paths = {path1, path2, path3};
+    long[] starts = {0, 0, 0};
+    long[] lengths = {oplog.getSize(), oplog.getSize(), oplog.getSize()};
+    HDFSSplitIterator iter = HDFSSplitIterator.newInstance(
+        hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
+    
+    int[] keyCounts = new int[10];
+    while (iter.hasNext()) {
+      boolean success = iter.next();
+      assertTrue(success);
+      // extra hasNext() call before the key is read
+      iter.hasNext(); 
+      String key = new String((byte[])iter.getKey()).substring("key-".length());
+      System.out.println(key);
+      keyCounts[Integer.valueOf(key)] ++;
+    }
+    
+    for (int i : keyCounts) {
+      assertEquals(3, i);
+    }
+  }
+  
+  public void test1Hop1BlockIterSkipDeletedHoplogs() throws Exception {
+    FileSystem fs = hdfsStore.getFileSystem();
+    Path path = new Path(testDataDir, "region/0/1-1-1.hop");
+    Hoplog oplog = new HFileSortedOplog(hdfsStore, path,
+        blockCache, stats, storeStats);
+    createHoplog(10, oplog);
+
+    Path[] paths = {path};
+    long[] starts = {0};
+    long[] lengths = {oplog.getSize()};
+    
+    //Delete the Hoplog file
+    fs.delete(path, true);
+    
+    HDFSSplitIterator iter = HDFSSplitIterator.newInstance(
+        hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
+    assertFalse(iter.hasNext());
+    
+  }
+  
+  public void testMRLikeNHopIterSkipDeletedHoplogs() throws Exception {
+    FileSystem fs = hdfsStore.getFileSystem();
+    //Create Hoplogs
+    Path path1 = new Path(testDataDir, "region/0/1-1-1.hop");
+    Hoplog oplog = new HFileSortedOplog(hdfsStore, path1,
+        blockCache, stats, storeStats);
+    createHoplog(10, oplog);
+    
+    Path path2 = new Path(testDataDir, "region/0/1-2-1.hop");
+    oplog = new HFileSortedOplog(hdfsStore, path2,
+        blockCache, stats, storeStats);
+    createHoplog(10, oplog);
+    
+    Path path3 = new Path(testDataDir, "region/0/1-3-1.hop");
+    oplog = new HFileSortedOplog(hdfsStore, path3,
+        blockCache, stats, storeStats);
+    createHoplog(10, oplog);
+    
+    Path[] paths = {path1, path2, path3};
+    long[] starts = {0, 0, 0};
+    long[] lengths = {oplog.getSize(), oplog.getSize(), oplog.getSize()};
+    HDFSSplitIterator iter = HDFSSplitIterator.newInstance(
+        hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
+    int count = 0;
+    while (iter.hasNext()) {
+      boolean success = iter.next();
+      assertTrue(success);
+      count++;
+    }
+    assertEquals(30, count);
+    
+    for(int i = 0; i < 3; ++i){
+      fs.delete(paths[i], true);
+      iter = HDFSSplitIterator.newInstance(
+          hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
+      count = 0;
+      while (iter.hasNext()) {
+        boolean success = iter.next();
+        assertTrue(success);
+        count++;
+      }
+      assertEquals(20, count);
+      oplog = new HFileSortedOplog(hdfsStore, paths[i],
+          blockCache, stats, storeStats);
+      createHoplog(10, oplog);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtilJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtilJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtilJUnitTest.java
new file mode 100644
index 0000000..a209b6e
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtilJUnitTest.java
@@ -0,0 +1,305 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.BaseHoplogTestCase;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HdfsSortedOplogOrganizer;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.TrackedReference;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+@Category({IntegrationTest.class, HoplogTest.class})
+public class HoplogUtilJUnitTest extends BaseHoplogTestCase {
+  Path regionPath = null;
+  
+  @Override
+  protected void configureHdfsStoreFactory() throws Exception {
+    super.configureHdfsStoreFactory();
+    
+    hsf.setInputFileCountMin(3);
+    hsf.setMinorCompaction(false);
+    hsf.setMajorCompaction(false);
+  }
+  
+  public void testHoplogListingMultiBucket() throws Exception {
+    createHoplogs();
+
+    Collection<FileStatus> hoplogs = HoplogUtil.getAllRegionHoplogs(
+        regionPath, hdfsStore.getFileSystem(),
+        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+
+    assertEquals(5, hdfsStore.getFileSystem().listStatus(regionPath).length);
+    assertEquals(15, hoplogs.size());
+  }
+
+  public void testHoplogListingMixFileTypes() throws Exception {
+    createHoplogs();
+
+    HoplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+    organizer.getCompactor().compact(false, false);
+
+    Collection<FileStatus> hoplogs = HoplogUtil.getAllRegionHoplogs(
+        regionPath, hdfsStore.getFileSystem(),
+        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+
+    assertEquals(7,
+        hdfsStore.getFileSystem().listStatus(new Path(regionPath, "0")).length);
+    assertEquals(15, hoplogs.size());
+  }
+
+  public void testHoplogListingEmptyBucket() throws Exception {
+    createHoplogs();
+
+    hdfsStore.getFileSystem().mkdirs(new Path(regionPath, "100"));
+
+    Collection<FileStatus> hoplogs = HoplogUtil.getAllRegionHoplogs(
+        regionPath, hdfsStore.getFileSystem(),
+        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+
+    assertEquals(6, hdfsStore.getFileSystem().listStatus(regionPath).length);
+    assertEquals(15, hoplogs.size());
+  }
+
+  public void testHoplogListingInvalidBucket() throws Exception {
+    createHoplogs();
+
+    hdfsStore.getFileSystem().rename(new Path(regionPath, "0"),
+        new Path(regionPath, "not_a_bucket"));
+
+    Collection<FileStatus> hoplogs = HoplogUtil.getAllRegionHoplogs(
+        regionPath, hdfsStore.getFileSystem(),
+        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+
+    assertEquals(5, hdfsStore.getFileSystem().listStatus(regionPath).length);
+    assertEquals(12, hoplogs.size());
+  }
+
+  public void testHoplogListingInvalidFiles() throws Exception {
+    createHoplogs();
+
+    Path bucketPath = new Path(regionPath, "0");
+    FSDataOutputStream stream = hdfsStore.getFileSystem().create(
+        new Path(bucketPath, "not_a_hoplog"));
+    stream.close();
+
+    Collection<FileStatus> hoplogs = HoplogUtil.getAllRegionHoplogs(
+        regionPath, hdfsStore.getFileSystem(),
+        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+
+    assertEquals(4, hdfsStore.getFileSystem().listStatus(bucketPath).length);
+    assertEquals(15, hoplogs.size());
+  }
+
+  public void testTimeRange() throws Exception {
+    createHoplogs();
+    // rename hoplogs for testing purposes
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(
+        regionManager, 0);
+    List<TrackedReference<Hoplog>> hoplogs = organizer.getSortedOplogs();
+    assertEquals(3, hoplogs.size());
+    hoplogs.get(0).get().rename("0-300-1.hop");
+    hoplogs.get(1).get().rename("0-310-1.hop");
+    hoplogs.get(2).get().rename("0-320-1.hop");
+    organizer.close();
+
+    organizer = new HdfsSortedOplogOrganizer(regionManager, 3);
+    hoplogs = organizer.getSortedOplogs();
+    assertEquals(3, hoplogs.size());
+    hoplogs.get(0).get().rename("0-600-1.hop");
+    hoplogs.get(1).get().rename("0-610-1.hop");
+    hoplogs.get(2).get().rename("0-620-1.hop");
+    organizer.close();
+
+    organizer = new HdfsSortedOplogOrganizer(regionManager, 6);
+    hoplogs = organizer.getSortedOplogs();
+    assertEquals(3, hoplogs.size());
+    hoplogs.get(0).get().rename("0-100-1.hop");
+    hoplogs.get(1).get().rename("0-110-1.hop");
+    hoplogs.get(2).get().rename("0-120-1.hop");
+
+    Collection<FileStatus> filtered = HoplogUtil.getRegionHoplogs(
+        regionPath, hdfsStore.getFileSystem(),
+        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION, 300, 305);
+    assertEquals(5, filtered.size());
+    assertTrue(containsHoplogWithName(filtered, "0-300-1.hop"));
+    assertTrue(containsHoplogWithName(filtered, "0-310-1.hop"));
+    assertTrue(containsHoplogWithName(filtered, "0-600-1.hop"));
+
+    filtered = HoplogUtil.getRegionHoplogs(regionPath,
+        hdfsStore.getFileSystem(),
+        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION, 250, 310);
+    assertEquals(6, filtered.size());
+    assertTrue(containsHoplogWithName(filtered, "0-300-1.hop"));
+    assertTrue(containsHoplogWithName(filtered, "0-310-1.hop"));
+    assertTrue(containsHoplogWithName(filtered, "0-320-1.hop"));
+
+    filtered = HoplogUtil.getRegionHoplogs(regionPath,
+        hdfsStore.getFileSystem(),
+        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION, 301, 311);
+    assertEquals(5, filtered.size());
+    assertTrue(containsHoplogWithName(filtered, "0-310-1.hop"));
+    assertTrue(containsHoplogWithName(filtered, "0-320-1.hop"));
+
+    filtered = HoplogUtil.getRegionHoplogs(regionPath,
+        hdfsStore.getFileSystem(),
+        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION, 301, 309);
+    assertEquals(4, filtered.size());
+    assertTrue(containsHoplogWithName(filtered, "0-310-1.hop"));
+    organizer.close();
+  }
+  
+  public void testExcludeSoonCleanedHoplogs() throws Exception {
+    FileSystem fs = hdfsStore.getFileSystem();
+    Path cleanUpIntervalPath = new Path(hdfsStore.getHomeDir(), HoplogConfig.CLEAN_UP_INTERVAL_FILE_NAME);
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(
+        regionManager, 0);
+    // delete the auto-generated clean-up interval file
+    if (fs.exists(cleanUpIntervalPath)){
+      fs.delete(cleanUpIntervalPath, true);
+    }
+    
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    int count = 10;
+    for (int fileCount = 0; fileCount < 3; fileCount++) {
+      items.clear();
+      for (int itemCount = 0; itemCount < count; itemCount++) {
+        items.add(new TestEvent(("key-" + itemCount), "value"));
+      }
+      organizer.flush(items.iterator(), count);
+    }
+    List<TrackedReference<Hoplog>> hoplogs = organizer.getSortedOplogs();
+    
+    for(TrackedReference<Hoplog> hoplog : hoplogs) {
+      Path p = new Path(testDataDir, getName() + "/0/" +
+          hoplog.get().getFileName() + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
+      fs.createNewFile(p);
+    }
+    Collection<FileStatus> files = HoplogUtil.getAllRegionHoplogs(
+        regionPath, hdfsStore.getFileSystem(),
+        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+    assertEquals(3, files.size());
+    
+    TimeUnit.MINUTES.sleep(2);
+    // with no clean-up interval file, all expired files are still included
+    files = HoplogUtil.getAllRegionHoplogs(
+        regionPath, hdfsStore.getFileSystem(),
+        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+    assertEquals(3, files.size());
+    
+    
+    long interval = 1 * 60 * 1000;
+    HoplogUtil.exposeCleanupIntervalMillis(fs,cleanUpIntervalPath,interval);
+    
+    files = HoplogUtil.getAllRegionHoplogs(
+        regionPath, hdfsStore.getFileSystem(),
+        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+    assertEquals(0, files.size());
+    organizer.close();  
+  }
+  
+  
+  public void testCheckpointSelection() throws Exception {
+    createHoplogs();
+    // rename hoplogs for testing purposes
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(
+        regionManager, 0);
+    List<TrackedReference<Hoplog>> hoplogs = organizer.getSortedOplogs();
+    assertEquals(3, hoplogs.size());
+    hoplogs.get(0).get().rename("0-300-1.chop");
+    hoplogs.get(1).get().rename("0-310-1.hop");
+    hoplogs.get(2).get().rename("0-320-1.hop"); // checkpoint file
+    organizer.close();
+    
+    organizer = new HdfsSortedOplogOrganizer(regionManager, 3);
+    hoplogs = organizer.getSortedOplogs();
+    assertEquals(3, hoplogs.size());
+    hoplogs.get(0).get().rename("0-600-1.hop");
+    hoplogs.get(1).get().rename("0-610-1.chop");
+    hoplogs.get(2).get().rename("0-620-1.hop");
+    organizer.close();
+    
+    organizer = new HdfsSortedOplogOrganizer(regionManager, 6);
+    hoplogs = organizer.getSortedOplogs();
+    assertEquals(3, hoplogs.size());
+    hoplogs.get(0).get().rename("0-100-1.hop");
+    hoplogs.get(1).get().rename("0-110-1.hop");
+    hoplogs.get(2).get().rename("0-120-1.chop");
+    
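+    // without the checkpoint flag only regular .hop files are selected; with it, the .chop checkpoint files are preferred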
+    Collection<FileStatus> filtered = HoplogUtil.filterHoplogs(
+        hdfsStore.getFileSystem(), regionPath, 290, 305, false);
+    assertEquals(4, filtered.size());
+    assertTrue(containsHoplogWithName(filtered, "0-310-1.hop"));
+    assertTrue(containsHoplogWithName(filtered, "0-600-1.hop"));
+    
+    filtered = HoplogUtil.filterHoplogs(hdfsStore.getFileSystem(),
+        regionPath, 290, 305, true);
+    assertEquals(3, filtered.size());
+    assertTrue(containsHoplogWithName(filtered, "0-300-1.chop"));
+    assertTrue(containsHoplogWithName(filtered, "0-610-1.chop"));
+    assertTrue(containsHoplogWithName(filtered, "0-120-1.chop"));
+    organizer.close();
+  }
+  
+  private boolean containsHoplogWithName(Collection<FileStatus> filtered,
+      String name) {
+    for (FileStatus file : filtered) {
+      if (file.getPath().getName().equals(name)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  private void createHoplogs() throws IOException, Exception {
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    int count = 10;
+    for (int bucketId = 0; bucketId < 15; bucketId += 3) {
+      HoplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager,
+          bucketId);
+      for (int fileCount = 0; fileCount < 3; fileCount++) {
+        items.clear();
+        for (int itemCount = 0; itemCount < count; itemCount++) {
+          items.add(new TestEvent(("key-" + itemCount), "value"));
+        }
+        organizer.flush(items.iterator(), count);
+      }
+    }
+  }
+  
+  @Override
+  protected void setUp() throws Exception {
+    super.setUp();
+    regionPath = new Path(testDataDir, getName());
+  }
+  
+  @Override 
+  protected void tearDown() throws Exception{
+    FileSystem fs = hdfsStore.getFileSystem();
+    Path cleanUpIntervalPath = new Path(hdfsStore.getHomeDir(),HoplogConfig.CLEAN_UP_INTERVAL_FILE_NAME);
+    if (fs.exists(cleanUpIntervalPath)){
+      fs.delete(cleanUpIntervalPath, true);
+    }  
+    super.tearDown();
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache30/Bug38741DUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache30/Bug38741DUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache30/Bug38741DUnitTest.java
index 7e4acbf..f1b9746 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache30/Bug38741DUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache30/Bug38741DUnitTest.java
@@ -308,7 +308,7 @@ public class Bug38741DUnitTest extends ClientServerTestCase {
             BucketRegion br = (BucketRegion) r;
             try {
               KeyInfo keyInfo = new KeyInfo(k1, null, bucketId);
-              RawValue rv = br.getSerialized(keyInfo, false, false, null, null, false);
+              RawValue rv = br.getSerialized(keyInfo, false, false, null, null, false, false);
               Object val = rv.getRawValue();
               assertTrue(val instanceof CachedDeserializable);
               CachedDeserializable cd = (CachedDeserializable)val;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsJUnitTest.java
new file mode 100644
index 0000000..5e2ba4f
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsJUnitTest.java
@@ -0,0 +1,33 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.internal.cache;
+
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+/**
+ * Tests region operations when entries are not yet persisted
+ * to HDFS but are still in the HDFS async queue.
+ * @author sbawaska
+ */
+@Category({IntegrationTest.class, HoplogTest.class})
+public class HDFSQueueRegionOperationsJUnitTest extends
+    HDFSRegionOperationsJUnitTest {
+
+  @Override
+  protected int getBatchTimeInterval() {
+    return 50*1000;
+  }
+
+  @Override
+  protected void sleep(String regionPath) {
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
new file mode 100644
index 0000000..f28c138
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
@@ -0,0 +1,54 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.internal.cache;
+
+import java.util.Properties;
+
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.PartitionAttributes;
+import com.gemstone.gemfire.cache.PartitionAttributesFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+@Category({IntegrationTest.class, HoplogTest.class})
+public class HDFSQueueRegionOperationsOffHeapJUnitTest extends HDFSQueueRegionOperationsJUnitTest {
+  static {
+    System.setProperty("gemfire.trackOffHeapRefCounts", "true");
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    super.tearDown();
+    OffHeapTestUtil.checkOrphans();
+  }
+  @Override
+  protected Region<Integer, String> createRegion(String regionName) {
+    RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+    PartitionAttributes prAttr = new PartitionAttributesFactory().setTotalNumBuckets(10).create();
+    rf.setPartitionAttributes(prAttr);
+    rf.setOffHeap(true);
+    rf.setHDFSStoreName(hdfsStore.getName());
+    Region<Integer, String> r = rf.create(regionName);
+//    addListener(r);
+    
+    ((PartitionedRegion) r).setQueryHDFS(true);
+    return r;
+  }
+  @Override
+  protected Properties getDSProps() {
+    Properties props = super.getDSProps();
+    props.setProperty("off-heap-memory-size", "50m");
+    return props;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
new file mode 100644
index 0000000..6cf9c6a
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
@@ -0,0 +1,542 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.internal.cache;
+
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map.Entry;
+import java.util.Properties;
+import java.util.Random;
+import java.util.Set;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.fs.Path;
+import org.junit.FixMethodOrder;
+import org.junit.experimental.categories.Category;
+import org.junit.runners.MethodSorters;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.DataPolicy;
+import com.gemstone.gemfire.cache.DiskStore;
+import com.gemstone.gemfire.cache.EvictionAction;
+import com.gemstone.gemfire.cache.EvictionAlgorithm;
+import com.gemstone.gemfire.cache.PartitionAttributes;
+import com.gemstone.gemfire.cache.PartitionAttributesFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionAttributes;
+import com.gemstone.gemfire.cache.RegionDestroyedException;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
+import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceType;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
+import com.gemstone.gemfire.internal.hll.HyperLogLog;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+/**
+ * Tests that region operations work as expected when data is in HDFS.
+ * This test explicitly clears the in-memory ConcurrentHashMaps that back
+ * AbstractRegionMap before validating region operations.
+ * 
+ * @author sbawaska
+ */
+@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+@Category({IntegrationTest.class, HoplogTest.class})
+public class HDFSRegionOperationsJUnitTest extends TestCase {
+
+  protected Cache cache;
+  protected HDFSStore hdfsStore;
+
+  public void setUp() throws Exception {
+    Properties props = getDSProps();
+    cache = new CacheFactory(props).create();
+    System.setProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP, "true");
+    String storeName = getName()+"-store";
+    HDFSStoreFactory hsf = cache.createHDFSStoreFactory();
+    hsf.setHomeDir(getName()+"-test");
+    hsf.setBatchInterval(getBatchTimeInterval());
+    hdfsStore = hsf.create(storeName);
+  }
+
+  protected Properties getDSProps() {
+    Properties props = new Properties();
+    props.put("mcast-port", "0");
+    props.put("locators", "");
+    props.put("log-level", "config");
+    return props;
+  }
+
+  public void tearDown() throws Exception {
+    for (Region r : cache.rootRegions()) {
+      if (r != null) {
+        r.close();
+      }
+    }
+
+    if (cache.getRegion(getName()) != null) {
+      cache.getRegion(getName()).destroyRegion();
+    }
+    DiskStore ds = cache.findDiskStore(null);
+    if (ds != null) {
+      ds.destroy();
+    }
+    
+    ((HDFSStoreImpl)hdfsStore).getFileSystem().delete(new Path(hdfsStore.getHomeDir()), true);
+  }
+
+  protected int getBatchTimeInterval() {
+    return 1000;
+  }
+
+  protected Region<Integer, String> createRegion(String regionName) {
+    RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+    PartitionAttributes prAttr = new PartitionAttributesFactory().setTotalNumBuckets(10).create();
+    rf.setPartitionAttributes(prAttr);
+    rf.setHDFSStoreName(hdfsStore.getName());
+    Region<Integer, String> r = rf.create(regionName);
+    
+    ((PartitionedRegion) r).setQueryHDFS(true);
+    return r;
+  }
+
+  protected void clearBackingCHM(Region<Integer, String> r) {
+    PartitionedRegion pr = (PartitionedRegion)r;
+    for (BucketRegion br : pr.getDataStore().getAllLocalBucketRegions()) {
+      assertTrue(br.getRegionMap() instanceof HDFSRegionMap);
+      ((AbstractRegionMap)br.getRegionMap())._getMap().clear();
+      // wait here to make sure that the queue has been flushed
+    }
+    sleep(pr.getFullPath());
+  }
+
+  protected void sleep(String regionPath) {
+    String qname = HDFSStoreFactoryImpl.getEventQueueName(regionPath);
+    GemFireCacheImpl.getExisting().waitForSenderQueueFlush(qname, true, 30);
+  }
+
+  public void test010PUTDMLSupport() {
+    Region<Integer, String> r = createRegion(getName());
+    SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + getName());
+    assertEquals(0, stats.getRead().getCount());
+    for (int i=0; i<100; i++) {
+      r.put(i, "value"+i);
+    }
+    assertEquals(100, stats.getRead().getCount());
+    sleep(r.getFullPath());
+    clearBackingCHM(r);
+    LocalRegion lr = (LocalRegion) r;
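+    // put-DML style writes should not read the existing value from HDFS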
+    for (int i=0; i<200; i++) {
+      EntryEventImpl ev = lr.newPutEntryEvent(i, "value"+i, null);
+      lr.validatedPut(ev, System.currentTimeMillis());
+    }
+    // verify that read count on HDFS does not change
+    assertEquals(100, stats.getRead().getCount());
+    sleep(r.getFullPath());
+    clearBackingCHM(r);
+    for (int i=0; i<200; i++) {
+      assertEquals("value"+i, r.get(i));
+    }
+    if (getBatchTimeInterval() > 1000) {
+      // reads from async queue
+      assertEquals(100, stats.getRead().getCount());
+    } else {
+      assertEquals(300, stats.getRead().getCount());
+    }
+  }
+
+  public void test020GetOperationalData() throws Exception {
+    Region<Integer, String> r = createRegion(getName());
+    SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + getName());
+    assertEquals(0, stats.getRead().getCount());
+    for (int i=0; i<100; i++) {
+      r.put(i, "value"+i);
+    }
+    int expectedReadsFromHDFS = 100;
+    assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
+    sleep(r.getFullPath());
+    clearBackingCHM(r);
+    LocalRegion lr = (LocalRegion) r;
+    for (int i=0; i<200; i++) {
+      if (i < 100) {
+        assertEquals("value"+i, r.get(i));
+      } else {
+        assertNull(r.get(i));
+      }
+    }
+    if (getBatchTimeInterval() > 1000) {
+      // reads from async queue
+      expectedReadsFromHDFS = 200; // initial 100 + 100 for misses
+    } else {
+      expectedReadsFromHDFS = 300; // initial 100 + 200 for reads
+    }
+    assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
+    for (int i=0; i<200; i++) {
+      assertNull(lr.get(i, null, true, false, false, null, null, false, false/*allowReadFromHDFS*/));
+    }
+    // no increase in HDFS reads
+    assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
+    
+    /* MergeGemXDHDFSToGFE: this API has not been merged because it is not called by any code */
+    //   test the dataView API
+    //for (int i=0; i<200; i++) {
+    //  assertNull(lr.getDataView().getLocally(i, null, i%10, lr, true, true, null, null, false, false/*allowReadFromHDFS*/));
+    //}
+    // no increase in HDFS reads
+    assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
+  }
+  
+  public void test030RemoveOperationalData() throws Exception {
+    Region<Integer, String> r = createRegion(getName());
+    SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + getName());
+    assertEquals(0, stats.getRead().getCount());
+    for (int i=0; i<100; i++) {
+      r.put(i, "value"+i);
+    }
+    int expectedReadsFromHDFS = 100;
+    assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
+    sleep(r.getFullPath());
+    PartitionedRegion lr = (PartitionedRegion) r;
+    for(int i =0; i < 50; i++) {
+      lr.getBucketRegion(i).customEvictDestroy(i);
+    }
+    for (int i=0; i<200; i++) {
+      if (i < 100) {
+        assertEquals("value"+i, r.get(i));
+      } else {
+        assertNull(r.get(i));
+      }
+    }
+    if (getBatchTimeInterval() > 1000) {
+      // reads from async queue
+      expectedReadsFromHDFS = 200; // initial 100 + 100 for misses
+    } else {
+      expectedReadsFromHDFS = 250; // initial 100 + 100 for misses + 50 for evicted entries read from HDFS
+    }
+    assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
+    for (int i=0; i<50; i++) {
+      assertNull(lr.get(i, null, true, false, false, null,  null, false, false/*allowReadFromHDFS*/));
+    }
+    for (int i=50; i<100; i++) {
+      assertEquals("value"+i, lr.get(i, null, true, false, false, null,null, false, false/*allowReadFromHDFS*/));
+    }
+    for (int i=100; i<200; i++) {
+      assertNull(lr.get(i, null, true, false, false, null,  null, false, false/*allowReadFromHDFS*/));
+    }
+    // no increase in HDFS reads
+    assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
+  }
+
+  public void _test040NoAutoEviction() throws Exception {
+    if (!cache.isClosed()) {
+      tearDown();
+      cache.close();
+      System.setProperty("gemfire.disableAutoEviction", "true");
+      setUp();
+    }
+    Region<Integer, String> r = createRegion(getName());
+    System.setProperty("gemfire.disableAutoEviction", "false");
+    for (int i =0; i<5; i++) {
+      r.put(i, "value"+i);
+    }
+    PartitionedRegion pr = (PartitionedRegion) r;
+    BucketRegion br = pr.getBucketRegion(1);
+    assertNotNull(br.getAttributes().getEvictionAttributes());
+    assertEquals(EvictionAlgorithm.NONE, br.getAttributes().getEvictionAttributes().getAlgorithm());
+
+    GemFireCacheImpl cache = (GemFireCacheImpl) r.getCache();
+    assertEquals(0.0f, cache.getResourceManager().getEvictionHeapPercentage());
+  }
+
+  public void test050LRURegionAttributesForPR() {
+    RegionFactory<Integer, String> rf = cache.createRegionFactory();
+    rf.setHDFSStoreName(hdfsStore.getName());
+    rf.setDataPolicy(DataPolicy.HDFS_PARTITION);
+    verifyLRURegionAttributesForPR(rf.create(getName()));
+  }
+
+  public void test060LRURegionAttributesForRegionShortcutPR() {
+    verifyLRURegionAttributesForPR(createRegion(getName()));
+  }
+
+  private void verifyLRURegionAttributesForPR(Region r) {
+    for (int i =0; i<200; i++) {
+      r.put(i, "value"+i);
+    }
+    RegionAttributes<Integer, String> ra = r.getAttributes();
+    assertNotNull(ra.getEvictionAttributes());
+    // default eviction action for region shortcut
+    assertEquals(EvictionAction.OVERFLOW_TO_DISK, ra.getEvictionAttributes().getAction());
+
+    GemFireCacheImpl cache = (GemFireCacheImpl) r.getCache();
+    assertEquals(80.0f, cache.getResourceManager().getEvictionHeapPercentage());
+    DiskStore ds = cache.findDiskStore(null);
+    assertNotNull(ds);
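+    // the HDFS partitioned region registers itself as a heap-memory resource listener, and each of
+    // its hosted buckets should use LRU_HEAP eviction with OVERFLOW_TO_DISK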
+    Set s = cache.getResourceManager().getResourceListeners(ResourceType.HEAP_MEMORY);
+    Iterator it = s.iterator();
+    boolean regionFound = false;
+    while (it.hasNext()) {
+      Object o = it.next();
+      if (o instanceof PartitionedRegion) {
+        PartitionedRegion pr = (PartitionedRegion) o;
+        if (getName().equals(pr.getName())) {
+          regionFound = true;
+        } else {
+          continue;
+        }
+        for (BucketRegion br : pr.getDataStore().getAllLocalBucketRegions()) {
+          assertNotNull(br.getAttributes().getEvictionAttributes());
+          assertEquals(EvictionAlgorithm.LRU_HEAP, br.getAttributes().getEvictionAttributes().getAlgorithm());
+          assertEquals(EvictionAction.OVERFLOW_TO_DISK, br.getAttributes().getEvictionAttributes().getAction());
+        }
+      }
+    }
+    assertTrue(regionFound);
+
+  }
+
+  public void test070SizeEstimate() {
+    Region<Integer, String> r = createRegion(getName());
+    int size = 226;
+    Random rand = new Random();
+    for (int i=0; i<size; i++) {
+      r.put(rand.nextInt(), "value"+i);
+    }
+    // size before flush
+    LocalRegion lr = (LocalRegion) r;
+    long estimate = lr.sizeEstimate();
+    double err = Math.abs(estimate - size) / (double) size;
+    // on a busy system flush might start before we call estimateSize, so rather than equality,
+    // test for error margin. fixes bug 49381
+    assertTrue("size:"+size+" estimate:"+estimate, err < 0.02 * 10); // each bucket can have an error of 0.02
+
+    // size after flush
+    sleep(r.getFullPath());
+    estimate = lr.sizeEstimate();
+    err = Math.abs(estimate - size) / (double) size;
+    assertTrue("size:"+size+" estimate:"+estimate, err < 0.02 * 10); // each bucket can have an error of 0.02
+  }
+
+  public void test080PutGet() throws InterruptedException {
+    Region<Integer, String> r = createRegion(getName());
+    for (int i=0; i<100; i++) {
+      r.put(i, "value"+i);
+    }
+    clearBackingCHM(r);
+    for (int i=0; i<100; i++) {
+      assertEquals("value"+i, r.get(i));
+    }
+    
+    //Do a put while there are entries in the map
+    r.put(0, "value"+0);
+    
+    r.destroy(1, "value"+1);
+  }
+
+  public void test090Delete() {
+    Region<Integer, String> r = createRegion(getName());
+    for (int i=0; i<11; i++) {
+      r.put(i, "value"+i);
+    }
+    clearBackingCHM(r);
+    int delKey = 9;
+    r.destroy(delKey);
+    assertNull(r.get(delKey));
+    assertFalse(r.containsKey(delKey));
+  }
+
+  public void test100Invalidate() {
+    Region<Integer, String> r = createRegion(getName());
+    for (int i=0; i<100; i++) {
+      r.put(i, "value"+i);
+    }
+    clearBackingCHM(r);
+    int invKey = 9;
+    r.invalidate(invKey);
+    assertNull(r.get(invKey));
+    assertTrue(r.containsKey(invKey));
+  }
+
+  public void test110Size() {
+    Region<Integer, String> r = createRegion(getName());
+    for (int i=0; i<100; i++) {
+      r.put(i, "value"+i);
+    }
+    clearBackingCHM(r);
+    assertEquals(100, r.size());
+    r.destroy(45);
+    assertEquals(99, r.size());
+    r.invalidate(55);
+    r.invalidate(65);
+    assertEquals(99, r.size());
+  }
+
+  public void test120KeyIterator() {
+    Region<Integer, String> r = createRegion(getName());
+    for (int i=0; i<100; i++) {
+      r.put(i, "value"+i);
+    }
+    clearBackingCHM(r);
+    Set<Integer> keys = r.keySet();
+    int c = 0;
+    for (int i : keys) {
+//      assertEquals(c, i);
+      c++;
+    }
+    assertEquals(100, c);
+    assertEquals(100, keys.size());
+    int delKey = 88;
+    r.destroy(delKey);
+    r.invalidate(39);
+    keys = r.keySet();
+    c = 0;
+    for (int i : keys) {
+      if (c == delKey) {
+        c++;
+      }
+//      assertEquals(c, i);
+      c++;
+    }
+    assertEquals(99, keys.size());
+  }
+
+  public void test130EntriesIterator() {
+    Region<Integer, String> r = createRegion(getName());
+    for (int i=0; i<100; i++) {
+      r.put(i, "value"+i);
+    }
+    clearBackingCHM(r);
+    Set<Entry<Integer, String>> entries = r.entrySet();
+    int c = 0;
+    for (Entry<Integer, String> e : entries) {
+//      assertEquals(c, (int) e.getKey());
+      assertEquals("value"+e.getKey(), e.getValue());
+      c++;
+    }
+    assertEquals(100, c);
+    assertEquals(100, entries.size());
+    int delKey = 88;
+    r.destroy(delKey);
+    int invKey = 39;
+    r.invalidate(invKey);
+    entries = r.entrySet();
+    c = 0;
+    for (Entry<Integer, String> e : entries) {
+      if (c == delKey) {
+        c++;
+      } else if (e.getKey() == invKey) {
+//        assertEquals(c, (int) e.getKey());
+        assertNull(e.getValue());
+      } else {
+//        assertEquals(c, (int) e.getKey());
+        assertEquals("value"+e.getKey(), e.getValue());
+      }
+      c++;
+    }
+    assertEquals(99, entries.size());
+  }
+
+  public void test140ContainsKey() {
+    Region<Integer, String> r = createRegion(getName());
+    for (int i=0; i<100; i++) {
+      r.put(i, "value"+i);
+    }
+    clearBackingCHM(r);
+    assertTrue(r.containsKey(80));
+    r.destroy(80);
+    assertFalse(r.containsKey(80));
+    r.invalidate(64);
+    assertTrue(r.containsKey(64));
+  }
+
+  public void test150ContainsValue() {
+    Region<Integer, String> r = createRegion(getName());
+    for (int i=0; i<100; i++) {
+      r.put(i, "value"+i);
+    }
+    clearBackingCHM(r);
+    assertTrue(r.containsValue("value45"));
+    r.destroy(45);
+    assertFalse(r.containsValue("value45"));
+    r.invalidate(64);
+    assertFalse(r.containsValue("value64"));
+  }
+
+  public void test160DestroyRegion() {
+    Region<Integer, String> r = createRegion(getName());
+    for (int i=0; i<100; i++) {
+      r.put(i, "value"+i);
+    }
+    clearBackingCHM(r);
+    r.destroyRegion();
+    try {
+      r.get(3);
+      fail("expected exception not thrown");
+    } catch (RegionDestroyedException expected) {
+    }
+  }
+
+  public void test170PutIfAbsent() {
+    Region<Integer, String> r = createRegion(getName());
+    r.put(1, "value1");
+    clearBackingCHM(r);
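+    // even with the in-memory entry cleared, putIfAbsent should find the persisted value and return it
+    // instead of installing the new one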
+    assertEquals("value1", r.putIfAbsent(1, "value2"));
+  }
+
+  public void test180Replace() {
+    Region<Integer, String> r = createRegion(getName());
+    assertNull(r.replace(1, "value"));
+    r.put(1, "value1");
+    clearBackingCHM(r);
+    assertEquals("value1", r.replace(1, "value2"));
+  }
+
+  public void test190ReplaceKVV() {
+    Region<Integer, String> r = createRegion(getName());
+    assertFalse(r.replace(1, "oldValue", "newValue"));
+    r.put(1, "value1");
+    clearBackingCHM(r);
+    assertTrue(r.replace(1, "value1", "value2"));
+  }
+
+  public void test200Accuracy() throws IOException {
+    double sum=0.0;
+    int iter = 10;
+    for (int t=0; t<iter; t++) {
+      Random r = new Random();
+      HashSet<Integer> vals = new HashSet<Integer>();
+      HyperLogLog hll = new HyperLogLog(0.03);
+      //HyperLogLog hll = new HyperLogLog(0.1);
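+      // 0.03 is the requested relative error of the estimator; the assertion at the end allows the
+      // average absolute error over the 10 runs to reach 6%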
+      double accuracy = 0.0;
+      for (int i = 0; i < 2 * 1000000; i++) {
+        int val = r.nextInt();
+        vals.add(val);
+        hll.offer(val);
+      }
+      long size = vals.size();
+      long est = hll.cardinality();
+      
+      accuracy = 100.0 * (size - est) / est;
+      System.out.printf("Accuracy is %f hll size is %d\n", accuracy, hll.getBytes().length);
+      sum+=Math.abs(accuracy);
+    }
+    double avgAccuracy = sum/(iter*1.0);
+    System.out.println("Avg accuracy is:"+avgAccuracy);
+    assertTrue(avgAccuracy < 6);
+  }
+}



[03/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
new file mode 100644
index 0000000..421cd28
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
@@ -0,0 +1,78 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.internal.cache;
+
+import java.util.Iterator;
+import java.util.Properties;
+
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.PartitionAttributes;
+import com.gemstone.gemfire.cache.PartitionAttributesFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.internal.util.concurrent.CustomEntryConcurrentHashMap;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+@Category({IntegrationTest.class, HoplogTest.class})
+public class HDFSRegionOperationsOffHeapJUnitTest extends HDFSRegionOperationsJUnitTest {
+  static {
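+    // enable off-heap reference count tracking so that checkOrphans() in tearDown() can flag leaks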
+    System.setProperty("gemfire.trackOffHeapRefCounts", "true");
+    System.setProperty("gemfire.trackOffHeapFreedRefCounts", "true");
+  }
+
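+  // remove entries from each bucket's backing map individually and release their off-heap references
+  // so that checkOrphans() in tearDown() does not report them as leaked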
+  @Override
+  protected void clearBackingCHM(Region<Integer, String> r) {
+    PartitionedRegion pr = (PartitionedRegion)r;
+    for (BucketRegion br : pr.getDataStore().getAllLocalBucketRegions()) {
+      assertTrue(br.getRegionMap() instanceof HDFSRegionMap);
+      CustomEntryConcurrentHashMap chm = ((AbstractRegionMap)br.getRegionMap())._getMap();
+      Iterator it = chm.keySet().iterator();
+      while (it.hasNext()) {
+        Object key = it.next();
+        OffHeapRegionEntry re = (OffHeapRegionEntry) chm.remove(key);
+        assert re != null;
+        re.release();
+      }
+      // wait here to make sure that the queue has been flushed
+    }
+    sleep(pr.getFullPath());
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    
+    OffHeapTestUtil.checkOrphans();
+    super.tearDown();
+  }
+  @Override
+  protected Region<Integer, String> createRegion(String regionName) {
+    RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+    PartitionAttributes prAttr = new PartitionAttributesFactory().setTotalNumBuckets(10).create();
+    rf.setPartitionAttributes(prAttr);
+    rf.setOffHeap(true);
+    rf.setHDFSStoreName(hdfsStore.getName());
+    Region<Integer, String> r = rf.create(regionName);
+//    addListener(r);
+    
+    ((PartitionedRegion) r).setQueryHDFS(true);
+    return r;
+  }
+  @Override
+  protected Properties getDSProps() {
+    Properties props = super.getDSProps();
+    props.setProperty("off-heap-memory-size", "50m");
+    return props;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderQueueJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderQueueJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderQueueJUnitTest.java
index b2399fd..a7daf98 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderQueueJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderQueueJUnitTest.java
@@ -67,7 +67,7 @@ public class ParallelGatewaySenderQueueJUnitTest {
     PartitionedRegionDataStore dataStore = mock(PartitionedRegionDataStore.class);
     when(mockMetaRegion.getDataStore()).thenReturn(dataStore);
     when(dataStore.getSizeOfLocalPrimaryBuckets()).thenReturn(3); 
-    when(metaRegionFactory.newMetataRegion(any(), any(), any(), any())).thenReturn(mockMetaRegion);
+    when(metaRegionFactory.newMetataRegion(any(), any(), any(), any(), anyBoolean())).thenReturn(mockMetaRegion);
     when(cache.createVMRegion(any(), any(), any())).thenReturn(mockMetaRegion);
     
     queue.addShadowPartitionedRegionForUserPR(mockPR("region1"));

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
new file mode 100644
index 0000000..38145d1
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
@@ -0,0 +1,169 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.management.bean.stats;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Set;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.Operation;
+import com.gemstone.gemfire.cache.PartitionAttributesFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.SortedHDFSQueuePersistedEvent;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer;
+import com.gemstone.gemfire.internal.cache.BucketRegion;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.PartitionedRegion;
+import com.gemstone.gemfire.internal.cache.execute.BucketMovedException;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.HFileStoreStatistics;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
+import com.gemstone.gemfire.internal.cache.versions.DiskVersionTag;
+import com.gemstone.gemfire.internal.util.BlobHelper;
+import com.gemstone.gemfire.management.ManagementService;
+import com.gemstone.gemfire.management.RegionMXBean;
+import com.gemstone.gemfire.management.internal.ManagementConstants;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+/**
+ * Test for verifying HDFS related MBean attributes
+ * @author rishim
+ *
+ */
+@Category({IntegrationTest.class, HoplogTest.class})
+public class HDFSRegionMBeanAttributeJUnitTest extends TestCase {
+
+  public static final String HDFS_STORE_NAME = "HDFSMBeanJUnitTestStore";
+  public static final String REGION_NAME = "HDFSMBeanJUnitTest_Region";
+  protected Path testDataDir;
+  protected Cache cache;
+
+  protected HDFSStoreFactory hsf;
+  protected HDFSStoreImpl hdfsStore;
+  protected Region<Object, Object> region;
+  SortedOplogStatistics stats;
+  HFileStoreStatistics storeStats;
+  BlockCache blockCache;
+
+  @Override
+  protected void setUp() throws Exception {
+    super.setUp();
+
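+    // allow the HDFS store to run against a local file system path so no real HDFS cluster is needed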
+    System.setProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP, "true");
+    testDataDir = new Path("test-case");
+
+    cache = createCache();
+
+    configureHdfsStoreFactory();
+    hdfsStore = (HDFSStoreImpl) hsf.create(HDFS_STORE_NAME);
+
+    RegionFactory<Object, Object> regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+    regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
+
+    // regionfactory.setCompressionCodec("Some");
+    PartitionAttributesFactory fac = new PartitionAttributesFactory();
+    fac.setTotalNumBuckets(10);
+
+    regionfactory.setPartitionAttributes(fac.create());
+    region = regionfactory.create(REGION_NAME);
+
+  }
+
+  protected void configureHdfsStoreFactory() throws Exception {
+    hsf = this.cache.createHDFSStoreFactory();
+    hsf.setHomeDir(testDataDir.toString());
+  }
+
+  protected Cache createCache() {
+    CacheFactory cf = new CacheFactory().set("mcast-port", "0").set("log-level", "info");
+    cache = cf.create();
+    return cache;
+  }
+
+  @Override
+  protected void tearDown() throws Exception {
+    hdfsStore.getFileSystem().delete(testDataDir, true);
+    cache.close();
+    super.tearDown();
+  }
+
+  public void testStoreUsageStats() throws Exception {
+
+    PartitionedRegion parRegion = (PartitionedRegion)region;
+
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    for (int i = 0; i < 100; i++) {
+      String key = ("key-" + (i * 100 + i));
+      String value = ("value-" + System.nanoTime());
+      parRegion.put(key, value);
+      items.add(new TestEvent(key, value));
+    }
+
+    // Don't want to create
+    Set<BucketRegion> localPrimaryBucketRegions = parRegion.getDataStore().getAllLocalPrimaryBucketRegions();
+    BucketRegion flushingBucket=  localPrimaryBucketRegions.iterator().next();
+    HoplogOrganizer hoplogOrganizer = getOrganizer(parRegion,flushingBucket.getId());
+    hoplogOrganizer.flush(items.iterator(), 100);
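+    // flushing one primary bucket's organizer writes a hoplog to HDFS, so the region MBean checked
+    // below should report a non-zero disk usage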
+    
+    GemFireCacheImpl cache = GemFireCacheImpl.getExisting();
+    ManagementService service = ManagementService.getExistingManagementService(cache);
+    RegionMXBean bean = service.getLocalRegionMBean(region.getFullPath());
+    
+
+    //assertTrue(bean.getEntryCount() == ManagementConstants.ZERO);
+    assertTrue(bean.getEntrySize() == ManagementConstants.NOT_AVAILABLE_LONG);
+    assertTrue(0 < bean.getDiskUsage());
+    
+  }
+  
+  
+  private HoplogOrganizer getOrganizer(PartitionedRegion region, int bucketId) {
+    BucketRegion br = region.getDataStore().getLocalBucketById(bucketId);
+    if (br == null) {
+      // got rebalanced or something
+      throw new BucketMovedException("Bucket region is no longer available. BucketId: " + 
+          bucketId +  " RegionPath: "  +  region.getFullPath());
+    }
+
+    return br.getHoplogOrganizer();
+  }
+ 
+  
+  public static class TestEvent extends SortedHDFSQueuePersistedEvent implements Serializable {
+    private static final long serialVersionUID = 1L;
+    
+    Object key;
+    
+    public TestEvent(String k, String v) throws Exception {
+      this(k, v, Operation.PUT_IF_ABSENT);
+    }
+
+    public TestEvent(String k, String v, Operation op) throws Exception {
+      super(v, op, (byte) 0x02, false, new DiskVersionTag(), BlobHelper.serializeToBlob(k), 0);
+      this.key = k; 
+    }
+  }
+
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommandsJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommandsJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommandsJUnitTest.java
new file mode 100644
index 0000000..af47138
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommandsJUnitTest.java
@@ -0,0 +1,838 @@
+/*
+ * =========================================================================
+ *  Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
+ *  This product is protected by U.S. and international copyright
+ *  and intellectual property laws. Pivotal products are covered by
+ *  one or more patents listed at http://www.pivotal.io/patents.
+ * ========================================================================
+ */
+
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.jmock.Expectations;
+import org.jmock.Mockery;
+import org.jmock.lib.legacy.ClassImposteriser;
+import org.json.JSONArray;
+import org.json.JSONException;
+import org.json.JSONObject;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.execute.Execution;
+import com.gemstone.gemfire.cache.execute.FunctionInvocationTargetException;
+import com.gemstone.gemfire.cache.execute.ResultCollector;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.internal.cache.execute.AbstractExecution;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.cli.Result.Status;
+import com.gemstone.gemfire.management.internal.cli.functions.AlterHDFSStoreFunction;
+import com.gemstone.gemfire.management.internal.cli.functions.CliFunctionResult;
+import com.gemstone.gemfire.management.internal.cli.functions.CreateHDFSStoreFunction;
+import com.gemstone.gemfire.management.internal.cli.functions.DescribeHDFSStoreFunction;
+import com.gemstone.gemfire.management.internal.cli.functions.DestroyHDFSStoreFunction;
+import com.gemstone.gemfire.management.internal.cli.functions.ListHDFSStoresFunction;
+import com.gemstone.gemfire.management.internal.cli.functions.ListHDFSStoresFunction.HdfsStoreDetails;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.json.GfJsonObject;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import com.gemstone.gemfire.management.internal.cli.result.InfoResultData;
+import com.gemstone.gemfire.management.internal.cli.result.TabularResultData;
+import com.gemstone.gemfire.management.internal.cli.util.HDFSStoreNotFoundException;
+import com.gemstone.gemfire.management.internal.cli.util.MemberNotFoundException;
+import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+/**
+ * The HDFSStoreCommandsJUnitTest class is a test suite exercising the contract and functionality of
+ * the HDFSStoreCommands class, which implements the GemFire shell (gfsh) commands that access and
+ * modify HDFS stores in GemFire.
+ * <p>
+ * 
+ * @author Namrata Thanvi
+ * @see com.gemstone.gemfire.management.internal.cli.commands.HDFSStoreCommands
+ * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder
+ * @see com.gemstone.gemfire.management.internal.cli.functions.DescribeHDFSStoreFunction
+ * @see org.jmock.Expectations
+ * @see org.jmock.Mockery
+ * @see org.jmock.lib.legacy.ClassImposteriser
+ * @see org.junit.Assert
+ * @see org.junit.Test
+ */
+@Category({IntegrationTest.class, HoplogTest.class})
+public class HDFSStoreCommandsJUnitTest {
+
+  private Mockery mockContext;
+
+  @Before
+  public void setUp() {
+    mockContext = new Mockery() {
+      {
+        setImposteriser(ClassImposteriser.INSTANCE);
+      }
+    };
+  }
+
+  @After
+  public void tearDown() {
+    mockContext.assertIsSatisfied();
+    mockContext = null;
+  }
+
+  @Test
+  public void testGetHDFSStoreDescription() {
+    final String hdfsStoreName = "mockHdfsStore";
+    final String memberId = "mockMember";
+    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
+    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
+
+    final HDFSStoreConfigHolder expectedHdfsStoreConfigHolder = createMockHDFSStoreConfigHolder(mockContext, "hdfsStoreName",
+        "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0, 2048, true, true, true, 40,
+        40, 40, 800);
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockMember).getName();
+        will(returnValue(null));
+        oneOf(mockMember).getId();
+        will(returnValue(memberId));
+        oneOf(mockFunctionExecutor).withArgs(with(equal(hdfsStoreName)));
+        will(returnValue(mockFunctionExecutor));
+        oneOf(mockFunctionExecutor).execute(with(aNonNull(DescribeHDFSStoreFunction.class)));
+        will(returnValue(mockResultCollector));
+        oneOf(mockResultCollector).getResult();
+        will(returnValue(Arrays.asList(expectedHdfsStoreConfigHolder)));
+      }
+    });
+
+    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
+
+    final HDFSStoreConfigHolder actualHdfsStoreConfigHolder = commands.getHDFSStoreDescription(memberId, hdfsStoreName);
+
+    assertNotNull(actualHdfsStoreConfigHolder);
+    assertEquals(expectedHdfsStoreConfigHolder, actualHdfsStoreConfigHolder);
+  }
+
+  @Test(expected = MemberNotFoundException.class)
+  public void testGetHDFSStoreDescriptionThrowsMemberNotFoundException() {
+    final String hdfsStoreName = "mockHdfsStore";
+    final String memberId = "mockMember";
+    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockMember).getName();
+        will(returnValue(null));
+        oneOf(mockMember).getId();
+        will(returnValue("testMember"));
+      }
+    });
+
+    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, null);
+
+    try {
+      commands.getHDFSStoreDescription(memberId, hdfsStoreName);
+    } catch (MemberNotFoundException expected) {
+      assertEquals(CliStrings.format(CliStrings.MEMBER_NOT_FOUND_ERROR_MESSAGE, memberId), expected.getMessage());
+      throw expected;
+    }
+  }
+
+  @Test(expected = HDFSStoreNotFoundException.class)
+  public void testGetHDFSStoreDescriptionThrowsResourceNotFoundException() {
+    final String hdfsStoreName = "mockHdfsStore";
+    final String memberId = "mockMember";
+
+    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockMember).getName();
+        will(returnValue(null));
+        oneOf(mockMember).getId();
+        will(returnValue(memberId));
+        oneOf(mockFunctionExecutor).withArgs(with(equal(hdfsStoreName)));
+        will(returnValue(mockFunctionExecutor));
+        oneOf(mockFunctionExecutor).execute(with(aNonNull(DescribeHDFSStoreFunction.class)));
+        will(throwException(new HDFSStoreNotFoundException("expected")));
+      }
+    });
+
+    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
+
+    try {
+      commands.getHDFSStoreDescription(memberId, hdfsStoreName);
+    } catch (HDFSStoreNotFoundException expected) {
+      assertEquals("expected", expected.getMessage());
+      throw expected;
+    }
+  }
+
+  @Test(expected = RuntimeException.class)
+  public void testGetHDFSStoreDescriptionThrowsRuntimeException() {
+    final String hdfsStoreName = "mockHdfsStore";
+    final String memberId = "mockMember";
+
+    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
+
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+
+    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockMember).getName();
+        will(returnValue(null));
+        oneOf(mockMember).getId();
+        will(returnValue(memberId));
+        oneOf(mockFunctionExecutor).withArgs(with(equal(hdfsStoreName)));
+        will(returnValue(mockFunctionExecutor));
+        oneOf(mockFunctionExecutor).execute(with(aNonNull(DescribeHDFSStoreFunction.class)));
+        will(throwException(new RuntimeException("expected")));
+      }
+    });
+
+    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
+
+    try {
+      commands.getHDFSStoreDescription(memberId, hdfsStoreName);
+    } catch (RuntimeException expected) {
+      assertEquals("expected", expected.getMessage());
+      throw expected;
+    }
+  }
+
+  @Test(expected = RuntimeException.class)
+  public void testGetHDFSStoreDescriptionWithInvalidFunctionResultReturnType() {
+    final String hdfsStoreName = "mockHDFSStore";
+    final String memberId = "mockMember";
+
+    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
+
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+
+    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
+
+    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockMember).getName();
+        will(returnValue(null));
+        oneOf(mockMember).getId();
+        will(returnValue(memberId));
+        oneOf(mockFunctionExecutor).withArgs(with(equal(hdfsStoreName)));
+        will(returnValue(mockFunctionExecutor));
+        oneOf(mockFunctionExecutor).execute(with(aNonNull(DescribeHDFSStoreFunction.class)));
+        will(returnValue(mockResultCollector));
+        oneOf(mockResultCollector).getResult();
+        will(returnValue(Arrays.asList(new Object())));
+      }
+    });
+
+    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
+
+    try {
+      commands.getHDFSStoreDescription(memberId, hdfsStoreName);
+    } catch (RuntimeException expected) {
+      assertEquals(CliStrings.format(CliStrings.UNEXPECTED_RETURN_TYPE_EXECUTING_COMMAND_ERROR_MESSAGE, Object.class
+          .getName(), CliStrings.DESCRIBE_HDFS_STORE), expected.getMessage());
+      assertNull(expected.getCause());
+      throw expected;
+    }
+  }
+
+  @Test
+  public void testGetHDFSStoreListing() {
+    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
+
+    final DistributedMember mockDistributedMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+
+    final AbstractExecution mockFunctionExecutor = mockContext.mock(AbstractExecution.class, "Function Executor");
+
+    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
+
+    final HDFSStoreConfigHolder expectedHdfsStoreConfigHolderOne = createMockHDFSStoreConfigHolder(mockContext, "hdfsStoreName1",
+        "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0, 2048, true, true, true, 40,
+        40, 40, 800);
+    final HDFSStoreConfigHolder expectedHdfsStoreConfigHolderTwo = createMockHDFSStoreConfigHolder(mockContext, "hdfsStoreName2",
+        "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0, 2048, true, true, true, 40,
+        40, 40, 800);
+    final HDFSStoreConfigHolder expectedHdfsStoreConfigHolderThree = createMockHDFSStoreConfigHolder(mockContext, "hdfsStoreName3",
+        "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0, 2048, true, true, true, 40,
+        40, 40, 800);
+ 
+    
+    HdfsStoreDetails d1=new HdfsStoreDetails(expectedHdfsStoreConfigHolderOne.getName(), "member1", "member1");
+    HdfsStoreDetails d2=new HdfsStoreDetails(expectedHdfsStoreConfigHolderTwo.getName(), "member2", "member2");
+    HdfsStoreDetails d3=new HdfsStoreDetails(expectedHdfsStoreConfigHolderThree.getName(), "member3", "member3");
+    
+    final Set<HdfsStoreDetails> expectedHdfsStores = new HashSet<HdfsStoreDetails>();
+    expectedHdfsStores.add( d1);
+    expectedHdfsStores.add(d2 );    
+    expectedHdfsStores.add(d3);
+
+    final List<Object> results = new ArrayList<Object>();
+    results.add(expectedHdfsStores);
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockFunctionExecutor).setIgnoreDepartedMembers(with(equal(true)));
+        oneOf(mockFunctionExecutor).execute(with(aNonNull(ListHDFSStoresFunction.class)));
+        will(returnValue(mockResultCollector));
+        oneOf(mockResultCollector).getResult();
+        will(returnValue(results));
+      }
+    });
+
+    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockDistributedMember, mockFunctionExecutor);
+
+    final List<?> actualHdfsStores = commands.getHdfsStoreListing(commands.getNormalMembers(mockCache));
+
+    Assert.assertNotNull(actualHdfsStores);   
+    Assert.assertTrue(actualHdfsStores.contains(d1));
+    Assert.assertTrue(actualHdfsStores.contains(d2));
+    Assert.assertTrue(actualHdfsStores.contains(d3));
+  }
+
+  @Test(expected = RuntimeException.class)
+  public void testGetHDFSStoreListThrowsRuntimeException() {
+    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
+    final DistributedMember mockDistributedMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockFunctionExecutor).execute(with(aNonNull(ListHDFSStoresFunction.class)));
+        will(throwException(new RuntimeException("expected")));
+      }
+    });
+
+    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockDistributedMember, mockFunctionExecutor);
+
+    try {
+      commands.getHdfsStoreListing(commands.getNormalMembers(mockCache));
+    } catch (RuntimeException expected) {
+      assertEquals("expected", expected.getMessage());
+      throw expected;
+    }
+  }
+
+  @Test
+  public void testGetHDFSStoreListReturnsFunctionInvocationTargetExceptionInResults() {
+    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
+    final DistributedMember mockDistributedMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final AbstractExecution mockFunctionExecutor = mockContext.mock(AbstractExecution.class, "Function Executor");
+    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
+
+    final HDFSStoreConfigHolder expectedHdfsStoreConfigHolder = createMockHDFSStoreConfigHolder(mockContext, "hdfsStoreName",
+        "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0, 2048, true, true, true, 40,
+        40, 40, 800);
+
+    final List<HdfsStoreDetails> expectedHdfsStores = Arrays.asList(new HdfsStoreDetails(
+        expectedHdfsStoreConfigHolder.getName(), "member1", "member1"));
+
+    final List<Object> results = new ArrayList<Object>();
+
+    results.add(expectedHdfsStores);
+    results.add(new FunctionInvocationTargetException("expected"));
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockFunctionExecutor).setIgnoreDepartedMembers(with(equal(true)));
+        oneOf(mockFunctionExecutor).execute(with(aNonNull(ListHDFSStoresFunction.class)));
+        will(returnValue(mockResultCollector));
+        oneOf(mockResultCollector).getResult();
+        will(returnValue(results));
+      }
+    });
+
+    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockDistributedMember, mockFunctionExecutor);
+
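+    // the FunctionInvocationTargetException mixed into the results should be tolerated; the listing
+    // call is expected to complete without propagating it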
+    final List<HdfsStoreDetails> actualHdfsStores = commands.getHdfsStoreListing(commands
+        .getNormalMembers(mockCache));
+
+  }
+
+  @Test
+  public void testGetCreatedHDFSStore() throws JSONException {
+    final String hdfsStoreName = "mockHdfsStore";
+    final String memberId = "mockMember";
+    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
+    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
+    XmlEntity xml = null;
+    final CliFunctionResult cliResult = new CliFunctionResult(memberId, xml, "Success");
+    // Need to fix the return value of this function
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockFunctionExecutor).withArgs(with(aNonNull(HDFSStoreConfigHolder.class)));
+        will(returnValue(mockFunctionExecutor));
+        oneOf(mockFunctionExecutor).execute(with(aNonNull(CreateHDFSStoreFunction.class)));
+        will(returnValue(mockResultCollector));
+        oneOf(mockResultCollector).getResult();
+        will(returnValue(Arrays.asList(cliResult)));
+      }
+    });
+
+    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
+
+    final Result result = commands.getCreatedHdfsStore(null, hdfsStoreName, "hdfs://localhost:9000", "test", null, 20,
+        20, true, true, 100, 10000, "testStore", true, 10, true, .23F, 10, 10, 10, 10, 10);
+
+    assertNotNull(result);
+    assertEquals(Status.OK, result.getStatus());
+    TabularResultData resultData = (TabularResultData)((CommandResult)result).getResultData();
+    GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
+    assertNotNull(jsonObject.get("Member"));
+    assertNotNull(jsonObject.get("Result"));
+
+    assertEquals(memberId, (((JSONArray)jsonObject.get("Member")).get(0)));
+    assertEquals("Success", (((JSONArray)jsonObject.get("Result")).get(0)));
+  }
+
+  @Test
+  public void testGetCreatedHDFSStoreWithThrowable() throws JSONException {
+    final String hdfsStoreName = "mockHdfsStore";
+    final String memberId = "mockMember";
+    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
+    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
+    RuntimeException exception = new RuntimeException("Test Exception");
+
+    final CliFunctionResult cliResult = new CliFunctionResult(memberId, exception, null);
+    // Need to fix the return value of this function
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockFunctionExecutor).withArgs(with(aNonNull(HDFSStoreConfigHolder.class)));
+        will(returnValue(mockFunctionExecutor));
+        oneOf(mockFunctionExecutor).execute(with(aNonNull(CreateHDFSStoreFunction.class)));
+        will(returnValue(mockResultCollector));
+        oneOf(mockResultCollector).getResult();
+        will(returnValue(Arrays.asList(cliResult)));
+      }
+    });
+
+    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
+
+    final Result result = commands.getCreatedHdfsStore(null, hdfsStoreName, "hdfs://localhost:9000", "test", null, 20,
+        20, true, true, 100, 10000, "testStore", true, 10, true, .23F, 10, 10, 10, 10, 10);
+
+    assertNotNull(result);
+    assertEquals(Status.ERROR, result.getStatus());
+
+    TabularResultData resultData = (TabularResultData)((CommandResult)result).getResultData();
+    GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
+    assertNotNull(jsonObject.get("Member"));
+    assertNotNull(jsonObject.get("Result"));
+    assertEquals(memberId, (((JSONArray)jsonObject.get("Member")).get(0)));
+    assertEquals("ERROR: " + exception.getClass().getName() + ": " + exception.getMessage(), (((JSONArray)jsonObject
+        .get("Result")).get(0)));
+  }
+
+  @Test
+  public void testGetCreatedHDFSStoreWithCacheClosedException() throws JSONException {
+    final String hdfsStoreName = "mockHdfsStore";
+    final String memberId = "mockMember";
+    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
+    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
+
+    final CliFunctionResult cliResult = new CliFunctionResult(memberId, false, null);
+    // Need to fix the return value of this function
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockFunctionExecutor).withArgs(with(aNonNull(HDFSStoreConfigHolder.class)));
+        will(returnValue(mockFunctionExecutor));
+        oneOf(mockFunctionExecutor).execute(with(aNonNull(CreateHDFSStoreFunction.class)));
+        will(returnValue(mockResultCollector));
+        oneOf(mockResultCollector).getResult();
+        will(returnValue(Arrays.asList(cliResult)));
+      }
+    });
+
+    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
+
+    final Result result = commands.getCreatedHdfsStore(null, hdfsStoreName, "hdfs://localhost:9000", "test", null, 20,
+        20, true, true, 100, 10000, "testStore", true, 10, true, .23F, 10, 10, 10, 10, 10);
+
+    assertNotNull(result);
+    InfoResultData resultData = (InfoResultData)((CommandResult)result).getResultData();
+    GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
+    assertNotNull(jsonObject.get("message"));
+
+    assertEquals("Unable to create hdfs store:" + hdfsStoreName, (((JSONArray)jsonObject.get("message")).get(0)));
+  }
+
+  @Test
+  public void testGetAlteredHDFSStore() throws JSONException {
+    final String hdfsStoreName = "mockHdfsStore";
+    final String memberId = "mockMember";
+    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
+    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
+    XmlEntity xml = null;
+    final CliFunctionResult cliResult = new CliFunctionResult(memberId, xml, "Success");
+    // Need to fix the return value of this function
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockFunctionExecutor).withArgs(with(aNonNull(HDFSStoreConfigHolder.class)));
+        will(returnValue(mockFunctionExecutor));
+        oneOf(mockFunctionExecutor).execute(with(aNonNull(AlterHDFSStoreFunction.class)));
+        will(returnValue(mockResultCollector));
+        oneOf(mockResultCollector).getResult();
+        will(returnValue(Arrays.asList(cliResult)));
+      }
+    });
+
+    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
+
+    final Result result = commands.getAlteredHDFSStore(null, hdfsStoreName, 100, 100, true, 100, true, 100, 100, 100,
+        100, 100);
+
+    assertNotNull(result);
+    assertEquals(Status.OK, result.getStatus());
+    TabularResultData resultData = (TabularResultData)((CommandResult)result).getResultData();
+    GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
+    assertNotNull(jsonObject.get("Member"));
+    assertNotNull(jsonObject.get("Result"));
+
+    assertEquals(memberId, (((JSONArray)jsonObject.get("Member")).get(0)));
+    assertEquals("Success", (((JSONArray)jsonObject.get("Result")).get(0)));
+  }
+
+  @Test
+  public void testGetAlteredHDFSStoreWithThrowable() throws JSONException {
+    final String hdfsStoreName = "mockHdfsStore";
+    final String memberId = "mockMember";
+    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
+    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
+    RuntimeException exception = new RuntimeException("Test Exception");
+    final CliFunctionResult cliResult = new CliFunctionResult(memberId, exception, "Success");
+    // Need to fix the return value of this function
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockFunctionExecutor).withArgs(with(aNonNull(HDFSStoreConfigHolder.class)));
+        will(returnValue(mockFunctionExecutor));
+        oneOf(mockFunctionExecutor).execute(with(aNonNull(AlterHDFSStoreFunction.class)));
+        will(returnValue(mockResultCollector));
+        oneOf(mockResultCollector).getResult();
+        will(returnValue(Arrays.asList(cliResult)));
+      }
+    });
+
+    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
+
+    final Result result = commands.getAlteredHDFSStore(null, hdfsStoreName, 100, 100, true, 100, true, 100, 100, 100,
+        100, 100);
+
+    assertNotNull(result);
+    assertEquals(Status.ERROR, result.getStatus());
+    TabularResultData resultData = (TabularResultData)((CommandResult)result).getResultData();
+    GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
+    assertNotNull(jsonObject.get("Member"));
+    assertNotNull(jsonObject.get("Result"));
+
+    assertEquals(memberId, (((JSONArray)jsonObject.get("Member")).get(0)));
+    assertEquals("ERROR: " + exception.getClass().getName() + ": " + exception.getMessage(), (((JSONArray)jsonObject
+        .get("Result")).get(0)));
+  }
+
+  @Test
+  public void testGetAlteredHDFSStoreWithCacheClosedException() throws JSONException {
+    final String hdfsStoreName = "mockHdfsStore";
+    final String memberId = "mockMember";
+    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
+    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
+    final CliFunctionResult cliResult = new CliFunctionResult(memberId, false, null);
+    // Need to fix the return value of this function
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockFunctionExecutor).withArgs(with(aNonNull(HDFSStoreConfigHolder.class)));
+        will(returnValue(mockFunctionExecutor));
+        oneOf(mockFunctionExecutor).execute(with(aNonNull(AlterHDFSStoreFunction.class)));
+        will(returnValue(mockResultCollector));
+        oneOf(mockResultCollector).getResult();
+        will(returnValue(Arrays.asList(cliResult)));
+      }
+    });
+
+    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
+
+    final Result result = commands.getAlteredHDFSStore(null, hdfsStoreName, 100, 100, true, 100, true, 100, 100, 100,
+        100, 100);
+
+    assertNotNull(result);
+    TabularResultData resultData = (TabularResultData)((CommandResult)result).getResultData();
+    JSONObject jsonObject = (JSONObject)resultData.getGfJsonObject().get("content");
+    assertEquals(0, jsonObject.length());
+  }
+
+  @Test
+  public void testDestroyStore() throws JSONException {
+    final String hdfsStoreName = "mockHdfsStore";
+    final String memberId = "mockMember";
+    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
+    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
+    XmlEntity xml = null;
+    final CliFunctionResult cliResult = new CliFunctionResult(memberId, xml, "Success");
+    // Need to fix the return value of this function
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockFunctionExecutor).withArgs(hdfsStoreName);
+        will(returnValue(mockFunctionExecutor));
+        oneOf(mockFunctionExecutor).execute(with(aNonNull(DestroyHDFSStoreFunction.class)));
+        will(returnValue(mockResultCollector));
+        oneOf(mockResultCollector).getResult();
+        will(returnValue(Arrays.asList(cliResult)));
+      }
+    });
+
+    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
+
+    final Result result = commands.destroyStore(hdfsStoreName, null);
+
+    assertNotNull(result);
+    assertEquals(Status.OK, result.getStatus());
+    TabularResultData resultData = (TabularResultData)((CommandResult)result).getResultData();
+    GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
+    assertNotNull(jsonObject.get("Member"));
+    assertNotNull(jsonObject.get("Result"));
+
+    assertEquals(memberId, (((JSONArray)jsonObject.get("Member")).get(0)));
+    assertEquals("Success", (((JSONArray)jsonObject.get("Result")).get(0)));
+  }
+
+  @Test
+  public void testDestroyStoreWithThrowable() throws JSONException {
+    final String hdfsStoreName = "mockHdfsStore";
+    final String memberId = "mockMember";
+    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
+    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
+    RuntimeException exception = new RuntimeException("Test Exception");
+    final CliFunctionResult cliResult = new CliFunctionResult(memberId, exception, "Success");
+    // Need to fix the return value of this function
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockFunctionExecutor).withArgs(hdfsStoreName);
+        will(returnValue(mockFunctionExecutor));
+        oneOf(mockFunctionExecutor).execute(with(aNonNull(DestroyHDFSStoreFunction.class)));
+        will(returnValue(mockResultCollector));
+        oneOf(mockResultCollector).getResult();
+        will(returnValue(Arrays.asList(cliResult)));
+      }
+    });
+
+    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
+
+    final Result result = commands.destroyHdfstore(hdfsStoreName, null);
+
+    assertNotNull(result);
+    assertEquals(Status.ERROR, result.getStatus());
+    TabularResultData resultData = (TabularResultData)((CommandResult)result).getResultData();
+    GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
+    assertNotNull(jsonObject.get("Member"));
+    assertNotNull(jsonObject.get("Result"));
+
+    assertEquals(memberId, (((JSONArray)jsonObject.get("Member")).get(0)));
+    assertEquals("ERROR: " + exception.getClass().getName() + ": " + exception.getMessage(), (((JSONArray)jsonObject
+        .get("Result")).get(0)));
+  }
+
+  @Test
+  public void testDestroyStoreWithCacheClosedException() throws JSONException {
+    final String hdfsStoreName = "mockHdfsStore";
+    final String memberId = "mockMember";
+    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
+    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
+    final CliFunctionResult cliResult = new CliFunctionResult(memberId, false, null);
+    // Need to fix the return value of this function
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockFunctionExecutor).withArgs(hdfsStoreName);
+        will(returnValue(mockFunctionExecutor));
+        oneOf(mockFunctionExecutor).execute(with(aNonNull(DestroyHDFSStoreFunction.class)));
+        will(returnValue(mockResultCollector));
+        oneOf(mockResultCollector).getResult();
+        will(returnValue(Arrays.asList(cliResult)));
+      }
+    });
+
+    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
+
+    final Result result = commands.destroyHdfstore(hdfsStoreName, null);
+
+    assertNotNull(result);
+    InfoResultData resultData = (InfoResultData)((CommandResult)result).getResultData();
+    GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
+    assertNotNull(jsonObject.get("message"));
+
+    assertEquals("No matching hdfs stores found.", (((JSONArray)jsonObject.get("message")).get(0)));
+  }
+
+  public static HDFSStoreConfigHolder createMockHDFSStoreConfigHolder(Mockery mockContext, final String storeName, final String namenode,
+      final String homeDir, final int maxFileSize, final int fileRolloverInterval, final float blockCachesize,
+      final String clientConfigFile, final int batchSize, final int batchInterval, final String diskStoreName,
+      final boolean syncDiskwrite, final int dispatcherThreads, final int maxMemory, final boolean bufferPersistent,
+      final boolean minorCompact, final boolean majorCompact, final int majorCompactionInterval,
+      final int majorCompactionThreads, final int minorCompactionThreads, final int purgeInterval) {
+
+    HDFSStoreConfigHolder mockHdfsStore = mockContext.mock(HDFSStoreConfigHolder.class, "HDFSStoreConfigHolder_"
+        + storeName);
+
+    createMockStore(mockContext, mockHdfsStore, storeName, namenode, homeDir, maxFileSize, fileRolloverInterval,
+        minorCompact, minorCompactionThreads, majorCompact, majorCompactionThreads, majorCompactionInterval,
+        purgeInterval, blockCachesize, clientConfigFile, batchSize,
+        batchInterval, diskStoreName, syncDiskwrite, dispatcherThreads, maxMemory, bufferPersistent);
+    return mockHdfsStore;
+
+  }
+
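+  /**
+   * Registers "allowing" expectations so that every getter of the given mock
+   * {@link HDFSStore} returns the corresponding supplied value (the input-file
+   * count/size limits are stubbed with fixed values).
+   */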
+  public static void createMockStore(Mockery mockContext, final HDFSStore mockStore, final String storeName,
+      final String namenode, final String homeDir, final int maxFileSize, final int fileRolloverInterval,
+      final boolean minorCompact, final int minorCompactionThreads, final boolean majorCompact,
+      final int majorCompactionThreads, final int majorCompactionInterval, final int purgeInterval,
+      final float blockCachesize, final String clientConfigFile, final int batchSize, final int batchInterval,
+      final String diskStoreName, final boolean syncDiskwrite, final int dispatcherThreads, final int maxMemory,
+      final boolean bufferPersistent) {
+
+    mockContext.checking(new Expectations() {
+      {
+        allowing(mockStore).getName();
+        will(returnValue(storeName));
+        allowing(mockStore).getNameNodeURL();
+        will(returnValue(namenode));
+        allowing(mockStore).getHomeDir();
+        will(returnValue(homeDir));
+        allowing(mockStore).getWriteOnlyFileRolloverSize();
+        will(returnValue(maxFileSize));
+        allowing(mockStore).getWriteOnlyFileRolloverInterval();
+        will(returnValue(fileRolloverInterval));
+        allowing(mockStore).getMinorCompaction();
+        will(returnValue(minorCompact));
+        allowing(mockStore).getMajorCompaction();
+        will(returnValue(majorCompact));
+        allowing(mockStore).getMajorCompactionInterval();
+        will(returnValue(majorCompactionInterval));
+        allowing(mockStore).getMajorCompactionThreads();
+        will(returnValue(majorCompactionThreads));
+        allowing(mockStore).getMinorCompactionThreads();
+        will(returnValue(minorCompactionThreads));
+        allowing(mockStore).getPurgeInterval();
+        will(returnValue(purgeInterval));
+        allowing(mockStore).getInputFileCountMax();
+        will(returnValue(10));
+        allowing(mockStore).getInputFileSizeMax();
+        will(returnValue(1024));
+        allowing(mockStore).getInputFileCountMin();
+        will(returnValue(2));
+        allowing(mockStore).getBlockCacheSize();
+        will(returnValue(blockCachesize));
+        allowing(mockStore).getHDFSClientConfigFile();
+        will(returnValue(clientConfigFile));
+
+        allowing(mockStore).getBatchSize();
+        will(returnValue(batchSize));
+        allowing(mockStore).getBatchInterval();
+        will(returnValue(batchInterval));
+        allowing(mockStore).getDiskStoreName();
+        will(returnValue(diskStoreName));
+        allowing(mockStore).getSynchronousDiskWrite();
+        will(returnValue(syncDiskwrite));
+        allowing(mockStore).getBufferPersistent();
+        will(returnValue(bufferPersistent));
+        allowing(mockStore).getDispatcherThreads();
+        will(returnValue(dispatcherThreads));
+        allowing(mockStore).getMaxMemory();
+        will(returnValue(maxMemory));
+      }
+    });
+  }
+
+  protected static class TestHDFSStoreCommands extends HDFSStoreCommands {
+
+    private final Cache cache;
+
+    private final DistributedMember distributedMember;
+
+    private final Execution functionExecutor;
+
+    public TestHDFSStoreCommands(final Cache cache, final DistributedMember distributedMember,
+        final Execution functionExecutor) {
+      assert cache != null: "The Cache cannot be null!";
+      this.cache = cache;
+      this.distributedMember = distributedMember;
+      this.functionExecutor = functionExecutor;
+    }
+
+    @Override
+    protected Cache getCache() {
+      return this.cache;
+    }
+
+    @Override
+    protected Set<DistributedMember> getMembers(final Cache cache) {
+      assertSame(getCache(), cache);
+      return Collections.singleton(this.distributedMember);
+    }
+
+    @Override
+    protected Execution getMembersFunctionExecutor(final Set<DistributedMember> members) {
+      Assert.assertNotNull(members);
+      return this.functionExecutor;
+    }
+
+    @Override
+    protected Set<DistributedMember> getNormalMembers(final Cache cache) {
+      assertSame(getCache(), cache);
+      return Collections.singleton(this.distributedMember);
+    }
+
+    @Override
+    protected Set<DistributedMember> getGroupMembers(String[] groups) {
+      Set<DistributedMember> dm = new HashSet<DistributedMember>();
+      dm.add(distributedMember);
+      return dm;
+
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunctionJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunctionJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunctionJUnitTest.java
new file mode 100644
index 0000000..4a93e30
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunctionJUnitTest.java
@@ -0,0 +1,324 @@
+/*=========================================================================
+ * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+
+package com.gemstone.gemfire.management.internal.cli.functions;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.logging.log4j.Logger;
+import org.jmock.Expectations;
+import org.jmock.Mockery;
+import org.jmock.lib.legacy.ClassImposteriser;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheClosedException;
+import com.gemstone.gemfire.cache.execute.FunctionContext;
+import com.gemstone.gemfire.cache.execute.ResultSender;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.InternalCache;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.management.internal.cli.commands.HDFSStoreCommandsJUnitTest;
+import com.gemstone.gemfire.management.internal.cli.functions.AlterHDFSStoreFunction.AlterHDFSStoreAttributes;
+import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+/**
+ * The AlterHDFSStoreFunctionJUnitTest test suite class tests the contract and
+ * functionality of the AlterHDFSStoreFunction class. </p>
+ * 
+ * @author Namrata Thanvi
+ * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl
+ * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder
+ * @see com.gemstone.gemfire.management.internal.cli.functions.AlterHDFSStoreFunction
+ * @see org.jmock.Expectations
+ * @see org.jmock.Mockery
+ * @see org.junit.Assert
+ * @see org.junit.Test
+ */
+@SuppressWarnings( { "unused" })
+@Category({IntegrationTest.class, HoplogTest.class})
+public class AlterHDFSStoreFunctionJUnitTest {
+
+  private static final Logger logger = LogService.getLogger();
+
+  private Mockery mockContext;
+
+  @Before
+  public void setup() {
+    mockContext = new Mockery() {
+      {
+        setImposteriser(ClassImposteriser.INSTANCE);
+      }
+    };
+  }
+
+  @After
+  public void tearDown() {
+    mockContext.assertIsSatisfied();
+    mockContext = null;
+  }
+
+  @Test
+  public void testExecute() throws Throwable {
+
+    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
+    final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
+
+    final String memberId = "mockMemberId";
+    final String memberName = "mockMemberName";
+
+    final AlterHDFSStoreFunction function = createAlterHDFSStoreFunction(mockCache, mockMember, xmlEntity);
+    final TestResultSender testResultSender = new TestResultSender();
+    final HDFSStoreImpl mockHdfsStore = CreateHDFSStoreFunctionJUnitTest.createMockHDFSStoreImpl(mockContext,
+        "hdfsStoreName", "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 20, 20, null, false, 0, 1024, false,
+        false, true, 20, 20, 10, 100);
+    final AlterHDFSStoreAttributes alterHDFSStoreAttributes = new AlterHDFSStoreAttributes(
+        "mockStore", 100, 100, false, false, 100, 100, 100, 100, 100, 100);
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockMember).getId();
+        will(returnValue(memberId));
+        exactly(2).of(mockMember).getName();
+        will(returnValue(memberName));        
+        oneOf(mockFunctionContext).getArguments();
+        will(returnValue(alterHDFSStoreAttributes));
+        oneOf(mockCache).findHDFSStore(alterHDFSStoreAttributes.getHdfsUniqueName());
+        will(returnValue(mockHdfsStore));
+        oneOf(mockFunctionContext).getResultSender();
+        will(returnValue(testResultSender));
+      }
+    });
+
+    function.execute(mockFunctionContext);
+
+    final List<?> results = testResultSender.getResults();
+
+    assertNotNull(results);
+    assertEquals(1, results.size());
+
+    final CliFunctionResult result = (CliFunctionResult)results.get(0);
+    assertEquals(memberName, result.getMemberIdOrName());
+    assertEquals("Success", result.getMessage());
+
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testExecuteOnMemberHavingNoHDFSStore() throws Throwable {
+
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
+    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
+    final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
+
+    final String memberId = "mockMemberId";
+    final String memberName = "mockMemberName";
+
+    final TestResultSender testResultSender = new TestResultSender();
+    final AlterHDFSStoreFunction function = createAlterHDFSStoreFunction(mockCache, mockMember, xmlEntity);
+    final AlterHDFSStoreAttributes alterHDFSStoreAttributes = new AlterHDFSStoreAttributes(
+        "mockStore", 100, 100, false, false, 100, 100, 100, 100, 100, 100);
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockCache).findHDFSStore(alterHDFSStoreAttributes.getHdfsUniqueName());
+        will(returnValue(null));       
+        oneOf(mockMember).getId();
+        will(returnValue(memberId));
+        exactly(2).of(mockMember).getName();
+        will(returnValue(memberName));
+        oneOf(mockFunctionContext).getArguments();
+        will(returnValue(alterHDFSStoreAttributes));
+        oneOf(mockFunctionContext).getResultSender();
+        will(returnValue(testResultSender));
+      }
+    });
+
+    function.execute(mockFunctionContext);
+
+    final List<?> results = testResultSender.getResults();
+
+    assertNotNull(results);
+    assertEquals(1, results.size());
+
+    final CliFunctionResult result = (CliFunctionResult)results.get(0);
+    assertEquals(memberName, result.getMemberIdOrName());
+    assertEquals("Hdfs store not found on this member", result.getMessage());
+  }
+
+  @Test
+  public void testExecuteOnMemberWithNoCache() throws Throwable {
+
+    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "MockFunctionContext");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final InternalCache mockCache = mockContext.mock(InternalCache.class, "Cache");
+    final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
+
+    final TestResultSender testResultSender = new TestResultSender();
+    final AlterHDFSStoreAttributes alterHDFSStoreAttributes = new AlterHDFSStoreAttributes(
+        "mockStore", 100, 100, false, false, 100, 100, 100, 100, 100, 100);
+
+    final AlterHDFSStoreFunction function = new TestAlterHDFSStoreFunction(mockCache, mockMember, xmlEntity) {
+      @Override
+      protected Cache getCache() {
+        throw new CacheClosedException("Expected");
+      }
+    };
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockFunctionContext).getArguments();
+        will(returnValue(alterHDFSStoreAttributes));
+        oneOf(mockFunctionContext).getResultSender();
+        will(returnValue(testResultSender));
+      }
+    });
+
+    function.execute(mockFunctionContext);
+    final List<?> results = testResultSender.getResults();
+
+    assertNotNull(results);
+    assertEquals(1, results.size());
+
+    final CliFunctionResult result = (CliFunctionResult)results.get(0);
+    assertEquals("", result.getMemberIdOrName());
+    assertNull(result.getMessage());
+  }
+
+  @Test
+  public void testExecuteHandleRuntimeException() throws Throwable {
+
+    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
+    final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
+
+    final String memberId = "mockMemberId";
+    final String memberName = "mockMemberName";
+    final TestResultSender testResultSender = new TestResultSender();
+    final AlterHDFSStoreFunction function = createAlterHDFSStoreFunction(mockCache, mockMember, xmlEntity);
+
+    final AlterHDFSStoreAttributes alterHDFSStoreAttributes = new AlterHDFSStoreAttributes(
+        "mockStore", 100, 100, false, false, 100, 100, 100, 100, 100, 100);
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockMember).getId();
+        will(returnValue(memberId));
+        exactly(2).of(mockMember).getName();
+        will(returnValue(memberName));
+        oneOf(mockFunctionContext).getArguments();
+        will(returnValue(alterHDFSStoreAttributes));
+        oneOf(mockCache).findHDFSStore(alterHDFSStoreAttributes.getHdfsUniqueName());
+        will(throwException(new RuntimeException("expected")));
+        oneOf(mockFunctionContext).getResultSender();
+        will(returnValue(testResultSender));
+      }
+    });
+
+    function.execute(mockFunctionContext);
+    final List<?> results = testResultSender.getResults();
+
+    assertNotNull(results);
+    assertEquals(1, results.size());
+
+    final CliFunctionResult result = (CliFunctionResult)results.get(0);
+    assertEquals(memberName, result.getMemberIdOrName());
+    assertEquals("expected", result.getThrowable().getMessage());
+
+  }
+
+  protected TestAlterHDFSStoreFunction createAlterHDFSStoreFunction(final Cache cache, DistributedMember member,
+      XmlEntity xml) {
+    return new TestAlterHDFSStoreFunction(cache, member, xml);
+  }
+
+  protected static class TestAlterHDFSStoreFunction extends AlterHDFSStoreFunction {
+    private static final long serialVersionUID = 1L;
+
+    private final Cache cache;
+
+    private final DistributedMember member;
+
+    private final XmlEntity xml;
+
+    public TestAlterHDFSStoreFunction(final Cache cache, DistributedMember member, XmlEntity xml) {
+      this.cache = cache;
+      this.member = member;
+      this.xml = xml;
+    }
+
+    @Override
+    protected Cache getCache() {
+      return this.cache;
+    }
+
+    @Override
+    protected DistributedMember getDistributedMember(Cache cache) {
+      return member;
+    }
+
+    @Override
+    protected XmlEntity getXMLEntity(String storeName) {
+      return xml;
+    }
+
+    @Override
+    protected HDFSStore alterHdfsStore(HDFSStore hdfsStore, AlterHDFSStoreAttributes alterAttributes) {
+      return hdfsStore;
+    }
+  }
+
+  protected static class TestResultSender implements ResultSender {
+
+    private final List<Object> results = new LinkedList<Object>();
+
+    private Throwable t;
+
+    protected List<Object> getResults() throws Throwable {
+      if (t != null) {
+        throw t;
+      }
+      return Collections.unmodifiableList(results);
+    }
+
+    public void lastResult(final Object lastResult) {
+      results.add(lastResult);
+    }
+
+    public void sendResult(final Object oneResult) {
+      results.add(oneResult);
+    }
+
+    public void sendException(final Throwable t) {
+      this.t = t;
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunctionJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunctionJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunctionJUnitTest.java
new file mode 100644
index 0000000..8a012b4
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunctionJUnitTest.java
@@ -0,0 +1,307 @@
+/*=========================================================================
+ * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+
+package com.gemstone.gemfire.management.internal.cli.functions;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.logging.log4j.Logger;
+import org.jmock.Expectations;
+import org.jmock.Mockery;
+import org.jmock.lib.legacy.ClassImposteriser;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheClosedException;
+import com.gemstone.gemfire.cache.execute.FunctionContext;
+import com.gemstone.gemfire.cache.execute.ResultSender;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.management.internal.cli.commands.HDFSStoreCommandsJUnitTest;
+import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+/**
+ * The CreateHDFSStoreFunctionJUnitTest test suite class tests the contract and
+ * functionality of the CreateHDFSStoreFunction class. </p>
+ * 
+ * @author Namrata Thanvi
+ * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl
+ * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder
+ * @see com.gemstone.gemfire.management.internal.cli.functions.CreateHDFSStoreFunction
+ * @see org.jmock.Expectations
+ * @see org.jmock.Mockery
+ * @see org.junit.Assert
+ * @see org.junit.Test
+ */
+@SuppressWarnings( { "unused" })
+@Category({IntegrationTest.class, HoplogTest.class})
+public class CreateHDFSStoreFunctionJUnitTest {
+
+  private static final Logger logger = LogService.getLogger();
+
+  private Mockery mockContext;
+
+  private static Properties props = new Properties();
+  
+  @Before
+  public void setup() {
+    
+    mockContext = new Mockery() {
+      {
+        setImposteriser(ClassImposteriser.INSTANCE);
+      }
+    };
+  }
+
+  @After
+  public void tearDown() {
+    mockContext.assertIsSatisfied();
+    mockContext = null;
+  }
+
+  @Test
+  public void testExecute() throws Throwable {
+
+    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
+    final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
+
+    final String memberId = "mockMemberId";
+    final String memberName = "mockMemberName";
+    
+    final TestResultSender testResultSender = new TestResultSender();
+    
+    final HDFSStoreImpl mockHdfsStore = createMockHDFSStoreImpl(mockContext, "hdfsStoreName", "hdfs://localhost:9000", "testDir",
+        1024, 20, .25f, null, 20, 20, null, false, 0, 1024, false, false, true, 20, 20, 10, 100);
+    
+    final HDFSStoreConfigHolder mockHdfsStoreConfigHolder = HDFSStoreCommandsJUnitTest.createMockHDFSStoreConfigHolder(
+        mockContext, "hdfsStoreName", "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0,
+        2048, true, true, true, 40, 40, 40, 800);
+    
+    final CreateHDFSStoreFunction function = new TestCreateHDFSStoreFunction(mockCache, mockMember, xmlEntity , mockHdfsStore);
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockMember).getId();
+        will(returnValue(memberId));
+        exactly(2).of(mockMember).getName();
+        will(returnValue(memberName));
+        oneOf(mockFunctionContext).getArguments();
+        will(returnValue(mockHdfsStoreConfigHolder));
+        oneOf(mockFunctionContext).getResultSender();
+        will(returnValue(testResultSender));
+      }
+    });
+
+    function.execute(mockFunctionContext);
+
+    final List<?> results = testResultSender.getResults();
+
+    assertNotNull(results);
+    assertEquals(1, results.size());
+
+    final CliFunctionResult result = (CliFunctionResult)results.get(0);
+    assertEquals(memberName, result.getMemberIdOrName());
+    assertEquals("Success", result.getMessage());
+
+  }
+
+  
+  
+  @Test
+  public void testExecuteOnMemberWithNoCache() throws Throwable {
+
+    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "MockFunctionContext");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
+    final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
+
+    final String memberId = "mockMemberId";
+    final String memberName = "mockMemberName";
+    
+    final TestResultSender testResultSender = new TestResultSender();
+    final HDFSStoreImpl mockHdfsStore = createMockHDFSStoreImpl(mockContext, "hdfsStoreName", "hdfs://localhost:9000", "testDir",
+        1024, 20, .25f, null, 20, 20, null, false, 0, 1024, false, false, true, 20, 20, 10, 100);
+    
+    final HDFSStoreConfigHolder mockHdfsStoreConfigHolder = HDFSStoreCommandsJUnitTest.createMockHDFSStoreConfigHolder(mockContext, "hdfsStoreName",
+        "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0, 2048, true, true, true, 40,
+        40, 40, 800);
+    
+    final CreateHDFSStoreFunction function = new TestCreateHDFSStoreFunction(mockCache, mockMember, xmlEntity , mockHdfsStore) {
+      @Override
+      protected Cache getCache() {
+        throw new CacheClosedException("Expected");
+      }
+    };
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockFunctionContext).getResultSender();
+        will(returnValue(testResultSender));
+      }
+    });
+
+    function.execute(mockFunctionContext);
+    final List<?> results = testResultSender.getResults();
+
+    assertNotNull(results);
+    assertEquals(1, results.size());
+
+    final CliFunctionResult result = (CliFunctionResult)results.get(0);
+    assertEquals("", result.getMemberIdOrName());
+    assertNull(result.getMessage());
+  }
+
+  
+  @Test
+  public void testExecuteHandleRuntimeException() throws Throwable {
+
+    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "MockFunctionContext");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
+    final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
+
+    final String memberId = "mockMemberId";
+    final String memberName = "mockMemberName";
+    
+    final TestResultSender testResultSender = new TestResultSender();
+    final HDFSStoreImpl mockHdfsStore = createMockHDFSStoreImpl(mockContext, "hdfsStoreName", "hdfs://localhost:9000", "testDir",
+        1024, 20, .25f, null, 20, 20, null, false, 0, 1024, false, false, true, 20, 20, 10, 100);
+    
+    final HDFSStoreConfigHolder mockHdfsStoreConfigHolder = HDFSStoreCommandsJUnitTest.createMockHDFSStoreConfigHolder(
+        mockContext, "hdfsStoreName", "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0,
+        2048, true, true, true, 40, 40, 40, 800);
+    
+    final CreateHDFSStoreFunction function = new TestCreateHDFSStoreFunction(mockCache, mockMember, xmlEntity , mockHdfsStore) {
+      @Override
+      protected Cache getCache() {
+        throw new RuntimeException("expected");
+      }
+    };
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockFunctionContext).getResultSender();
+        will(returnValue(testResultSender));
+      }
+    });
+
+
+    function.execute(mockFunctionContext);
+    final List<?> results = testResultSender.getResults();
+
+    assertNotNull(results);
+    assertEquals(1, results.size());
+
+    final CliFunctionResult result = (CliFunctionResult)results.get(0);
+    assertEquals("", result.getMemberIdOrName());
+    assertEquals("expected", result.getThrowable().getMessage());
+
+  }
+
+  public static HDFSStoreImpl createMockHDFSStoreImpl(Mockery mockContext, final String storeName, final String namenode, final String homeDir,
+      final int maxFileSize, final int fileRolloverInterval, final float blockCachesize, final String clientConfigFile,
+      final int batchSize, final int batchInterval, final String diskStoreName, final boolean syncDiskwrite,
+      final int dispatcherThreads, final int maxMemory, final boolean bufferPersistent, final boolean minorCompact,
+      final boolean majorCompact, final int majorCompactionInterval, final int majorCompactionThreads,
+      final int minorCompactionThreads, final int purgeInterval) {
+
+    HDFSStoreImpl mockHdfsStore = mockContext.mock(HDFSStoreImpl.class, "HDFSStoreImpl");
+
+    HDFSStoreCommandsJUnitTest.createMockStore(mockContext, mockHdfsStore, storeName, namenode, homeDir, maxFileSize,
+        fileRolloverInterval, minorCompact, minorCompactionThreads, majorCompact, majorCompactionThreads,
+        majorCompactionInterval, purgeInterval, blockCachesize, clientConfigFile, batchSize, batchInterval,
+        diskStoreName, syncDiskwrite, dispatcherThreads, maxMemory, bufferPersistent);
+    
+    return mockHdfsStore;
+  }
+
+  protected static class TestCreateHDFSStoreFunction extends CreateHDFSStoreFunction {
+    private static final long serialVersionUID = 1L;
+
+    private final Cache cache;
+
+    private final DistributedMember member;
+
+    private final XmlEntity xml;
+    
+    private final HDFSStoreImpl hdfsStore;
+
+    public TestCreateHDFSStoreFunction(Cache cache, DistributedMember member, XmlEntity xml , HDFSStoreImpl hdfsStore) {
+      this.cache = cache;
+      this.member = member;
+      this.xml = xml;
+      this.hdfsStore = hdfsStore;
+    }
+
+    @Override
+    protected Cache getCache() {
+      return this.cache;
+    }
+
+    @Override
+    protected DistributedMember getDistributedMember(Cache cache) {
+      return member;
+    }
+
+    @Override
+    protected XmlEntity getXMLEntity(String storeName) {
+      return xml;
+    }
+    
+    @Override
+    protected HDFSStoreImpl createHdfsStore(Cache cache, HDFSStoreConfigHolder configHolder){
+      return hdfsStore;
+    }
+  }
+
+  protected static class TestResultSender implements ResultSender {
+
+    private final List<Object> results = new LinkedList<Object>();
+
+    private Throwable t;
+
+    protected List<Object> getResults() throws Throwable {
+      if (t != null) {
+        throw t;
+      }
+      return Collections.unmodifiableList(results);
+    }
+
+    public void lastResult(final Object lastResult) {
+      results.add(lastResult);
+    }
+
+    public void sendResult(final Object oneResult) {
+      results.add(oneResult);
+    }
+
+    public void sendException(final Throwable t) {
+      this.t = t;
+    }
+  }
+
+}


[12/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/CursorIterator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/CursorIterator.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/CursorIterator.java
new file mode 100644
index 0000000..dacc208
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/CursorIterator.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.internal.cache.persistence.soplog;
+
+import java.util.Iterator;
+
+/**
+ * Provides an {@link Iterator} that allows access to the current iteration
+ * element.  The implementor must provide access to the current element
+ * as well as a means to move to the next element.
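+ * <p>
+ * A minimal usage sketch (the data here is illustrative; {@code WrappedIterator}
+ * is the adapter defined in this file):
+ * <pre>
+ * Iterator&lt;String&gt; names = Arrays.asList("a", "b", "c").iterator();
+ * CursorIterator&lt;String&gt; cursor = new CursorIterator.WrappedIterator&lt;String&gt;(names);
+ * while (cursor.hasNext()) {
+ *   cursor.next();
+ *   String again = cursor.current(); // re-reads the element returned by the last next()
+ * }
+ * </pre>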
+ * 
+ *
+ * @param <E> the element type
+ */
+public interface CursorIterator<E> extends Iterator<E> {
+  /**
+   * Returns the element at the current position.
+   * @return the current element
+   */
+  E current();
+  
+  /**
+   * Provides an iteration cursor by wrapping an {@link Iterator}.
+   *
+   * @param <E> the element type
+   */
+  public static class WrappedIterator<E> implements CursorIterator<E> {
+    /** the underlying iterator */
+    private final Iterator<E> src;
+    
+    /** the current iteration element */
+    private E current;
+    
+    public WrappedIterator(Iterator<E> src) {
+      this.src = src;
+    }
+
+    @Override
+    public boolean hasNext() {
+      return src.hasNext();
+    }
+
+    @Override
+    public E next() {
+      current = src.next();
+      return current;
+    }
+
+    @Override
+    public E current() {
+      return current;
+    }
+    
+    @Override
+    public void remove() {
+      throw new UnsupportedOperationException();
+    }
+    
+    /**
+     * Returns the unwrapped iterator.
+     * @return the iterator
+     */
+    public Iterator<E> unwrap() {
+      return src;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/DelegatingSerializedComparator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/DelegatingSerializedComparator.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/DelegatingSerializedComparator.java
new file mode 100644
index 0000000..52470d0
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/DelegatingSerializedComparator.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.internal.cache.persistence.soplog;
+
+import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedReader.SerializedComparator;
+
+/**
+ * Delegates object comparisons to one or more embedded comparators.
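+ * <p>
+ * Wiring sketch (purely illustrative; {@code composite}, {@code first} and
+ * {@code second} stand for whatever implementations are in use):
+ * <pre>
+ * DelegatingSerializedComparator composite = ...;
+ * composite.setComparators(new SerializedComparator[] { first, second });
+ * SerializedComparator[] parts = composite.getComparators();
+ * </pre>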
+ *  
+ */
+public interface DelegatingSerializedComparator extends SerializedComparator {
+  /**
+   * Injects the embedded comparators.
+   * @param comparators the comparators for delegation
+   */
+  void setComparators(SerializedComparator[] comparators);
+  
+  /**
+   * Returns the embedded comparators.
+   * @return the comparators
+   */
+  SerializedComparator[] getComparators();
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/HFileStoreStatistics.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/HFileStoreStatistics.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/HFileStoreStatistics.java
new file mode 100644
index 0000000..fdf3852
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/HFileStoreStatistics.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.internal.cache.persistence.soplog;
+
+import static com.gemstone.gemfire.distributed.internal.DistributionStats.getStatTime;
+
+import com.gemstone.gemfire.StatisticDescriptor;
+import com.gemstone.gemfire.Statistics;
+import com.gemstone.gemfire.StatisticsFactory;
+import com.gemstone.gemfire.StatisticsType;
+import com.gemstone.gemfire.StatisticsTypeFactory;
+import com.gemstone.gemfire.internal.DummyStatisticsFactory;
+import com.gemstone.gemfire.internal.StatisticsTypeFactoryImpl;
+
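+/**
+ * Captures block cache statistics (hits, misses, cached blocks, and cached or
+ * evicted bytes) for an HFile-backed store.  Statistics are registered with the
+ * supplied {@link StatisticsFactory}, or with a {@link DummyStatisticsFactory}
+ * when none is given.
+ */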
+public class HFileStoreStatistics {
+  private final Statistics stats;
+  
+  private final CacheOperation blockCache;
+  
+  public HFileStoreStatistics(String typeName, String name) {
+    this(new DummyStatisticsFactory(), typeName, name);
+  }
+  
+  public HFileStoreStatistics(StatisticsFactory factory, String typeName, String name) {
+    StatisticsTypeFactory tf = StatisticsTypeFactoryImpl.singleton();
+    
+    StatisticDescriptor bcMisses = tf.createLongCounter("blockCacheMisses", "The total number of block cache misses", "misses");
+    StatisticDescriptor bcHits = tf.createLongCounter("blockCacheHits", "The total number of block cache hits", "hits");
+    StatisticDescriptor bcCached = tf.createLongGauge("blocksCached", "The current number of cached blocks", "blocks");
+    StatisticDescriptor bcBytesCached = tf.createLongGauge("blockBytesCached", "The current number of bytes cached", "bytes");
+    StatisticDescriptor bcBytesEvicted = tf.createLongCounter("blockBytesEvicted", "The total number of bytes evicted", "bytes");
+
+    
+    StatisticsType type = tf.createType(typeName, 
+        "Statistics about structured I/O operations for a region", new StatisticDescriptor[] {
+        bcMisses, bcHits, bcCached, bcBytesCached, bcBytesEvicted
+    });
+
+    blockCache = new CacheOperation(bcMisses.getId(), bcHits.getId(), bcCached.getId(), bcBytesCached.getId(), bcBytesEvicted.getId());
+
+
+    stats = factory.createAtomicStatistics(type, name);
+  }
+
+  public void close() {
+    stats.close();
+  }
+  
+  public Statistics getStats() {
+    return stats;
+  }
+  
+  public CacheOperation getBlockCache() {
+    return blockCache;
+  }
+  
+  
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("blockCache = {").append(blockCache).append("}\n");
+    
+    return sb.toString();
+  }
+  
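+  /**
+   * Tracks the count, in-progress count, cumulative time, and error count of a
+   * timed operation.  A sketch of the intended call pattern (the names are
+   * illustrative):
+   * <pre>
+   * long start = timedOp.begin();
+   * try {
+   *   // ... do the timed work ...
+   *   timedOp.end(start);
+   * } catch (RuntimeException e) {
+   *   timedOp.error(start);
+   *   throw e;
+   * }
+   * </pre>
+   */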
+  public class TimedOperation {
+    protected final int countId;
+    protected final int inProgressId;
+    protected final int timeId;
+    private final int errorsId;
+    
+    public TimedOperation(int count, int inProgress, int time, int errors) {
+      this.countId = count;
+      this.inProgressId = inProgress;
+      this.timeId = time;
+      this.errorsId = errors;
+    }
+    
+    public long begin() {
+      stats.incLong(inProgressId, 1);
+      return getStatTime();
+    }
+    
+    public long end(long start) {
+      stats.incLong(inProgressId, -1);
+      stats.incLong(countId, 1);
+      stats.incLong(timeId, getStatTime() - start);
+      return getStatTime();
+    }
+    
+    public void error(long start) {
+      end(start);
+      stats.incLong(errorsId, 1);
+    }
+    
+    public long getCount() {
+      return stats.getLong(countId);
+    }
+    
+    public long getInProgress() {
+      return stats.getLong(inProgressId);
+    }
+    
+    public long getTime() {
+      return stats.getLong(timeId);
+    }
+    
+    public long getErrors() {
+      return stats.getLong(errorsId);
+    }
+    
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder();
+      sb.append("count=").append(getCount());
+      sb.append(";inProgress=").append(getInProgress());
+      sb.append(";errors=").append(getErrors());
+      sb.append(";time=").append(getTime());
+      
+      return sb.toString();
+    }
+  }
+  
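+  /**
+   * Tracks block cache hits, misses, and the cached byte footprint.  A sketch
+   * of the intended call pattern (the names and sizes are illustrative):
+   * <pre>
+   * if (foundInCache) {
+   *   blockCache.hit();
+   * } else {
+   *   blockCache.miss();
+   *   blockCache.store(blockSizeInBytes);  // block loaded and added to the cache
+   * }
+   * // when a block is later evicted:
+   * blockCache.evict(blockSizeInBytes);
+   * </pre>
+   */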
+  public class CacheOperation {
+    private final int missesId;
+    private final int hitsId;
+    private final int cachedId;
+    private final int bytesCachedId;
+    private final int bytesEvictedId;
+    
+    public CacheOperation(int missesId, int hitsId, int cachedId, 
+        int bytesCachedId, int bytesEvictedId) {
+      this.missesId = missesId;
+      this.hitsId = hitsId;
+      this.cachedId = cachedId;
+      this.bytesCachedId = bytesCachedId;
+      this.bytesEvictedId = bytesEvictedId;
+    }
+    
+    public void store(long bytes) {
+      stats.incLong(cachedId, 1);
+      stats.incLong(bytesCachedId, bytes);
+    }
+    
+    public void evict(long bytes) {
+      stats.incLong(cachedId, -1);
+      stats.incLong(bytesCachedId, -bytes);
+      stats.incLong(bytesEvictedId, bytes);
+    }
+    
+    public void hit() {
+      stats.incLong(hitsId, 1);
+    }
+    
+    public void miss() {
+      stats.incLong(missesId, 1);
+    }
+    
+    public long getMisses() {
+      return stats.getLong(missesId);
+    }
+    
+    public long getHits() {
+      return stats.getLong(hitsId);
+    }
+    
+    public long getCached() {
+      return stats.getLong(cachedId);
+    }
+    
+    public long getBytesCached() {
+      return stats.getLong(bytesCachedId);
+    }
+    
+    public long getBytesEvicted() {
+      return stats.getLong(bytesEvictedId);
+    }
+    
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder();
+      sb.append("misses=").append(getMisses());
+      sb.append(";hits=").append(getHits());
+      sb.append(";cached=").append(getCached());
+      sb.append(";bytesCached=").append(getBytesCached());
+      sb.append(";bytesEvicted=").append(getBytesEvicted());
+      
+      return sb.toString();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/KeyValueIterator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/KeyValueIterator.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/KeyValueIterator.java
new file mode 100644
index 0000000..df7e1ac
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/KeyValueIterator.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.internal.cache.persistence.soplog;
+
+import java.util.Iterator;
+
+/**
+ * Provides an {@link Iterator} view over a collection of keys and values.  The
+ * implementor must provide access to the current key/value as well as a means
+ * to move to the next pair.
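+ * <p>
+ * A minimal consumption sketch (the generic types and the {@code dump} method
+ * are illustrative):
+ * <pre>
+ * void dump(KeyValueIterator&lt;byte[], byte[]&gt; it) {
+ *   while (it.hasNext()) {
+ *     it.next();
+ *     byte[] key = it.key();
+ *     byte[] value = it.value();
+ *   }
+ * }
+ * </pre>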
+ * 
+ *
+ * @param <K> the key type
+ * @param <V> the value type
+ */
+public interface KeyValueIterator<K, V> extends Iterator<K> {
+  /**
+   * Returns the key at the current position.
+   * @return the key
+   */
+  public K key();
+  
+  /**
+   * Returns the value at the current position.
+   * @return the value
+   */
+  public V value();
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/SortedOplogStatistics.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/SortedOplogStatistics.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/SortedOplogStatistics.java
new file mode 100644
index 0000000..35baafb
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/SortedOplogStatistics.java
@@ -0,0 +1,505 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.internal.cache.persistence.soplog;
+
+import static com.gemstone.gemfire.distributed.internal.DistributionStats.getStatTime;
+
+import com.gemstone.gemfire.StatisticDescriptor;
+import com.gemstone.gemfire.Statistics;
+import com.gemstone.gemfire.StatisticsFactory;
+import com.gemstone.gemfire.StatisticsType;
+import com.gemstone.gemfire.StatisticsTypeFactory;
+import com.gemstone.gemfire.internal.DummyStatisticsFactory;
+import com.gemstone.gemfire.internal.StatisticsTypeFactoryImpl;
+
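+/**
+ * Captures read, scan, write, put, flush, compaction, bloom filter, clear,
+ * destroy, block read, and block cache statistics for a sorted oplog store,
+ * along with gauges for active/inactive files, active readers, and persistent
+ * store usage.
+ */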
+public class SortedOplogStatistics {
+  private final Statistics stats;
+  
+  private final IOOperation read;
+  private final ScanOperation scan;
+  private final IOOperation write;
+  private final IOOperation put;
+  private final IOOperation flush;
+  private final IOOperation minorCompaction;
+  private final IOOperation majorCompaction;
+  private final BloomOperation bloom;
+  private final TimedOperation clear;
+  private final TimedOperation destroy;
+  
+  private final IOOperation blockRead;
+  private final CacheOperation blockCache;
+  
+  private final int activeFilesId;
+  private final int inactiveFilesId;
+  private final int activeReadersId;
+  
+  private final int storeUsageBytesId;
+
+  public SortedOplogStatistics(String typeName, String name) {
+    this(new DummyStatisticsFactory(), typeName, name);
+  }
+  
+  public SortedOplogStatistics(StatisticsFactory factory, String typeName, String name) {
+    StatisticsTypeFactory tf = StatisticsTypeFactoryImpl.singleton();
+    
+    StatisticDescriptor readCount = tf.createLongCounter("reads", "The total number of read operations", "ops");
+    StatisticDescriptor readInProgress = tf.createLongGauge("readsInProgress", "The number of read operations in progress", "ops");
+    StatisticDescriptor readTime = tf.createLongCounter("readTime", "The total time spent reading from disk", "nanoseconds");
+    StatisticDescriptor readBytes = tf.createLongCounter("readBytes", "The total number of bytes read from disk", "bytes");
+    StatisticDescriptor readErrors = tf.createLongCounter("readErrors", "The total number of read errors", "errors");
+
+    StatisticDescriptor scanCount = tf.createLongCounter("scans", "The total number of scan operations", "ops");
+    StatisticDescriptor scanInProgress = tf.createLongGauge("scansInProgress", "The number of scan operations in progress", "ops");
+    StatisticDescriptor scanTime = tf.createLongCounter("scanTime", "The total time scanner was operational", "nanoseconds");
+    StatisticDescriptor scanBytes = tf.createLongCounter("scanBytes", "The total number of bytes scanned from disk", "bytes");
+    StatisticDescriptor scanErrors = tf.createLongCounter("scanErrors", "The total number of scan errors", "errors");
+    StatisticDescriptor scanIterations = tf.createLongCounter("scanIterations", "The total number of scan iterations", "ops");
+    StatisticDescriptor scanIterationTime = tf.createLongCounter("scanIterationTime", "The total time spent scanning from persistence layer", "nanoseconds");
+
+    StatisticDescriptor writeCount = tf.createLongCounter("writes", "The total number of write operations", "ops");
+    StatisticDescriptor writeInProgress = tf.createLongGauge("writesInProgress", "The number of write operations in progress", "ops");
+    StatisticDescriptor writeTime = tf.createLongCounter("writeTime", "The total time spent writing to disk", "nanoseconds");
+    StatisticDescriptor writeBytes = tf.createLongCounter("writeBytes", "The total number of bytes written to disk", "bytes");
+    StatisticDescriptor writeErrors = tf.createLongCounter("writeErrors", "The total number of write errors", "errors");
+
+    StatisticDescriptor putCount = tf.createLongCounter("puts", "The total number of put operations", "ops");
+    StatisticDescriptor putInProgress = tf.createLongGauge("putsInProgress", "The number of put operations in progress", "ops");
+    StatisticDescriptor putTime = tf.createLongCounter("putTime", "The total time spent in put calls", "nanoseconds");
+    StatisticDescriptor putBytes = tf.createLongCounter("putBytes", "The total number of bytes put", "bytes");
+    StatisticDescriptor putErrors = tf.createLongCounter("putErrors", "The total number of put errors", "errors");
+
+    StatisticDescriptor flushCount = tf.createLongCounter("flushes", "The total number of flush operations", "ops");
+    StatisticDescriptor flushInProgress = tf.createLongGauge("flushesInProgress", "The number of flush operations in progress", "ops");
+    StatisticDescriptor flushTime = tf.createLongCounter("flushTime", "The total time spent flushing to disk", "nanoseconds");
+    StatisticDescriptor flushBytes = tf.createLongCounter("flushBytes", "The total number of bytes flushed to disk", "bytes");
+    StatisticDescriptor flushErrors = tf.createLongCounter("flushErrors", "The total number of flush errors", "errors");
+
+    StatisticDescriptor minorCompactionCount = tf.createLongCounter("minorCompactions", "The total number of minor compaction operations", "ops");
+    StatisticDescriptor minorCompactionInProgress = tf.createLongGauge("minorCompactionsInProgress", "The number of minor compaction operations in progress", "ops");
+    StatisticDescriptor minorCompactionTime = tf.createLongCounter("minorCompactionTime", "The total time spent in minor compactions", "nanoseconds");
+    StatisticDescriptor minorCompactionBytes = tf.createLongCounter("minorCompactionBytes", "The total number of bytes collected during minor compactions", "bytes");
+    StatisticDescriptor minorCompactionErrors = tf.createLongCounter("minorCompactionErrors", "The total number of minor compaction errors", "errors");
+
+    StatisticDescriptor majorCompactionCount = tf.createLongCounter("majorCompactions", "The total number of major compaction operations", "ops");
+    StatisticDescriptor majorCompactionInProgress = tf.createLongGauge("majorCompactionsInProgress", "The number of major compaction operations in progress", "ops");
+    StatisticDescriptor majorCompactionTime = tf.createLongCounter("majorCompactionTime", "The total time spent in major compactions", "nanoseconds");
+    StatisticDescriptor majorCompactionBytes = tf.createLongCounter("majorCompactionBytes", "The total number of bytes collected during major compactions", "bytes");
+    StatisticDescriptor majorCompactionErrors = tf.createLongCounter("majorCompactionErrors", "The total number of major compaction errors", "errors");
+
+    StatisticDescriptor bloomCount = tf.createLongCounter("bloomFilterCheck", "The total number of Bloom Filter checks", "ops");
+    StatisticDescriptor bloomInProgress = tf.createLongGauge("bloomFilterChecksInProgress", "The number of Bloom Filter checks in progress", "ops");
+    StatisticDescriptor bloomTime = tf.createLongCounter("bloomFilterCheckTime", "The total time spent checking the Bloom Filter", "nanoseconds");
+    StatisticDescriptor bloomErrors = tf.createLongCounter("bloomFilterErrors", "The total number of Bloom Filter errors", "errors");
+    StatisticDescriptor bloomFalsePositive = tf.createLongCounter("bloomFilterFalsePositives", "The total number of Bloom Filter false positives", "false positives");
+
+    StatisticDescriptor clearCount = tf.createLongCounter("clears", "The total number of clear operations", "ops");
+    StatisticDescriptor clearInProgress = tf.createLongGauge("clearsInProgress", "The number of clear operations in progress", "ops");
+    StatisticDescriptor clearTime = tf.createLongCounter("clearTime", "The total time spent in clear operations", "nanoseconds");
+    StatisticDescriptor clearErrors = tf.createLongGauge("clearErrors", "The total number of clear errors", "errors");
+
+    StatisticDescriptor destroyCount = tf.createLongCounter("destroys", "The total number of destroy operations", "ops");
+    StatisticDescriptor destroyInProgress = tf.createLongGauge("destroysInProgress", "The number of destroy operations in progress", "ops");
+    StatisticDescriptor destroyTime = tf.createLongCounter("destroyTime", "The total time spent in destroy operations", "nanoseconds");
+    StatisticDescriptor destroyErrors = tf.createLongGauge("destroyErrors", "The total number of destroy errors", "errors");
+
+    StatisticDescriptor brCount = tf.createLongCounter("blockReads", "The total number of block read operations", "ops");
+    StatisticDescriptor brInProgress = tf.createLongGauge("blockReadsInProgress", "The number of block read operations in progress", "ops");
+    StatisticDescriptor brTime = tf.createLongCounter("blockReadTime", "The total time spent reading blocks from disk", "nanoseconds");
+    StatisticDescriptor brBytes = tf.createLongCounter("blockReadBytes", "The total number of block bytes read from disk", "bytes");
+    StatisticDescriptor brErrors = tf.createLongCounter("blockReadErrors", "The total number of block read errors", "errors");
+
+    StatisticDescriptor bcMisses = tf.createLongCounter("blockCacheMisses", "The total number of block cache misses", "misses");
+    StatisticDescriptor bcHits = tf.createLongCounter("blockCacheHits", "The total number of block cache hits", "hits");
+    StatisticDescriptor bcCached = tf.createLongGauge("blocksCached", "The current number of cached blocks", "blocks");
+    StatisticDescriptor bcBytesCached = tf.createLongGauge("blockBytesCached", "The current number of bytes cached", "bytes");
+    StatisticDescriptor bcBytesEvicted = tf.createLongCounter("blockBytesEvicted", "The total number of bytes evicted", "bytes");
+
+    StatisticDescriptor activeFileCount = tf.createLongGauge("activeFileCount", "The current number of active files", "files");
+    StatisticDescriptor inactiveFileCount = tf.createLongGauge("inactiveFileCount", "The current number of inactive files", "files");
+    StatisticDescriptor activeReaderCount = tf.createLongGauge("activeReaderCount", "The current number of active file readers", "files");
+    
+    StatisticDescriptor storeUsageBytes = tf.createLongGauge("storeUsageBytes", "The total volume occupied on persistent store", "bytes");
+    
+    StatisticsType type = tf.createType(typeName, 
+        "Statistics about structured I/O operations for a region", new StatisticDescriptor[] {
+        readCount, readInProgress, readTime, readBytes, readErrors,
+        scanCount, scanInProgress, scanTime, scanBytes, scanErrors, scanIterations, scanIterationTime,
+        writeCount, writeInProgress, writeTime, writeBytes, writeErrors,
+        putCount, putInProgress, putTime, putBytes, putErrors,
+        flushCount, flushInProgress, flushTime, flushBytes, flushErrors,
+        minorCompactionCount, minorCompactionInProgress, minorCompactionTime, minorCompactionBytes, minorCompactionErrors,
+        majorCompactionCount, majorCompactionInProgress, majorCompactionTime, majorCompactionBytes, majorCompactionErrors,
+        bloomCount, bloomInProgress, bloomTime, bloomErrors, bloomFalsePositive,
+        clearCount, clearInProgress, clearTime, clearErrors,
+        destroyCount, destroyInProgress, destroyTime, destroyErrors,
+        brCount, brInProgress, brTime, brBytes, brErrors,
+        bcMisses, bcHits, bcCached, bcBytesCached, bcBytesEvicted,
+        activeFileCount, inactiveFileCount, activeReaderCount, storeUsageBytes
+    });
+
+    read = new IOOperation(readCount.getId(), readInProgress.getId(), readTime.getId(), readBytes.getId(), readErrors.getId());
+    scan = new ScanOperation(scanCount.getId(), scanInProgress.getId(), scanTime.getId(), scanBytes.getId(), scanErrors.getId(), scanIterations.getId(), scanIterationTime.getId());    
+    write = new IOOperation(writeCount.getId(), writeInProgress.getId(), writeTime.getId(), writeBytes.getId(), writeErrors.getId());
+    put = new IOOperation(putCount.getId(), putInProgress.getId(), putTime.getId(), putBytes.getId(), putErrors.getId());
+    flush = new IOOperation(flushCount.getId(), flushInProgress.getId(), flushTime.getId(), flushBytes.getId(), flushErrors.getId());
+    minorCompaction = new IOOperation(minorCompactionCount.getId(), minorCompactionInProgress.getId(), minorCompactionTime.getId(), minorCompactionBytes.getId(), minorCompactionErrors.getId());
+    majorCompaction = new IOOperation(majorCompactionCount.getId(), majorCompactionInProgress.getId(), majorCompactionTime.getId(), majorCompactionBytes.getId(), majorCompactionErrors.getId());
+    bloom = new BloomOperation(bloomCount.getId(), bloomInProgress.getId(), bloomTime.getId(), bloomErrors.getId(), bloomFalsePositive.getId());
+    clear = new TimedOperation(clearCount.getId(), clearInProgress.getId(), clearTime.getId(), clearErrors.getId());
+    destroy = new TimedOperation(destroyCount.getId(), destroyInProgress.getId(), destroyTime.getId(), destroyErrors.getId());
+    
+    blockRead = new IOOperation(brCount.getId(), brInProgress.getId(), brTime.getId(), brBytes.getId(), brErrors.getId());
+    blockCache = new CacheOperation(bcMisses.getId(), bcHits.getId(), bcCached.getId(), bcBytesCached.getId(), bcBytesEvicted.getId());
+
+    activeFilesId = activeFileCount.getId();
+    inactiveFilesId = inactiveFileCount.getId();
+    activeReadersId = activeReaderCount.getId();
+    storeUsageBytesId = storeUsageBytes.getId();
+
+    stats = factory.createAtomicStatistics(type, name);
+  }
+
+  public void close() {
+    stats.close();
+  }
+  
+  public Statistics getStats() {
+    return stats;
+  }
+  
+  public IOOperation getRead() {
+    return read;
+  }
+  
+  public ScanOperation getScan() {
+    return scan;
+  }
+  
+  public IOOperation getWrite() {
+    return write;
+  }
+  
+  public IOOperation getPut() {
+    return put;
+  }
+  
+  public IOOperation getFlush() {
+    return flush;
+  }
+  
+  public IOOperation getMinorCompaction() {
+    return minorCompaction;
+  }
+  
+  public IOOperation getMajorCompaction() {
+    return majorCompaction;
+  }
+  
+  public BloomOperation getBloom() {
+    return bloom;
+  }
+  
+  public TimedOperation getClear() {
+    return clear;
+  }
+  
+  public TimedOperation getDestroy() {
+    return destroy;
+  }
+
+  public IOOperation getBlockRead() {
+    return blockRead;
+  }
+  
+  public CacheOperation getBlockCache() {
+    return blockCache;
+  }
+  
+  public long getActiveFileCount() {
+    return stats.getLong(activeFilesId);
+  }
+  
+  public long getInactiveFileCount() {
+    return stats.getLong(inactiveFilesId);
+  }
+  
+  public long getActiveReaderCount() {
+    return stats.getLong(activeReadersId);
+  }
+  
+  public void incActiveFiles(int amt) {
+    stats.incLong(activeFilesId, amt);
+    assert stats.getLong(activeFilesId) >= 0;
+  }
+  
+  public void incInactiveFiles(int amt) {
+    stats.incLong(inactiveFilesId, amt);
+    assert stats.getLong(inactiveFilesId) >= 0;
+  }
+  
+  public void incActiveReaders(int amt) {
+    stats.incLong(activeReadersId, amt);
+    assert stats.getLong(activeReadersId) >= 0;
+  }
+  
+  public long getStoreUsageBytes() {
+    return stats.getLong(storeUsageBytesId);
+  }
+  
+  public void incStoreUsageBytes(long amt) {
+    stats.incLong(storeUsageBytesId, amt);
+    assert stats.getLong(storeUsageBytesId) >= 0;
+  }
+  
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("read = {").append(read).append("}\n");
+    sb.append("scan = {").append(scan).append("}\n");
+    sb.append("write = {").append(write).append("}\n");
+    sb.append("put = {").append(put).append("}\n");
+    sb.append("flush = {").append(flush).append("}\n");
+    sb.append("minorCompaction = {").append(minorCompaction).append("}\n");
+    sb.append("majorCompaction = {").append(majorCompaction).append("}\n");
+    sb.append("bloom = {").append(bloom).append("}\n");
+    sb.append("clear = {").append(clear).append("}\n");
+    sb.append("destroy = {").append(destroy).append("}\n");
+    sb.append("blockRead = {").append(blockRead).append("}\n");
+    sb.append("blockCache = {").append(blockCache).append("}\n");
+    sb.append("activeFiles = ").append(stats.getLong(activeFilesId)).append("\n");
+    sb.append("inactiveFiles = ").append(stats.getLong(inactiveFilesId)).append("\n");
+    sb.append("activeReaders = ").append(stats.getLong(activeReadersId)).append("\n");
+    sb.append("storeUsageBytes = ").append(stats.getLong(storeUsageBytesId)).append("\n");
+    
+    return sb.toString();
+  }
+  
+  public class TimedOperation {
+    protected final int countId;
+    protected final int inProgressId;
+    protected final int timeId;
+    private final int errorsId;
+    
+    public TimedOperation(int count, int inProgress, int time, int errors) {
+      this.countId = count;
+      this.inProgressId = inProgress;
+      this.timeId = time;
+      this.errorsId = errors;
+    }
+    
+    public long begin() {
+      stats.incLong(inProgressId, 1);
+      return getStatTime();
+    }
+    
+    public long end(long start) {
+      stats.incLong(inProgressId, -1);
+      stats.incLong(countId, 1);
+      stats.incLong(timeId, getStatTime() - start);
+      return getStatTime();
+    }
+    
+    public void error(long start) {
+      end(start);
+      stats.incLong(errorsId, 1);
+    }
+    
+    public long getCount() {
+      return stats.getLong(countId);
+    }
+    
+    public long getInProgress() {
+      return stats.getLong(inProgressId);
+    }
+    
+    public long getTime() {
+      return stats.getLong(timeId);
+    }
+    
+    public long getErrors() {
+      return stats.getLong(errorsId);
+    }
+    
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder();
+      sb.append("count=").append(getCount());
+      sb.append(";inProgress=").append(getInProgress());
+      sb.append(";errors=").append(getErrors());
+      sb.append(";time=").append(getTime());
+      
+      return sb.toString();
+    }
+  }
+  
+  public class IOOperation extends TimedOperation {
+    protected final int bytesId;
+    
+    public IOOperation(int count, int inProgress, int time, int bytes, int errors) {
+      super(count, inProgress, time, errors);
+      this.bytesId = bytes;
+    }
+    
+    public long end(long bytes, long start) {
+      stats.incLong(bytesId, bytes);
+      return super.end(start);
+    }
+    
+    public long getBytes() {
+      return stats.getLong(bytesId);
+    }
+    
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder(super.toString());
+      sb.append(";bytes=").append(getBytes());
+      
+      return sb.toString();
+    }
+  }
+
+  public class ScanOperation extends IOOperation {
+    private final int iterationsId;
+    private final int iterationTimeId;
+
+    public ScanOperation(int count, int inProgress, int time, int bytes, int errors, int iterCount, int iterTime) {
+      super(count, inProgress, time, bytes, errors);
+      iterationsId = iterCount;
+      iterationTimeId = iterTime;
+    }
+    
+    public long beginIteration() {
+      return getStatTime();
+    }
+    
+    public void endIteration(long bytes, long start){
+      stats.incLong(iterationsId, 1);
+      stats.incLong(bytesId, bytes);
+      stats.incLong(iterationTimeId, getStatTime() - start);
+    }
+    
+    public long getIterations() {
+      return stats.getLong(iterationsId);
+    }
+    
+    public long getIterationTime() {
+      return stats.getLong(iterationTimeId);
+    }
+    
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder(super.toString());
+      sb.append(";iterations=").append(getIterations());
+      sb.append(";iterationTime=").append(getIterationTime());
+      
+      return sb.toString();
+    }
+  }
+
+  public class BloomOperation extends TimedOperation {
+    private final int falsePositiveId;
+    
+    public BloomOperation(int count, int inProgress, int time, int errors, int falsePositive) {
+      super(count, inProgress, time, errors);
+      this.falsePositiveId = falsePositive;
+    }
+    
+    public void falsePositive() {
+      stats.incLong(falsePositiveId, 1);
+    }
+    
+    public long getFalsePositives() {
+      return stats.getLong(falsePositiveId);
+    }
+    
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder(super.toString());
+      sb.append(";falsePositives=").append(getFalsePositives());
+      
+      return sb.toString();
+    }
+  }
+  
+  public class CacheOperation {
+    private final int missesId;
+    private final int hitsId;
+    private final int cachedId;
+    private final int bytesCachedId;
+    private final int bytesEvictedId;
+    
+    public CacheOperation(int missesId, int hitsId, int cachedId, 
+        int bytesCachedId, int bytesEvictedId) {
+      this.missesId = missesId;
+      this.hitsId = hitsId;
+      this.cachedId = cachedId;
+      this.bytesCachedId = bytesCachedId;
+      this.bytesEvictedId = bytesEvictedId;
+    }
+    
+    public void store(long bytes) {
+      stats.incLong(cachedId, 1);
+      stats.incLong(bytesCachedId, bytes);
+    }
+    
+    public void evict(long bytes) {
+      stats.incLong(cachedId, -1);
+      stats.incLong(bytesCachedId, -bytes);
+      stats.incLong(bytesEvictedId, bytes);
+    }
+    
+    public void hit() {
+      stats.incLong(hitsId, 1);
+    }
+    
+    public void miss() {
+      stats.incLong(missesId, 1);
+    }
+    
+    public long getMisses() {
+      return stats.getLong(missesId);
+    }
+    
+    public long getHits() {
+      return stats.getLong(hitsId);
+    }
+    
+    public long getCached() {
+      return stats.getLong(cachedId);
+    }
+    
+    public long getBytesCached() {
+      return stats.getLong(bytesCachedId);
+    }
+    
+    public long getBytesEvicted() {
+      return stats.getLong(bytesEvictedId);
+    }
+    
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder();
+      sb.append("misses=").append(getMisses());
+      sb.append(";hits=").append(getHits());
+      sb.append(";cached=").append(getCached());
+      sb.append(";bytesCached=").append(getBytesCached());
+      sb.append(";bytesEvicted=").append(getBytesEvicted());
+      
+      return sb.toString();
+    }
+  }
+}
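
A minimal usage sketch of the begin/end/error pattern that TimedOperation implements, assuming an instance of this statistics class named soplogStats and a hypothetical doClear() helper standing in for the measured work:

    long start = soplogStats.getClear().begin();   // bumps clearsInProgress and captures the start time
    try {
      doClear();                                   // placeholder for the operation being timed
      soplogStats.getClear().end(start);           // decrements in-progress, adds to clears and clearTime
    } catch (IOException e) {
      soplogStats.getClear().error(start);         // same as end(start), plus a clearErrors increment
      throw e;
    }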

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/SortedReader.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/SortedReader.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/SortedReader.java
new file mode 100644
index 0000000..1042e22
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/SortedReader.java
@@ -0,0 +1,255 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.internal.cache.persistence.soplog;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Comparator;
+import org.apache.hadoop.io.RawComparator;
+
+/**
+ * Defines a means to read sorted data including performing range scans.
+ * 
+ * @param <V> type of value returned by the sorted reader
+ * 
+ */
+public interface SortedReader<V> extends Closeable {
+  /**
+   * Defines the names of additional data that may be associated with a sorted
+   * reader.
+   */
+  public enum Metadata {
+    /** identifies the disk store associated with the soplog, optional */
+    DISK_STORE,
+    
+    /** identifies the RVV data, optional */
+    RVV;
+
+    /**
+     * Converts the metadata name to bytes.
+     * @return the bytes
+     */
+    public byte[] bytes() {
+      return ("gemfire." + name()).getBytes();
+    }
+  }
+  
+  /**
+   * Filters data based on metadata values.
+   */
+  public interface MetadataFilter {
+    /**
+     * Returns the name this filter acts upon.
+     * @return the name
+     */
+    Metadata getName();
+    
+    /**
+     * Returns true if the metadata value passes the filter.
+     * @param value the value to check; may be null if the metadata value does
+     *              not exist or has not been assigned yet
+     * @return true if accepted
+     */
+    boolean accept(byte[] value);
+  }
+  
+  /**
+   * Allows comparisons between serialized objects.
+   */
+  public interface SerializedComparator extends RawComparator<byte[]> {
+  }
+  
+  /**
+   * Allows sorted iteration through a set of keys and values.
+   */
+  public interface SortedIterator<V> extends KeyValueIterator<ByteBuffer, V> {
+    /**
+     * Closes the iterator and frees any retained resources.
+     */
+    public abstract void close();
+  }
+
+  /**
+   * Defines the statistics available on a sorted file.
+   */
+  public interface SortedStatistics {
+    /**
+     * Returns the number of keys in the file.
+     * @return the key count
+     */
+    long keyCount();
+    
+    /**
+     * Returns the first key in the file.
+     * @return the first key
+     */
+    byte[] firstKey();
+    
+    /**
+     * Returns the last key in the file.
+     * @return the last key
+     */
+    byte[] lastKey();
+    
+    /**
+     * Returns the average key size in bytes.
+     * @return the average key size
+     */
+    double avgKeySize();
+    
+    /**
+     * Returns the average value size in bytes.
+     * @return the average value size
+     */
+    double avgValueSize();
+    
+    /**
+     * Frees any resources held for statistics generation.
+     */
+    void close();
+  }
+  
+  /**
+   * Returns true if the bloom filter might contain the supplied key.  The 
+   * nature of the bloom filter is such that false positives are allowed, but
+   * false negatives cannot occur.
+   * 
+   * @param key the key to test
+   * @return true if the key might be present
+   * @throws IOException read error
+   */
+  boolean mightContain(byte[] key) throws IOException;
+
+  /**
+   * Returns the value associated with the given key.
+   * 
+   * @param key the key
+   * @return the value, or null if the key is not found
+   * @throws IOException read error
+   */
+  V read(byte[] key) throws IOException;
+
+  /**
+   * Iterates from the first key in the file to the requested key.
+   * @param to the ending key
+   * @param inclusive true if the ending key is included in the iteration
+   * @return the sorted iterator
+   * @throws IOException scan error
+   */
+  SortedIterator<V> head(byte[] to, boolean inclusive) throws IOException;
+  
+  /**
+   * Iterates from the requested key to the last key in the file.
+   * @param from the starting key
+   * @param inclusive true if the starting key should be included in the iteration
+   * @return the sorted iterator
+   * @throws IOException scan error
+   */
+  SortedIterator<V> tail(byte[] from, boolean inclusive) throws IOException;
+
+  /**
+   * Iterates over the entire contents of the sorted file.
+   * 
+   * @return the sorted iterator
+   * @throws IOException scan error
+   */
+  SortedIterator<V> scan() throws IOException;
+  
+  /**
+   * Scans the available keys and allows iteration over the interval [from, to) 
+   * where the starting key is included and the ending key is excluded from 
+   * the results.
+   * 
+   * @param from the start key
+   * @param to the end key
+   * @return the sorted iterator
+   * @throws IOException scan error
+   */
+  SortedIterator<V> scan(byte[] from, byte[] to) throws IOException;
+
+  /**
+   * Scans the keys and returns an iterator over the interval [equalTo, equalTo].
+   * 
+   * @param equalTo the key to match
+   * @return the sorted iterator
+   * @throws IOException scan error
+   */
+  SortedIterator<V> scan(byte[] equalTo) throws IOException;
+  
+  /**
+   * Scans the keys and allows iteration between the given keys.
+   * 
+   * @param from the start key
+   * @param fromInclusive true if the start key is included in the scan
+   * @param to the end key
+   * @param toInclusive true if the end key is included in the scan
+   * @return the sorted iterator
+   * @throws IOException scan error
+   */
+  SortedIterator<V> scan(byte[] from, boolean fromInclusive, 
+      byte[] to, boolean toInclusive) throws IOException;
+
+  /**
+   * Scans the keys and allows iteration between the given keys after applying
+   * the metadata filter and the order flag.  These parameters override values
+   * configured using <code>withAscending</code> or <code>withFilter</code>.
+   * 
+   * @param from the start key
+   * @param fromInclusive true if the start key is included in the scan
+   * @param to the end key
+   * @param toInclusive true if the end key is included in the scan
+   * @param ascending true if ascending
+   * @param filter filters data based on metadata values
+   * @return the sorted iterator
+   * @throws IOException scan error
+   */
+  SortedIterator<V> scan(
+      byte[] from, boolean fromInclusive, 
+      byte[] to, boolean toInclusive,
+      boolean ascending,
+      MetadataFilter filter) throws IOException;
+
+  /**
+   * Changes the iteration order of subsequent operations.
+   * 
+   * @param ascending true if ascending order (default)
+   * @return the reader
+   */
+  SortedReader<V> withAscending(boolean ascending);
+  
+  /**
+   * Applies a metadata filter to subsequent operations.
+   * 
+   * @param filter the filter to apply
+   * @return the reader
+   */
+  SortedReader<V> withFilter(MetadataFilter filter);
+  
+  /**
+   * Returns the comparator used for sorting keys.
+   * @return the comparator
+   */
+  SerializedComparator getComparator();
+  
+  /**
+   * Returns the statistics regarding the keys present in the sorted file.
+   * @return the statistics
+   * @throws IOException unable to retrieve statistics
+   */
+  SortedStatistics getStatistics() throws IOException;
+}
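
As a quick orientation to the read path this interface defines, a caller can combine the bloom-filter check with a point read. This is only a sketch: ByteBuffer is an arbitrary choice for the value type and the lookup name is purely illustrative.

    ByteBuffer lookup(SortedReader<ByteBuffer> reader, byte[] key) throws IOException {
      // A negative bloom answer is authoritative (no false negatives), so the read can be skipped.
      if (!reader.mightContain(key)) {
        return null;
      }
      // A positive answer may be a false positive, so read() can still return null.
      return reader.read(key);
    }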

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/TrackedReference.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/TrackedReference.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/TrackedReference.java
new file mode 100644
index 0000000..2934f07
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/soplog/TrackedReference.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.internal.cache.persistence.soplog;
+
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Tracks the usage of a reference.
+ * 
+ *
+ * @param <T> the reference type
+ */
+public final class TrackedReference<T> {
+  /** the referent */
+  private final T ref;
+  
+  /** the number of uses */
+  private final AtomicInteger uses;
+  
+  /** list of users using this reference. Mainly for debugging */
+  final ConcurrentHashMap<String, AtomicInteger> users;
+
+  /**
+   * Decrements the use count of each reference.
+   * @param refs the references to decrement
+   */
+  public static <T> void decrementAll(Iterable<TrackedReference<T>> refs) {
+    for (TrackedReference<?> tr : refs) {
+      tr.decrement();
+    }
+  }
+  
+  public TrackedReference(T ref) {
+    this.ref = ref;
+    uses = new AtomicInteger(0);
+    users = new ConcurrentHashMap<String, AtomicInteger>();
+  }
+  
+  /**
+   * Returns the referent.
+   * @return the referent
+   */
+  public final T get() {
+    return ref;
+  }
+  
+  /**
+   * Returns the current count.
+   * @return the current uses
+   */
+  public int uses() {
+    return uses.get();
+  }
+  
+  /**
+   * Returns true if the reference is in use.
+   * @return true if used
+   */
+  public boolean inUse() {
+    return uses() > 0;
+  }
+  
+  /**
+   * Increments the use count and returns the reference.
+   * @return the reference
+   */
+  public T getAndIncrement() {
+    increment();
+    return ref;
+  }
+  
+  /**
+   * Increments the use counter and returns the current count.
+   * @return the current uses
+   */
+  public int increment() {
+    return increment(null);
+  }
+  
+  /**
+   * Increments the use counter and returns the current count.
+   * @param user the user acquiring the reference, tracked for debugging; may be null
+   * @return the current uses
+   */
+  public int increment(String user) {
+    int val = uses.incrementAndGet();
+    if (user != null) {
+      AtomicInteger counter = users.get(user);
+      if (counter == null) {
+        counter = new AtomicInteger();
+        users.putIfAbsent(user, counter);
+        counter = users.get(user);
+      }
+      counter.incrementAndGet();
+    }
+    assert val >= 1;
+    
+    return val;
+  }
+  
+  /**
+   * Decrements the use counter and returns the current count.
+   * @return the current uses
+   */
+  public int decrement() {
+    return decrement(null);
+  }
+  
+  /**
+   * Decrements the use counter and returns the current count.
+   * @param user the user releasing the reference, tracked for debugging; may be null
+   * @return the current uses
+   */
+  public int decrement(String user) {
+    int val = uses.decrementAndGet();
+    assert val >= 0;
+    if (user != null) {
+      AtomicInteger counter = users.get(user);
+      if (counter != null) {
+        counter.decrementAndGet();
+      }
+    }
+    
+    return val;
+  }
+  
+  @Override
+  public String toString() {
+    if (users != null) {
+      StringBuffer sb = new StringBuffer();
+      sb.append(ref.toString()).append(": ").append(uses());
+      for (Entry<String, AtomicInteger> user : users.entrySet()) {
+        sb.append(" ").append(user.getKey()).append(":").append(user.getValue().intValue());
+      }
+      return sb.toString();
+    }
+    return uses() + ": " + ref.toString();
+  }
+}
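
To make the intended counting protocol concrete, a holder of a TrackedReference typically pins the referent around each use and releases it in a finally block; the String referent and file name below are placeholders:

    TrackedReference<String> ref = new TrackedReference<String>("soplog-0001.hop");
    String target = ref.getAndIncrement();   // use count becomes 1, ref.inUse() is true
    try {
      // ... work with target while the reference is pinned ...
    } finally {
      ref.decrement();                       // use count returns to 0, ref.inUse() is false
    }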

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/BaseCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/BaseCommand.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/BaseCommand.java
index ca7818a..e6c07d9 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/BaseCommand.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/BaseCommand.java
@@ -1145,7 +1145,7 @@ public abstract class BaseCommand implements Command {
         VersionTagHolder versionHolder = new VersionTagHolder();
         ClientProxyMembershipID id = servConn == null ? null : servConn.getProxyID();
         // From Get70.getValueAndIsObject()
-        Object data = region.get(entryKey, null, true, true, true, id, versionHolder, true);
+        Object data = region.get(entryKey, null, true, true, true, id, versionHolder, true, false);
         VersionTag vt = versionHolder.getVersionTag();
 
         updateValues(values, entryKey, data, vt);
@@ -1252,7 +1252,7 @@ public abstract class BaseCommand implements Command {
         }
 
         ClientProxyMembershipID id = servConn == null ? null : servConn.getProxyID();
-        data = region.get(key, null, true, true, true, id, versionHolder, true);
+        data = region.get(key, null, true, true, true, id, versionHolder, true, false);
         versionTag = versionHolder.getVersionTag();
         updateValues(values, key, data, versionTag);
 
@@ -1345,7 +1345,7 @@ public abstract class BaseCommand implements Command {
       key = it.next();
       versionHolder = new VersionTagHolder();
 
-      Object value = region.get(key, null, true, true, true, requestingClient, versionHolder, true);
+      Object value = region.get(key, null, true, true, true, requestingClient, versionHolder, true, false);
       
       updateValues(values, key, value, versionHolder.getVersionTag());
 
@@ -1548,7 +1548,7 @@ public abstract class BaseCommand implements Command {
           ClientProxyMembershipID id = servConn == null ? null : servConn
               .getProxyID();
           data = region.get(key, null, true, true, true, id, versionHolder,
-              true);
+              true, false);
           versionTag = versionHolder.getVersionTag();
           updateValues(values, key, data, versionTag);
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/command/Get70.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/command/Get70.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/command/Get70.java
index 55047c7..7898b3c 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/command/Get70.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/command/Get70.java
@@ -24,6 +24,7 @@ import com.gemstone.gemfire.cache.client.internal.GetOp;
 import com.gemstone.gemfire.cache.operations.GetOperationContext;
 import com.gemstone.gemfire.cache.operations.internal.GetOperationContextImpl;
 import com.gemstone.gemfire.distributed.internal.DistributionStats;
+import com.gemstone.gemfire.internal.Assert;
 import com.gemstone.gemfire.internal.cache.CachedDeserializable;
 import com.gemstone.gemfire.internal.cache.EntryEventImpl;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
@@ -304,7 +305,7 @@ public class Get70 extends BaseCommand {
 //    } else {
       ClientProxyMembershipID id = servConn == null ? null : servConn.getProxyID();
       VersionTagHolder versionHolder = new VersionTagHolder();
-      data  = ((LocalRegion) region).get(key, callbackArg, true, true, true, id, versionHolder, true);
+      data  = ((LocalRegion) region).get(key, callbackArg, true, true, true, id, versionHolder, true, true /*allowReadFromHDFS*/);
 //    }
     versionTag = versionHolder.getVersionTag();
     

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/command/Request.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/command/Request.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/command/Request.java
index 69d54a1..2a617a8 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/command/Request.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tier/sockets/command/Request.java
@@ -242,7 +242,7 @@ public class Request extends BaseCommand {
 
     boolean isObject = true;
     ClientProxyMembershipID id = servConn == null ? null : servConn.getProxyID();
-    Object data  = ((LocalRegion) region).get(key, callbackArg, true, true, true, id, null, false);
+    Object data  = ((LocalRegion) region).get(key, callbackArg, true, true, true, id, null, false, true/*allowReadFromHDFS*/);
     
     // If the value in the VM is a CachedDeserializable,
     // get its value. If it is Token.REMOVED, Token.DESTROYED,

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/ClientTXRegionStub.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/ClientTXRegionStub.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/ClientTXRegionStub.java
index 90522b2..e896649 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/ClientTXRegionStub.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/ClientTXRegionStub.java
@@ -67,8 +67,8 @@ public class ClientTXRegionStub implements TXRegionStub {
 
   
   public Object findObject(KeyInfo keyInfo, boolean isCreate,
-                           boolean generateCallbacks, Object value, boolean preferCD,
-                           ClientProxyMembershipID requestingClient, EntryEventImpl event) {
+      boolean generateCallbacks, Object value, boolean preferCD,
+      ClientProxyMembershipID requestingClient, EntryEventImpl event, boolean allowReadFromHDFS) {
     return proxy.get(keyInfo.getKey(), keyInfo.getCallbackArg(), event);
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/DistributedTXRegionStub.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/DistributedTXRegionStub.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/DistributedTXRegionStub.java
index 1637c4a..7c7df53 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/DistributedTXRegionStub.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/DistributedTXRegionStub.java
@@ -17,10 +17,12 @@
 package com.gemstone.gemfire.internal.cache.tx;
 
 import java.util.Collections;
+import java.util.Map;
 
 import com.gemstone.gemfire.cache.CacheException;
 import com.gemstone.gemfire.cache.EntryNotFoundException;
 import com.gemstone.gemfire.cache.RegionDestroyedException;
+import com.gemstone.gemfire.cache.RemoteTransactionException;
 import com.gemstone.gemfire.cache.TransactionDataNodeHasDepartedException;
 import com.gemstone.gemfire.cache.TransactionDataNotColocatedException;
 import com.gemstone.gemfire.cache.TransactionException;
@@ -30,6 +32,7 @@ import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedM
 import com.gemstone.gemfire.internal.cache.DistributedPutAllOperation;
 import com.gemstone.gemfire.internal.cache.DistributedRemoveAllOperation;
 import com.gemstone.gemfire.internal.cache.EntryEventImpl;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.KeyInfo;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionException;
@@ -51,6 +54,7 @@ import com.gemstone.gemfire.internal.cache.partitioned.RemoteSizeMessage;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ClientProxyMembershipID;
 import com.gemstone.gemfire.internal.cache.tier.sockets.VersionedObjectList;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.internal.util.concurrent.StoppableReentrantReadWriteLock;
 
 public class DistributedTXRegionStub extends AbstractPeerTXRegionStub {
   
@@ -155,13 +159,9 @@ public class DistributedTXRegionStub extends AbstractPeerTXRegionStub {
   }
 
   
-  public Object findObject(KeyInfo keyInfo,
-                           boolean isCreate,
-                           boolean generateCallbacks,
-                           Object value,
-                           boolean preferCD,
-                           ClientProxyMembershipID requestingClient,
-                           EntryEventImpl clientEvent) {
+  public Object findObject(KeyInfo keyInfo, boolean isCreate,
+      boolean generateCallbacks, Object value, boolean preferCD,
+      ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean allowReadFromHDFS) {
     Object retVal = null;
     final Object key = keyInfo.getKey();
     final Object callbackArgument = keyInfo.getCallbackArg();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/PartitionedTXRegionStub.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/PartitionedTXRegionStub.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/PartitionedTXRegionStub.java
index 01b1ed8..6723646 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/PartitionedTXRegionStub.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/PartitionedTXRegionStub.java
@@ -275,15 +275,15 @@ public class PartitionedTXRegionStub extends AbstractPeerTXRegionStub {
 
   
   public Object findObject(KeyInfo keyInfo, boolean isCreate,
-                           boolean generateCallbacks, Object value, boolean peferCD,
-                           ClientProxyMembershipID requestingClient,
-                           EntryEventImpl clientEvent) {
+      boolean generateCallbacks, Object value, boolean peferCD,
+      ClientProxyMembershipID requestingClient,
+      EntryEventImpl clientEvent, boolean allowReadFromHDFS) {
     Object retVal = null;
     final Object key = keyInfo.getKey();
     final Object callbackArgument = keyInfo.getCallbackArg();
     PartitionedRegion pr = (PartitionedRegion)region;
     try {
-      retVal = pr.getRemotely((InternalDistributedMember)state.getTarget(), keyInfo.getBucketId(), key, callbackArgument, peferCD, requestingClient, clientEvent, false);
+      retVal = pr.getRemotely((InternalDistributedMember)state.getTarget(), keyInfo.getBucketId(), key, callbackArgument, peferCD, requestingClient, clientEvent, false, allowReadFromHDFS);
     } catch (TransactionException e) {
       RuntimeException re = getTransactionException(keyInfo, e);
       re.initCause(e.getCause());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/TXRegionStub.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/TXRegionStub.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/TXRegionStub.java
index f2859f1..482882f 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/TXRegionStub.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/tx/TXRegionStub.java
@@ -42,8 +42,8 @@ public interface TXRegionStub {
   boolean containsValueForKey(KeyInfo keyInfo);
 
   Object findObject(KeyInfo keyInfo, boolean isCreate,
-                    boolean generateCallbacks, Object value, boolean preferCD,
-                    ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent);
+      boolean generateCallbacks, Object value, boolean preferCD,
+      ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean allowReadFromHDFS);
 
   Object getEntryForIterator(KeyInfo keyInfo, boolean allowTombstone);
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/AbstractGatewaySender.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/AbstractGatewaySender.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/AbstractGatewaySender.java
index fe09d03..94524bd 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/AbstractGatewaySender.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/AbstractGatewaySender.java
@@ -157,6 +157,8 @@ public abstract class AbstractGatewaySender implements GatewaySender,
   
   protected boolean isBucketSorted;
   
+  protected boolean isHDFSQueue;
+  
   protected boolean isMetaQueue;
   
   private int parallelismForReplicatedRegion;
@@ -258,6 +260,7 @@ public abstract class AbstractGatewaySender implements GatewaySender,
     this.maxMemoryPerDispatcherQueue = this.queueMemory / this.dispatcherThreads;
     this.myDSId = InternalDistributedSystem.getAnyInstance().getDistributionManager().getDistributedSystemId();
     this.serialNumber = DistributionAdvisor.createSerialNumber();
+    this.isHDFSQueue = attrs.isHDFSQueue();
     this.isMetaQueue = attrs.isMetaQueue();
     if (!(this.cache instanceof CacheCreation)) {
       this.stopper = new Stopper(cache.getCancelCriterion());
@@ -266,7 +269,8 @@ public abstract class AbstractGatewaySender implements GatewaySender,
         this.statistics = new GatewaySenderStats(cache.getDistributedSystem(),
             id);
       }
-      initializeEventIdIndex();
+      if (!attrs.isHDFSQueue())
+        initializeEventIdIndex();
     }
     this.isBucketSorted = attrs.isBucketSorted();
   }
@@ -314,10 +318,12 @@ public abstract class AbstractGatewaySender implements GatewaySender,
             cache.getDistributedSystem(), AsyncEventQueueImpl
                 .getAsyncEventQueueIdFromSenderId(id));
       }
-      initializeEventIdIndex();
+      if (!attrs.isHDFSQueue())
+        initializeEventIdIndex();
     }
     this.isBucketSorted = attrs.isBucketSorted();
-
+    this.isHDFSQueue = attrs.isHDFSQueue();
+   
   }
   
   public GatewaySenderAdvisor getSenderAdvisor() {
@@ -476,6 +482,10 @@ public abstract class AbstractGatewaySender implements GatewaySender,
     return this.isBucketSorted;
   }
 
+  public boolean getIsHDFSQueue() {
+    return this.isHDFSQueue;
+  }
+  
   public boolean getIsMetaQueue() {
     return this.isMetaQueue;
   }
@@ -853,6 +863,12 @@ public abstract class AbstractGatewaySender implements GatewaySender,
       return;
     }
     
+    if (getIsHDFSQueue() && event.getOperation().isEviction()) {
+      if (logger.isDebugEnabled())
+        logger.debug("Eviction event not queued: " + event);
+      stats.incEventsNotQueued();
+      return;
+    }
     // This filter was defined by Asif and exists in the old wan as well; the new
     // wan has another GatewayEventFilter. Do we need to get rid of this filter?
     // Cheetah is not considering this filter.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderAttributes.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderAttributes.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderAttributes.java
index 1cef940..025616d 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderAttributes.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderAttributes.java
@@ -30,6 +30,7 @@ import com.gemstone.gemfire.cache.wan.GatewayTransportFilter;
 public class GatewaySenderAttributes {
 
   public static final boolean DEFAULT_IS_BUCKETSORTED = true;
+  public static final boolean DEFAULT_IS_HDFSQUEUE = false;
   public static final boolean DEFAULT_IS_META_QUEUE = false;
 
 
@@ -81,6 +82,7 @@ public class GatewaySenderAttributes {
   
   public boolean isBucketSorted = GatewaySenderAttributes.DEFAULT_IS_BUCKETSORTED;
   
+  public boolean isHDFSQueue = GatewaySenderAttributes.DEFAULT_IS_HDFSQUEUE;
   public boolean isMetaQueue = GatewaySenderAttributes.DEFAULT_IS_META_QUEUE;
   
   public int getSocketBufferSize() {
@@ -189,6 +191,9 @@ public class GatewaySenderAttributes {
   public GatewayEventSubstitutionFilter getGatewayEventSubstitutionFilter() {
     return this.eventSubstitutionFilter;
   }
+  public boolean isHDFSQueue() {
+    return this.isHDFSQueue;
+  }
   public boolean isMetaQueue() {
     return this.isMetaQueue;
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderEventProcessor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderEventProcessor.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderEventProcessor.java
index 07a3be5..b63c7cb 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderEventProcessor.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderEventProcessor.java
@@ -36,6 +36,9 @@ import com.gemstone.gemfire.InternalGemFireException;
 import com.gemstone.gemfire.cache.CacheException;
 import com.gemstone.gemfire.cache.EntryEvent;
 import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSGatewayEventImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSParallelGatewaySenderQueue;
 import com.gemstone.gemfire.cache.wan.GatewayQueueEvent;
 import com.gemstone.gemfire.internal.cache.EntryEventImpl;
 import com.gemstone.gemfire.internal.cache.EnumListenerEvent;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderQueue.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderQueue.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderQueue.java
index f995ba4..8524ccf 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderQueue.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderQueue.java
@@ -22,6 +22,8 @@ package com.gemstone.gemfire.internal.cache.wan.parallel;
 import com.gemstone.gemfire.cache.CacheException;
 import com.gemstone.gemfire.cache.CacheListener;
 import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSGatewayEventImpl;
 import com.gemstone.gemfire.internal.cache.Conflatable;
 import com.gemstone.gemfire.internal.cache.DistributedRegion;
 import com.gemstone.gemfire.internal.cache.ForceReattemptException;
@@ -186,6 +188,11 @@ public class ConcurrentParallelGatewaySenderQueue implements RegionQueue {
    getPGSProcessor( bucketId).notifyEventProcessorIfRequired(bucketId);
   }
   
+  public HDFSBucketRegionQueue getBucketRegionQueue(PartitionedRegion region,
+    int bucketId) throws ForceReattemptException {
+	return getPGSProcessor(bucketId).getBucketRegionQueue(region, bucketId);
+  }
+  
   public void clear(PartitionedRegion pr, int bucketId) {
   	getPGSProcessor(bucketId).clear(pr, bucketId);
   }
@@ -200,6 +207,11 @@ public class ConcurrentParallelGatewaySenderQueue implements RegionQueue {
   	getPGSProcessor(bucketId).conflateEvent(conflatableObject, bucketId, tailKey);
   }
   
+  public HDFSGatewayEventImpl get(PartitionedRegion region, byte[] regionKey,
+      int bucketId) throws ForceReattemptException {
+    return getPGSProcessor(bucketId).get(region, regionKey, bucketId);
+  }
+  
   public void addShadowPartitionedRegionForUserRR(DistributedRegion userRegion) {
 	for(int i =0; i< processors.length; i++){
   	 processors[i].addShadowPartitionedRegionForUserRR(userRegion);;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderEventProcessor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderEventProcessor.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderEventProcessor.java
index 11502af..417ba13 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderEventProcessor.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderEventProcessor.java
@@ -28,6 +28,9 @@ import org.apache.logging.log4j.Logger;
 import com.gemstone.gemfire.cache.CacheException;
 import com.gemstone.gemfire.cache.EntryEvent;
 import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSGatewayEventImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSParallelGatewaySenderQueue;
 import com.gemstone.gemfire.cache.wan.GatewayQueueEvent;
 import com.gemstone.gemfire.internal.cache.Conflatable;
 import com.gemstone.gemfire.internal.cache.DistributedRegion;
@@ -101,7 +104,10 @@ public class ParallelGatewaySenderEventProcessor extends
     }
     
     ParallelGatewaySenderQueue queue;
-    queue = new ParallelGatewaySenderQueue(this.sender, targetRs, this.index, this.nDispatcher);
+    if (sender.getIsHDFSQueue())
+      queue = new HDFSParallelGatewaySenderQueue(this.sender, targetRs, this.index, this.nDispatcher);
+    else
+      queue = new ParallelGatewaySenderQueue(this.sender, targetRs, this.index, this.nDispatcher);
     
     queue.start();
     this.queue = queue;
@@ -139,8 +145,12 @@ public class ParallelGatewaySenderEventProcessor extends
 
       // while merging 42004, kept substituteValue as it is(it is barry's
       // change 42466). bucketID is merged with eventID.getBucketID
+	 if (!sender.getIsHDFSQueue())
       gatewayQueueEvent = new GatewaySenderEventImpl(operation, event,
           substituteValue, true, eventID.getBucketID());
+    else
+      gatewayQueueEvent = new HDFSGatewayEventImpl(operation,
+          event, substituteValue, true, eventID.getBucketID());
 
       if (getSender().beforeEnqueue(gatewayQueueEvent)) {
         long start = getSender().getStatistics().startTime();
@@ -198,6 +208,16 @@ public class ParallelGatewaySenderEventProcessor extends
   	((ParallelGatewaySenderQueue)this.queue).conflateEvent(conflatableObject, bucketId, tailKey);
   }
   
+  public HDFSGatewayEventImpl get(PartitionedRegion region, byte[] regionKey,
+    int bucketId) throws ForceReattemptException {
+    return ((HDFSParallelGatewaySenderQueue)this.queue).get(region, regionKey, bucketId);
+  }
+  
+  public HDFSBucketRegionQueue getBucketRegionQueue(PartitionedRegion region,
+    int bucketId) throws ForceReattemptException {
+  	return ((HDFSParallelGatewaySenderQueue)this.queue).getBucketRegionQueue(region, bucketId);
+  }
+  
   public void addShadowPartitionedRegionForUserPR(PartitionedRegion pr) {
 	// TODO Auto-generated method stub
 	((ParallelGatewaySenderQueue)this.queue).addShadowPartitionedRegionForUserPR(pr);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderQueue.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderQueue.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderQueue.java
index 46ff263..b0b1a32 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderQueue.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/parallel/ParallelGatewaySenderQueue.java
@@ -492,7 +492,7 @@ public class ParallelGatewaySenderQueue implements RegionQueue {
       if (this.userRegionNameToshadowPRMap.containsKey(regionName))
         return;
       
-      if(userPR.getDataPolicy().withPersistence() && !sender.isPersistenceEnabled()){
+      if(!isUsedForHDFS() && userPR.getDataPolicy().withPersistence() && !sender.isPersistenceEnabled()){
         throw new GatewaySenderException(
             LocalizedStrings.ParallelGatewaySenderQueue_NON_PERSISTENT_GATEWAY_SENDER_0_CAN_NOT_BE_ATTACHED_TO_PERSISTENT_REGION_1
                 .toLocalizedString(new Object[] { this.sender.getId(),
@@ -552,7 +552,7 @@ public class ParallelGatewaySenderQueue implements RegionQueue {
         }
 
         ParallelGatewaySenderQueueMetaRegion meta = metaRegionFactory.newMetataRegion(cache,
-            prQName, ra, sender);
+            prQName, ra, sender, isUsedForHDFS());
 
         try {
           prQ = (PartitionedRegion)cache
@@ -630,6 +630,10 @@ public class ParallelGatewaySenderQueue implements RegionQueue {
       bucketRegion.clear();
     }
   }
+  protected boolean isUsedForHDFS()
+  {
+    return false;
+  }
   protected void afterRegionAdd (PartitionedRegion userPR) {
 
   }
@@ -1853,12 +1857,18 @@ public class ParallelGatewaySenderQueue implements RegionQueue {
     public ParallelGatewaySenderQueueMetaRegion(String regionName,
         RegionAttributes attrs, LocalRegion parentRegion,
         GemFireCacheImpl cache, AbstractGatewaySender pgSender) {
+      this( regionName, attrs, parentRegion, cache, pgSender, false);
+    }
+    public ParallelGatewaySenderQueueMetaRegion(String regionName,
+        RegionAttributes attrs, LocalRegion parentRegion,
+        GemFireCacheImpl cache, AbstractGatewaySender pgSender, boolean isUsedForHDFS) {
       super(regionName, attrs, parentRegion, cache,
           new InternalRegionArguments().setDestroyLockFlag(true)
               .setRecreateFlag(false).setSnapshotInputStream(null)
               .setImageTarget(null)
               .setIsUsedForParallelGatewaySenderQueue(true)
-              .setParallelGatewaySender((AbstractGatewaySender)pgSender));
+              .setParallelGatewaySender((AbstractGatewaySender)pgSender)
+              .setIsUsedForHDFSParallelGatewaySenderQueue(isUsedForHDFS));
       this.sender = (AbstractGatewaySender)pgSender;
       
     }
@@ -1915,9 +1925,9 @@ public class ParallelGatewaySenderQueue implements RegionQueue {
   
   static class MetaRegionFactory {
     ParallelGatewaySenderQueueMetaRegion newMetataRegion(
-        GemFireCacheImpl cache, final String prQName, final RegionAttributes ra, AbstractGatewaySender sender) {
+        GemFireCacheImpl cache, final String prQName, final RegionAttributes ra, AbstractGatewaySender sender, boolean isUsedForHDFS) {
       ParallelGatewaySenderQueueMetaRegion meta = new ParallelGatewaySenderQueueMetaRegion(
-          prQName, ra, null, cache, sender);
+          prQName, ra, null, cache, sender, isUsedForHDFS);
       return meta;
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/AsyncEventQueueCreation.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/AsyncEventQueueCreation.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/AsyncEventQueueCreation.java
index 0015665..77f9596 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/AsyncEventQueueCreation.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/AsyncEventQueueCreation.java
@@ -41,6 +41,7 @@ public class AsyncEventQueueCreation implements AsyncEventQueue {
   private int maxQueueMemory = 0;
   private boolean isParallel = false;
   private boolean isBucketSorted = false;
+  private boolean isHDFSQueue = false;
   private int dispatcherThreads = 1;
   private OrderPolicy orderPolicy = OrderPolicy.KEY;
   
@@ -61,6 +62,7 @@ public class AsyncEventQueueCreation implements AsyncEventQueue {
     this.orderPolicy = senderAttrs.policy;
     this.asyncEventListener = eventListener;
     this.isBucketSorted = senderAttrs.isBucketSorted; 
+    this.isHDFSQueue = senderAttrs.isHDFSQueue;
     this.gatewayEventSubstitutionFilter = senderAttrs.eventSubstitutionFilter;
   }
   
@@ -211,4 +213,11 @@ public class AsyncEventQueueCreation implements AsyncEventQueue {
   public void setBucketSorted(boolean isBucketSorted) {
     this.isBucketSorted = isBucketSorted;
   }
+  public boolean isHDFSQueue() {
+    return this.isHDFSQueue;
+  }
+  
+  public void setIsHDFSQueue(boolean isHDFSQueue) {
+    this.isHDFSQueue = isHDFSQueue;
+  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
index d52d05e..019079d 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
@@ -91,6 +91,12 @@ import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.i18n.LogWriterI18n;
 import com.gemstone.gemfire.internal.Assert;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSIntegrationUtil;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreCreation;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
 import com.gemstone.gemfire.internal.cache.CacheConfig;
 import com.gemstone.gemfire.internal.cache.CacheServerLauncher;
@@ -193,7 +199,8 @@ public class CacheCreation implements InternalCache {
    * This is important for unit testing 44914.
    */
   protected final Map diskStores = new LinkedHashMap();
-
+  protected final Map hdfsStores = new LinkedHashMap();
+  
   private final List<File> backups = new ArrayList<File>();
 
   private CacheConfig cacheConfig = new CacheConfig();
@@ -507,6 +514,13 @@ public class CacheCreation implements InternalCache {
       }
     }
 
+    for(Iterator iter = this.hdfsStores.entrySet().iterator(); iter.hasNext(); ) {
+      Entry entry = (Entry) iter.next();
+      HDFSStoreCreation hdfsStoreCreation = (HDFSStoreCreation) entry.getValue();
+      HDFSStoreFactory storefactory = cache.createHDFSStoreFactory(hdfsStoreCreation);
+      storefactory.create((String) entry.getKey());
+    }
+
     cache.initializePdxRegistry();
 
     
@@ -517,6 +531,19 @@ public class CacheCreation implements InternalCache {
         (RegionAttributesCreation) getRegionAttributes(id);
       creation.inheritAttributes(cache, false);
 
+      // TODO: HDFS: HDFS store/queue will be mapped against region path and not
+      // the attribute id; don't really understand what this is trying to do
+      if (creation.getHDFSStoreName() != null)
+      {
+        HDFSStoreImpl store = cache.findHDFSStore(creation.getHDFSStoreName());
+        if(store == null) {
+          HDFSIntegrationUtil.createDefaultAsyncQueueForHDFS((Cache)cache, creation.getHDFSWriteOnly(), id);
+        }
+      }
+      if (creation.getHDFSStoreName() != null && creation.getPartitionAttributes().getColocatedWith() == null) {
+        creation.addAsyncEventQueueId(HDFSStoreFactoryImpl.getEventQueueName(id));
+      }
+      
       RegionAttributes attrs;
       // Don't let the RegionAttributesCreation escape to the user
       AttributesFactory factory = new AttributesFactory(creation);
@@ -1395,6 +1422,27 @@ public class CacheCreation implements InternalCache {
   }
   
   @Override
+  public HDFSStoreFactory createHDFSStoreFactory() {
+    // return a new factory bound to this cache creation
+    return new HDFSStoreFactoryImpl(this);
+  }
+  @Override
+  public HDFSStore findHDFSStore(String storeName) {
+    return (HDFSStore)this.hdfsStores.get(storeName);
+  }
+
+  @Override
+  public Collection<HDFSStoreImpl> getHDFSStores() {
+    return this.hdfsStores.values();
+  }
+
+  public void addHDFSStore(String name, HDFSStoreCreation hs) {
+    this.hdfsStores.put(name, hs);
+  }
+
+  
+
+  @Override
   public DistributedMember getMyId() {
     return null;
   }
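
The loop added to create(), above, simply replays every declared hdfs-store through the runtime factory, then looks stores up by name when regions reference them. The same wiring can be done programmatically; the sketch below is a hedged illustration that only uses names visible in this patch (createHDFSStoreFactory, the setters the XML parser drives on HDFSStoreCreation, create(name), findHDFSStore). The namenode URL, home directory and store name are made up, and the exact HDFSStoreFactory setter signatures are assumed rather than confirmed.

// Hedged sketch only: setter names mirror the ones CacheXmlParser.startHDFSStore()
// drives on HDFSStoreCreation; the namenode URL, home dir and store name are made up.
import com.gemstone.gemfire.cache.hdfs.HDFSStore;
import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;

public class HdfsStoreBootstrap {
  public static HDFSStore createStore(GemFireCacheImpl cache) {
    HDFSStoreFactory factory = cache.createHDFSStoreFactory();  // same call CacheCreation replays
    factory.setNameNodeURL("hdfs://localhost:8020");            // namenode-url attribute
    factory.setHomeDir("/gemfire/demo");                        // home-dir attribute
    factory.create("demoHdfsStore");                            // registers the store under this name
    return cache.findHDFSStore("demoHdfsStore");                // same lookup the XML path uses
  }
}

Regions then refer to the store by that name through the hdfs-store-name region attribute handled by the CacheXmlParser changes further down.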

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXml.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXml.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXml.java
index aa7d49a..c6b0509 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXml.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXml.java
@@ -487,6 +487,8 @@ public abstract class CacheXml implements EntityResolver2, ErrorHandler {
   protected static final String PERSISTENT_REPLICATE_DP = "persistent-replicate";
   protected static final String PARTITION_DP = "partition";
   protected static final String PERSISTENT_PARTITION_DP = "persistent-partition";
+  protected static final String HDFS_PARTITION_DP = "hdfs-partition";
+  protected static final String HDFS_PERSISTENT_PARTITION_DP = "hdfs-persistent-partition";
 
   /** The name of the <code>keep-alive-timeout</code> attribute */
   protected static final String KEEP_ALIVE_TIMEOUT = "keep-alive-timeout";
@@ -763,6 +765,35 @@ public abstract class CacheXml implements EntityResolver2, ErrorHandler {
   public static final String ASYNC_EVENT_QUEUE = "async-event-queue";
   protected static final String ASYNC_EVENT_QUEUE_IDS = "async-event-queue-ids";
   
+  protected static final String HDFS_EVENT_QUEUE = "hdfs-event-queue";
+  protected static final String HDFS_STORE_NAME = "hdfs-store-name";
+  public static final String HDFS_STORE = "hdfs-store";
+  protected static final String HDFS_HOME_DIR = "home-dir";
+  protected static final String HDFS_READ_CACHE_SIZE = "read-cache-size";
+  protected static final String HDFS_MAX_MEMORY = "max-memory";
+  protected static final String HDFS_BATCH_SIZE = "batch-size";
+  protected static final String HDFS_BATCH_INTERVAL = "batch-interval";
+  protected static final String HDFS_DISPATCHER_THREADS = "dispatcher-threads";
+  protected static final String HDFS_BUFFER_PERSISTENT = "buffer-persistent";
+  protected static final String HDFS_SYNCHRONOUS_DISK_WRITE = "synchronous-disk-write";
+  protected static final String HDFS_DISK_STORE = "disk-store";
+  protected static final String HDFS_MAX_WRITE_ONLY_FILE_SIZE = "max-write-only-file-size";
+  public static final String HDFS_WRITE_ONLY_FILE_ROLLOVER_INTERVAL = "write-only-file-rollover-interval";
+  
+  protected static final String HDFS_NAMENODE_URL = "namenode-url";
+  protected static final String HDFS_CLIENT_CONFIG_FILE = "hdfs-client-config-file";
+  public static final String HDFS_PURGE_INTERVAL = "purge-interval";
+  public static final String HDFS_MAJOR_COMPACTION = "major-compaction";
+  public static final String HDFS_MAJOR_COMPACTION_INTERVAL = "major-compaction-interval";
+  public static final String HDFS_MAJOR_COMPACTION_THREADS = "major-compaction-threads";
+  public static final String HDFS_MINOR_COMPACTION = "minor-compaction";
+  public static final String HDFS_MINOR_COMPACTION_THREADS = "minor-compaction-threads";   
+  
+  public static final String HDFS_TIME_FOR_FILE_ROLLOVER = "file-rollover-time-secs";
+  
+  protected static final String HDFS_WRITE_ONLY = "hdfs-write-only";
+  protected static final String HDFS_QUEUE_BATCH_SIZE = "batch-size-mb";
+  
   /** The name of the <code>compressor</code> attribute */
   protected static final String COMPRESSOR = "compressor";
   /** The name of the <code>off-heap</code> attribute



[11/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlGenerator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlGenerator.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlGenerator.java
index ea3c975..b6c072c 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlGenerator.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlGenerator.java
@@ -1938,6 +1938,10 @@ public class CacheXmlGenerator extends CacheXml implements XMLReader {
           dpString = PERSISTENT_REPLICATE_DP;
         } else if (dp == DataPolicy.PERSISTENT_PARTITION) {
           dpString = PERSISTENT_PARTITION_DP;
+        } else if (dp == DataPolicy.HDFS_PARTITION) {
+          dpString = HDFS_PARTITION_DP;
+        } else if (dp == DataPolicy.HDFS_PERSISTENT_PARTITION) {
+          dpString = HDFS_PERSISTENT_PARTITION_DP;
         } else if (dp.isPartition()) {
           if (this.version.compareTo(CacheXmlVersion.GEMFIRE_5_1) >= 0) {
             dpString = PARTITION_DP;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java
index f344938..890f8aa 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java
@@ -87,6 +87,7 @@ import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueueFactory;
 import com.gemstone.gemfire.cache.client.ClientCache;
 import com.gemstone.gemfire.cache.client.PoolFactory;
 import com.gemstone.gemfire.cache.execute.Function;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreCreation;
 import com.gemstone.gemfire.cache.partition.PartitionListener;
 import com.gemstone.gemfire.cache.query.IndexType;
 import com.gemstone.gemfire.cache.query.internal.index.IndexCreationData;
@@ -1019,7 +1020,161 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
 
     stack.push(attrs);
   }
+  /**
+   * When an <code>hdfs-store</code> element is first encountered, we
+   * create a {@link HDFSStoreCreation}, populate it accordingly, and
+   * push it on the stack.
+   * <pre>
+   * {@code
+   * <hdfs-store name="" home-dir="" namenode-url="" hdfs-client-config-file="">
+   * ...
+   * </hdfs-store>
+   * }
+   * </pre>
+   */
+  private void startHDFSStore(Attributes atts) {
+    // this is the only place where HDFSStoreCreation objects are created
+    HDFSStoreCreation attrs = new HDFSStoreCreation();
+    String name = atts.getValue(NAME);
+    if (name == null) {
+      throw new InternalGemFireException(
+          LocalizedStrings.CacheXmlParser_NULL_DiskStoreName.toLocalizedString());
+    } else {
+      attrs.setName(name);
+    }
 
+    String namenode = atts.getValue(HDFS_NAMENODE_URL);
+    if (namenode == null) {
+      throw new InternalGemFireException(
+          LocalizedStrings.CacheXmlParser_NULL_DiskStoreName.toLocalizedString());
+    } else {
+      attrs.setNameNodeURL(namenode);
+    }
+
+    String clientConfig = atts.getValue(HDFS_CLIENT_CONFIG_FILE);
+    if (clientConfig != null) {
+      attrs.setHDFSClientConfigFile(clientConfig);
+    }
+    
+    String folderPath = atts.getValue(HDFS_HOME_DIR);
+    if (folderPath != null) {
+      attrs.setHomeDir(folderPath);
+    }
+   
+    String readCacheSize = atts.getValue(HDFS_READ_CACHE_SIZE);
+    if (readCacheSize != null) {
+      try {
+        attrs.setBlockCacheSize(Float.valueOf(readCacheSize));
+      } catch (NumberFormatException e) {
+        throw new CacheXmlException(
+            LocalizedStrings.DistributedSystemConfigImpl_0_IS_NOT_A_VALID_INTEGER_1
+            .toLocalizedString(new Object[] { readCacheSize, HDFS_READ_CACHE_SIZE }),
+            e);
+      }
+    }
+    
+    Integer maxMemory = getIntValue(atts, HDFS_MAX_MEMORY);
+    if (maxMemory != null) {
+      attrs.setMaxMemory(maxMemory);
+    }
+    
+    Integer batchSize = getIntValue(atts, HDFS_BATCH_SIZE);
+    if (batchSize != null) {
+      attrs.setBatchSize(batchSize);
+    }
+    
+    Integer batchInterval = getIntValue(atts, HDFS_BATCH_INTERVAL);
+    if (batchInterval != null) {
+      attrs.setBatchInterval(batchInterval);
+    }
+    
+    Integer dispatcherThreads = getIntValue(atts, HDFS_DISPATCHER_THREADS);
+    if (dispatcherThreads != null) {
+      attrs.setDispatcherThreads(dispatcherThreads);
+    }
+    
+    Boolean bufferPersistent = getBoolean(atts, HDFS_BUFFER_PERSISTENT);
+    if (bufferPersistent != null) {
+      attrs.setBufferPersistent(bufferPersistent);
+    }
+    
+    Boolean synchronousDiskWrite = getBoolean(atts, HDFS_SYNCHRONOUS_DISK_WRITE);
+    if (synchronousDiskWrite != null) {
+      attrs.setSynchronousDiskWrite(synchronousDiskWrite);
+    }
+    
+    String diskstoreName = atts.getValue(HDFS_DISK_STORE);
+    if (diskstoreName != null) {
+      attrs.setDiskStoreName(diskstoreName);
+    }
+    
+    Integer purgeInterval = getInteger(atts, HDFS_PURGE_INTERVAL);
+    if (purgeInterval != null) {
+      attrs.setPurgeInterval(purgeInterval);
+    }
+    Boolean majorCompaction = getBoolean(atts, HDFS_MAJOR_COMPACTION);
+    if (majorCompaction != null) {
+      attrs.setMajorCompaction(Boolean.valueOf(majorCompaction));
+    }
+    
+    // configure major compaction interval
+    Integer majorCompactionInterval = getIntValue(atts, HDFS_MAJOR_COMPACTION_INTERVAL);
+    if (majorCompactionInterval != null) {
+      attrs.setMajorCompactionInterval(majorCompactionInterval);
+    }
+    
+    // configure compaction concurrency
+    Integer value = getIntValue(atts, HDFS_MAJOR_COMPACTION_THREADS);
+    if (value != null)
+      attrs.setMajorCompactionThreads(value);
+    
+    Boolean minorCompaction = getBoolean(atts, HDFS_MINOR_COMPACTION);
+    if (minorCompaction != null) {
+      attrs.setMinorCompaction(Boolean.valueOf(minorCompaction));
+    }
+    
+    // configure compaction concurrency
+    value = getIntValue(atts, HDFS_MINOR_COMPACTION_THREADS);
+    if (value != null)
+      attrs.setMinorCompactionThreads(value);
+    
+    String maxFileSize = atts.getValue(HDFS_MAX_WRITE_ONLY_FILE_SIZE);
+    if (maxFileSize != null) {
+      attrs.setWriteOnlyFileRolloverSize(parseInt(maxFileSize));
+    }
+    
+    String fileRolloverInterval = atts.getValue(HDFS_WRITE_ONLY_FILE_ROLLOVER_INTERVAL);
+    if (fileRolloverInterval != null) {
+      attrs.setWriteOnlyFileRolloverInterval(parseInt(fileRolloverInterval));
+    }
+    stack.push(name);
+    stack.push(attrs);
+  }
+  
+  /**
+   * After popping the current <code>HDFSStoreCreation</code> and its name off
+   * the stack, we register it with the <code>CacheCreation</code> that should be
+   * on top of the stack.
+   */
+  private void endHDFSStore() {
+    HDFSStoreCreation hsc = (HDFSStoreCreation) stack.pop();
+    String name = (String) stack.pop();
+    CacheCreation cache;
+    Object top = stack.peek();
+    if (top instanceof CacheCreation) {
+      cache = (CacheCreation) top;
+    }
+    else {
+      String s = "Did not expect a " + top.getClass().getName()
+          + " on top of the stack.";
+      Assert.assertTrue(false, s);
+      cache = null; // Dead code
+    }
+    if (name != null) {
+      cache.addHDFSStore(name, hsc);
+    }
+  }
+	
   private Integer getIntValue(Attributes atts, String param) {
     String maxInputFileSizeMB = atts.getValue(param);
     if (maxInputFileSizeMB != null) {
@@ -1114,6 +1269,12 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
       else if (dp.equals(PERSISTENT_PARTITION_DP)) {
         attrs.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
       }
+      else if (dp.equals(HDFS_PARTITION_DP)) {
+        attrs.setDataPolicy(DataPolicy.HDFS_PARTITION);
+      }
+      else if (dp.equals(HDFS_PERSISTENT_PARTITION_DP)) {
+        attrs.setDataPolicy(DataPolicy.HDFS_PERSISTENT_PARTITION);
+      }
       else {
         throw new InternalGemFireException(LocalizedStrings.CacheXmlParser_UNKNOWN_DATA_POLICY_0.toLocalizedString(dp));
       }
@@ -1234,7 +1395,16 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     if(offHeapStr != null) {
       attrs.setOffHeap(Boolean.valueOf(offHeapStr).booleanValue());
     }
+    String hdfsStoreName = atts.getValue(HDFS_STORE_NAME);
+    if (hdfsStoreName != null) {
+      attrs.setHDFSStoreName(hdfsStoreName);
+    }
+    String hdfsWriteOnly= atts.getValue(HDFS_WRITE_ONLY);
+    if (hdfsWriteOnly != null) {
+      attrs.setHDFSWriteOnly(Boolean.valueOf(hdfsWriteOnly).booleanValue());
+    }
 
+    
     stack.push(attrs);
   }
   
@@ -2836,6 +3006,9 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
     } else if(qName.equals(PDX_SERIALIZER)) {
       //do nothing
     }
+	else if (qName.equals(HDFS_STORE)) {
+        startHDFSStore(atts);
+    }
     else if (qName.equals(COMPRESSOR)) {
     }
     else {
@@ -3244,6 +3417,9 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
       else if (qName.equals(PDX_SERIALIZER)) {
         endPdxSerializer();
       }
+      else if (qName.equals(HDFS_STORE)) {
+          endHDFSStore();
+      }
       else if (qName.equals(COMPRESSOR)) {
         endCompressor();
       }
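
The start/end pair above follows the parser's usual stack discipline, and the ordering matters: startHDFSStore pushes the store name first and the HDFSStoreCreation second, so endHDFSStore has to pop them in the reverse order before registering the store with the enclosing CacheCreation. A condensed, hedged sketch of that net effect, with made-up attribute values and only calls visible in this patch:

// Condensed, hedged sketch of the stack discipline used by startHDFSStore()
// and endHDFSStore(); the store name, URL and directory are hypothetical.
import java.util.ArrayDeque;
import java.util.Deque;

import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreCreation;
import com.gemstone.gemfire.internal.cache.xmlcache.CacheCreation;

public class HdfsStoreParseSketch {
  public static void main(String[] args) {
    Deque<Object> stack = new ArrayDeque<Object>();
    stack.push(new CacheCreation());                  // what the parser has before the element

    // startHDFSStore(): the name is pushed first, then the creation object
    HDFSStoreCreation attrs = new HDFSStoreCreation();
    attrs.setName("demoHdfsStore");                   // name attribute is mandatory
    attrs.setNameNodeURL("hdfs://localhost:8020");    // namenode-url is mandatory
    attrs.setHomeDir("/gemfire/demo");                // home-dir is optional
    stack.push("demoHdfsStore");
    stack.push(attrs);

    // endHDFSStore(): pop in reverse order and register with the CacheCreation
    HDFSStoreCreation hsc = (HDFSStoreCreation) stack.pop();
    String name = (String) stack.pop();
    ((CacheCreation) stack.peek()).addHDFSStore(name, hsc);
  }
}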

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/RegionAttributesCreation.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/RegionAttributesCreation.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/RegionAttributesCreation.java
index 4dfe6ae..d0f5676 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/RegionAttributesCreation.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/RegionAttributesCreation.java
@@ -28,6 +28,7 @@ import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.CacheListener;
 import com.gemstone.gemfire.cache.CacheLoader;
 import com.gemstone.gemfire.cache.CacheWriter;
+import com.gemstone.gemfire.cache.CustomEvictionAttributes;
 import com.gemstone.gemfire.cache.CustomExpiry;
 import com.gemstone.gemfire.cache.DataPolicy;
 import com.gemstone.gemfire.cache.DiskStoreFactory;
@@ -122,6 +123,8 @@ public class RegionAttributesCreation extends UserSpecifiedRegionAttributes impl
   * @since prPersistPrint2 
   * */
   private String diskStoreName;
+  private String hdfsStoreName;
+  private boolean hdfsWriteOnly = false;
   private boolean isDiskSynchronous = AttributesFactory.DEFAULT_DISK_SYNCHRONOUS;
   
   private boolean cloningEnabled = false;
@@ -268,7 +271,8 @@ public class RegionAttributesCreation extends UserSpecifiedRegionAttributes impl
     this.poolName = attrs.getPoolName();
     this.multicastEnabled = attrs.getMulticastEnabled();
     this.cloningEnabled = attrs.getCloningEnabled();
-
+	this.hdfsStoreName = attrs.getHDFSStoreName();
+    
     this.compressor = attrs.getCompressor();
     this.offHeap = attrs.getOffHeap();
     if (attrs instanceof UserSpecifiedRegionAttributes) {
@@ -496,6 +500,10 @@ public class RegionAttributesCreation extends UserSpecifiedRegionAttributes impl
     if(this.cloningEnabled != other.getCloningEnabled()){
       throw new RuntimeException(LocalizedStrings.RegionAttributesCreation__CLONING_ENABLE_IS_NOT_THE_SAME_THIS_0_OTHER_1.toLocalizedString(new Object[] {Boolean.valueOf(this.cloningEnabled), Boolean.valueOf(other.getCloningEnabled())}));
     }
+ 	if (! equal(this.hdfsStoreName, other.getHDFSStoreName())) {
+      //TODO:HDFS write a new exception string
+      throw new RuntimeException(" HDFS Store name does not match");
+    }
     if(! equal(this.compressor, other.getCompressor())) {
       throw new RuntimeException("Compressors are not the same.");
     }
@@ -1440,7 +1448,25 @@ public class RegionAttributesCreation extends UserSpecifiedRegionAttributes impl
         setDiskSynchronous(parent.isDiskSynchronous());
       }
     }
-
+    if (!hasHDFSStoreName()) {
+      if (parentIsUserSpecified) {
+        if (parentWithHas.hasHDFSStoreName()) {
+          setHDFSStoreName(parent.getHDFSStoreName());
+        }
+      } else {
+        setHDFSStoreName(parent.getHDFSStoreName());
+      }
+    }
+    if (!hasHDFSWriteOnly()) {
+      if (parentIsUserSpecified) {
+        if (parentWithHas.hasHDFSWriteOnly()) {
+          setHDFSWriteOnly(parent.getHDFSWriteOnly());
+        }
+      } else {
+        setHDFSWriteOnly(parent.getHDFSWriteOnly());
+      }
+    }
+    
     if(!hasCompressor()) {
       if (parentIsUserSpecified) {
         if (parentWithHas.hasCompressor()) {
@@ -1528,6 +1554,15 @@ public class RegionAttributesCreation extends UserSpecifiedRegionAttributes impl
     return this.evictionAttributes;
   }
 
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public CustomEvictionAttributes getCustomEvictionAttributes() {
+    // TODO: HDFS: no support for configuring this from XML yet
+    return null;
+  }
+
   public void setPoolName(String poolName) {
     if ("".equals(poolName)) {
       poolName = null;
@@ -1620,4 +1655,20 @@ public class RegionAttributesCreation extends UserSpecifiedRegionAttributes impl
   public Set<String> getGatewaySenderIds() {
     return this.gatewaySenderIds;
   }
+  public String getHDFSStoreName() {
+    return this.hdfsStoreName;
+  }
+  public void setHDFSStoreName(String hdfsStoreName) {
+    //TODO:HDFS : throw an exception if a disk store is already configured
+    // and vice versa
+    this.hdfsStoreName = hdfsStoreName;
+    setHasHDFSStoreName(true);
+  }
+  public void setHDFSWriteOnly(boolean writeOnly) {
+    this.hdfsWriteOnly= writeOnly;
+    setHasHDFSWriteOnly(true);
+  }
+  public boolean getHDFSWriteOnly() {
+    return hdfsWriteOnly;
+  }
 }
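
With the new setters above, a region declared with hdfs-store-name and hdfs-write-only ends up on a RegionAttributesCreation roughly as in the hedged sketch below; the store name and flag value are hypothetical and the no-argument constructor is assumed.

// Hedged sketch of the net effect of
//   <region-attributes data-policy="hdfs-partition"
//                      hdfs-store-name="demoHdfsStore" hdfs-write-only="true">
import com.gemstone.gemfire.cache.DataPolicy;
import com.gemstone.gemfire.internal.cache.xmlcache.RegionAttributesCreation;

public class HdfsRegionAttributesSketch {
  public static RegionAttributesCreation build() {
    RegionAttributesCreation attrs = new RegionAttributesCreation();
    attrs.setDataPolicy(DataPolicy.HDFS_PARTITION);  // or HDFS_PERSISTENT_PARTITION
    attrs.setHDFSStoreName("demoHdfsStore");         // also flips the hasHDFSStoreName flag
    attrs.setHDFSWriteOnly(true);                    // also flips the hasHDFSWriteOnly flag
    return attrs;
  }
}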

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/i18n/LocalizedStrings.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/i18n/LocalizedStrings.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/i18n/LocalizedStrings.java
index ff960ca..2a939b4 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/i18n/LocalizedStrings.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/i18n/LocalizedStrings.java
@@ -1997,6 +1997,32 @@ public class LocalizedStrings extends ParentLocalizedStrings {
   public static final StringId SnappyCompressor_UNABLE_TO_LOAD_NATIVE_SNAPPY_LIBRARY = new StringId(5502, "Unable to load native Snappy library.");
   public static final StringId SnappyCompressor_UNABLE_TO_LOAD_NATIVE_SNAPPY_LIBRARY_MISSING_LIBRARY = new StringId(5503, "Unable to load native Snappy library from: {0}");
   
+  /** HOPLOG STRINGS, 5505 - 5600 **/
+  public static final StringId HOPLOG_REGION_CLOSE_FAILED = new StringId(5505, "IO error while trying to close region and release hdfs connection: {0}");
+  public static final StringId HOPLOG_HDFS_CLIENT_CONFIG_FILE_ABSENT = new StringId(5506, "HDFS client config file does not exist: {0}");
+  public static final StringId HOPLOG_IO_ERROR = new StringId(5507, "IO Exception while executing HDFS operation: {0}");
+  public static final StringId HOPLOG_UNABLE_TO_DELETE_FILE = new StringId(5508, "Unable to delete file: {0}");
+  public static final StringId HOPLOG_UNABLE_TO_DELETE_HDFS_DATA = new StringId(5509, "Unable to delete HDFS data while destroying region");
+  public static final StringId HOPLOG_CLOSE_FAILED = new StringId(5510, "IO error while trying to close hoplog.");
+  public static final StringId HOPLOG_FLUSH_FOR_BATCH_FAILED = new StringId(5511, "A batch of data could not be persisted on HDFS. It will be retried.");
+  public static final StringId HOPLOG_HDFS_STORE_NOT_FOUND = new StringId(5512, "HDFS store ''{0}'' does not exist.");
+  public static final StringId HOPLOG_TRYING_TO_CREATE_STANDALONE_SYSTEM = new StringId(5513, "The namenode url {0} is not valid. Please use the format hdfs://HOST:PORT");
+  public static final StringId HOPLOG_DOES_NOT_USE_HDFSSTORE = new StringId(5514, "{0} does not use HDFSSTORE");
+  public static final StringId HOPLOG_CONFIGURED_AS_WRITEONLY = new StringId(5515, "{0} is defined as WRITEONLY");
+  public static final StringId HOPLOG_MISSING_IN_BUCKET_FORCED_CLOSED = new StringId(5516, "A hoplog file, {0}, was not found in bucket lists. Closing it now, it may impact active reads.");
+  public static final StringId HOPLOG_MIN_IS_MORE_THAN_MAX = new StringId(5517, "Value of {0} is {1}. It should not be more than {2} value {3}");
+  public static final StringId HOPLOG_NOT_STARTED_YET = new StringId(5518, "HDFS store is not started yet. Gemfire is running without HDFS.");
+  public static final StringId HOPLOG_0_COLOCATE_WITH_REGION_1_NOT_INITIALIZED_YET = new StringId(5519, "Current region: {0} colocated with region {1} is not yet initialized.");
+  public static final StringId HOPLOG_SUSPEND_OF_0_FAILED_IN_1 = new StringId(5520, "Failed to suspend active {0} in {1}");
+  public static final StringId HOPLOG_CLEANED_UP_BY_JANITOR = new StringId(5521, "Hoplog is cleaned up by janitor task.");
+  public static final StringId HOPLOG_HDFS_UNREACHABLE = new StringId(5522, "HDFS at {0} is unreachable.");
+  public static final StringId HOPLOG_MAJOR_COMPACTION_SCHEDULED_FOR_BETTER_ESTIMATE = new StringId(5523, "A major compaction has been automatically scheduled for better accuracy of count_estimate() function");
+  public static final StringId HOPLOG_FAILED_TO_READ_HDFS_FILE = new StringId(5524, "Exception while reading file on HDFS: {0}");
+  public static final StringId HOPLOG_HDFS_COMPACTION_ERROR = new StringId(5525, "Error while compacting files of bucket {0}");
+  public static final StringId HOPLOG_HDFS_COMPACTION_OVERLOADED = new StringId(5526, "Too many pending tasks for {0}. Skipping compaction request for {1}");
+  public static final StringId HOPLOG_FLUSH_OPERATION_FAILED = new StringId(5527, "IO error while trying to flush buffer and create hoplog.");
+  public static final StringId HOPLOG_HOPLOG_REMOVE_FAILED = new StringId(5528, "IO error while trying to remove hoplog.");
+  /** HOPLOG STRINGS, 5505 - 5600 **/
 
   public static final StringId PartitionAttributesImpl_CANNOT_DETERMINE_LOCAL_MAX_MEMORY_FOR_PARTITION_ATTRIBUTE_SINCE_NO_CACHE_IS_AVAILABLE_FROM_WHICH_TO_FETCH_THE_OFF_HEAP_MEMORY_ALLOCATOR = new StringId(5600, "Cannot determine local max memory for partition attribute since no cache is available from which to fetch the off-heap memory allocator");
 
@@ -2070,6 +2096,10 @@ public class LocalizedStrings extends ParentLocalizedStrings {
   public static final StringId ParallelAsyncEventQueue_0_CAN_NOT_BE_USED_WITH_REPLICATED_REGION_1 = new StringId(5716,"Parallel Async Event Queue {0} can not be used with replicated region {1}");
   public static final StringId ParallelGatewaySender_0_CAN_NOT_BE_USED_WITH_REPLICATED_REGION_1 = new StringId(5717,"Parallel gateway sender {0} can not be used with replicated region {1}");
 
+  public static final StringId HDFSSTORE_IS_USED_IN_NONHDFS_REGION = new StringId(5808, "Only regions with HDFS_PARTITION or HDFS_PERSISTENT_PARTITION data policies can specify an HDFS Store");
+  public static final StringId EVICTORSERVICE_CAUGHT_EXCEPTION_0 = new StringId(5809, "Evictor Service caught the following exception : {0}");
+  public static final StringId HDFSSTORE_IS_USED_IN_REPLICATED_TABLE = new StringId(5810, "HDFS Store cannot be used for REPLICATED TABLE");
+  public static final StringId HDFS_USER_IS_SAME_AS_GF_USER = new StringId(5811, "Gemfire user is the same as HDFS user, may cause security risks: {0}");
   public static final StringId GF_KERBEROS_KEYTAB_FILE_ABSENT = new StringId(5812, "Gemfire kerberos keytab file is missing: {0}");
   public static final StringId GF_KERBEROS_NAMENODE_PRINCIPAL_UNDEF = new StringId(5813, "Namenode principal must be configured when using kerberos authentication");
   public static final StringId GF_KERBEROS_KEYTAB_UNDEF = new StringId(5814, "Gemfire kerberos keytab file is not configured");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/redis/RegionProvider.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/redis/RegionProvider.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/redis/RegionProvider.java
index 08de0c9..9ff3249 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/redis/RegionProvider.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/redis/RegionProvider.java
@@ -393,7 +393,7 @@ public class RegionProvider implements Closeable {
     r = cache.getRegion(key);
     if (r != null) return r;
     do {
-      Result result = cliCmds.createRegion(key, defaultRegionType, null, null, true, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null);
+      Result result = cliCmds.createRegion(key, defaultRegionType, null, null, true, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null);
       r = cache.getRegion(key);
       if (result.getStatus() == Status.ERROR && r == null) {
         String err = "";

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedRegionMXBean.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedRegionMXBean.java b/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedRegionMXBean.java
index f087c89..3003827 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedRegionMXBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedRegionMXBean.java
@@ -130,6 +130,7 @@ public interface DistributedRegionMXBean {
   /**
    * Returns the number of entries in the Region.
    * 
+   * For HDFS regions it will be the count of in-memory data only.
    */
   public long getSystemRegionEntryCount();
 
@@ -304,4 +305,14 @@ public interface DistributedRegionMXBean {
    * Returns the number of members whose entry count is 0.
    */
   public int getEmptyNodes();
+  
+  
+  /**
+   * An estimated entry count for an HDFS read-write region. This may not be
+   * accurate, but it acts as an indicative value.
+   * 
+   * For other regions it will be -1 (not available).
+   */
+  public long getEstimatedSizeForHDFSRegion();
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedSystemMXBean.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedSystemMXBean.java b/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedSystemMXBean.java
index 88c4058..a6f65d4 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedSystemMXBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedSystemMXBean.java
@@ -130,6 +130,14 @@ public interface DistributedSystemMXBean {
    */
   public Map<String, String[]> listMemberDiskstore();
 
+  
+  /**
+   *  @return A map of all {@link DistributedMember}s and their HDFSStores.
+   */
+  
+  public Map<String, String[]> listMemberHDFSStore();
+  
+  
   /**
    * Returns a list of IDs for all gateway senders.
    */

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/MemberMXBean.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/MemberMXBean.java b/geode-core/src/main/java/com/gemstone/gemfire/management/MemberMXBean.java
index 4b849e0..ed27569 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/MemberMXBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/MemberMXBean.java
@@ -227,6 +227,13 @@ public interface MemberMXBean {
   public String[] listDiskStores(boolean includeRegionOwned);
 
   /**
+   * 
+   * @return the names of the HDFSStores present in the Cache
+   */
+  
+  public String[] getHDFSStores();
+
+  /**
    * Returns the GemFire specific properties for this member.
    */
   public GemFireProperties listGemFireProperties();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/RegionMXBean.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/RegionMXBean.java b/geode-core/src/main/java/com/gemstone/gemfire/management/RegionMXBean.java
index a913105..8c11d00 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/RegionMXBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/RegionMXBean.java
@@ -127,6 +127,8 @@ public interface RegionMXBean {
    * partitioned regions it will be the entry count for the primary buckets
    * hosted within this member.
    *
+   * For HDFS regions it will be the count of in-memory data only.
+   * 
    */
   public long getEntryCount();
 
@@ -348,4 +350,12 @@ public interface RegionMXBean {
    */
   public int getLocalMaxMemory();
   
+  /**
+   * Estimated entry count for HDFS read-write regions. This may not be accurate, but
+   * it acts as an indicative value. All HDFS read-write regions are PartitionedRegions,
+   * so the estimated value covers the primary buckets hosted within the member.
+   * 
+   * For other regions it will be -1 (not available).
+   */
+  public long getEstimatedSizeForHDFSRegion();
 }
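
The HDFS additions to MemberMXBean, RegionMXBean and DistributedSystemMXBean above surface through the regular management API. A hedged sketch of reading them from inside a member follows; the region path is hypothetical, and the ManagementService accessor names (getMemberMXBean, getLocalRegionMBean, getDistributedSystemMXBean) are assumed from the existing API rather than from this patch.

// Hedged sketch; "/demoRegion" is a hypothetical region path.
import java.util.Arrays;
import java.util.Map;

import com.gemstone.gemfire.cache.Cache;
import com.gemstone.gemfire.management.DistributedSystemMXBean;
import com.gemstone.gemfire.management.ManagementService;
import com.gemstone.gemfire.management.MemberMXBean;
import com.gemstone.gemfire.management.RegionMXBean;

public class HdfsMBeanProbe {
  public static void dump(Cache cache) {
    ManagementService service = ManagementService.getManagementService(cache);

    MemberMXBean member = service.getMemberMXBean();
    String[] stores = member.getHDFSStores();               // may be null if none are defined
    System.out.println("HDFS stores: " + Arrays.toString(stores));

    RegionMXBean region = service.getLocalRegionMBean("/demoRegion");
    if (region != null) {
      System.out.println("estimate: " + region.getEstimatedSizeForHDFSRegion()); // -1 if not HDFS read-write
    }

    DistributedSystemMXBean ds = service.getDistributedSystemMXBean();  // non-null only on a manager
    if (ds != null) {
      Map<String, String[]> byMember = ds.listMemberHDFSStore();
      System.out.println("stores by member: " + byMember.keySet());
    }
  }
}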

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/cli/ConverterHint.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/cli/ConverterHint.java b/geode-core/src/main/java/com/gemstone/gemfire/management/cli/ConverterHint.java
index a1e70e7..69e079d 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/cli/ConverterHint.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/cli/ConverterHint.java
@@ -48,4 +48,5 @@ public interface ConverterHint {
   public static final String LOG_LEVEL             = "converter.hint.log.levels";
 
   public static final String STRING_DISABLER       = "converter.hint.disable-string-converter";
+  public static final String HDFSSTORE_ALL         = "converter.hint.cluster.hdfsstore";
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedRegionBridge.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedRegionBridge.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedRegionBridge.java
index 48b899b..5fbbc61 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedRegionBridge.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedRegionBridge.java
@@ -674,4 +674,9 @@ public class DistributedRegionBridge {
       return false;
     }
   }
+  
+  public long getEstimatedSizeForHDFSRegion() {
+    return monitor.getEstimatedSizeForHDFSRegion();
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedRegionMBean.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedRegionMBean.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedRegionMBean.java
index 4580e7f..549acc7 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedRegionMBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedRegionMBean.java
@@ -321,4 +321,9 @@ public class DistributedRegionMBean implements DistributedRegionMXBean {
     return bridge.getEntrySize();
   }
 
+  @Override
+  public long getEstimatedSizeForHDFSRegion() {
+    return bridge.getEstimatedSizeForHDFSRegion();
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedSystemBridge.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedSystemBridge.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedSystemBridge.java
index 632415a..bcacc41 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedSystemBridge.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedSystemBridge.java
@@ -821,6 +821,25 @@ public class DistributedSystemBridge {
     return Collections.emptyMap();
   }
   
+  
+  /**
+   *  @return A map of all {@link DistributedMember}s and their HDFSStores.
+   */  
+  
+  public Map<String, String[]> getMemberHDFSStoreMap() {
+    Iterator<MemberMXBean> memberIterator = mapOfMembers.values().iterator();    
+    if (memberIterator != null) {
+      Map<String, String[]> mapOfHdfs = new HashMap<String, String[]>();
+      while (memberIterator.hasNext()) {
+        MemberMXBean bean = memberIterator.next();
+        mapOfHdfs.put(bean.getMember(), bean.getHDFSStores());
+      }
+
+      return mapOfHdfs;
+    }
+    return Collections.emptyMap();
+  }
+
   /**
    *
    * @param member

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedSystemMBean.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedSystemMBean.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedSystemMBean.java
index 3458bf5..bd92f9f 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedSystemMBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/DistributedSystemMBean.java
@@ -450,4 +450,11 @@ public class DistributedSystemMBean extends NotificationBroadcasterSupport
   public void setQueryCollectionsDepth(int queryCollectionsDepth) {
     bridge.setQueryCollectionsDepth(queryCollectionsDepth);;
   }
+
+  @Override
+  public Map<String, String[]> listMemberHDFSStore() {
+    return bridge.getMemberHDFSStoreMap();
+  }
+
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/HDFSRegionBridge.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/HDFSRegionBridge.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/HDFSRegionBridge.java
new file mode 100644
index 0000000..29bc246
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/HDFSRegionBridge.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.beans;
+
+import java.util.Map;
+import java.util.Set;
+
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector;
+import com.gemstone.gemfire.internal.cache.BucketRegion;
+import com.gemstone.gemfire.internal.cache.PartitionedRegion;
+import com.gemstone.gemfire.internal.cache.PartitionedRegion.SizeEntry;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
+import com.gemstone.gemfire.management.internal.ManagementConstants;
+import com.gemstone.gemfire.management.internal.beans.stats.MBeanStatsMonitor;
+import com.gemstone.gemfire.management.internal.beans.stats.StatType;
+import com.gemstone.gemfire.management.internal.beans.stats.StatsRate;
+
+/**
+ * 
+ * 
+ * MBean bridge for an HDFS region, which is a type of partitioned region.
+ */
+public class HDFSRegionBridge<K, V> extends PartitionedRegionBridge<K, V> {
+
+  private SortedOplogStatistics soplogStats;
+
+  private MBeanStatsMonitor hdfsRegionMonitor;
+
+  private static final String WRITTEN_BYTES = "writeBytes";
+
+  private static final String READ_BYTES = "readBytes";
+
+  private static final String SCANNED_BYTES = "scanBytes";
+
+  public static final String HDFS_REGION_MONITOR = "HDFSRegionMonitor";
+
+  private StatsRate diskWritesRate;
+
+  private StatsRate diskReadsRate;
+  
+  private PartitionedRegion parRegion;
+
+  public HDFSRegionBridge(Region<K, V> region) {
+    super(region);
+
+    HDFSRegionDirector director = HDFSRegionDirector.getInstance();
+
+    String regionFullPath = region.getFullPath();
+    this.soplogStats = director.getHdfsRegionStats(regionFullPath);
+    this.hdfsRegionMonitor = new MBeanStatsMonitor(HDFS_REGION_MONITOR + "_" + regionFullPath);
+    hdfsRegionMonitor.addStatisticsToMonitor(soplogStats.getStats());
+    this.parRegion = (PartitionedRegion)region;
+    configureHDFSRegionMetrics();
+  }
+
+  private void configureHDFSRegionMetrics() {
+
+    diskWritesRate = new StatsRate(WRITTEN_BYTES, StatType.INT_TYPE, hdfsRegionMonitor);
+
+    String[] readsRates = new String[] { READ_BYTES, SCANNED_BYTES };
+
+    diskReadsRate = new StatsRate(readsRates, StatType.INT_TYPE, hdfsRegionMonitor);
+  }
+
+  
+  private long estimatedEntryCount = 0;
+  
+
+  /**
+   * skipCount is initialized to 10 so that the size of the HDFS region is
+   * computed on the very first call.
+   */
+  private int skipCount = 10;
+
+  /**
+   * 
+   * An estimated entry count for an HDFS region. This may not be accurate but acts
+   * as an indicative value.
+   * 
+   * 
+   * Even estimating the size requires iterating over all BucketRegions and calling
+   * BucketRegion.size(), which is expensive compared to reading a statistics value
+   * directly. Hence the count is recomputed only on every tenth call.
+   * 
+   */
+  public long getEstimatedSizeForHDFSRegion() {
+    if(parRegion.isHDFSReadWriteRegion()){
+      if(skipCount % 10 == 0) {
+        computeEntryCount();
+        skipCount = 1;
+      } else {
+        skipCount++;
+      }
+      return estimatedEntryCount;
+    }else{
+      return ManagementConstants.NOT_AVAILABLE_LONG;
+    }
+    
+  }
+  
+  private void computeEntryCount() {
+
+    if (parRegion.isDataStore()) { //if not a DataStore do nothing and keep the entryCount as 0;
+      int numLocalEntries = 0;
+      Map<Integer, SizeEntry> localPrimaryBucketRegions = parRegion.getDataStore()
+          .getSizeEstimateForLocalPrimaryBuckets();
+      if (localPrimaryBucketRegions != null && localPrimaryBucketRegions.size() > 0) {
+        for (Map.Entry<Integer, SizeEntry> me : localPrimaryBucketRegions.entrySet()) {
+          numLocalEntries += me.getValue().getSize();
+
+        }
+      }
+      this.estimatedEntryCount = numLocalEntries;
+    }
+  }
+  
+  @Override
+  public long getEntryCount() {
+    if (parRegion.isDataStore()) {
+      int numLocalEntries = 0;
+      Set<BucketRegion> localPrimaryBucketRegions = parRegion.getDataStore().getAllLocalPrimaryBucketRegions();
+      if (localPrimaryBucketRegions != null && localPrimaryBucketRegions.size() > 0) {
+        for (BucketRegion br : localPrimaryBucketRegions) {
+          // TODO soplog, fix this for griddb regions
+          numLocalEntries += br.getRegionMap().sizeInVM() - br.getTombstoneCount();
+
+        }
+      }
+      return numLocalEntries;
+    } else {
+      return  ManagementConstants.ZERO;
+    }
+  }
+
+
+  @Override
+  public long getEntrySize() {
+    return ManagementConstants.NOT_AVAILABLE_LONG;
+  }
+
+  @Override
+  public long getDiskUsage() {
+    if (soplogStats != null) {
+      return soplogStats.getStoreUsageBytes();
+    }
+    return ManagementConstants.NOT_AVAILABLE_LONG;
+  }
+
+  @Override
+  public float getDiskReadsRate() {
+    return diskReadsRate.getRate();
+  }
+
+  @Override
+  public float getDiskWritesRate() {
+    return diskWritesRate.getRate();
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MemberMBean.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MemberMBean.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MemberMBean.java
index b82b94d..21d7140 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MemberMBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MemberMBean.java
@@ -455,6 +455,11 @@ public class MemberMBean extends NotificationBroadcasterSupport implements
   }
 
   @Override
+  public String[] getHDFSStores() {
+    return bridge.getHDFSStores();
+  }
+  
+  @Override
   public long getGetsAvgLatency() {
     return bridge.getGetsAvgLatency();
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MemberMBeanBridge.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MemberMBeanBridge.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MemberMBeanBridge.java
index 638ba06..1425572 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MemberMBeanBridge.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MemberMBeanBridge.java
@@ -49,6 +49,7 @@ import com.gemstone.gemfire.cache.CacheClosedException;
 import com.gemstone.gemfire.cache.DiskStore;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.execute.FunctionService;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
 import com.gemstone.gemfire.cache.persistence.PersistentID;
 import com.gemstone.gemfire.cache.wan.GatewayReceiver;
 import com.gemstone.gemfire.cache.wan.GatewaySender;
@@ -1009,6 +1010,32 @@ public class MemberMBeanBridge {
     return listDiskStores(true);
   }
 
+  
+
+  
+  /**
+   * @return the names of all HDFSStores defined at the cache level
+   */
+  
+  public String[] getHDFSStores() {
+    GemFireCacheImpl cacheImpl = (GemFireCacheImpl) cache;
+    String[] retStr = null;
+    Collection<HDFSStoreImpl> hdfsStoreCollection = null;
+    hdfsStoreCollection = cacheImpl.getHDFSStores();
+      
+    if (hdfsStoreCollection != null && hdfsStoreCollection.size() > 0) {
+      retStr = new String[hdfsStoreCollection.size()];
+      Iterator<HDFSStoreImpl> it = hdfsStoreCollection.iterator();
+      int i = 0;
+      while (it.hasNext()) {
+        retStr[i] = it.next().getName();
+        i++;
+
+      }
+    }
+    return retStr;
+  }
+      
   /**
    * 
    * @return log of the member.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/PartitionedRegionBridge.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/PartitionedRegionBridge.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/PartitionedRegionBridge.java
index 7450746..3a8440a 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/PartitionedRegionBridge.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/PartitionedRegionBridge.java
@@ -76,7 +76,14 @@ public class PartitionedRegionBridge<K, V>  extends RegionMBeanBridge<K, V> {
   
   
   public static <K, V> PartitionedRegionBridge<K, V> getInstance(Region<K, V> region) {
-    return new PartitionedRegionBridge<K, V> (region);
+
+    if (region.getAttributes().getDataPolicy().withHDFS()) {
+      PartitionedRegionBridge<K, V> bridge = new HDFSRegionBridge<K, V>(region);
+      return bridge;
+    } else {
+      return new PartitionedRegionBridge<K, V> (region);
+    }
+
   }
   
   
@@ -302,4 +309,8 @@ public class PartitionedRegionBridge<K, V>  extends RegionMBeanBridge<K, V> {
   public int getLocalMaxMemory() {
     return partitionAttributesData.getLocalMaxMemory();
   }
+
+  public long getEstimatedSizeForHDFSRegion() {
+    return -1;
+  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/RegionMBean.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/RegionMBean.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/RegionMBean.java
index 86fe73e..1c7dcf7 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/RegionMBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/RegionMBean.java
@@ -314,4 +314,9 @@ public class RegionMBean<K, V> extends NotificationBroadcasterSupport implements
     return bridge.getLocalMaxMemory(); 
   }
 
+  @Override
+  public long getEstimatedSizeForHDFSRegion() {
+    return bridge.getEstimatedSizeForHDFSRegion();
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/RegionMBeanBridge.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/RegionMBeanBridge.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/RegionMBeanBridge.java
index 66f61e2..cd3cb90 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/RegionMBeanBridge.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/RegionMBeanBridge.java
@@ -590,4 +590,9 @@ public class RegionMBeanBridge<K, V> {
   public int getLocalMaxMemory() {
     return -1;
   }
+
+  
+  public long getEstimatedSizeForHDFSRegion() {
+    return -1;
+  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/stats/RegionClusterStatsMonitor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/stats/RegionClusterStatsMonitor.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/stats/RegionClusterStatsMonitor.java
index 7a4d9b4..c855171 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/stats/RegionClusterStatsMonitor.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/beans/stats/RegionClusterStatsMonitor.java
@@ -111,6 +111,8 @@ public class RegionClusterStatsMonitor {
 
   private static final String PERSISTENT_ENABLED = "PersistentEnabled";
   
+  private static final String ESTIMATED_SIZE_FOR_HDFS_REGION = "EstimatedSizeForHDFSRegion";
+
   private volatile long lastAccessedTime = 0;
 
   private volatile long lastModifiedTime = 0;
@@ -190,6 +192,7 @@ public class RegionClusterStatsMonitor {
     typeMap.put(AVERAGE_READS, Float.TYPE);
     typeMap.put(AVERAGE_WRITES, Float.TYPE);
     typeMap.put(ENTRY_SIZE, Long.TYPE);
+    typeMap.put(ESTIMATED_SIZE_FOR_HDFS_REGION, Long.TYPE);
 
   }
 
@@ -333,6 +336,10 @@ public class RegionClusterStatsMonitor {
   public long getTotalEntriesOnlyOnDisk() {
     return aggregator.getLongValue(TOTAL_ENTRIES_ONLY_ON_DISK);
   }
+  
+  public long getEstimatedSizeForHDFSRegion() {
+    return aggregator.getLongValue(ESTIMATED_SIZE_FOR_HDFS_REGION);
+  }
 
   public int getAvgBucketSize() {
     int bucketNum = getBucketCount();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
index ad006b7..cb893bd 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
@@ -210,6 +210,14 @@ public class CreateAlterDestroyRegionCommands extends AbstractCommandsSupport {
                   help = CliStrings.CREATE_REGION__GATEWAYSENDERID__HELP)
       @CliMetaData (valueSeparator = ",") 
       String[] gatewaySenderIds,
+      @CliOption (key = CliStrings.CREATE_REGION__HDFSSTORE_NAME,
+                  help = CliStrings.CREATE_REGION__HDFSSTORE_NAME__HELP ,
+                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE)
+      String hdfsStoreName,
+      @CliOption (key = CliStrings.CREATE_REGION__HDFSSTORE_WRITEONLY,      
+                  help = CliStrings.CREATE_REGION__HDFSSTORE_WRITEONLY__HELP,
+                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE)
+      Boolean hdfsWriteOnly,      
       @CliOption (key = CliStrings.CREATE_REGION__KEYCONSTRAINT,
                   help = CliStrings.CREATE_REGION__KEYCONSTRAINT__HELP)
       String keyConstraint,
@@ -319,7 +327,7 @@ public class CreateAlterDestroyRegionCommands extends AbstractCommandsSupport {
             prColocatedWith, prLocalMaxMemory, prRecoveryDelay,
             prRedundantCopies, prStartupRecoveryDelay,
             prTotalMaxMemory, prTotalNumBuckets,
-            offHeap, mcastEnabled, regionAttributes);
+            offHeap, mcastEnabled, hdfsStoreName , hdfsWriteOnly,  regionAttributes);
         
 
         if (regionAttributes.getPartitionAttributes() == null && regionFunctionArgs.hasPartitionAttributes()) {
@@ -339,7 +347,7 @@ public class CreateAlterDestroyRegionCommands extends AbstractCommandsSupport {
           concurrencyChecksEnabled, cloningEnabled, concurrencyLevel, 
           prColocatedWith, prLocalMaxMemory, prRecoveryDelay,
           prRedundantCopies, prStartupRecoveryDelay,
-          prTotalMaxMemory, prTotalNumBuckets, null,compressor, offHeap , mcastEnabled);
+          prTotalMaxMemory, prTotalNumBuckets, null,compressor, offHeap , mcastEnabled, hdfsStoreName , hdfsWriteOnly);
         
         if (!regionShortcut.name().startsWith("PARTITION") && regionFunctionArgs.hasPartitionAttributes()) {
           throw new IllegalArgumentException(

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommands.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommands.java
new file mode 100644
index 0000000..6e573f1
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommands.java
@@ -0,0 +1,695 @@
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Set;
+
+import org.springframework.shell.core.annotation.CliAvailabilityIndicator;
+import org.springframework.shell.core.annotation.CliCommand;
+import org.springframework.shell.core.annotation.CliOption;
+
+import com.gemstone.gemfire.SystemFailure;
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.execute.Execution;
+import com.gemstone.gemfire.cache.execute.FunctionInvocationTargetException;
+import com.gemstone.gemfire.cache.execute.ResultCollector;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.internal.cache.execute.AbstractExecution;
+import com.gemstone.gemfire.internal.lang.ClassUtils;
+import com.gemstone.gemfire.management.cli.CliMetaData;
+import com.gemstone.gemfire.management.cli.ConverterHint;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.cli.Result.Status;
+import com.gemstone.gemfire.management.internal.cli.CliUtil;
+import com.gemstone.gemfire.management.internal.cli.functions.AlterHDFSStoreFunction;
+import com.gemstone.gemfire.management.internal.cli.functions.AlterHDFSStoreFunction.AlterHDFSStoreAttributes;
+import com.gemstone.gemfire.management.internal.cli.functions.CliFunctionResult;
+import com.gemstone.gemfire.management.internal.cli.functions.CreateHDFSStoreFunction;
+import com.gemstone.gemfire.management.internal.cli.functions.DescribeHDFSStoreFunction;
+import com.gemstone.gemfire.management.internal.cli.functions.DestroyHDFSStoreFunction;
+import com.gemstone.gemfire.management.internal.cli.functions.ListHDFSStoresFunction;
+import com.gemstone.gemfire.management.internal.cli.functions.ListHDFSStoresFunction.HdfsStoreDetails;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResultException;
+import com.gemstone.gemfire.management.internal.cli.result.CompositeResultData;
+import com.gemstone.gemfire.management.internal.cli.result.ResultBuilder;
+import com.gemstone.gemfire.management.internal.cli.result.ResultDataException;
+import com.gemstone.gemfire.management.internal.cli.result.TabularResultData;
+import com.gemstone.gemfire.management.internal.cli.util.HDFSStoreNotFoundException;
+import com.gemstone.gemfire.management.internal.cli.util.MemberNotFoundException;
+import com.gemstone.gemfire.management.internal.configuration.SharedConfigurationWriter;
+import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
+
+/**
+ * The HDFSStoreCommands class encapsulates all GemFire HDFS store commands in Gfsh.
+ *
+ * @author Namrata Thanvi
+ * @see com.gemstone.gemfire.management.internal.cli.commands.AbstractCommandsSupport
+ */
+public class HDFSStoreCommands extends AbstractCommandsSupport {
+  @CliCommand (value = CliStrings.CREATE_HDFS_STORE, help = CliStrings.CREATE_HDFS_STORE__HELP)
+  @CliMetaData (relatedTopic = CliStrings.TOPIC_GEMFIRE_HDFSSTORE, writesToSharedConfiguration = true)
+  public Result createHdfsStore(      
+      @CliOption (key = CliStrings.CREATE_HDFS_STORE__NAME,                  
+                  mandatory = true,
+                  optionContext = ConverterHint.HDFSSTORE_ALL, 
+                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+                  help = CliStrings.CREATE_HDFS_STORE__NAME__HELP)
+      String hdfsUniqueName,
+      @CliOption (key = CliStrings.CREATE_HDFS_STORE__NAMENODE,
+                  mandatory = false,
+                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+                  help = CliStrings.CREATE_HDFS_STORE__NAMENODE__HELP)
+      String namenode, 
+      @CliOption (key = CliStrings.CREATE_HDFS_STORE__HOMEDIR,
+                  optionContext = ConverterHint.DIR_PATHSTRING,
+                  mandatory = false,
+                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+                  help = CliStrings.CREATE_HDFS_STORE__HOMEDIR__HELP)
+      String homeDir,
+      @CliOption (key = CliStrings.CREATE_HDFS_STORE__BATCHSIZE,
+                  mandatory = false,
+                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+                  help = CliStrings.CREATE_HDFS_STORE__BATCHSIZE__HELP)
+      Integer batchSize,
+      @CliOption (key = CliStrings.CREATE_HDFS_STORE__BATCHINTERVAL,
+                  mandatory = false,
+                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+                  help = CliStrings.CREATE_HDFS_STORE__BATCHINTERVAL__HELP)
+      Integer batchInterval,
+      @CliOption (key = CliStrings.CREATE_HDFS_STORE__READCACHESIZE,
+                  mandatory = false,
+                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+                  help = CliStrings.CREATE_HDFS_STORE__READCACHESIZE__HELP)
+      Float readCacheSize,
+      @CliOption (key = CliStrings.CREATE_HDFS_STORE__DISPATCHERTHREADS,
+          mandatory = false,
+          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+          help = CliStrings.CREATE_HDFS_STORE__DISPATCHERTHREADS__HELP)
+      Integer dispatcherThreads,
+      @CliOption (key = CliStrings.CREATE_HDFS_STORE__MAXMEMORY,
+                  mandatory = false,
+                  unspecifiedDefaultValue =CliMetaData.ANNOTATION_NULL_VALUE,
+                  help = CliStrings.CREATE_HDFS_STORE__MAXMEMORY__HELP)
+      Integer maxMemory,
+      @CliOption (key = CliStrings.CREATE_HDFS_STORE__BUFFERPERSISTENT,
+                  mandatory = false,
+                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+                  help = CliStrings.CREATE_HDFS_STORE__BUFFERPERSISTENT__HELP)
+      Boolean bufferPersistent,
+      @CliOption (key = CliStrings.CREATE_HDFS_STORE__SYNCDISKWRITE,
+                  mandatory = false,
+                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+                  help = CliStrings.CREATE_HDFS_STORE__SYNCDISKWRITE__HELP)
+      Boolean syncDiskWrite,
+      @CliOption (key = CliStrings.CREATE_HDFS_STORE__DISKSTORENAME,
+                  mandatory = false,
+                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+                  help = CliStrings.CREATE_HDFS_STORE__DISKSTORENAME__HELP)
+      String diskStoreName,
+      @CliOption (key = CliStrings.CREATE_HDFS_STORE__MINORCOMPACT,
+                  mandatory = false,
+                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+                  help = CliStrings.CREATE_HDFS_STORE__MINORCOMPACT__HELP)
+      Boolean minorCompact,            
+      @CliOption (key = CliStrings.CREATE_HDFS_STORE__MINORCOMPACTIONTHREADS,
+                  mandatory = false,
+                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+                  help = CliStrings.CREATE_HDFS_STORE__MINORCOMPACTIONTHREADS__HELP)
+      Integer minorCompactionThreads,
+      @CliOption (key = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACT,
+                  mandatory = false,
+                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+                  help = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACT__HELP)
+      Boolean majorCompact,   
+      @CliOption (key = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTINTERVAL,
+                  mandatory = false,
+                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+                  help = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTINTERVAL__HELP)
+      Integer majorCompactionInterval, 
+      @CliOption (key = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTIONTHREADS,
+                  mandatory = false,
+                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+                  help = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTIONTHREADS__HELP)
+      Integer majorCompactionThreads,  
+      @CliOption (key = CliStrings.CREATE_HDFS_STORE__PURGEINTERVAL,
+                  mandatory = false,
+                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+                  help = CliStrings.CREATE_HDFS_STORE__PURGEINTERVAL__HELP)
+      Integer purgeInterval,  
+      @CliOption (key = CliStrings.CREATE_HDFS_STORE__WRITEONLYFILESIZE,
+                  mandatory = false,
+                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+                  help = CliStrings.CREATE_HDFS_STORE__WRITEONLYFILESIZE__HELP)
+      Integer maxWriteonlyFileSize,  
+      @CliOption (key = CliStrings.CREATE_HDFS_STORE__FILEROLLOVERINTERVAL,
+                  mandatory = false,
+                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+                  help = CliStrings.CREATE_HDFS_STORE__FILEROLLOVERINTERVAL__HELP)
+      Integer fileRolloverInterval,  
+      @CliOption (key = CliStrings.CREATE_HDFS_STORE__CLIENTCONFIGFILE,
+                  optionContext = ConverterHint.FILE_PATHSTRING,
+                  mandatory = false,
+                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+                  help = CliStrings.CREATE_HDFS_STORE__CLIENTCONFIGFILE__HELP)      
+      String clientConfigFile,
+      @CliOption(key=CliStrings.CREATE_HDFS_STORE__GROUP,
+                 help=CliStrings.CREATE_HDFS_STORE__GROUP__HELP,
+                 optionContext=ConverterHint.MEMBERGROUP)
+      @CliMetaData (valueSeparator = ",")
+       String[] groups ) {
+    try {
+      
+      return getCreatedHdfsStore(groups, hdfsUniqueName, namenode, homeDir, clientConfigFile, fileRolloverInterval,
+          maxWriteonlyFileSize, minorCompact, majorCompact, batchSize, batchInterval, diskStoreName, bufferPersistent,
+          dispatcherThreads, syncDiskWrite, readCacheSize, majorCompactionInterval, majorCompactionThreads,
+          minorCompactionThreads, purgeInterval, maxMemory);
+      
+    } catch (VirtualMachineError e) {
+      SystemFailure.initiateFailure(e);
+      throw e;
+
+    } catch (Throwable th) {
+      String formattedErrString = CliStrings.format(CliStrings.CREATE_HDFS_STORE__ERROR_WHILE_CREATING_REASON_0,
+          new Object[] { th.getMessage() });
+      SystemFailure.checkFailure();
+      return ResultBuilder.createGemFireErrorResult(formattedErrString);
+    }
+  }
+
+  public Result getCreatedHdfsStore(String[] groups, String hdfsUniqueName, String namenode, String homeDir,
+      String clientConfigFile, Integer fileRolloverInterval, Integer maxWriteonlyFileSize, Boolean minorCompact,
+      Boolean majorCompact, Integer batchSize, Integer batchInterval, String diskStoreName, Boolean bufferPersistent,
+      Integer dispatcherThreads, Boolean syncDiskWrite, Float readCacheSize, Integer majorCompactionInterval,
+      Integer majorCompactionThreads, Integer minorCompactionThreads, Integer purgeInterval, Integer maxMemory) {
+
+    XmlEntity xmlEntity = null;
+
+    Set<DistributedMember> targetMembers = null;
+
+    try {
+      targetMembers = getGroupMembers(groups);
+    } catch (CommandResultException cre) {
+      return cre.getResult();
+    }
+
+    HDFSStoreConfigHolder configHolder = new HDFSStoreConfigHolder();
+    configHolder.setName(hdfsUniqueName);
+    if (readCacheSize != null)
+      configHolder.setBlockCacheSize(readCacheSize);
+
+    if (fileRolloverInterval != null)
+      configHolder.setWriteOnlyFileRolloverInterval(fileRolloverInterval);
+    if (clientConfigFile != null)
+      configHolder.setHDFSClientConfigFile(clientConfigFile);
+    if (homeDir != null)
+      configHolder.setHomeDir(homeDir);
+    if (maxWriteonlyFileSize != null)
+      configHolder.setWriteOnlyFileRolloverSize(maxWriteonlyFileSize);
+    if (namenode != null)
+      configHolder.setNameNodeURL(namenode);
+
+    if (minorCompact != null)
+      configHolder.setMinorCompaction(minorCompact);
+    if (majorCompact != null)
+      configHolder.setMajorCompaction(majorCompact);
+    if (majorCompactionInterval != null)
+      configHolder.setMajorCompactionInterval(majorCompactionInterval);
+    if (majorCompactionThreads != null)
+      configHolder.setMajorCompactionThreads(majorCompactionThreads);
+    if (minorCompactionThreads != null)
+      configHolder.setMinorCompactionThreads(minorCompactionThreads);
+    if (purgeInterval != null)
+      configHolder.setPurgeInterval(purgeInterval);
+
+    if (batchSize != null)
+      configHolder.setBatchSize(batchSize);
+    if (batchInterval != null)
+      configHolder.setBatchInterval(batchInterval);
+    if (diskStoreName != null)
+      configHolder.setDiskStoreName(diskStoreName);
+    if (syncDiskWrite != null)
+      configHolder.setSynchronousDiskWrite(syncDiskWrite);
+    if (dispatcherThreads != null)
+      configHolder.setDispatcherThreads(dispatcherThreads);
+    if (maxMemory != null)
+      configHolder.setMaxMemory(maxMemory);
+    if (bufferPersistent != null)
+      configHolder.setBufferPersistent(bufferPersistent);
+
+    ResultCollector<?, ?> resultCollector = getMembersFunctionExecutor(targetMembers)
+    .withArgs(configHolder).execute(new CreateHDFSStoreFunction());
+    
+    List<CliFunctionResult> hdfsStoreCreateResults = CliFunctionResult.cleanResults((List<?>)resultCollector
+        .getResult());
+
+    TabularResultData tabularResultData = ResultBuilder.createTabularResultData();
+
+    Boolean accumulatedData = false;
+
+    for (CliFunctionResult hdfsStoreCreateResult : hdfsStoreCreateResults) {
+      if (hdfsStoreCreateResult.getThrowable() != null) {
+        String memberId = hdfsStoreCreateResult.getMemberIdOrName();
+        String errorMsg = hdfsStoreCreateResult.getThrowable().getMessage();
+        String errClass = hdfsStoreCreateResult.getThrowable().getClass().getName();
+        tabularResultData.accumulate("Member", memberId);
+        tabularResultData.accumulate("Result", "ERROR: " + errClass + ": " + errorMsg);
+        accumulatedData = true;
+        tabularResultData.setStatus(Status.ERROR);
+      }
+      else if (hdfsStoreCreateResult.isSuccessful()) {
+        String memberId = hdfsStoreCreateResult.getMemberIdOrName();
+        String successMsg = hdfsStoreCreateResult.getMessage();
+        tabularResultData.accumulate("Member", memberId);
+        tabularResultData.accumulate("Result", successMsg);
+        if (xmlEntity == null) {
+          xmlEntity = hdfsStoreCreateResult.getXmlEntity();
+        }
+        accumulatedData = true;
+      }
+    }
+
+    if (!accumulatedData) {
+      return ResultBuilder.createInfoResult("Unable to create hdfs store: " + hdfsUniqueName);
+    }
+
+    Result result = ResultBuilder.buildResult(tabularResultData);
+    if (xmlEntity != null) {
+      result.setCommandPersisted((new SharedConfigurationWriter()).addXmlEntity(xmlEntity, groups));
+    }
+
+    return result;
+  }
+  
+  
+  @CliCommand(value = CliStrings.DESCRIBE_HDFS_STORE, help = CliStrings.DESCRIBE_HDFS_STORE__HELP)
+  @CliMetaData(shellOnly = false, relatedTopic = { CliStrings.TOPIC_GEMFIRE_HDFSSTORE})
+  public Result describeHdfsStore(
+      @CliOption(key = CliStrings.DESCRIBE_HDFS_STORE__MEMBER, 
+                 mandatory = true, optionContext = ConverterHint.MEMBERIDNAME, 
+                 help = CliStrings.DESCRIBE_HDFS_STORE__MEMBER__HELP)
+      final String memberName,
+      @CliOption(key = CliStrings.DESCRIBE_HDFS_STORE__NAME, 
+                 mandatory = true, 
+                 optionContext = ConverterHint.HDFSSTORE_ALL, 
+                 help = CliStrings.DESCRIBE_HDFS_STORE__NAME__HELP)
+      final String hdfsStoreName) {
+    try {
+      return toCompositeResult(getHDFSStoreDescription(memberName, hdfsStoreName));
+
+    } catch (HDFSStoreNotFoundException e) {
+      return ResultBuilder.createShellClientErrorResult(e.getMessage());
+
+    } catch (FunctionInvocationTargetException ignore) {
+      return ResultBuilder.createGemFireErrorResult(CliStrings.format(CliStrings.COULD_NOT_EXECUTE_COMMAND_TRY_AGAIN,
+          CliStrings.DESCRIBE_HDFS_STORE));
+
+    } catch (MemberNotFoundException e) {
+      return ResultBuilder.createShellClientErrorResult(e.getMessage());
+      
+    } catch (VirtualMachineError e) {
+      SystemFailure.initiateFailure(e);
+      throw e;
+      
+    } catch (Throwable t) {
+      SystemFailure.checkFailure();
+      return ResultBuilder.createGemFireErrorResult(String.format(CliStrings.DESCRIBE_HDFS_STORE__ERROR_MESSAGE,
+          memberName, hdfsStoreName, t));
+    }
+  }        
+  
+  public HDFSStoreConfigHolder getHDFSStoreDescription(String memberName, String hdfsStoreName) {
+
+    final DistributedMember member = getMember(getCache(), memberName);
+    
+    ResultCollector<?, ?> resultCollector = getMembersFunctionExecutor(Collections.singleton(member))
+    .withArgs(hdfsStoreName).execute(new DescribeHDFSStoreFunction());
+    
+    Object result = ((List<?>)resultCollector.getResult()).get(0);
+
+    if (result instanceof HDFSStoreConfigHolder) {
+      return (HDFSStoreConfigHolder)result;
+    }
+    if (result instanceof HDFSStoreNotFoundException) {
+      throw (HDFSStoreNotFoundException)result;
+    }
+    else {
+      final Throwable cause = (result instanceof Throwable ? (Throwable)result : null);
+      throw new RuntimeException(CliStrings.format(CliStrings.UNEXPECTED_RETURN_TYPE_EXECUTING_COMMAND_ERROR_MESSAGE,
+          ClassUtils.getClassName(result), CliStrings.DESCRIBE_HDFS_STORE), cause);
+
+    }
+  }
+  
+  public Result toCompositeResult(final HDFSStoreConfigHolder storePrms) {
+    final CompositeResultData hdfsStoreCompositeResult = ResultBuilder.createCompositeResultData();
+    final CompositeResultData.SectionResultData hdfsStoreSection = hdfsStoreCompositeResult.addSection();
+
+    hdfsStoreSection.addData("Hdfs Store Name", storePrms.getName());
+    hdfsStoreSection.addData("Name Node URL", storePrms.getNameNodeURL());
+    hdfsStoreSection.addData("Home Dir", storePrms.getHomeDir());
+    hdfsStoreSection.addData("Block Cache", storePrms.getBlockCacheSize());
+    hdfsStoreSection.addData("File RollOver Interval", storePrms.getWriteOnlyFileRolloverInterval());
+    hdfsStoreSection.addData("Max WriteOnly File Size", storePrms.getWriteOnlyFileRolloverSize());
+
+    hdfsStoreSection.addData("Client Configuration File", storePrms.getHDFSClientConfigFile());
+
+    hdfsStoreSection.addData("Disk Store Name", storePrms.getDiskStoreName());
+    hdfsStoreSection.addData("Batch Size In MB", storePrms.getBatchSize());
+    hdfsStoreSection.addData("Batch Interval Time", storePrms.getBatchInterval());
+    hdfsStoreSection.addData("Maximum Memory", storePrms.getMaxMemory());
+    hdfsStoreSection.addData("Dispatcher Threads", storePrms.getDispatcherThreads());
+    hdfsStoreSection.addData("Buffer Persistence", storePrms.getBufferPersistent());
+    hdfsStoreSection.addData("Synchronous Persistence", storePrms.getSynchronousDiskWrite());
+
+    hdfsStoreSection.addData("Major Compaction Enabled", storePrms.getMajorCompaction());
+    hdfsStoreSection.addData("Major Compaction Threads", storePrms.getMajorCompactionThreads());
+    hdfsStoreSection.addData("Major compaction Interval", storePrms.getMajorCompactionInterval());
+    hdfsStoreSection.addData("Minor Compaction Enabled", storePrms.getMinorCompaction());
+    hdfsStoreSection.addData("Minor Compaction Threads", storePrms.getMinorCompactionThreads());
+    hdfsStoreSection.addData("Purge Interval", storePrms.getPurgeInterval());
+
+    return ResultBuilder.buildResult(hdfsStoreCompositeResult);
+  } 
+  
+  @CliCommand(value = CliStrings.LIST_HDFS_STORE, help = CliStrings.LIST_HDFS_STORE__HELP)
+  @CliMetaData(shellOnly = false, relatedTopic = { CliStrings.TOPIC_GEMFIRE_HDFSSTORE })
+  public Result listHdfsStore() {  
+    try {
+      Set<DistributedMember> dataMembers = getNormalMembers(getCache());
+      if (dataMembers.isEmpty()) {
+        return ResultBuilder.createInfoResult(CliStrings.NO_CACHING_MEMBERS_FOUND_MESSAGE);
+      }
+      return toTabularResult(getHdfsStoreListing(dataMembers));
+
+    } catch (FunctionInvocationTargetException ignore) {
+      return ResultBuilder.createGemFireErrorResult(
+          CliStrings.format(CliStrings.COULD_NOT_EXECUTE_COMMAND_TRY_AGAIN,
+          CliStrings.LIST_HDFS_STORE));
+
+    } catch (VirtualMachineError e) {
+      SystemFailure.initiateFailure(e);
+      throw e;
+
+    } catch (Throwable t) {
+      SystemFailure.checkFailure();
+      return ResultBuilder.createGemFireErrorResult(
+          String.format(CliStrings.LIST_HDFS_STORE__ERROR_MESSAGE, t.getMessage()));
+    }
+  }
+  
+  protected List<HdfsStoreDetails> getHdfsStoreListing(Set<DistributedMember> members) {
+
+    final Execution membersFunctionExecutor = getMembersFunctionExecutor(members);
+
+    if (membersFunctionExecutor instanceof AbstractExecution) {
+      ((AbstractExecution)membersFunctionExecutor).setIgnoreDepartedMembers(true);
+    }
+
+    final ResultCollector<?, ?> resultCollector = membersFunctionExecutor.execute(new ListHDFSStoresFunction());
+    final List<?> results = (List<?>)resultCollector.getResult();
+    final List<HdfsStoreDetails> hdfsStoreList = new ArrayList<HdfsStoreDetails>(results.size());
+
+    for (final Object result : results) {
+      if (result instanceof Set) { // ignore FunctionInvocationTargetExceptions and other Exceptions...
+        hdfsStoreList.addAll((Set<HdfsStoreDetails>)result);
+      }
+    }
+
+    Collections.sort(hdfsStoreList, new Comparator<HdfsStoreDetails>() {
+      public <T extends Comparable<T>> int compare(final T obj1, final T obj2) {
+        return (obj1 == null && obj2 == null ? 0 : (obj1 == null ? 1 : (obj2 == null ? -1 : obj1.compareTo(obj2))));
+      }
+
+      @Override
+      public int compare(HdfsStoreDetails store1, HdfsStoreDetails store2) {
+        int comparisonValue = compare(store1.getMemberName(), store2.getMemberName());
+        comparisonValue = (comparisonValue != 0 ? comparisonValue : compare(store1.getMemberId(), store2.getMemberId()));
+        return (comparisonValue != 0 ? comparisonValue : store1.getStoreName().compareTo(store2.getStoreName()));
+      }
+    });
+
+    return hdfsStoreList;
+  }
+  
+
+  protected Result toTabularResult(final List<HdfsStoreDetails> hdfsStoreList) throws ResultDataException {
+    if (!hdfsStoreList.isEmpty()) {
+      final TabularResultData hdfsStoreData = ResultBuilder.createTabularResultData();
+      for (final HdfsStoreDetails hdfsStoreDetails : hdfsStoreList) {
+        hdfsStoreData.accumulate("Member Name", hdfsStoreDetails.getMemberName());
+        hdfsStoreData.accumulate("Member Id", hdfsStoreDetails.getMemberId());
+        hdfsStoreData.accumulate("Hdfs Store Name", hdfsStoreDetails.getStoreName());
+      }
+      return ResultBuilder.buildResult(hdfsStoreData);
+    }
+    else {
+      return ResultBuilder.createInfoResult(CliStrings.LIST_HDFS_STORE__HDFS_STORES_NOT_FOUND_MESSAGE);
+    }
+  }
+  
+
+  @CliCommand(value=CliStrings.DESTROY_HDFS_STORE, help=CliStrings.DESTROY_HDFS_STORE__HELP)
+  @CliMetaData(shellOnly=false, relatedTopic={CliStrings.TOPIC_GEMFIRE_HDFSSTORE}, writesToSharedConfiguration=true)
+  public Result destroyHdfstore(
+      @CliOption  (key=CliStrings.DESTROY_HDFS_STORE__NAME, 
+                   optionContext=ConverterHint.HDFSSTORE_ALL,
+                   mandatory=true,
+                   help=CliStrings.DESTROY_HDFS_STORE__NAME__HELP)
+        String hdfsStoreName,
+      @CliOption(key=CliStrings.DESTROY_HDFS_STORE__GROUP,
+                 help=CliStrings.DESTROY_HDFS_STORE__GROUP__HELP,
+                 optionContext=ConverterHint.MEMBERGROUP)
+      @CliMetaData (valueSeparator = ",")
+        String[] groups) {
+    try{      
+       return destroyStore(hdfsStoreName,groups);
+ 
+    } catch (FunctionInvocationTargetException ignore) {
+      return ResultBuilder.createGemFireErrorResult(CliStrings.format(CliStrings.COULD_NOT_EXECUTE_COMMAND_TRY_AGAIN,
+          CliStrings.DESTROY_HDFS_STORE));
+      
+    } catch (VirtualMachineError e) {
+      SystemFailure.initiateFailure(e);
+      throw e;
+      
+    } catch (Throwable th) {
+      SystemFailure.checkFailure();
+      return ResultBuilder.createGemFireErrorResult(CliStrings.format(
+          CliStrings.DESTROY_HDFS_STORE__ERROR_WHILE_DESTROYING_REASON_0, new Object[] { th.getMessage() }));
+    }
+ }
+  
+  protected Result destroyStore(String hdfsStoreName , String[] groups){
+      TabularResultData tabularData = ResultBuilder.createTabularResultData();
+      boolean accumulatedData = false;
+
+      Set<DistributedMember> targetMembers = null;
+      try {
+        targetMembers = getGroupMembers(groups);
+      } catch (CommandResultException cre) {
+        return cre.getResult();
+      }
+      
+      ResultCollector<?, ?> rc = getMembersFunctionExecutor(targetMembers)
+      .withArgs(hdfsStoreName).execute(new DestroyHDFSStoreFunction());
+      
+      List<CliFunctionResult> results = CliFunctionResult.cleanResults((List<?>)rc.getResult());
+
+      XmlEntity xmlEntity = null;
+      for (CliFunctionResult result : results) {
+        
+        if (result.getThrowable() != null) {
+          tabularData.accumulate("Member", result.getMemberIdOrName());
+          tabularData.accumulate("Result", "ERROR: " + result.getThrowable().getClass().getName() + ": "
+              + result.getThrowable().getMessage());
+          accumulatedData = true;
+          tabularData.setStatus(Status.ERROR);
+        }
+        else if (result.getMessage() != null) {
+          tabularData.accumulate("Member", result.getMemberIdOrName());
+          tabularData.accumulate("Result", result.getMessage());
+          accumulatedData = true;
+          
+          if (xmlEntity == null) {
+            xmlEntity = result.getXmlEntity();
+          }
+        }
+      }
+      
+      if (!accumulatedData) {
+        return ResultBuilder.createInfoResult("No matching hdfs stores found.");
+      }
+      
+      Result result = ResultBuilder.buildResult(tabularData);
+      if (xmlEntity != null) {
+        result.setCommandPersisted((new SharedConfigurationWriter()).deleteXmlEntity(xmlEntity, groups));
+      }
+      
+      return result;
+  }
+  @CliCommand(value=CliStrings.ALTER_HDFS_STORE, help=CliStrings.ALTER_HDFS_STORE__HELP)
+  @CliMetaData(shellOnly=false, relatedTopic={CliStrings.TOPIC_GEMFIRE_HDFSSTORE}, writesToSharedConfiguration=true)
+  public Result alterHdfstore(
+      @CliOption (key = CliStrings.ALTER_HDFS_STORE__NAME,                  
+          mandatory = true,
+          optionContext = ConverterHint.HDFSSTORE_ALL, 
+          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+          help = CliStrings.ALTER_HDFS_STORE__NAME__HELP)
+      String hdfsUniqueName,     
+      @CliOption (key = CliStrings.ALTER_HDFS_STORE__BATCHSIZE,
+          mandatory = false,
+          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+          help = CliStrings.ALTER_HDFS_STORE__BATCHSIZE__HELP)
+      Integer batchSize,
+      @CliOption (key = CliStrings.ALTER_HDFS_STORE__BATCHINTERVAL,
+          mandatory = false,
+          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+          help = CliStrings.ALTER_HDFS_STORE__BATCHINTERVAL__HELP)
+      Integer batchInterval,      
+      @CliOption (key = CliStrings.ALTER_HDFS_STORE__MINORCOMPACT,
+          mandatory = false,
+          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+          help = CliStrings.ALTER_HDFS_STORE__MINORCOMPACT__HELP)
+      Boolean minorCompact,                                                                                                         
+      @CliOption (key = CliStrings.ALTER_HDFS_STORE__MINORCOMPACTIONTHREADS,
+          mandatory = false,
+          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+          help = CliStrings.ALTER_HDFS_STORE__MINORCOMPACTIONTHREADS__HELP)
+      Integer minorCompactionThreads,
+      @CliOption (key = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACT,
+          mandatory = false,
+          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+          help = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACT__HELP)
+      Boolean majorCompact,   
+      @CliOption (key = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTINTERVAL,
+          mandatory = false,
+          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+          help = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTINTERVAL__HELP)
+      Integer majorCompactionInterval, 
+      @CliOption (key = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTIONTHREADS,
+          mandatory = false,
+          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+          help = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTIONTHREADS__HELP)
+      Integer majorCompactionThreads,  
+      @CliOption (key = CliStrings.ALTER_HDFS_STORE__PURGEINTERVAL,
+          mandatory = false,
+          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+          help = CliStrings.ALTER_HDFS_STORE__PURGEINTERVAL__HELP)
+      Integer purgeInterval,        
+      @CliOption (key = CliStrings.ALTER_HDFS_STORE__FILEROLLOVERINTERVAL,
+          mandatory = false,
+          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+          help = CliStrings.ALTER_HDFS_STORE__FILEROLLOVERINTERVAL__HELP)
+      Integer fileRolloverInterval,
+      @CliOption (key = CliStrings.ALTER_HDFS_STORE__WRITEONLYFILESIZE,
+          mandatory = false,
+          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
+          help = CliStrings.ALTER_HDFS_STORE__WRITEONLYFILESIZE__HELP)
+      Integer maxWriteonlyFileSize,  
+      @CliOption(key=CliStrings.ALTER_HDFS_STORE__GROUP,
+         help=CliStrings.ALTER_HDFS_STORE__GROUP__HELP,
+         optionContext=ConverterHint.MEMBERGROUP)
+      @CliMetaData (valueSeparator = ",")
+      String[] groups){
+    try {                         
+      
+      return getAlteredHDFSStore(groups, hdfsUniqueName, batchSize, batchInterval, minorCompact,
+          minorCompactionThreads, majorCompact, majorCompactionInterval, majorCompactionThreads, purgeInterval,
+          fileRolloverInterval, maxWriteonlyFileSize);
+      
+    } catch (FunctionInvocationTargetException ignore) {
+      return ResultBuilder.createGemFireErrorResult(CliStrings.format(CliStrings.COULD_NOT_EXECUTE_COMMAND_TRY_AGAIN,
+          CliStrings.ALTER_HDFS_STORE));
+      
+    } catch (VirtualMachineError e) {
+      SystemFailure.initiateFailure(e);
+      throw e;
+      
+    } catch (Throwable th) {
+      SystemFailure.checkFailure();
+      return ResultBuilder.createGemFireErrorResult(CliStrings.format(
+          CliStrings.ALTER_HDFS_STORE__ERROR_WHILE_ALTERING_REASON_0, new Object[] { th.getMessage() }));
+    }
+ }
+  
+  
+  protected Result getAlteredHDFSStore(String[] groups, String hdfsUniqueName, Integer batchSize,
+      Integer batchInterval, Boolean minorCompact, Integer minorCompactionThreads, Boolean majorCompact,
+      Integer majorCompactionInterval, Integer majorCompactionThreads, Integer purgeInterval,
+      Integer fileRolloverInterval, Integer maxWriteonlyFileSize) {
+    
+    Set<DistributedMember> targetMembers = null;
+    try {
+      targetMembers = getGroupMembers(groups);
+    } catch (CommandResultException cre) {
+      return cre.getResult();
+    }
+    
+    TabularResultData tabularData = ResultBuilder.createTabularResultData();
+    
+    AlterHDFSStoreAttributes alterAttributes = new AlterHDFSStoreAttributes(
+        hdfsUniqueName, batchSize, batchInterval, minorCompact,
+        majorCompact, minorCompactionThreads, majorCompactionInterval,
+        majorCompactionThreads, purgeInterval, fileRolloverInterval,
+        maxWriteonlyFileSize);
+
+    ResultCollector<?, ?> rc = getMembersFunctionExecutor(targetMembers)
+    .withArgs(alterAttributes).execute(new AlterHDFSStoreFunction());
+    
+    List<CliFunctionResult> results = CliFunctionResult.cleanResults((List<?>)rc.getResult());
+
+    XmlEntity xmlEntity = null;
+
+    for (CliFunctionResult result : results) {
+      if (result.getThrowable() != null) {
+        tabularData.accumulate("Member", result.getMemberIdOrName());
+        tabularData.accumulate("Result", "ERROR: " + result.getThrowable().getClass().getName() + ": "
+            + result.getThrowable().getMessage());
+        tabularData.setStatus(Status.ERROR);
+      }
+      else if (result.getMessage() != null) {
+        tabularData.accumulate("Member", result.getMemberIdOrName());
+        tabularData.accumulate("Result", result.getMessage());
+
+        if (xmlEntity == null) {
+          xmlEntity = result.getXmlEntity();
+        }
+      }
+    }
+    
+    Result result = ResultBuilder.buildResult(tabularData);
+    
+    if (xmlEntity != null) {
+      result.setCommandPersisted((new SharedConfigurationWriter()).deleteXmlEntity(xmlEntity, groups));
+    }
+    
+    return result;
+  }
+  @CliAvailabilityIndicator({CliStrings.CREATE_HDFS_STORE, CliStrings.LIST_HDFS_STORE,
+    CliStrings.DESCRIBE_HDFS_STORE, CliStrings.ALTER_HDFS_STORE, CliStrings.DESTROY_HDFS_STORE})
+  public boolean hdfsStoreCommandsAvailable() {
+    // these hdfs store commands are always available in GemFire
+    // Always available outside the gfsh VM; within gfsh, only when connected and ready.
+  }  
+  
+  @Override
+  protected Set<DistributedMember> getMembers(final Cache cache) {
+    return CliUtil.getAllMembers(cache);
+  }
+  
+  protected Set<DistributedMember> getNormalMembers(final Cache cache) {
+    return CliUtil.getAllNormalMembers(cache);
+  }
+  
+  protected Set<DistributedMember> getGroupMembers(String[] groups) throws CommandResultException {    
+      return  CliUtil.findAllMatchingMembers(groups, null); 
+  }
+  
+}

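As a quick illustration of the pattern getCreatedHdfsStore uses above -- the name is always set, every other parameter is copied onto the HDFSStoreConfigHolder only when the caller actually supplied it -- here is a trimmed-down sketch. The helper class name is invented; only setters that appear in this patch are used, and only a few of them, for brevity.

// Sketch of the "set only what was supplied" mapping; not part of the patch.
import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;

public class HdfsStoreConfigSketch {
  public static HDFSStoreConfigHolder build(String name, String nameNodeUrl,
                                            Integer batchSizeMb, Boolean minorCompact) {
    HDFSStoreConfigHolder holder = new HDFSStoreConfigHolder();
    holder.setName(name);                    // the store name is always required
    if (nameNodeUrl != null) {
      holder.setNameNodeURL(nameNodeUrl);    // unspecified options keep their defaults
    }
    if (batchSizeMb != null) {
      holder.setBatchSize(batchSizeMb);
    }
    if (minorCompact != null) {
      holder.setMinorCompaction(minorCompact);
    }
    return holder;
  }
}
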
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/converters/HdfsStoreNameConverter.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/converters/HdfsStoreNameConverter.java b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/converters/HdfsStoreNameConverter.java
new file mode 100644
index 0000000..e595c77
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/internal/cli/converters/HdfsStoreNameConverter.java
@@ -0,0 +1,88 @@
+/*
+ * =========================================================================
+ *  Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
+ *  This product is protected by U.S. and international copyright
+ *  and intellectual property laws. Pivotal products are covered by
+ *  more patents listed at http://www.pivotal.io/patents.
+ * ========================================================================
+ */
+package com.gemstone.gemfire.management.internal.cli.converters;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import com.gemstone.gemfire.management.cli.ConverterHint;
+import com.gemstone.gemfire.management.internal.cli.shell.Gfsh;
+
+import org.springframework.shell.core.Completion;
+import org.springframework.shell.core.Converter;
+import org.springframework.shell.core.MethodTarget;
+
+/**
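+ * Converter that supplies gfsh tab completion of HDFS store names (ConverterHint.HDFSSTORE_ALL),
+ * backed by DistributedSystemMXBean.listMemberHDFSStore().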
+ * 
+ * @author Namrata Thanvi
+ * 
+ */
+
+public class HdfsStoreNameConverter implements Converter<String> {
+
+  @Override
+  public boolean supports(Class<?> type, String optionContext) {
+    return String.class.equals(type) && ConverterHint.HDFSSTORE_ALL.equals(optionContext);
+  }
+
+  @Override
+  public String convertFromText(String value, Class<?> targetType, String optionContext) {
+    return value;
+  }
+
+  @Override
+  public boolean getAllPossibleValues(List<Completion> completions, Class<?> targetType, String existingData,
+      String optionContext, MethodTarget target) {
+    if (String.class.equals(targetType) && ConverterHint.HDFSSTORE_ALL.equals(optionContext)) {
+      Set<String> hdfsStoreNames = getHdfsStoreNames();
+
+      for (String hdfsStoreName : hdfsStoreNames) {
+        if (existingData != null) {
+          if (hdfsStoreName.startsWith(existingData)) {
+            completions.add(new Completion(hdfsStoreName));
+          }
+        }
+        else {
+          completions.add(new Completion(hdfsStoreName));
+        }
+      }
+    }
+
+    return !completions.isEmpty();
+  }
+
+  private Set<String> getHdfsStoreNames() {
+    SortedSet<String> hdfsStoreNames = new TreeSet<String>();
+    Gfsh gfsh = Gfsh.getCurrentInstance();
+
+    if (gfsh != null && gfsh.isConnectedAndReady()) {
+      Map<String, String[]> hdfsStoreInfo = gfsh.getOperationInvoker().getDistributedSystemMXBean()
+          .listMemberHDFSStore();
+      if (hdfsStoreInfo != null) {
+        Set<Entry<String, String[]>> entries = hdfsStoreInfo.entrySet();
+
+        for (Entry<String, String[]> entry : entries) {
+          String[] value = entry.getValue();
+          if (value != null) {
+            hdfsStoreNames.addAll(Arrays.asList(value));
+          }
+        }
+
+      }
+    }
+
+    return hdfsStoreNames;
+  }
+
+}

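The interesting part of the converter above is the prefix filter in getAllPossibleValues. Below is a small standalone sketch of that filtering logic; the class and method names are invented for illustration.

// Standalone sketch of the completion prefix filter; not part of the patch.
import java.util.ArrayList;
import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;

public class CompletionFilterSketch {
  static List<String> completions(SortedSet<String> storeNames, String typedSoFar) {
    List<String> matches = new ArrayList<String>();
    for (String name : storeNames) {
      // null input means "offer everything", otherwise prefix-match the typed text
      if (typedSoFar == null || name.startsWith(typedSoFar)) {
        matches.add(name);
      }
    }
    return matches;
  }

  public static void main(String[] args) {
    SortedSet<String> names = new TreeSet<String>();
    names.add("ordersStore");
    names.add("ordersArchive");
    names.add("metricsStore");
    System.out.println(completions(names, "orders")); // [ordersArchive, ordersStore]
  }
}
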

[23/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSParallelGatewaySenderQueue.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSParallelGatewaySenderQueue.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSParallelGatewaySenderQueue.java
new file mode 100644
index 0000000..1e6a034
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSParallelGatewaySenderQueue.java
@@ -0,0 +1,471 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+
+import com.gemstone.gemfire.cache.CacheException;
+import com.gemstone.gemfire.cache.EntryNotFoundException;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.i18n.LogWriterI18n;
+import com.gemstone.gemfire.internal.SystemTimer;
+import com.gemstone.gemfire.internal.SystemTimer.SystemTimerTask;
+import com.gemstone.gemfire.internal.cache.ColocationHelper;
+import com.gemstone.gemfire.internal.cache.ForceReattemptException;
+import com.gemstone.gemfire.internal.cache.LocalRegion;
+import com.gemstone.gemfire.internal.cache.PartitionedRegion;
+import com.gemstone.gemfire.internal.cache.wan.AbstractGatewaySender;
+import com.gemstone.gemfire.internal.cache.wan.GatewaySenderEventImpl;
+import com.gemstone.gemfire.internal.cache.wan.parallel.ParallelGatewaySenderQueue;
+
+/**
+ * Parallel Gateway Sender Queue extended for HDFS functionality 
+ *
+ */
+public class HDFSParallelGatewaySenderQueue extends ParallelGatewaySenderQueue {
+
+  private int currentBucketIndex = 0;
+  private int elementsPeekedAcrossBuckets = 0;
+  private SystemTimer rollListTimer = null;
+  public static final String ROLL_SORTED_LIST_TIME_INTERVAL_MS__PROP = "gemfire.ROLL_SORTED_LIST_TIME_INTERVAL_MS";
+  private final int ROLL_SORTED_LIST_TIME_INTERVAL_MS = Integer.getInteger(ROLL_SORTED_LIST_TIME_INTERVAL_MS__PROP, 3000);
+  
+  public HDFSParallelGatewaySenderQueue(AbstractGatewaySender sender,
+      Set<Region> userPRs, int idx, int nDispatcher) {
+     
+    super(sender, userPRs, idx, nDispatcher);
+    // Only the first dispatcher (index == 0) owns the timer that rolls the sorted lists.
+    if (sender.getBucketSorted() && this.index == 0) {
+      rollListTimer = new SystemTimer(sender.getCache().getDistributedSystem(),
+          true);
+      // schedule the task to roll the skip lists
+      rollListTimer.scheduleAtFixedRate(new RollSortedListsTimerTask(), 
+          ROLL_SORTED_LIST_TIME_INTERVAL_MS, ROLL_SORTED_LIST_TIME_INTERVAL_MS);
+    }
+  }
+  
+  @Override
+  public Object peek() throws InterruptedException, CacheException {
+    /* If you call peek and use super.peek it leads to the following exception.
+     * So I'm adding an explicit UnsupportedOperationException.
+     Caused by: java.lang.ClassCastException: com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue cannot be cast to com.gemstone.gemfire.internal.cache.BucketRegionQueue
+        at com.gemstone.gemfire.internal.cache.wan.parallel.ParallelGatewaySenderQueue.getRandomPrimaryBucket(ParallelGatewaySenderQueue.java:964)
+        at com.gemstone.gemfire.internal.cache.wan.parallel.ParallelGatewaySenderQueue.peek(ParallelGatewaySenderQueue.java:1078)
+     */
+    throw new UnsupportedOperationException();
+  }
+  
+  
+  @Override
+  public void cleanUp() {
+    super.cleanUp();
+    cancelRollListTimer();
+  }
+  
+  private void cancelRollListTimer() {
+    if (rollListTimer != null) {
+      rollListTimer.cancel();
+      rollListTimer = null;
+    }
+  }
+  /**
+   * A call to this function peeks elements from the first local primary bucket. 
+   * Next call to this function peeks elements from the next local primary 
+   * bucket and so on.  
+   */
+  @Override
+  public List peek(int batchSize, int timeToWait) throws InterruptedException,
+  CacheException {
+    
+    List batch = new ArrayList();
+    
+    int batchSizeInBytes = batchSize*1024*1024;
+    PartitionedRegion prQ = getRandomShadowPR();
+    if (prQ == null || prQ.getLocalMaxMemory() == 0) {
+      try {
+        Thread.sleep(50);
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+      }
+      blockProcesorThreadIfRequired();
+      return batch;
+    }
+    
+    ArrayList list = null;
+    ArrayList<Integer> pbuckets = new ArrayList<Integer>(prQ
+        .getDataStore().getAllLocalPrimaryBucketIds());
+    ArrayList<Integer> buckets = new ArrayList<Integer>();
+    for (Integer i : pbuckets) {
+      if (i % this.nDispatcher == this.index)
+        buckets.add(i);
+    }
+    // In case of earlier failures, peekedEvents may already hold some events;
+    // re-add them to the batch here.
+    if (this.resetLastPeeked) {
+      int previousBucketId = -1;
+      boolean stillPrimary = true; 
+      Iterator<GatewaySenderEventImpl>  iter = peekedEvents.iterator();
+      // we need to remove the events of the bucket that are no more primary on 
+      // this node as they cannot be persisted from this node. 
+      while(iter.hasNext()) {
+        HDFSGatewayEventImpl hdfsEvent = (HDFSGatewayEventImpl)iter.next();
+        if (previousBucketId != hdfsEvent.getBucketId()){
+          stillPrimary = buckets.contains(hdfsEvent.getBucketId());
+          previousBucketId = hdfsEvent.getBucketId();
+        }
+        if (stillPrimary)
+          batch.add(hdfsEvent);
+        else {
+          iter.remove();
+        }
+      }
+      this.resetLastPeeked = false;
+    }
+    
+    if (buckets.size() == 0) {
+      // No local primary buckets for this dispatcher; sleep a bit before trying again.
+      try {
+        Thread.sleep(50);
+      }
+      catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+      }
+      return batch;
+    }
+    
+    if (this.sender.getBucketSorted()) {
+      
+    }
+    
+    // Each call to this function returns index of next bucket 
+    // that is to be processed. This function takes care 
+    // of the bucket sequence that is peeked by a sequence of 
+    // peek calls. 
+    // If there are bucket movements between two consecutive 
+    // calls to this function then there is chance that a bucket 
+    // is processed twice while another one is skipped. But, that is 
+    // ok because in the next round, it will be processed. 
+    Integer bIdIndex = getCurrentBucketIndex(buckets.size());
+    
+    // If we have gone through all the buckets once and no
+    // elements were peeked from any of the buckets, take a nap.
+    // This always sleeps on the first call, but that should be ok
+    // because in practical use cases timeToWait is greater
+    // than this 100 ms sleep.
+    if (bIdIndex == 0 && getAndresetElementsPeekedAcrossBuckets() == 0) { 
+      try { 
+        Thread.sleep(100); 
+      } catch (InterruptedException e) { 
+        Thread.currentThread().interrupt(); 
+      } 
+    } 
+    
+    HDFSBucketRegionQueue hrq = ((HDFSBucketRegionQueue)prQ
+        .getDataStore().getLocalBucketById(buckets.get(bIdIndex)));
+    
+    if (hrq == null) {
+      // bucket moved to another node after getAllLocalPrimaryBucketIds
+      // was called. Peeking not possible. return. 
+      return batch;
+    }
+    long entriesWaitingTobePeeked = hrq.totalEntries();
+    
+    if (entriesWaitingTobePeeked == 0) {
+      blockProcesorThreadIfRequired();
+      return batch;
+    }
+    
+    long currentTimeInMillis = System.currentTimeMillis();
+    long bucketSizeInBytes = hrq.getQueueSizeInBytes();
+    if (((currentTimeInMillis - hrq.getLastPeekTimeInMillis()) >  timeToWait)  
+        || ( bucketSizeInBytes > batchSizeInBytes)
+        || hrq.shouldDrainImmediately()) {
+      // peek now
+      if (logger.isDebugEnabled()) { 
+        logger.debug("Peeking queue " + hrq.getId()   + ": bucketSizeInBytes " + bucketSizeInBytes
+            + ":  batchSizeInBytes" + batchSizeInBytes
+            + ":  timeToWait" + timeToWait
+            + ":  (currentTimeInMillis - hrq.getLastPeekTimeInMillis())" + (currentTimeInMillis - hrq.getLastPeekTimeInMillis()));
+      }
+
+      list = peekAhead(buckets.get(bIdIndex), hrq);
+      
+      if (list != null && list.size() != 0 ) {
+        for (Object object : list) {
+          batch.add(object);
+          peekedEvents.add((HDFSGatewayEventImpl)object);
+        }
+      }
+    }
+    else {
+      blockProcesorThreadIfRequired();
+    }
+    if (logger.isDebugEnabled()  &&  batch.size() > 0) {
+      logger.debug(this + ":  Peeked a batch of " + batch.size() + " entries");
+    }
+    
+    setElementsPeekedAcrossBuckets(batch.size()); 
+    
+    return batch;
+  }
+  
+  /**
+   * This function maintains an index of the last processed bucket.
+   * When it is called, it returns index of the next bucket. 
+   * @param totalBuckets
+   * @return current bucket index
+   */
+  private int getCurrentBucketIndex(int totalBuckets) {
+    int retBucket = currentBucketIndex;
+    if (retBucket >=  totalBuckets) {
+      currentBucketIndex = 0;
+      retBucket = 0;
+    }
+    
+    currentBucketIndex++;
+    
+    return retBucket;
+  }
+  
+  @Override
+  public void remove(int batchSize) throws CacheException {
+    int destroyed = 0;
+    HDFSGatewayEventImpl event = null;
+    
+    if (this.peekedEvents.size() > 0)
+      event = (HDFSGatewayEventImpl)this.peekedEvents.remove();
+    
+    while (event != null && destroyed < batchSize) {
+      Region currentRegion = event.getRegion();
+      int currentBucketId = event.getBucketId();
+      int bucketId = event.getBucketId();
+        
+      ArrayList<HDFSGatewayEventImpl> listToDestroy = new ArrayList<HDFSGatewayEventImpl>();
+      ArrayList<Object> destroyedSeqNum = new ArrayList<Object>();
+      
+      // create a batch of all the entries of a bucket 
+      while (bucketId == currentBucketId) {
+        listToDestroy.add(event);
+        destroyedSeqNum.add(event.getShadowKey());
+        destroyed++;
+
+        if (this.peekedEvents.size() == 0 || (destroyed) >= batchSize) {
+          event = null; 
+          break;
+        }
+
+        event = (HDFSGatewayEventImpl)this.peekedEvents.remove();
+
+        bucketId = event.getBucketId();
+
+        if (!this.sender.isRunning()){
+          if (logger.isDebugEnabled()) {
+            logger.debug("ParallelGatewaySenderQueue#remove: Cache is closing down. Ignoring remove request.");
+          }
+          return;
+        }
+      }
+      try {
+        HDFSBucketRegionQueue brq = getBucketRegionQueue((PartitionedRegion) currentRegion, currentBucketId);
+        
+        if (brq != null) {
+          // destroy the entries from the bucket 
+          brq.destroyKeys(listToDestroy);
+          // Add the removed events to the map for the BatchRemovalMessage.
+          // We need to provide the prQ as there can be multiple
+          // queues in a PGS now.
+          PartitionedRegion prQ = brq.getPartitionedRegion();
+          addRemovedEvents(prQ, currentBucketId, destroyedSeqNum);
+        }
+        
+      } catch (ForceReattemptException e) {
+        if (logger.isDebugEnabled()) {
+          logger.debug("ParallelGatewaySenderQueue#remove: " + "Got ForceReattemptException for " + this
+          + " for bucket = " + bucketId);
+        }
+      }
+      catch(EntryNotFoundException e) {
+        if (logger.isDebugEnabled()) {
+          logger.debug("ParallelGatewaySenderQueue#remove: " + "Got EntryNotFoundException for " + this
+            + " for bucket = " + bucketId );
+        }
+      }
+    }
+  }
+  
+  /**
+   * Keeps track of the number of elements peeked across all buckets.
+   */
+  private void setElementsPeekedAcrossBuckets(int peekedElements) {
+    this.elementsPeekedAcrossBuckets += peekedElements;
+  } 
+  
+  /**
+   * Returns the number of elements peeked across all buckets and
+   * resets the counter.
+   */
+  private int getAndresetElementsPeekedAcrossBuckets() { 
+    int peekedElements = this.elementsPeekedAcrossBuckets; 
+    this.elementsPeekedAcrossBuckets = 0; 
+    return peekedElements; 
+  } 
+
+  @Override
+  public void remove() throws CacheException {
+    throw new UnsupportedOperationException("Method HDFSParallelGatewaySenderQueue#remove is not supported");
+  }
+ 
+  @Override
+  public void put(Object object) throws InterruptedException, CacheException {
+    super.put(object);
+  }
+  
+  protected ArrayList peekAhead(int bucketId, HDFSBucketRegionQueue hrq) throws CacheException {
+    
+    if (logger.isDebugEnabled()) {
+      logger.debug(this + ": Peekahead for the bucket " + bucketId);
+    }
+    ArrayList  list = hrq.peekABatch();
+    if (logger.isDebugEnabled() && list != null ) {
+      logger.debug(this + ": Peeked" + list.size() + "objects from bucket " + bucketId);
+    }
+
+    return list;
+  }
+  
+  @Override
+  public Object take() {
+    throw new UnsupportedOperationException("take() is not supported for " + HDFSParallelGatewaySenderQueue.class.toString());
+  }
+  
+  protected boolean isUsedForHDFS() {
+    return true;
+  }
+  
+  @Override
+  protected void afterRegionAdd (PartitionedRegion userPR) {
+  }
+  
+  /**
+   * Gets the value for the given region key from the HDFSBucketRegionQueue.
+   * @param region
+   * @throws ForceReattemptException
+   */
+  public HDFSGatewayEventImpl get(PartitionedRegion region, byte[] regionKey, int bucketId) throws ForceReattemptException  {
+    try {
+      HDFSBucketRegionQueue brq = getBucketRegionQueue(region, bucketId);
+      
+      if (brq == null)
+        return null;
+      
+      return brq.getObjectForRegionKey(region, regionKey);
+    } catch(EntryNotFoundException e) {
+      if (logger.isDebugEnabled()) {
+        logger.debug("HDFSParallelGatewaySenderQueue#get: " + "Got EntryNotFoundException for " + this
+            + " for bucket = " + bucketId);
+      }
+    }
+    return null;
+  }
+
+  @Override
+  public void clear(PartitionedRegion pr, int bucketId) {
+    HDFSBucketRegionQueue brq;
+    try {
+      brq = getBucketRegionQueue(pr, bucketId);
+      if (brq == null)
+        return;
+      brq.clear();
+    } catch (ForceReattemptException e) {
+      //do nothing, bucket was destroyed.
+    }
+  }
+  
+  @Override
+  public int size(PartitionedRegion pr, int bucketId) throws ForceReattemptException {
+   HDFSBucketRegionQueue hq = getBucketRegionQueue(pr, bucketId);
+   return hq.size();
+  }
+
+  public HDFSBucketRegionQueue getBucketRegionQueue(PartitionedRegion region,
+      int bucketId) throws ForceReattemptException {
+    PartitionedRegion leader = ColocationHelper.getLeaderRegion(region);
+    if (leader == null)
+      return null;
+    String leaderregionPath = leader.getFullPath();
+    PartitionedRegion prQ = this.userRegionNameToshadowPRMap.get(leaderregionPath);
+    if (prQ == null)
+      return null;
+    HDFSBucketRegionQueue brq;
+
+    brq = ((HDFSBucketRegionQueue)prQ.getDataStore()
+        .getLocalBucketById(bucketId));
+    if(brq == null) {
+      prQ.getRegionAdvisor().waitForLocalBucketStorage(bucketId);
+    }
+    brq = ((HDFSBucketRegionQueue)prQ.getDataStore()
+        .getInitializedBucketForId(null, bucketId));
+    return brq;
+  }
+  
+  /**
+   * This class is responsible for rolling over the skip lists of the sorted
+   * event queue. The roll-over must be done by a separate thread because
+   * neither the put threads nor the peek/remove thread can do it. The put
+   * threads cannot, because that would require synchronization with other put
+   * threads and the peek thread, hurting put latency. The peek thread cannot,
+   * because at a high insert rate a list could grow far beyond its intended
+   * size before the peek thread gets to it.
+   *
+   */
+  class RollSortedListsTimerTask extends SystemTimerTask {
+    
+    
+    /**
+     * Ensures that any bucket whose lists have grown beyond their size limit
+     * has them rolled over into new skip lists.
+     */
+    @Override
+    public void run2() {
+      Set<PartitionedRegion> prQs = getRegions();
+      for (PartitionedRegion prQ : prQs) {
+        ArrayList<Integer> buckets = new ArrayList<Integer>(prQ
+            .getDataStore().getAllLocalPrimaryBucketIds());
+        for (Integer bId : buckets) {
+          HDFSBucketRegionQueue hrq =  ((HDFSBucketRegionQueue)prQ
+              .getDataStore().getLocalBucketById(bId));
+          if (hrq == null) {
+            // bucket moved to another node after getAllLocalPrimaryBucketIds
+            // was called. continue fixing the next bucket. 
+            continue;
+          }
+          if (logger.isDebugEnabled()) {
+            logger.debug("Rolling over the list for bucket id: " + bId);
+          }
+          hrq.rolloverSkipList();
+         }
+      }
+    }
+  }
+   
+}

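The bucket walk in peek() above is driven by getCurrentBucketIndex, which simply cycles an index over however many local primary buckets exist at the time of the call, resetting when the list shrinks. A tiny standalone sketch of that round-robin behaviour; the class name is invented for illustration.

// Sketch of the round-robin bucket index used across successive peek() calls; not part of the patch.
public class RoundRobinSketch {
  private int currentBucketIndex = 0;

  int next(int totalBuckets) {
    int ret = currentBucketIndex;
    if (ret >= totalBuckets) {   // the bucket list may have shrunk since the last call
      currentBucketIndex = 0;
      ret = 0;
    }
    currentBucketIndex++;
    return ret;
  }

  public static void main(String[] args) {
    RoundRobinSketch rr = new RoundRobinSketch();
    // With 3 buckets the successive calls yield 0 1 2 0 1 2
    for (int i = 0; i < 6; i++) {
      System.out.print(rr.next(3) + " ");
    }
  }
}
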
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreConfigHolder.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreConfigHolder.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreConfigHolder.java
new file mode 100644
index 0000000..16d3d87
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreConfigHolder.java
@@ -0,0 +1,559 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.io.Serializable;
+
+import org.apache.logging.log4j.Logger;
+
+import com.gemstone.gemfire.GemFireConfigException;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
+import com.gemstone.gemfire.cache.hdfs.StoreExistsException;
+import com.gemstone.gemfire.internal.cache.xmlcache.CacheXml;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.internal.logging.LogService;
+
+
+/**
+ * Holds all HDFS store related configuration. Rather than duplicating the same
+ * members in the factory and the store, both HdfsStoreImpl and
+ * HdfsStoreCreation delegate their getter, setter and copy-constructor calls
+ * to this class. The config holder can also be replaced wholesale, which is
+ * how altering the configuration is supported.
+ * 
+ */
+public class HDFSStoreConfigHolder implements HDFSStore, HDFSStoreFactory, Serializable {
+  private String name = null;
+  private String namenodeURL = null;
+  private String homeDir = DEFAULT_HOME_DIR;
+  private String clientConfigFile = null;
+  private float blockCacheSize = DEFAULT_BLOCK_CACHE_SIZE;
+  private int maxFileSize = DEFAULT_WRITE_ONLY_FILE_SIZE_LIMIT;
+  private int fileRolloverInterval = DEFAULT_WRITE_ONLY_FILE_ROLLOVER_INTERVAL;
+  protected boolean isAutoCompact = DEFAULT_MINOR_COMPACTION;
+  protected boolean autoMajorCompact = DEFAULT_MAJOR_COMPACTION;
+  protected int maxConcurrency = DEFAULT_MINOR_COMPACTION_THREADS;
+  protected int majorCompactionConcurrency = DEFAULT_MAJOR_COMPACTION_THREADS;
+  protected int majorCompactionIntervalMins = DEFAULT_MAJOR_COMPACTION_INTERVAL_MINS;
+  protected int maxInputFileSizeMB = DEFAULT_INPUT_FILE_SIZE_MAX_MB;
+  protected int maxInputFileCount = DEFAULT_INPUT_FILE_COUNT_MAX;
+  protected int minInputFileCount = DEFAULT_INPUT_FILE_COUNT_MIN;
+  protected int oldFileCleanupIntervalMins = DEFAULT_OLD_FILE_CLEANUP_INTERVAL_MINS;
+  
+  protected int batchSize = DEFAULT_BATCH_SIZE_MB;
+  protected int batchIntervalMillis = DEFAULT_BATCH_INTERVAL_MILLIS;
+  protected int maximumQueueMemory = DEFAULT_MAX_BUFFER_MEMORY;
+  protected boolean isPersistenceEnabled = DEFAULT_BUFFER_PERSISTANCE;
+  protected String diskStoreName = null;
+  protected boolean diskSynchronous = DEFAULT_DISK_SYNCHRONOUS; 
+  protected int dispatcherThreads = DEFAULT_DISPATCHER_THREADS;
+  
+  private static final Logger logger = LogService.getLogger();
+  protected final String logPrefix;
+
+  public HDFSStoreConfigHolder() {
+    this(null);
+  }
+
+  /**
+   * @param config configuration source for creating this instance 
+   */
+  public HDFSStoreConfigHolder(HDFSStore config) {
+    // Copy the name before building the log prefix so a copied config logs under its real name.
+    this.name = (config == null) ? null : config.getName();
+    this.logPrefix = "<" + getName() + "> ";
+    if (config == null) {
+      return;
+    }
+
+    this.namenodeURL = config.getNameNodeURL();
+    this.homeDir = config.getHomeDir();
+    this.clientConfigFile = config.getHDFSClientConfigFile();
+    this.blockCacheSize = config.getBlockCacheSize();
+    this.maxFileSize = config.getWriteOnlyFileRolloverSize();
+    this.fileRolloverInterval = config.getWriteOnlyFileRolloverInterval();
+    isAutoCompact = config.getMinorCompaction();
+    maxConcurrency = config.getMinorCompactionThreads();
+    autoMajorCompact = config.getMajorCompaction();
+    majorCompactionConcurrency = config.getMajorCompactionThreads();
+    majorCompactionIntervalMins = config.getMajorCompactionInterval();
+    maxInputFileSizeMB = config.getInputFileSizeMax();
+    maxInputFileCount = config.getInputFileCountMax();
+    minInputFileCount = config.getInputFileCountMin();
+    oldFileCleanupIntervalMins = config.getPurgeInterval();
+    
+    batchSize = config.getBatchSize();
+    batchIntervalMillis = config.getBatchInterval();
+    maximumQueueMemory = config.getMaxMemory();
+    isPersistenceEnabled = config.getBufferPersistent();
+    diskStoreName = config.getDiskStoreName();
+    diskSynchronous = config.getSynchronousDiskWrite();
+    dispatcherThreads = config.getDispatcherThreads();
+  }
+  
+  public void resetDefaultValues() {
+    name = null;
+    namenodeURL = null;
+    homeDir = null;
+    clientConfigFile = null;
+    blockCacheSize = -1f;
+    maxFileSize = -1;
+    fileRolloverInterval = -1;
+    
+    isAutoCompact = false;
+    maxConcurrency = -1;
+    maxInputFileSizeMB = -1;
+    maxInputFileCount = -1;
+    minInputFileCount = -1;
+    oldFileCleanupIntervalMins = -1;
+
+    autoMajorCompact = false;
+    majorCompactionConcurrency = -1;
+    majorCompactionIntervalMins = -1;
+    
+    batchSize = -1;
+    batchIntervalMillis = -1;
+    maximumQueueMemory = -1;
+    isPersistenceEnabled = false;
+    diskStoreName = null;
+    diskSynchronous = false; 
+    dispatcherThreads = -1;
+  }
+  
+  public void copyFrom(HDFSStoreMutator mutator) {
+    if (mutator.getWriteOnlyFileRolloverInterval() >= 0) {
+      logAttrMutation("fileRolloverInterval", mutator.getWriteOnlyFileRolloverInterval());
+      setWriteOnlyFileRolloverInterval(mutator.getWriteOnlyFileRolloverInterval());
+    }
+    if (mutator.getWriteOnlyFileRolloverSize() >= 0) {
+      logAttrMutation("MaxFileSize", mutator.getWriteOnlyFileRolloverInterval());
+      setWriteOnlyFileRolloverSize(mutator.getWriteOnlyFileRolloverSize());
+    }
+    
+    if (mutator.getMinorCompaction() != null) {
+      logAttrMutation("MinorCompaction", mutator.getMinorCompaction());
+      setMinorCompaction(mutator.getMinorCompaction());
+    }
+    
+    if (mutator.getMinorCompactionThreads() >= 0) {
+      logAttrMutation("MaxThreads", mutator.getMinorCompactionThreads());
+      setMinorCompactionThreads(mutator.getMinorCompactionThreads());
+    }
+    
+    if (mutator.getMajorCompactionInterval() > -1) {
+      logAttrMutation("MajorCompactionIntervalMins", mutator.getMajorCompactionInterval());
+      setMajorCompactionInterval(mutator.getMajorCompactionInterval());
+    }
+    if (mutator.getMajorCompactionThreads() >= 0) {
+      logAttrMutation("MajorCompactionMaxThreads", mutator.getMajorCompactionThreads());
+      setMajorCompactionThreads(mutator.getMajorCompactionThreads());
+    }
+    if (mutator.getMajorCompaction() != null) {
+      logAttrMutation("AutoMajorCompaction", mutator.getMajorCompaction());
+      setMajorCompaction(mutator.getMajorCompaction());
+    }
+    if (mutator.getInputFileCountMax() >= 0) {
+      logAttrMutation("maxInputFileCount", mutator.getInputFileCountMax());
+      setInputFileCountMax(mutator.getInputFileCountMax());
+    }
+    if (mutator.getInputFileSizeMax() >= 0) {
+      logAttrMutation("MaxInputFileSizeMB", mutator.getInputFileSizeMax());
+      setInputFileSizeMax(mutator.getInputFileSizeMax());
+    }
+    if (mutator.getInputFileCountMin() >= 0) {
+      logAttrMutation("MinInputFileCount", mutator.getInputFileCountMin());
+      setInputFileCountMin(mutator.getInputFileCountMin());
+    }    
+    if (mutator.getPurgeInterval() >= 0) {
+      logAttrMutation("OldFilesCleanupIntervalMins", mutator.getPurgeInterval());
+      setPurgeInterval(mutator.getPurgeInterval());
+    }
+    
+    if (mutator.getBatchSize() >= 0) {
+      logAttrMutation("batchSizeMB", mutator.getWriteOnlyFileRolloverInterval());
+      setBatchSize(mutator.getBatchSize());
+    }
+    if (mutator.getBatchInterval() >= 0) {
+      logAttrMutation("batchTimeInterval", mutator.getWriteOnlyFileRolloverInterval());
+      setBatchInterval(mutator.getBatchInterval());
+    }
+  }
+
+  void logAttrMutation(String name, Object value) {
+    if (logger.isDebugEnabled()) {
+      logger.debug("{}Alter " + name + ":" + value, logPrefix);
+    }
+  }
+
+  @Override
+  public String getName() {
+    return name;
+  }
+  @Override
+  public HDFSStoreFactory setName(String name) {
+    this.name = name;
+    return this;
+  }
+
+  @Override
+  public String getNameNodeURL() {
+    return namenodeURL;
+  }
+  @Override
+  public HDFSStoreFactory setNameNodeURL(String namenodeURL) {
+    this.namenodeURL = namenodeURL;
+    return this;
+  }
+
+  @Override
+  public String getHomeDir() {
+    return homeDir;
+  }
+  @Override
+  public HDFSStoreFactory setHomeDir(String homeDir) {
+    this.homeDir = homeDir;
+    return this;
+  }
+
+  @Override
+  public String getHDFSClientConfigFile() {
+    return clientConfigFile;
+  }
+  @Override
+  public HDFSStoreFactory setHDFSClientConfigFile(String clientConfigFile) {
+    this.clientConfigFile = clientConfigFile;
+    return this;
+  }
+  
+  @Override
+  public HDFSStoreFactory setBlockCacheSize(float percentage) {
+    if(percentage < 0 || percentage > 100) {
+      throw new IllegalArgumentException("Block cache size must be between 0 and 100, inclusive");
+    }
+    this.blockCacheSize  = percentage;
+    return this;
+  }
+  
+  @Override
+  public float getBlockCacheSize() {
+    return blockCacheSize;
+  }
+  
+  @Override
+  public HDFSStoreFactory setWriteOnlyFileRolloverSize(int maxFileSize) {
+    assertIsPositive(CacheXml.HDFS_WRITE_ONLY_FILE_ROLLOVER_INTERVAL, maxFileSize);
+    this.maxFileSize = maxFileSize;
+    return this;
+  }
+  @Override
+  public int getWriteOnlyFileRolloverSize() {
+    return maxFileSize;
+  }
+
+  @Override
+  public HDFSStoreFactory setWriteOnlyFileRolloverInterval(int count) {
+    assertIsPositive(CacheXml.HDFS_TIME_FOR_FILE_ROLLOVER, count);
+    this.fileRolloverInterval = count;
+    return this;
+  }
+  @Override
+  public int getWriteOnlyFileRolloverInterval() {
+    return fileRolloverInterval;
+  }
+  
+  @Override
+  public boolean getMinorCompaction() {
+    return isAutoCompact;
+  }
+  @Override
+  public HDFSStoreFactory setMinorCompaction(boolean auto) {
+    this.isAutoCompact = auto;
+    return this;
+  }
+
+  @Override
+  public HDFSStoreFactory setMinorCompactionThreads(int count) {
+    assertIsPositive(CacheXml.HDFS_MINOR_COMPACTION_THREADS, count);
+    this.maxConcurrency = count;
+    return this;
+  }
+  @Override
+  public int getMinorCompactionThreads() {
+    return maxConcurrency;
+  }
+
+  @Override
+  public HDFSStoreFactory setMajorCompaction(boolean auto) {
+    this.autoMajorCompact = auto;
+    return this;
+  }
+  @Override
+  public boolean getMajorCompaction() {
+    return autoMajorCompact;
+  }
+
+  @Override
+  public HDFSStoreFactory setMajorCompactionInterval(int count) {
+    HDFSStoreCreation.assertIsPositive(CacheXml.HDFS_MAJOR_COMPACTION_INTERVAL, count);
+    this.majorCompactionIntervalMins = count;
+    return this;
+  }
+  @Override
+  public int getMajorCompactionInterval() {
+    return majorCompactionIntervalMins;
+  }
+
+  @Override
+  public HDFSStoreFactory setMajorCompactionThreads(int count) {
+    HDFSStoreCreation.assertIsPositive(CacheXml.HDFS_MAJOR_COMPACTION_THREADS, count);
+    this.majorCompactionConcurrency = count;
+    return this;
+  }
+  @Override
+  public int getMajorCompactionThreads() {
+    return majorCompactionConcurrency;
+  }
+  
+  @Override
+  public HDFSStoreFactory setInputFileSizeMax(int size) {
+    HDFSStoreCreation.assertIsPositive("HDFS_COMPACTION_MAX_INPUT_FILE_SIZE_MB", size);
+    this.maxInputFileSizeMB = size;
+    return this;
+  }
+  @Override
+  public int getInputFileSizeMax() {
+    return maxInputFileSizeMB;
+  }
+
+  @Override
+  public HDFSStoreFactory setInputFileCountMin(int count) {
+    HDFSStoreCreation.assertIsPositive("HDFS_COMPACTION_MIN_INPUT_FILE_COUNT", count);
+    this.minInputFileCount = count;
+    return this;
+  }
+  @Override
+  public int getInputFileCountMin() {
+    return minInputFileCount;
+  }
+
+  @Override
+  public HDFSStoreFactory setInputFileCountMax(int count) {
+    HDFSStoreCreation.assertIsPositive("HDFS_COMPACTION_MAX_INPUT_FILE_COUNT", count);
+    this.maxInputFileCount = count;
+    return this;
+  }
+  @Override
+  public int getInputFileCountMax() {
+    return maxInputFileCount;
+  }
+
+  @Override
+  public int getPurgeInterval() {
+    return oldFileCleanupIntervalMins;
+  }
+  @Override
+  public HDFSStoreFactory setPurgeInterval(int interval) {
+    assertIsPositive(CacheXml.HDFS_PURGE_INTERVAL, interval);
+    this.oldFileCleanupIntervalMins = interval;
+    return this;
+  }
+  
+  protected void validate() {
+    if (minInputFileCount > maxInputFileCount) {
+      throw new IllegalArgumentException(
+          LocalizedStrings.HOPLOG_MIN_IS_MORE_THAN_MAX
+          .toLocalizedString(new Object[] {
+              "HDFS_COMPACTION_MIN_INPUT_FILE_COUNT",
+              minInputFileCount,
+              "HDFS_COMPACTION_MAX_INPUT_FILE_COUNT",
+              maxInputFileCount }));
+    }
+  }
+
+  /**
+   * This method should not be called on this class.
+   * @see HDFSStoreFactory#create(String)
+   */
+  @Override
+  public HDFSStore create(String name) throws GemFireConfigException,
+      StoreExistsException {
+    throw new UnsupportedOperationException();
+  }
+
+  /**
+   * This method should not be called on this class.
+   * @see HDFSStoreImpl#destroy()
+   */
+  @Override
+  public void destroy() {
+    throw new UnsupportedOperationException();
+  }
+  
+  public static void assertIsPositive(String name, int count) {
+    if (count < 1) {
+      throw new IllegalArgumentException(
+          LocalizedStrings.DiskWriteAttributesImpl_0_HAS_TO_BE_POSITIVE_NUMBER_AND_THE_VALUE_GIVEN_1_IS_NOT_ACCEPTABLE
+              .toLocalizedString(new Object[] { name, count }));
+    }
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder builder = new StringBuilder();
+    builder.append("HDFSStoreConfigHolder@");
+    builder.append(System.identityHashCode(this));
+    builder.append(" [");
+    appendStrProp(builder, name, "name");
+    appendStrProp(builder, namenodeURL, "namenodeURL");
+    appendStrProp(builder, homeDir, "homeDir");
+    appendStrProp(builder, clientConfigFile, "clientConfigFile");
+    if (blockCacheSize > -1) {
+      builder.append("blockCacheSize=");
+      builder.append(blockCacheSize);
+      builder.append(", ");
+    }
+    appendIntProp(builder, maxFileSize, "maxFileSize");
+    appendIntProp(builder, fileRolloverInterval, "fileRolloverInterval");
+    appendBoolProp(builder, isAutoCompact, "isAutoCompact");
+    appendBoolProp(builder, autoMajorCompact, "autoMajorCompact");
+    appendIntProp(builder, maxConcurrency, "maxConcurrency");
+    appendIntProp(builder, majorCompactionConcurrency, "majorCompactionConcurrency");
+    appendIntProp(builder, majorCompactionIntervalMins, "majorCompactionIntervalMins");
+    appendIntProp(builder, maxInputFileSizeMB, "maxInputFileSizeMB");
+    appendIntProp(builder, maxInputFileCount, "maxInputFileCount");
+    appendIntProp(builder, minInputFileCount, "minInputFileCount");
+    appendIntProp(builder, oldFileCleanupIntervalMins, "oldFileCleanupIntervalMins");
+    appendIntProp(builder, batchSize, "batchSize");
+    appendIntProp(builder, batchIntervalMillis, "batchInterval");
+    appendIntProp(builder, maximumQueueMemory, "maximumQueueMemory");
+    appendIntProp(builder, dispatcherThreads, "dispatcherThreads");
+    appendBoolProp(builder, isPersistenceEnabled, "isPersistenceEnabled");
+    appendStrProp(builder, diskStoreName, "diskStoreName");
+    appendBoolProp(builder, diskSynchronous, "diskSynchronous");
+
+    builder.append("]");
+    return builder.toString();
+  }
+
+  private void appendStrProp(StringBuilder builder, String value, String name) {
+    if (value != null) {
+      builder.append(name + "=");
+      builder.append(value);
+      builder.append(", ");
+    }
+  }
+
+  private void appendIntProp(StringBuilder builder, int value, String name) {
+    if (value > -1) {
+      builder.append(name + "=");
+      builder.append(value);
+      builder.append(", ");
+    }
+  }
+  
+  private void appendBoolProp(StringBuilder builder, boolean value, String name) {
+    builder.append(name + "=");
+    builder.append(value);
+    builder.append(", ");
+  }
+
+  @Override
+  public HDFSStoreMutator createHdfsStoreMutator() {
+    // as part of alter execution, hdfs store will replace the config holder
+    // completely. Hence mutator at the config holder is not needed
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public HDFSStore alter(HDFSStoreMutator mutator) {
+    // as part of alter execution, hdfs store will replace the config holder
+    // completely. Hence mutator at the config holder is not needed
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public String getDiskStoreName() {
+    return this.diskStoreName;
+  }
+  @Override
+  public HDFSStoreFactory setDiskStoreName(String name) {
+    this.diskStoreName = name;
+    return this;
+  }
+
+  @Override
+  public int getBatchInterval() {
+    return this.batchIntervalMillis;
+  }
+  @Override
+  public HDFSStoreFactory setBatchInterval(int intervalMillis){
+    this.batchIntervalMillis = intervalMillis;
+    return this;
+  }
+  
+  @Override
+  public boolean getBufferPersistent() {
+    return isPersistenceEnabled;
+  }
+  @Override
+  public HDFSStoreFactory setBufferPersistent(boolean isPersistent) {
+    this.isPersistenceEnabled = isPersistent;
+    return this;
+  }
+
+  @Override
+  public int getDispatcherThreads() {
+    return dispatcherThreads;
+  }
+  @Override
+  public HDFSStoreFactory setDispatcherThreads(int dispatcherThreads) {
+    this.dispatcherThreads = dispatcherThreads;
+    return this;
+  }
+  
+  @Override
+  public int getMaxMemory() {
+    return this.maximumQueueMemory;
+  }
+  @Override
+  public HDFSStoreFactory setMaxMemory(int memory) {
+    this.maximumQueueMemory = memory;
+    return this;
+  }
+  
+  @Override
+  public int getBatchSize() {
+    return this.batchSize;
+  }
+  @Override
+  public HDFSStoreFactory setBatchSize(int size){
+    this.batchSize = size;
+    return this;
+  }
+  
+  @Override
+  public boolean getSynchronousDiskWrite() {
+    return this.diskSynchronous;
+  }
+  @Override
+  public HDFSStoreFactory setSynchronousDiskWrite(boolean isSynchronous) {
+    this.diskSynchronous = isSynchronous;
+    return this;
+  }
+}
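
The class comment above notes that the entire config holder can be swapped out to support alter. A minimal sketch of that pattern follows, with hypothetical simplified types: Config and Mutator stand in for HDFSStoreConfigHolder and HDFSStoreMutator, and the single batchSizeMB attribute with its -1 "unset" convention mirrors how copyFrom() skips negative values.

    public class ReplaceableConfigExample {
      /** Hypothetical immutable snapshot of store attributes. */
      static class Config {
        final int batchSizeMB;
        Config(int batchSizeMB) { this.batchSizeMB = batchSizeMB; }
      }

      /** Hypothetical mutator; -1 means "leave unchanged", matching copyFrom(). */
      static class Mutator {
        int batchSizeMB = -1;
      }

      // Readers always see one complete, consistent snapshot through this volatile reference.
      private volatile Config config = new Config(32);

      /** Copy the current config, apply only the requested changes, then swap the holder. */
      public synchronized Config alter(Mutator m) {
        int newBatchSize = (m.batchSizeMB >= 0) ? m.batchSizeMB : config.batchSizeMB;
        Config old = config;
        config = new Config(newBatchSize);  // atomic publication of the new configuration
        return old;
      }

      public int getBatchSizeMB() {
        return config.batchSizeMB;
      }
    }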

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreCreation.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreCreation.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreCreation.java
new file mode 100644
index 0000000..9ecc5e3
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreCreation.java
@@ -0,0 +1,198 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import com.gemstone.gemfire.GemFireConfigException;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
+import com.gemstone.gemfire.cache.hdfs.StoreExistsException;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+
+/**
+ * Factory-side holder of HDFS store configuration. All attribute calls are
+ * delegated to an {@link HDFSStoreConfigHolder}; {@code create} is only
+ * implemented by the {@link HDFSStoreFactoryImpl} subclass.
+ */
+public class HDFSStoreCreation implements HDFSStoreFactory {
+  protected HDFSStoreConfigHolder configHolder;
+  
+  public HDFSStoreCreation() {
+    this(null);
+  }
+
+  /**
+   * Copy constructor for HDFSStoreCreation
+   * @param config configuration source for creating this instance 
+   */
+  public HDFSStoreCreation(HDFSStoreCreation config) {
+    this.configHolder = new HDFSStoreConfigHolder(config == null ? null : config.configHolder);
+  }
+
+  @Override
+  public HDFSStoreFactory setName(String name) {
+    configHolder.setName(name);
+    return this;
+  }
+
+  @Override
+  public HDFSStoreFactory setNameNodeURL(String namenodeURL) {
+    configHolder.setNameNodeURL(namenodeURL);
+    return this;
+  }
+
+  @Override
+  public HDFSStoreFactory setHomeDir(String homeDir) {
+    configHolder.setHomeDir(homeDir);
+    return this;
+  }
+
+  @Override
+  public HDFSStoreFactory setHDFSClientConfigFile(String clientConfigFile) {
+    configHolder.setHDFSClientConfigFile(clientConfigFile);
+    return this;
+  }
+  
+  @Override
+  public HDFSStoreFactory setBlockCacheSize(float percentage) {
+    configHolder.setBlockCacheSize(percentage);
+    return this;
+  }
+  
+  @Override
+  public HDFSStoreFactory setWriteOnlyFileRolloverSize(int maxFileSize) {
+    configHolder.setWriteOnlyFileRolloverSize(maxFileSize);
+    return this;
+  }
+
+  @Override
+  public HDFSStoreFactory setWriteOnlyFileRolloverInterval(int count) {
+    configHolder.setWriteOnlyFileRolloverInterval(count);
+    return this;
+  }
+
+  @Override
+  public HDFSStoreFactory setMinorCompaction(boolean auto) {
+    configHolder.setMinorCompaction(auto);
+    return this;
+  }
+  
+  @Override
+  public HDFSStoreFactory setMinorCompactionThreads(int count) {
+    configHolder.setMinorCompactionThreads(count);
+    return this;
+  }
+
+  @Override
+  public HDFSStoreFactory setMajorCompaction(boolean auto) {
+    configHolder.setMajorCompaction(auto);
+    return this;
+  }
+
+  @Override
+  public HDFSStoreFactory setMajorCompactionInterval(int count) {
+    configHolder.setMajorCompactionInterval(count);
+    return this;
+  }
+
+  @Override
+  public HDFSStoreFactory setMajorCompactionThreads(int count) {
+    configHolder.setMajorCompactionThreads(count);
+    return this;
+  }
+
+  @Override
+  public HDFSStoreFactory setInputFileSizeMax(int size) {
+    configHolder.setInputFileSizeMax(size);
+    return this;
+  }
+
+  @Override
+  public HDFSStoreFactory setInputFileCountMin(int count) {
+    configHolder.setInputFileCountMin(count);
+    return this;
+  }
+
+  @Override
+  public HDFSStoreFactory setInputFileCountMax(int count) {
+    configHolder.setInputFileCountMax(count);
+    return this;
+  }
+
+  @Override
+  public HDFSStoreFactory setPurgeInterval(int interval) {
+    configHolder.setPurgeInterval(interval);
+    return this;
+  }
+
+  @Override
+  public HDFSStoreFactory setDiskStoreName(String name) {
+    configHolder.setDiskStoreName(name);
+    return this;
+  }
+
+  @Override
+  public HDFSStoreFactory setMaxMemory(int memory) {
+    configHolder.setMaxMemory(memory);
+    return this;
+  }
+
+  @Override
+  public HDFSStoreFactory setBatchInterval(int intervalMillis) {
+    configHolder.setBatchInterval(intervalMillis);
+    return this;
+  }
+
+  @Override
+  public HDFSStoreFactory setBatchSize(int size) {
+    configHolder.setBatchSize(size);
+    return this;
+  }
+
+  @Override
+  public HDFSStoreFactory setBufferPersistent(boolean isPersistent) {
+    configHolder.setBufferPersistent(isPersistent);
+    return this;
+  }
+
+  @Override
+  public HDFSStoreFactory setSynchronousDiskWrite(boolean isSynchronous) {
+    configHolder.setSynchronousDiskWrite(isSynchronous);
+    return this;
+  }
+
+  @Override
+  public HDFSStoreFactory setDispatcherThreads(int dispatcherThreads) {
+    configHolder.setDispatcherThreads(dispatcherThreads);
+    return this;
+  }
+  
+  /**
+   * This method should not be called on this class.
+   * @see HDFSStoreFactory#create(String)
+   */
+  @Override
+  public HDFSStore create(String name) throws GemFireConfigException,
+      StoreExistsException {
+    throw new UnsupportedOperationException();
+  }
+
+  public static void assertIsPositive(String name, int count) {
+    if (count < 1) {
+      throw new IllegalArgumentException(
+          LocalizedStrings.DiskWriteAttributesImpl_0_HAS_TO_BE_POSITIVE_NUMBER_AND_THE_VALUE_GIVEN_1_IS_NOT_ACCEPTABLE
+              .toLocalizedString(new Object[] { name, count }));
+    }
+  }
+}
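
Because every setter on HDFSStoreCreation returns the factory, configuration reads naturally as a fluent chain. A usage sketch against the APIs added in this patch; the NameNode URL, directory, store name and attribute values are placeholders, and create() only does real work on the HDFSStoreFactoryImpl subclass shown in the next file.

    import com.gemstone.gemfire.GemFireConfigException;
    import com.gemstone.gemfire.cache.Cache;
    import com.gemstone.gemfire.cache.hdfs.HDFSStore;
    import com.gemstone.gemfire.cache.hdfs.StoreExistsException;
    import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl;

    public class HdfsStoreFactoryUsage {
      public static HDFSStore createStore(Cache cache)
          throws GemFireConfigException, StoreExistsException {
        HDFSStoreFactoryImpl factory = new HDFSStoreFactoryImpl(cache);
        factory.setNameNodeURL("hdfs://localhost:8020")  // placeholder NameNode URL
               .setHomeDir("gemfire-data")               // placeholder directory under the NameNode
               .setBatchSize(32)                         // MB flushed per batch
               .setBufferPersistent(true)
               .setDiskStoreName("hdfsQueueDiskStore");  // placeholder disk store name
        return factory.create("hdfsStore1");             // registers the store with the cache
      }
    }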

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreFactoryImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreFactoryImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreFactoryImpl.java
new file mode 100644
index 0000000..749f01c
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreFactoryImpl.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import com.gemstone.gemfire.GemFireConfigException;
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.StoreExistsException;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+
+
+/**
+ * Implementation of HDFSStoreFactory 
+ * 
+ */
+public class HDFSStoreFactoryImpl extends HDFSStoreCreation {
+  public static final String DEFAULT_ASYNC_QUEUE_ID_FOR_HDFS= "HDFS_QUEUE";
+  
+  private Cache cache;
+  
+  public HDFSStoreFactoryImpl(Cache cache) {
+    this(cache, null);
+  }
+  
+  public HDFSStoreFactoryImpl(Cache cache, HDFSStoreCreation config) {
+    super(config);
+    this.cache = cache;
+  }
+
+  @Override
+  public HDFSStore create(String name) {
+    if (name == null) {
+      throw new GemFireConfigException("HDFS store name not provided");
+    }
+    
+    this.configHolder.validate();
+    
+    HDFSStore result = null;
+    synchronized (this) {
+      if (this.cache instanceof GemFireCacheImpl) {
+        GemFireCacheImpl gfc = (GemFireCacheImpl) this.cache;
+        if (gfc.findHDFSStore(name) != null) {
+          throw new StoreExistsException(name);
+        }
+        
+        HDFSStoreImpl hsi = new HDFSStoreImpl(name, this.configHolder);
+        gfc.addHDFSStore(hsi);
+        result = hsi;
+      }
+    }
+    return result;
+  }
+
+  public static final String getEventQueueName(String regionPath) {
+    return HDFSStoreFactoryImpl.DEFAULT_ASYNC_QUEUE_ID_FOR_HDFS + "_"
+        + regionPath.replace('/', '_');
+  }
+
+  public HDFSStore getConfigView() {
+    return configHolder;
+  }
+}
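
getEventQueueName above derives the async event queue id by prefixing the shared HDFS queue id and flattening the region path separators, so each region gets a distinct but predictable name. A small illustration (the region paths are arbitrary examples):

    public class EventQueueNameDemo {
      // Mirrors HDFSStoreFactoryImpl.getEventQueueName: "HDFS_QUEUE" + '_' + path with '/' replaced by '_'
      static String eventQueueName(String regionPath) {
        return "HDFS_QUEUE" + "_" + regionPath.replace('/', '_');
      }

      public static void main(String[] args) {
        System.out.println(eventQueueName("/customers"));    // prints HDFS_QUEUE__customers
        System.out.println(eventQueueName("/orders/2016"));  // prints HDFS_QUEUE__orders_2016
      }
    }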

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreImpl.java
new file mode 100644
index 0000000..b5d56b6
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreImpl.java
@@ -0,0 +1,638 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URI;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.ConnectTimeoutException;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.logging.log4j.Logger;
+
+import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSCompactionManager;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSStoreDirector;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog.HoplogWriter;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.HoplogUtil;
+import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
+import com.gemstone.gemfire.internal.cache.control.HeapMemoryMonitor;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.HFileStoreStatistics;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
+import com.gemstone.gemfire.internal.util.SingletonCallable;
+import com.gemstone.gemfire.internal.util.SingletonValue;
+import com.gemstone.gemfire.internal.util.SingletonValue.SingletonBuilder;
+
+/**
+ * Represents an HDFS-based persistent store for region data.
+ */
+public class HDFSStoreImpl implements HDFSStore {
+  
+  private volatile HDFSStoreConfigHolder configHolder; 
+  
+  private final SingletonValue<FileSystem> fs;
+
+  /**
+   * Ensures that only one thread creates the writer at a time. This prevents
+   * the dispatcher threads from cascading on the Connection lock in the DFS
+   * client; see bug 51195.
+   */
+  private final SingletonCallable<HoplogWriter> singletonWriter = new SingletonCallable<HoplogWriter>();
+
+  private final HFileStoreStatistics stats;
+  private final BlockCache blockCache;
+
+  private static HashSet<String> secureNameNodes = new HashSet<String>();
+  
+  private final boolean PERFORM_SECURE_HDFS_CHECK = Boolean.getBoolean(HoplogConfig.PERFORM_SECURE_HDFS_CHECK_PROP);
+  private static final Logger logger = LogService.getLogger();
+  protected final String logPrefix;
+  
+  static {
+    HdfsConfiguration.init();
+  }
+  
+  public HDFSStoreImpl(String name, final HDFSStore config) {
+    this.configHolder = new HDFSStoreConfigHolder(config);
+    configHolder.setName(name);
+
+    this.logPrefix = "<" + "HdfsStore:" + name + "> ";
+
+    stats = new HFileStoreStatistics(InternalDistributedSystem.getAnyInstance(), "HDFSStoreStatistics", name);
+
+    final Configuration hconf = new Configuration();
+        
+    // Set the block cache size.
+    // Disable the static block cache. We keep our own cache on the HDFS Store
+    // hconf.setFloat("hfile.block.cache.size", 0f);
+    if (this.getBlockCacheSize() != 0) {
+      long cacheSize = (long) (HeapMemoryMonitor.getTenuredPoolMaxMemory() * this.getBlockCacheSize() / 100);
+
+      // TODO use an off heap block cache if we're using off heap memory?
+      // See CacheConfig.instantiateBlockCache.
+      // According to Anthony, the off heap block cache is still
+      // experimental. Our own off heap cache might be a better bet.
+//      this.blockCache = new LruBlockCache(cacheSize,
+//          StoreFile.DEFAULT_BLOCKSIZE_SMALL, hconf, HFileSortedOplogFactory.convertStatistics(stats));
+      this.blockCache = new LruBlockCache(cacheSize, StoreFile.DEFAULT_BLOCKSIZE_SMALL, hconf);
+    } else {
+      this.blockCache = null;
+    }
+    
+    final String clientFile = config.getHDFSClientConfigFile();
+    fs = new SingletonValue<FileSystem>(new SingletonBuilder<FileSystem>() {
+      @Override
+      public FileSystem create() throws IOException {
+        return createFileSystem(hconf, clientFile, false);
+      }
+
+      @Override
+      public void postCreate() {
+      }
+      
+      @Override
+      public void createInProgress() {
+      }
+    });
+    
+    FileSystem fileSystem = null;
+    try {
+      fileSystem = fs.get();
+    } catch (Throwable ex) {
+      throw new HDFSIOException(ex.getMessage(),ex);
+    }    
+    //HDFSCompactionConfig has already been initialized
+    long cleanUpIntervalMillis = getPurgeInterval() * 60L * 1000;
+    Path cleanUpIntervalPath = new Path(getHomeDir(), HoplogConfig.CLEAN_UP_INTERVAL_FILE_NAME);
+    HoplogUtil.exposeCleanupIntervalMillis(fileSystem, cleanUpIntervalPath, cleanUpIntervalMillis);
+  }
+  
+  /**
+   * Creates a new file system every time.  
+   */
+  public FileSystem createFileSystem() {
+    Configuration hconf = new Configuration();
+    try {
+      return createFileSystem(hconf, this.getHDFSClientConfigFile(), true);
+    } catch (Throwable ex) {
+      throw new HDFSIOException(ex.getMessage(),ex);
+    }
+  }
+  
+  private FileSystem createFileSystem(Configuration hconf, String configFile, boolean forceNew) throws IOException {
+    FileSystem filesystem = null; 
+    
+      // load hdfs client config file if specified. The path is on local file
+      // system
+      if (configFile != null) {
+        if (logger.isDebugEnabled()) {
+          logger.debug("{}Adding resource config file to hdfs configuration:" + configFile, logPrefix);
+        }
+        hconf.addResource(new Path(configFile));
+        
+        if (! new File(configFile).exists()) {
+          logger.warn(LocalizedMessage.create(LocalizedStrings.HOPLOG_HDFS_CLIENT_CONFIG_FILE_ABSENT, configFile));
+        }
+      }
+      
+      // This setting disables the shutdown hook for the file system object. The
+      // shutdown hook may close the FS object before the cache or store does,
+      // causing unpredictable behavior. The setting exists for GFXD-like server
+      // use cases where FS close is managed by the server. It is not supported
+      // by old versions of Hadoop; see HADOOP-4829.
+      hconf.setBoolean("fs.automatic.close", false);
+      
+      // Hadoop has a configuration parameter, io.serializations, that lists the
+      // serialization classes used for obtaining serializers and deserializers.
+      // By default it includes Avro classes. When a sequence file is created,
+      // SerializationFactory.getSerializer(keyclass) reflectively instantiates every
+      // class listed in io.serializations; if the Avro classes are not available this
+      // throws an exception. So before creating a sequence file, override
+      // io.serializations to list only the classes we actually need.
+      hconf.setStrings("io.serializations",
+          new String[]{"org.apache.hadoop.io.serializer.WritableSerialization"});
+      // create writer
+
+      SchemaMetrics.configureGlobally(hconf);
+      
+      String nameNodeURL = null;
+      if ((nameNodeURL = getNameNodeURL()) == null) {
+          nameNodeURL = hconf.get("fs.default.name");
+      }
+      
+      URI namenodeURI = URI.create(nameNodeURL);
+    
+    //if (! GemFireCacheImpl.getExisting().isHadoopGfxdLonerMode()) {
+      String authType = hconf.get("hadoop.security.authentication");
+      
+      //The following code handles Gemfire XD with secure HDFS
+      //A static set is used to cache all known secure HDFS NameNode urls.
+      UserGroupInformation.setConfiguration(hconf);
+
+      // Compare the authentication method ignoring case to keep GFXD compliant with future Hadoop versions.
+      // At least version 2.0.2 starts complaining if the string "kerberos" is not all lower case,
+      // although the current version of Hadoop appears to accept the authType in any case.
+      if (authType.equalsIgnoreCase("kerberos")) {
+        
+        String principal = hconf.get(HoplogConfig.KERBEROS_PRINCIPAL);
+        String keyTab = hconf.get(HoplogConfig.KERBEROS_KEYTAB_FILE);
+       
+        if (!PERFORM_SECURE_HDFS_CHECK) {
+          if (logger.isDebugEnabled())
+            logger.debug("{}Ignore secure hdfs check", logPrefix);
+        } else {
+          if (!secureNameNodes.contains(nameNodeURL)) {
+            if (logger.isDebugEnabled())
+              logger.debug("{}Executing secure hdfs check", logPrefix);
+             try{
+              filesystem = FileSystem.newInstance(namenodeURI, hconf);
+              // If the listing below succeeds without an IOException, the NameNode is not
+              // secured, which conflicts with the secured client configuration.
+              filesystem.listFiles(new Path("/"),false);
+              throw new HDFSIOException("Gemfire XD HDFS client and HDFS cluster security levels do not match. The configured HDFS Namenode is not secured.");
+             } catch (IOException ex) {
+               secureNameNodes.add(nameNodeURL);
+             } finally {
+             //Close filesystem to avoid resource leak
+               if(filesystem != null) {
+                 closeFileSystemIgnoreError(filesystem);
+               }
+             }
+          }
+        }
+
+        // check to ensure the namenode principal is defined
+        String nameNodePrincipal = hconf.get("dfs.namenode.kerberos.principal");
+        if (nameNodePrincipal == null) {
+          throw new IOException(LocalizedStrings.GF_KERBEROS_NAMENODE_PRINCIPAL_UNDEF.toLocalizedString());
+        }
+        
+        // ok, the user specified a gfxd principal so we will try to login
+        if (principal != null) {
+          //If NameNode principal is the same as Gemfire XD principal, there is a 
+          //potential security hole
+          String regex = "[/@]";
+          if (nameNodePrincipal != null) {
+            String HDFSUser = nameNodePrincipal.split(regex)[0];
+            String GFXDUser = principal.split(regex)[0];
+            if (HDFSUser.equals(GFXDUser)) {
+              logger.warn(LocalizedMessage.create(LocalizedStrings.HDFS_USER_IS_SAME_AS_GF_USER, GFXDUser));
+            }
+          }
+          
+          // a keytab must exist if the user specifies a principal
+          if (keyTab == null) {
+            throw new IOException(LocalizedStrings.GF_KERBEROS_KEYTAB_UNDEF.toLocalizedString());
+          }
+          
+          // the keytab must exist as well
+          File f = new File(keyTab);
+          if (!f.exists()) {
+            throw new FileNotFoundException(LocalizedStrings.GF_KERBEROS_KEYTAB_FILE_ABSENT.toLocalizedString(f.getAbsolutePath()));
+          }
+
+          //Authenticate Gemfire XD principal to Kerberos KDC using Gemfire XD keytab file
+          String principalWithValidHost = SecurityUtil.getServerPrincipal(principal, "");
+          UserGroupInformation.loginUserFromKeytab(principalWithValidHost, keyTab);
+        } else {
+          logger.warn(LocalizedMessage.create(LocalizedStrings.GF_KERBEROS_PRINCIPAL_UNDEF));
+        }
+      }
+    //}
+
+    filesystem = getFileSystemFactory().create(namenodeURI, hconf, forceNew);
+    
+    if (logger.isDebugEnabled()) {
+      logger.debug("{}Initialized FileSystem linked to " + filesystem.getUri()
+          + " " + filesystem.hashCode(), logPrefix);
+    }
+    return filesystem;
+  }
+
+  public FileSystem getFileSystem() throws IOException {
+    return fs.get();
+  }
+  
+  public FileSystem getCachedFileSystem() {
+    return fs.getCachedValue();
+  }
+
+  public SingletonCallable<HoplogWriter> getSingletonWriter() {
+    return this.singletonWriter;
+  }
+
+  private final SingletonCallable<Boolean> fsExists = new SingletonCallable<Boolean>();
+
+  public boolean checkFileSystemExists() throws IOException {
+    try {
+      return fsExists.runSerially(new Callable<Boolean>() {
+        @Override
+        public Boolean call() throws Exception {
+          FileSystem fileSystem = getCachedFileSystem();
+          if (fileSystem == null) {
+            return false;
+          }
+          return fileSystem.exists(new Path("/"));
+        }
+      });
+    } catch (Exception e) {
+      if (e instanceof IOException) {
+        throw (IOException)e;
+      }
+      throw new IOException(e);
+    }
+  }
+
+  /**
+   * This method executes a query on namenode. If the query succeeds, FS
+   * instance is healthy. If it fails, the old instance is closed and a new
+   * instance is created.
+   */
+  public void checkAndClearFileSystem() {
+    FileSystem fileSystem = getCachedFileSystem();
+    
+    if (fileSystem != null) {
+      if (logger.isDebugEnabled()) {
+        logger.debug("{}Checking file system at " + fileSystem.getUri(), logPrefix);
+      }
+      try {
+        checkFileSystemExists();
+        if (logger.isDebugEnabled()) {
+          logger.debug("{}FS client is ok: " + fileSystem.getUri() + " "
+              + fileSystem.hashCode(), logPrefix);
+        }
+        return;
+      } catch (ConnectTimeoutException e) {
+        if (logger.isDebugEnabled()) {
+          logger.debug("{}Hdfs unreachable, FS client is ok: "
+              + fileSystem.getUri() + " " + fileSystem.hashCode(), logPrefix);
+        }
+        return;
+      } catch (IOException e) {
+        logger.debug("IOError in filesystem checkAndClear ", e);
+        
+        // The file system is closed or NN is not reachable. It is safest to
+        // create a new FS instance. If the NN continues to remain unavailable,
+        // all subsequent read/write request will cause HDFSIOException. This is
+        // similar to the way hbase manages failures. This has a drawback
+        // though. A network blip will result in all connections to be
+        // recreated. However trying to preserve the connections and waiting for
+        // FS to auto-recover is not deterministic.
+        if (e instanceof RemoteException) {
+          e = ((RemoteException) e).unwrapRemoteException();
+        }
+
+        logger.warn(LocalizedMessage.create(LocalizedStrings.HOPLOG_HDFS_UNREACHABLE,
+            fileSystem.getUri()), e);
+      }
+
+      // compare and clear FS container. The fs container needs to be reusable
+      boolean result = fs.clear(fileSystem, true);
+      if (!result) {
+        // the FS instance changed after this call was initiated. Check again
+        logger.debug("{}Failed to clear FS ! I am inconsistent so retrying ..", logPrefix);
+        checkAndClearFileSystem();
+      } else {
+        closeFileSystemIgnoreError(fileSystem);
+      }      
+    }
+  }
+
+  private void closeFileSystemIgnoreError(FileSystem fileSystem) {
+    if (fileSystem == null) {
+      logger.debug("{}Trying to close null file system", logPrefix);
+      return;
+    }
+
+    try {
+      if (logger.isDebugEnabled()) {
+        logger.debug("{}Closing file system at " + fileSystem.getUri() + " "
+            + fileSystem.hashCode(), logPrefix);
+      }
+      fileSystem.close();
+    } catch (Exception e) {
+      if (logger.isDebugEnabled()) {
+        logger.debug("Failed to close file system at " + fileSystem.getUri()
+            + " " + fileSystem.hashCode(), e);
+      }
+    }
+  }
+
+  public HFileStoreStatistics getStats() {
+    return stats;
+  }
+  
+  public BlockCache getBlockCache() {
+    return blockCache;
+  }
+
+  public void close() {
+    logger.debug("{}Closing file system: " + getName(), logPrefix);
+    stats.close();
+    blockCache.shutdown();
+    //Might want to clear the block cache, but it should be dereferenced.
+    
+    // Release the DDL hoplog organizer for this store and shut down the
+    // compaction threads. Both hold references to the GemFireCacheImpl
+    // instance. Any error while releasing these resources is not critical
+    // and can be ignored.
+    try {
+      HDFSCompactionManager manager = HDFSCompactionManager.getInstance(this);
+      if (manager != null) {
+        manager.reset();
+      }
+    } catch (Exception e) {
+      logger.info(e);
+    }
+    
+    // once this store is closed, this store should not be used again
+    FileSystem fileSystem = fs.clear(false);
+    if (fileSystem != null) {
+      closeFileSystemIgnoreError(fileSystem);
+    }    
+  }
+  
+  /**
+   * Test hook that removes all of the contents of this HDFS store's folder
+   * from HDFS.
+   * @throws IOException 
+   */
+  public void clearFolder() throws IOException {
+    getFileSystem().delete(new Path(getHomeDir()), true);
+  }
+  
+  @Override
+  public void destroy() {
+    Collection<String> regions = HDFSRegionDirector.getInstance().getRegionsInStore(this);
+    if(!regions.isEmpty()) {
+      throw new IllegalStateException("Cannot destroy a HDFS store that still contains regions: " + regions); 
+    }
+    close();
+    HDFSStoreDirector.getInstance().removeHDFSStore(this.getName());
+  }
+
+  @Override
+  public synchronized HDFSStore alter(HDFSStoreMutator mutator) {
+    if (logger.isDebugEnabled()) {
+      logger.debug("{}Altering hdfsStore " + this, logPrefix);
+      logger.debug("{}Mutator " + mutator, logPrefix);
+    }
+    HDFSStoreConfigHolder newHolder = new HDFSStoreConfigHolder(configHolder);
+    newHolder.copyFrom(mutator);
+    newHolder.validate();
+    HDFSStore oldStore = configHolder;
+    configHolder = newHolder;
+    if (logger.isDebugEnabled()) {
+      logger.debug("{}Resuult of Alter " + this, logPrefix);
+    }
+    return oldStore;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder builder = new StringBuilder();
+    builder.append("HDFSStoreImpl [");
+    if (configHolder != null) {
+      builder.append("configHolder=");
+      builder.append(configHolder);
+    }
+    builder.append("]");
+    return builder.toString();
+  }
+
+  @Override
+  public String getName() {
+    return configHolder.getName();
+  }
+
+  @Override
+  public String getNameNodeURL() {
+    return configHolder.getNameNodeURL();
+  }
+
+  @Override
+  public String getHomeDir() {
+    return configHolder.getHomeDir();
+  }
+
+  @Override
+  public String getHDFSClientConfigFile() {
+    return configHolder.getHDFSClientConfigFile();
+  }
+
+  @Override
+  public float getBlockCacheSize() {
+    return configHolder.getBlockCacheSize();
+  }
+
+  @Override
+  public int getWriteOnlyFileRolloverSize() {
+    return configHolder.getWriteOnlyFileRolloverSize();
+  }
+
+  @Override
+  public int getWriteOnlyFileRolloverInterval() {
+    return configHolder.getWriteOnlyFileRolloverInterval();
+  }
+
+  @Override
+  public boolean getMinorCompaction() {
+    return configHolder.getMinorCompaction();
+  }
+
+  @Override
+  public int getMinorCompactionThreads() {
+    return configHolder.getMinorCompactionThreads();
+  }
+
+  @Override
+  public boolean getMajorCompaction() {
+    return configHolder.getMajorCompaction();
+  }
+
+  @Override
+  public int getMajorCompactionInterval() {
+    return configHolder.getMajorCompactionInterval();
+  }
+
+  @Override
+  public int getMajorCompactionThreads() {
+    return configHolder.getMajorCompactionThreads();
+  }
+
+
+  @Override
+  public int getInputFileSizeMax() {
+    return configHolder.getInputFileSizeMax();
+  }
+
+  @Override
+  public int getInputFileCountMin() {
+    return configHolder.getInputFileCountMin();
+  }
+
+  @Override
+  public int getInputFileCountMax() {
+    return configHolder.getInputFileCountMax();
+  }
+
+  @Override
+  public int getPurgeInterval() {
+    return configHolder.getPurgeInterval();
+  }
+
+  @Override
+  public String getDiskStoreName() {
+    return configHolder.getDiskStoreName();
+  }
+
+  @Override
+  public int getMaxMemory() {
+    return configHolder.getMaxMemory();
+  }
+
+  @Override
+  public int getBatchSize() {
+    return configHolder.getBatchSize();
+  }
+
+  @Override
+  public int getBatchInterval() {
+    return configHolder.getBatchInterval();
+  }
+
+  @Override
+  public boolean getBufferPersistent() {
+    return configHolder.getBufferPersistent();
+  }
+
+  @Override
+  public boolean getSynchronousDiskWrite() {
+    return configHolder.getSynchronousDiskWrite();
+  }
+
+  @Override
+  public int getDispatcherThreads() {
+    return configHolder.getDispatcherThreads();
+  }
+  
+  @Override
+  public HDFSStoreMutator createHdfsStoreMutator() {
+    return new HDFSStoreMutatorImpl();
+  }
+
+  public FileSystemFactory getFileSystemFactory() {
+    return new DistributedFileSystemFactory();
+  }
+
+  /*
+   * Factory to create HDFS file system instances
+   */
+  public interface FileSystemFactory {
+    public FileSystem create(URI namenode, Configuration conf, boolean forceNew) throws IOException;
+  }
+
+  /*
+   * File system factory implementation that creates file system instances
+   * connected to a distributed HDFS cluster
+   */
+  public class DistributedFileSystemFactory implements FileSystemFactory {
+    private final boolean ALLOW_TEST_FILE_SYSTEM = Boolean.getBoolean(HoplogConfig.ALLOW_LOCAL_HDFS_PROP);
+    private final boolean USE_FS_CACHE = Boolean.getBoolean(HoplogConfig.USE_FS_CACHE);
+
+    @Override
+    public FileSystem create(URI nn, Configuration conf, boolean forceNew) throws IOException {
+      FileSystem filesystem;
+
+      if (USE_FS_CACHE && !forceNew) {
+        filesystem = FileSystem.get(nn, conf);
+      } else {
+        filesystem = FileSystem.newInstance(nn, conf);
+      }
+
+      if (filesystem instanceof LocalFileSystem && !ALLOW_TEST_FILE_SYSTEM) {
+        closeFileSystemIgnoreError(filesystem);
+        throw new IllegalStateException(
+            LocalizedStrings.HOPLOG_TRYING_TO_CREATE_STANDALONE_SYSTEM.toLocalizedString(getNameNodeURL()));
+      }
+
+      return filesystem;
+    }
+  }
+}
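
Stripped of the Kerberos handling and the block cache, the Hadoop-facing part of createFileSystem above reduces to a few Configuration tweaks followed by a FileSystem lookup. A trimmed-down sketch of just that sequence, using only Hadoop APIs the class itself calls; the client config path and NameNode URL parameters are placeholders supplied by the caller.

    import java.io.IOException;
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class MinimalHdfsClient {
      public static FileSystem connect(String clientConfigFile, String nameNodeURL) throws IOException {
        Configuration conf = new Configuration();
        if (clientConfigFile != null) {
          // Layer the user-supplied HDFS client config file on top of the defaults.
          conf.addResource(new Path(clientConfigFile));
        }
        // Let the store, not a JVM shutdown hook, decide when the FS is closed.
        conf.setBoolean("fs.automatic.close", false);
        // Restrict serializers to WritableSerialization so missing Avro classes cannot break sequence files.
        conf.setStrings("io.serializations",
            new String[] { "org.apache.hadoop.io.serializer.WritableSerialization" });

        if (nameNodeURL == null) {
          nameNodeURL = conf.get("fs.default.name");  // fall back to the configured default FS
        }
        // newInstance() bypasses Hadoop's FileSystem cache, matching the forceNew path above.
        return FileSystem.newInstance(URI.create(nameNodeURL), conf);
      }
    }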

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreMutatorImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreMutatorImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreMutatorImpl.java
new file mode 100644
index 0000000..203e623
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSStoreMutatorImpl.java
@@ -0,0 +1,200 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+
+public class HDFSStoreMutatorImpl implements HDFSStoreMutator {
+  private HDFSStoreConfigHolder configHolder;
+  private Boolean autoCompact;
+  private Boolean autoMajorCompact;
+
+  public HDFSStoreMutatorImpl() {
+    configHolder = new HDFSStoreConfigHolder();
+    configHolder.resetDefaultValues();
+  }
+
+  public HDFSStoreMutatorImpl(HDFSStore store) {
+    configHolder = new HDFSStoreConfigHolder(store);
+  }
+  
+  public HDFSStoreMutator setWriteOnlyFileRolloverSize(int maxFileSize) {
+    configHolder.setWriteOnlyFileRolloverSize(maxFileSize);
+    return this;
+  }
+  @Override
+  public int getWriteOnlyFileRolloverSize() {
+    return configHolder.getWriteOnlyFileRolloverSize();
+  }
+
+  @Override
+  public HDFSStoreMutator setWriteOnlyFileRolloverInterval(int count) {
+    configHolder.setWriteOnlyFileRolloverInterval(count);
+    return this;
+  }
+  @Override
+  public int getWriteOnlyFileRolloverInterval() {
+    return configHolder.getWriteOnlyFileRolloverInterval();
+  }
+
+  @Override
+  public HDFSStoreMutator setMinorCompaction(boolean auto) {
+    autoCompact = Boolean.valueOf(auto);
+    configHolder.setMinorCompaction(auto);
+    return this;
+  }
+  @Override
+  public Boolean getMinorCompaction() {
+    return autoCompact;
+  }
+  
+  @Override
+  public HDFSStoreMutator setMinorCompactionThreads(int count) {
+    configHolder.setMinorCompactionThreads(count);
+    return this;
+  }
+  @Override
+  public int getMinorCompactionThreads() {
+    return configHolder.getMinorCompactionThreads();
+  }
+  
+  @Override
+  public HDFSStoreMutator setMajorCompaction(boolean auto) {
+    autoMajorCompact = Boolean.valueOf(auto);
+    configHolder.setMajorCompaction(auto);
+    return this;
+  }
+  @Override
+  public Boolean getMajorCompaction() {
+    return autoMajorCompact;
+  }
+
+  @Override
+  public HDFSStoreMutator setMajorCompactionInterval(int count) {
+    configHolder.setMajorCompactionInterval(count);
+    return this;
+  }
+  @Override
+  public int getMajorCompactionInterval() {
+    return configHolder.getMajorCompactionInterval();
+  }
+
+  @Override
+  public HDFSStoreMutator setMajorCompactionThreads(int count) {
+    configHolder.setMajorCompactionThreads(count);
+    return this;
+  }
+  @Override
+  public int getMajorCompactionThreads() {
+    return configHolder.getMajorCompactionThreads();
+  }
+
+  @Override
+  public HDFSStoreMutator setInputFileSizeMax(int size) {
+    configHolder.setInputFileSizeMax(size);
+    return this;
+  }
+  @Override
+  public int getInputFileSizeMax() {
+    return configHolder.getInputFileSizeMax();
+  }
+  
+  @Override
+  public HDFSStoreMutator setInputFileCountMin(int count) {
+    configHolder.setInputFileCountMin(count);
+    return this;
+  }
+  @Override
+  public int getInputFileCountMin() {
+    return configHolder.getInputFileCountMin();
+  }
+  
+  @Override
+  public HDFSStoreMutator setInputFileCountMax(int count) {
+    configHolder.setInputFileCountMax(count);
+    return this;
+  }
+  @Override
+  public int getInputFileCountMax() {
+    return configHolder.getInputFileCountMax();
+  }
+  
+  @Override
+  public HDFSStoreMutator setPurgeInterval(int interval) {
+    configHolder.setPurgeInterval(interval);
+    return this;
+  }
+  @Override
+  public int getPurgeInterval() {
+    return configHolder.getPurgeInterval();
+  }
+
+  @Override
+  public int getBatchSize() {
+    return configHolder.batchSize;
+  }
+  @Override
+  public HDFSStoreMutator setBatchSize(int size) {
+    configHolder.setBatchSize(size);
+    return this;
+  }
+
+  
+  @Override
+  public int getBatchInterval() {
+    return configHolder.batchIntervalMillis;
+  }
+  @Override
+  public HDFSStoreMutator setBatchInterval(int interval) {
+    configHolder.setBatchInterval(interval);
+    return this;
+  }
+    
+  public static void assertIsPositive(String name, int count) {
+    if (count < 1) {
+      throw new IllegalArgumentException(
+          LocalizedStrings.DiskWriteAttributesImpl_0_HAS_TO_BE_POSITIVE_NUMBER_AND_THE_VALUE_GIVEN_1_IS_NOT_ACCEPTABLE
+              .toLocalizedString(new Object[] { name, count }));
+    }
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder builder = new StringBuilder();
+    builder.append("HDFSStoreMutatorImpl [");
+    if (configHolder != null) {
+      builder.append("configHolder=");
+      builder.append(configHolder);
+      builder.append(", ");
+    }
+    if (autoCompact != null) {
+      builder.append("MinorCompaction=");
+      builder.append(autoCompact);
+      builder.append(", ");
+    }
+    if (getMajorCompaction() != null) {
+      builder.append("autoMajorCompaction=");
+      builder.append(getMajorCompaction());
+      builder.append(", ");
+    }
+    builder.append("]");
+    return builder.toString();
+  }
+}
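
A minimal usage sketch of the mutator above (not part of the patch): the attribute values are made up, and applying the mutator back to a store (for example through an alter call on the store) happens outside this class.

    // Start from reset defaults and change only a few compaction attributes.
    // Every setter returns the mutator, so calls can be chained.
    HDFSStoreMutator mutator = new HDFSStoreMutatorImpl()
        .setMinorCompactionThreads(4)      // hypothetical value
        .setMajorCompactionInterval(720)   // hypothetical value
        .setPurgeInterval(30);             // hypothetical value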

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSWriteOnlyStoreEventListener.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSWriteOnlyStoreEventListener.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSWriteOnlyStoreEventListener.java
new file mode 100644
index 0000000..0298523
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSWriteOnlyStoreEventListener.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import com.gemstone.gemfire.cache.CacheClosedException;
+import com.gemstone.gemfire.cache.asyncqueue.AsyncEvent;
+import com.gemstone.gemfire.cache.asyncqueue.AsyncEventListener;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer;
+import com.gemstone.gemfire.i18n.LogWriterI18n;
+import com.gemstone.gemfire.internal.cache.BucketRegion;
+import com.gemstone.gemfire.internal.cache.ForceReattemptException;
+import com.gemstone.gemfire.internal.cache.LocalRegion;
+import com.gemstone.gemfire.internal.cache.PartitionedRegion;
+import com.gemstone.gemfire.internal.cache.PrimaryBucketException;
+import com.gemstone.gemfire.internal.cache.execute.BucketMovedException;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+
+/**
+ * Listener that persists events to a write-only HDFS store
+ *
+ */
+public class HDFSWriteOnlyStoreEventListener implements
+    AsyncEventListener {
+
+  private final LogWriterI18n logger;
+  private volatile boolean senderStopped = false; 
+  private final FailureTracker failureTracker = new FailureTracker(10L, 60 * 1000L, 1.5f);
+  
+  
+  public HDFSWriteOnlyStoreEventListener(LogWriterI18n logger) {
+    this.logger = logger;
+  }
+  
+  @Override
+  public void close() {
+    senderStopped = true;
+  }
+
+  @Override
+  public boolean processEvents(List<AsyncEvent> events) {
+    if (Hoplog.NOP_WRITE) {
+      return true;
+    }
+
+    if (logger.fineEnabled())
+      logger.fine("HDFSWriteOnlyStoreEventListener: A total of " + events.size() + " events are sent from GemFire to persist on HDFS");
+    boolean success = false;
+    try {
+      failureTracker.sleepIfRetry();
+      HDFSGatewayEventImpl hdfsEvent = null;
+      int previousBucketId = -1;
+      BatchManager bm = null;
+      for (AsyncEvent asyncEvent : events) {
+        if (senderStopped){
+          if (logger.fineEnabled()) {
+            logger.fine("HDFSWriteOnlyStoreEventListener.processEvents: Cache is closing down. Ignoring the batch of data.");
+          }
+          return false;
+        }
+        hdfsEvent = (HDFSGatewayEventImpl)asyncEvent;
+        if (previousBucketId != hdfsEvent.getBucketId()){
+          if (previousBucketId != -1) 
+            persistBatch(bm, previousBucketId);
+          
+          previousBucketId = hdfsEvent.getBucketId();
+          bm = new BatchManager();
+        }
+        bm.addEvent(hdfsEvent);
+      }
+      try {
+        persistBatch(bm, hdfsEvent.getBucketId());
+      } catch (BucketMovedException e) {
+        logger.fine("Batch could not be written to HDFS as the bucket moved. bucket id: " + 
+            hdfsEvent.getBucketId() + " Exception: " + e);
+        return false;
+      }
+      success = true;
+    } catch (IOException e) {
+      logger.warning(LocalizedStrings.HOPLOG_FLUSH_FOR_BATCH_FAILED, e);
+      return false;
+    }
+    catch (ClassNotFoundException e) {
+      logger.warning(LocalizedStrings.HOPLOG_FLUSH_FOR_BATCH_FAILED, e);
+      return false;
+    }
+    catch (CacheClosedException e) {
+      // exit silently
+      if (logger.fineEnabled())
+        logger.fine(e);
+      return false;
+    } catch (ForceReattemptException e) {
+      if (logger.fineEnabled())
+        logger.fine(e);
+      return false;
+    } catch (InterruptedException e1) {
+      Thread.currentThread().interrupt(); // restore the interrupt flag
+      return false; // report failure so the interrupted batch is retried
+    } finally {
+      failureTracker.record(success);
+    }
+    return true;
+  }
+  
+  /**
+   * Persists batches of multiple regions specified by the batch manager
+   * 
+   */
+  private void persistBatch(BatchManager bm, int bucketId) throws IOException, ForceReattemptException {
+    Iterator<Map.Entry<LocalRegion,ArrayList<QueuedPersistentEvent>>> eventsPerRegion = 
+        bm.iterator();
+    HoplogOrganizer bucketOrganizer = null; 
+    while (eventsPerRegion.hasNext()) {
+      Map.Entry<LocalRegion, ArrayList<QueuedPersistentEvent>> eventsForARegion = eventsPerRegion.next();
+      bucketOrganizer = getOrganizer((PartitionedRegion) eventsForARegion.getKey(), bucketId);
+      // a null organizer means the bucket is no longer hosted here (it moved)
+      if (bucketOrganizer == null)
+        throw new BucketMovedException("Bucket moved. BucketID: " + bucketId + "  HdfsRegion: " +  eventsForARegion.getKey().getName());
+      bucketOrganizer.flush(eventsForARegion.getValue().iterator(), eventsForARegion.getValue().size());
+      if (logger.fineEnabled()) {
+        logger.fine("Batch written to HDFS of size " +  eventsForARegion.getValue().size() + 
+            " for region " + eventsForARegion.getKey());
+      }
+    }
+  }
+
+  private HoplogOrganizer getOrganizer(PartitionedRegion region, int bucketId) {
+    BucketRegion br = region.getDataStore().getLocalBucketById(bucketId);
+    if (br == null) {
+      // got rebalanced or something
+      throw new BucketMovedException("Bucket region is no longer available. BucketId: "+
+          bucketId + " HdfsRegion: " +  region.getName());
+    }
+
+    return br.getHoplogOrganizer();
+  }
+  
+  /**
+   * Sorts events from multiple regions into per-region lists
+   *
+   */
+  private class BatchManager implements Iterable<Map.Entry<LocalRegion,ArrayList<QueuedPersistentEvent>>> {
+    private HashMap<LocalRegion, ArrayList<QueuedPersistentEvent>> regionBatches = 
+        new HashMap<LocalRegion, ArrayList<QueuedPersistentEvent>>();
+    
+    public void addEvent (HDFSGatewayEventImpl hdfsEvent) throws IOException, ClassNotFoundException {
+      LocalRegion region = (LocalRegion) hdfsEvent.getRegion();
+      ArrayList<QueuedPersistentEvent> regionList = regionBatches.get(region);
+      if (regionList == null) {
+        regionList = new ArrayList<QueuedPersistentEvent>();
+        regionBatches.put(region, regionList);
+      }
+      regionList.add(new UnsortedHDFSQueuePersistedEvent(hdfsEvent));
+    }
+
+    @Override
+    public Iterator<Map.Entry<LocalRegion,ArrayList<QueuedPersistentEvent>>> iterator() {
+      return regionBatches.entrySet().iterator();
+    }
+    
+  }
+}
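
A rough wiring sketch for the listener above, not taken from the patch: the queue id and batch settings are hypothetical, obtaining a LogWriterI18n via convertToLogWriterI18n() is an assumption about the public LogWriter API, and in the real code path the queue is created internally by the HDFS region machinery so that the events it delivers really are HDFSGatewayEventImpl instances (processEvents casts to that type).

    Cache cache = new CacheFactory().create();
    // assumption: LogWriter.convertToLogWriterI18n() yields the i18n writer the listener expects
    LogWriterI18n log = cache.getLogger().convertToLogWriterI18n();
    AsyncEventListener listener = new HDFSWriteOnlyStoreEventListener(log);
    AsyncEventQueue queue = cache.createAsyncEventQueueFactory()
        .setBatchSize(100)            // events handed to processEvents() per batch (hypothetical)
        .setBatchTimeInterval(1000)   // or flush after this many milliseconds (hypothetical)
        .create("hdfs-write-only-queue", listener);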

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HoplogListenerForRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HoplogListenerForRegion.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HoplogListenerForRegion.java
new file mode 100644
index 0000000..c7ba23f
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HoplogListenerForRegion.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogListener;
+
+/**
+ * Objects of this class need to be created for every region. These objects
+ * listen to hoplog events and take the appropriate action.
+ *
+ */
+public class HoplogListenerForRegion implements HoplogListener {
+
+  private List<HoplogListener> otherListeners = new CopyOnWriteArrayList<HoplogListener>();
+
+  public HoplogListenerForRegion() {
+    
+  }
+
+  @Override
+  public void hoplogCreated(String regionFolder, int bucketId,
+      Hoplog... oplogs) throws IOException {
+    for (HoplogListener listener : this.otherListeners) {
+      listener.hoplogCreated(regionFolder, bucketId, oplogs);
+    }
+  }
+
+  @Override
+  public void hoplogDeleted(String regionFolder, int bucketId,
+      Hoplog... oplogs) {
+    for (HoplogListener listener : this.otherListeners) {
+      try {
+        listener.hoplogDeleted(regionFolder, bucketId, oplogs);
+      } catch (IOException e) {
+        // TODO handle
+        throw new HDFSIOException(e.getLocalizedMessage(), e);
+      }
+    }
+  }
+
+  public void addListener(HoplogListener listener) {
+    this.otherListeners.add(listener);
+  }
+
+  @Override
+  public void compactionCompleted(String region, int bucket, boolean isMajor) {
+    for (HoplogListener listener : this.otherListeners) {
+      listener.compactionCompleted(region, bucket, isMajor);
+    }
+  }
+}



[22/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/PersistedEventImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/PersistedEventImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/PersistedEventImpl.java
new file mode 100644
index 0000000..82e2bf9
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/PersistedEventImpl.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import com.gemstone.gemfire.DataSerializer;
+import com.gemstone.gemfire.cache.Operation;
+import com.gemstone.gemfire.internal.DataSerializableFixedID;
+import com.gemstone.gemfire.internal.cache.CachedDeserializable;
+import com.gemstone.gemfire.internal.cache.CachedDeserializableFactory;
+import com.gemstone.gemfire.internal.cache.EntryEventImpl;
+import com.gemstone.gemfire.internal.cache.lru.Sizeable;
+import com.gemstone.gemfire.internal.cache.versions.VersionTag;
+import com.gemstone.gemfire.internal.Version;
+
+/**
+ * Event that is persisted in HDFS. Because only some of the EntryEventImpl
+ * fields need to be persisted, this class carries just those fields and
+ * overrides the toData and fromData functions.
+ *
+ * Subclasses exist for the different types of persisted events: sorted vs.
+ * unsorted, and the persisted events kept in the region queue, which
+ * additionally need to hold the region key.
+ *   
+ *
+ */
+public abstract class PersistedEventImpl {
+  protected Operation op = Operation.UPDATE;
+  
+  protected Object valueObject;
+
+  /**
+   * A field with flags describing the event
+   */
+  protected byte flags;
+
+   // Flags indicating the type of value.
+   // If the value is not a byte array or an object, it is an internal delta.
+  private static final byte VALUE_IS_BYTE_ARRAY= 0x01;
+  private static final byte VALUE_IS_OBJECT= (VALUE_IS_BYTE_ARRAY << 1);
+  private static final byte POSSIBLE_DUPLICATE = (VALUE_IS_OBJECT << 1);
+  private static final byte HAS_VERSION_TAG = (POSSIBLE_DUPLICATE << 1);
+  
+
+  /** for deserialization */
+  public PersistedEventImpl() {
+  }
+  
+  public PersistedEventImpl(Object value, Operation op, byte valueIsObject,
+      boolean isPossibleDuplicate, boolean hasVersionTag) throws IOException,
+      ClassNotFoundException {
+    this.op = op;
+    this.valueObject = value;
+    setFlag(VALUE_IS_BYTE_ARRAY, valueIsObject == 0x00);
+    setFlag(VALUE_IS_OBJECT, valueIsObject == 0x01);
+    setFlag(POSSIBLE_DUPLICATE, isPossibleDuplicate);
+    setFlag(HAS_VERSION_TAG, hasVersionTag);
+  }
+  
+  private void setFlag(byte flag, boolean set) {
+    flags = (byte) (set ?  flags | flag :  flags & ~flag);
+  }
+  
+  private boolean getFlag(byte flag) {
+    return (flags & flag) != 0x0;
+  }
+
+  public void toData(DataOutput out) throws IOException {
+    out.writeByte(this.op.ordinal);
+    out.writeByte(this.flags);
+    
+    if (getFlag(VALUE_IS_BYTE_ARRAY)) { 
+      DataSerializer.writeByteArray((byte[])this.valueObject, out);
+    } else if (getFlag(VALUE_IS_OBJECT)) {
+      if(valueObject instanceof CachedDeserializable) {
+        CachedDeserializable cd = (CachedDeserializable)valueObject;
+        DataSerializer.writeObjectAsByteArray(cd.getValue(), out);
+      } else {
+        DataSerializer.writeObjectAsByteArray(valueObject, out);
+      }
+    }
+    else {
+      DataSerializer.writeObject(valueObject, out);
+    }
+  }
+
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.op = Operation.fromOrdinal(in.readByte());
+    this.flags = in.readByte();
+    
+    if (getFlag(VALUE_IS_BYTE_ARRAY)) { 
+      this.valueObject = DataSerializer.readByteArray(in);
+    } else if (getFlag(VALUE_IS_OBJECT)) {
+      byte[] newValueBytes = DataSerializer.readByteArray(in);
+      if(newValueBytes == null) {
+        this.valueObject = null;
+      } else {
+        if(CachedDeserializableFactory.preferObject()) {
+          this.valueObject =  EntryEventImpl.deserialize(newValueBytes);
+        } else {
+          this.valueObject = CachedDeserializableFactory.create(newValueBytes);
+        }
+      }
+    }
+    else {
+      this.valueObject = DataSerializer.readObject(in);
+    }
+    
+  }
+  
+  /**
+   * Return the timestamp of this event. Depending on the subclass,
+   * this may be part of the version tag, or a separate field.
+   */
+  public abstract long getTimstamp();
+
+  protected boolean hasVersionTag() {
+    return getFlag(HAS_VERSION_TAG);
+  }
+
+  public Operation getOperation()
+  {
+    return this.op;
+  }
+  
+  public Object getValue() {
+    return this.valueObject;
+  }
+  
+  public boolean isPossibleDuplicate()
+  {
+    return getFlag(POSSIBLE_DUPLICATE);
+  }
+
+  /**
+   * returns deserialized value. 
+   * 
+   */
+  public Object getDeserializedValue() throws IOException, ClassNotFoundException {
+    Object retVal = null;
+    if (getFlag(VALUE_IS_BYTE_ARRAY)) { 
+      // value is a byte array
+      retVal = this.valueObject;
+    } else if (getFlag(VALUE_IS_OBJECT)) {
+      if(valueObject instanceof CachedDeserializable) {
+        retVal = ((CachedDeserializable)valueObject).getDeserializedForReading();
+      } else {
+        retVal = valueObject;
+      }
+    }
+    else {
+      // value is a object
+      retVal = this.valueObject;
+    }
+    return retVal;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder str = new StringBuilder(PersistedEventImpl.class.getSimpleName());
+    str.append("@").append(System.identityHashCode(this))
+    .append(" op:").append(op)
+    .append(" valueObject:").append(valueObject)
+    .append(" isPossibleDuplicate:").append(getFlag(POSSIBLE_DUPLICATE));
+    return str.toString();
+  }
+
+  public void copy(PersistedEventImpl usersValue) {
+    this.op = usersValue.op;
+    this.valueObject = usersValue.valueObject;
+    this.flags = usersValue.flags;
+  }
+  
+  public static int getSizeInBytes(int keySize, int valueSize, VersionTag versionTag) {
+    int size = 0;
+    
+    // value length
+    size += valueSize; 
+
+    // one byte for op and one byte for flag
+    size += 2;
+    
+    return size;
+  }
+}
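
The flags byte above packs four booleans into one byte. A standalone illustration of the same bit arithmetic used by setFlag/getFlag (the constants mirror the class above; the sequence of operations is made up):

    final byte VALUE_IS_BYTE_ARRAY = 0x01; // 0000 0001
    final byte VALUE_IS_OBJECT     = 0x02; // 0000 0010
    final byte POSSIBLE_DUPLICATE  = 0x04; // 0000 0100
    final byte HAS_VERSION_TAG     = 0x08; // 0000 1000

    byte flags = 0;
    flags = (byte) (flags | VALUE_IS_OBJECT);             // setFlag(VALUE_IS_OBJECT, true)
    flags = (byte) (flags | HAS_VERSION_TAG);             // setFlag(HAS_VERSION_TAG, true)
    boolean isObject = (flags & VALUE_IS_OBJECT) != 0x0;  // getFlag(VALUE_IS_OBJECT) -> true
    flags = (byte) (flags & ~HAS_VERSION_TAG);            // setFlag(HAS_VERSION_TAG, false)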

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/QueuedPersistentEvent.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/QueuedPersistentEvent.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/QueuedPersistentEvent.java
new file mode 100644
index 0000000..bd7994c
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/QueuedPersistentEvent.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.io.DataOutput;
+import java.io.IOException;
+
+public interface QueuedPersistentEvent {
+  
+  public byte[] getRawKey();
+  
+  public void toHoplogEventBytes(DataOutput out) throws IOException;
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserver.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserver.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserver.java
new file mode 100644
index 0000000..b97bdb7
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserver.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Tracks flushes using a queue of latches.
+ * 
+ */
+public class SignalledFlushObserver implements FlushObserver {
+  private static class FlushLatch extends CountDownLatch {
+    private final long seqnum;
+    
+    public FlushLatch(long seqnum) {
+      super(1);
+      this.seqnum = seqnum;
+    }
+    
+    public long getSequence() {
+      return seqnum;
+    }
+  }
+  
+  // assume the number of outstanding flush requests is small so we don't
+  // need to organize by seqnum
+  private final List<FlushLatch> signals;
+  
+  private final AtomicLong eventsReceived;
+  private final AtomicLong eventsDelivered;
+  
+  public SignalledFlushObserver() {
+    signals = new ArrayList<FlushLatch>();
+    eventsReceived = new AtomicLong(0);
+    eventsDelivered = new AtomicLong(0);
+  }
+  
+  @Override
+  public boolean shouldDrainImmediately() {
+    synchronized (signals) {
+      return !signals.isEmpty();
+    }
+  }
+  
+  @Override
+  public AsyncFlushResult flush() {
+    final long seqnum = eventsReceived.get();
+    synchronized (signals) {
+      final FlushLatch flush;
+      if (seqnum <= eventsDelivered.get()) {
+        flush = null;
+      } else {
+        flush = new FlushLatch(seqnum);
+        signals.add(flush);
+      }
+      
+      return new AsyncFlushResult() {
+        @Override
+        public boolean waitForFlush(long timeout, TimeUnit unit) throws InterruptedException {
+          return flush == null ? true : flush.await(timeout, unit);
+        }
+      };
+    }
+  }
+
+  /**
+   * Invoked when an event is received.
+   */
+  public void push() {
+    eventsReceived.incrementAndGet();
+  }
+
+  /**
+   * Invoked when a batch has been dispatched.
+   */
+  public void pop(int count) {
+    long highmark = eventsDelivered.addAndGet(count);
+    synchronized (signals) {
+      for (ListIterator<FlushLatch> iter = signals.listIterator(); iter.hasNext(); ) {
+        FlushLatch flush = iter.next();
+        if (flush.getSequence() <= highmark) {
+          flush.countDown();
+          iter.remove();
+        }
+      }
+    }
+  }
+  
+  /**
+   * Invoked when the queue is cleared.
+   */
+  public void clear() {
+    synchronized (signals) {
+      for (FlushLatch flush : signals) {
+        flush.countDown();
+      }
+
+      signals.clear();
+      eventsReceived.set(0);
+      eventsDelivered.set(0);
+    }
+  }
+}
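
A minimal sketch of the intended call sequence for the observer above (the producer/dispatcher split reflects how the surrounding queue code is expected to use it; FlushObserver and AsyncFlushResult are defined elsewhere in this patch):

    SignalledFlushObserver observer = new SignalledFlushObserver();

    observer.push();   // queue producer: one push() per event received
    observer.push();
    observer.pop(2);   // dispatcher: two events were delivered to HDFS

    // everything received has been delivered, so the flush result completes immediately
    boolean flushed = observer.flush().waitForFlush(1, TimeUnit.SECONDS); // may throw InterruptedException
    assert flushed;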

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SortedHDFSQueuePersistedEvent.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SortedHDFSQueuePersistedEvent.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SortedHDFSQueuePersistedEvent.java
new file mode 100644
index 0000000..c725ce5
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SortedHDFSQueuePersistedEvent.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import com.gemstone.gemfire.DataSerializer;
+import com.gemstone.gemfire.cache.Operation;
+import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
+import com.gemstone.gemfire.internal.DataSerializableFixedID;
+import com.gemstone.gemfire.internal.cache.versions.VersionTag;
+import com.gemstone.gemfire.internal.Version;
+
+/**
+ * A persistent event that is stored in the hoplog queue. This class is only used
+ * temporarily to copy the data from the HDFSGatewayEventImpl to the persisted
+ * record in the file.
+ * 
+ *
+ */
+public class SortedHDFSQueuePersistedEvent extends SortedHoplogPersistedEvent implements QueuedPersistentEvent {
+  
+  
+  /**key stored in serialized form*/
+  protected byte[] keyBytes = null;
+  
+  public SortedHDFSQueuePersistedEvent(HDFSGatewayEventImpl in) throws IOException,
+  ClassNotFoundException {
+    this(in.getSerializedValue(), in.getOperation(), in.getValueIsObject(), in
+        .getPossibleDuplicate(), in.getVersionTag(), in.getSerializedKey(), in
+        .getCreationTime());
+  }
+
+  public SortedHDFSQueuePersistedEvent(Object valueObject, Operation operation,
+      byte valueIsObject, boolean possibleDuplicate, VersionTag versionTag,
+      byte[] serializedKey, long timestamp) throws ClassNotFoundException, IOException {
+    super(valueObject, operation, valueIsObject, possibleDuplicate, versionTag, timestamp);
+    this.keyBytes = serializedKey;
+  }
+
+  @Override
+  public void toData(DataOutput out) throws IOException {
+    super.toData(out);
+    DataSerializer.writeByteArray(this.keyBytes, out);
+  }
+
+  @Override
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    super.fromData(in);
+    this.keyBytes = DataSerializer.readByteArray(in);
+  }
+
+  @Override
+  public void toHoplogEventBytes(DataOutput out) throws IOException {
+    super.toData(out);
+  }
+
+  public byte[] getRawKey() {
+    return this.keyBytes;
+  }
+  public static int getSizeInBytes(int keySize, int valueSize, VersionTag versionTag) {
+    
+    int size = SortedHoplogPersistedEvent.getSizeInBytes(keySize, valueSize, versionTag);
+    
+    size += keySize;
+    
+    return size;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SortedHoplogPersistedEvent.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SortedHoplogPersistedEvent.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SortedHoplogPersistedEvent.java
new file mode 100644
index 0000000..e8be7b8
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/SortedHoplogPersistedEvent.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import com.gemstone.gemfire.DataSerializer;
+import com.gemstone.gemfire.cache.Operation;
+import com.gemstone.gemfire.internal.ByteArrayDataInput;
+import com.gemstone.gemfire.internal.cache.versions.VersionTag;
+
+/**
+ * A persistent event that is stored in a sorted hoplog. In addition
+ * to the fields of PersistedEventImpl, this event has a version tag.
+ * 
+ * This class should only be serialized by directly calling toData,
+ * which is why it does not implement DataSerializable
+ * 
+ */
+public class SortedHoplogPersistedEvent extends PersistedEventImpl {
+  /** version tag for concurrency checks */
+  protected VersionTag versionTag;
+
+  /** timestamp of the event. Used when version checks are disabled*/
+  protected long timestamp;
+
+  public SortedHoplogPersistedEvent(Object valueObject, Operation operation,
+      byte valueIsObject, boolean possibleDuplicate, VersionTag tag, long timestamp) throws ClassNotFoundException, IOException {
+    super(valueObject, operation, valueIsObject, possibleDuplicate, tag != null);
+    this.versionTag = tag;
+    this.timestamp = timestamp;
+  }
+
+  public SortedHoplogPersistedEvent() {
+    //for deserialization
+  }
+
+  @Override
+  public long getTimstamp() {
+    return versionTag == null ? timestamp : versionTag.getVersionTimeStamp();
+  }
+  
+  @Override
+  public void toData(DataOutput out) throws IOException {
+    super.toData(out);
+    if (versionTag == null) {
+      out.writeLong(timestamp);
+    } else {
+      //TODO optimize these
+      DataSerializer.writeObject(this.versionTag, out);
+    }
+  }
+
+  @Override
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    super.fromData(in);
+    if (hasVersionTag()) {
+      this.versionTag = (VersionTag)DataSerializer.readObject(in);
+    } else {
+      this.timestamp = in.readLong();
+    }
+  }
+  
+  /**
+   * @return the concurrency versioning tag for this event, if any
+   */
+  public VersionTag getVersionTag() {
+    return this.versionTag;
+  }
+  
+  public static SortedHoplogPersistedEvent fromBytes(byte[] val)
+      throws IOException, ClassNotFoundException {
+    ByteArrayDataInput in = new ByteArrayDataInput();
+    in.initialize(val, null);
+    SortedHoplogPersistedEvent event = new SortedHoplogPersistedEvent();
+    event.fromData(in);
+    return event;
+  }
+  
+  public void copy(PersistedEventImpl usersValue) {
+    super.copy(usersValue);
+    this.versionTag = ((SortedHoplogPersistedEvent) usersValue).versionTag;
+    this.timestamp = ((SortedHoplogPersistedEvent) usersValue).timestamp;
+  }
+  
+  public static int getSizeInBytes(int keySize, int valueSize, VersionTag versionTag) {
+    int size = PersistedEventImpl.getSizeInBytes(keySize, valueSize, versionTag);
+    
+    if (versionTag != null) {
+      size +=  versionTag.getSizeInBytes();
+    } else {
+      // size of Timestamp
+      size += 8;
+    }
+    
+    return size;
+  }
+}
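
A small round-trip sketch for the event above using plain JDK streams (the value, operation and timestamp are made up; toData and fromBytes are the methods shown above, and the fragment assumes IOException/ClassNotFoundException are handled by the caller):

    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    SortedHoplogPersistedEvent original = new SortedHoplogPersistedEvent(
        new byte[] { 1, 2, 3 },       // value already serialized as a byte array
        Operation.CREATE,
        (byte) 0x00,                  // 0x00 => value is a byte array
        false,                        // not a possible duplicate
        null,                         // no version tag, so the timestamp is written instead
        System.currentTimeMillis());
    original.toData(new DataOutputStream(bos));

    SortedHoplogPersistedEvent copy = SortedHoplogPersistedEvent.fromBytes(bos.toByteArray());
    assert copy.getTimstamp() == original.getTimstamp();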

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/UnsortedHDFSQueuePersistedEvent.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/UnsortedHDFSQueuePersistedEvent.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/UnsortedHDFSQueuePersistedEvent.java
new file mode 100644
index 0000000..93d596b
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/UnsortedHDFSQueuePersistedEvent.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import com.gemstone.gemfire.DataSerializer;
+import com.gemstone.gemfire.internal.cache.versions.VersionTag;
+
+
+/**
+ * A persistent event that is stored in the hoplog queue. This class is only used
+ * temporarily to copy the data from the HDFSGatewayEventImpl to the persisted
+ * record in the file. 
+ * 
+ *
+ */
+public class UnsortedHDFSQueuePersistedEvent extends UnsortedHoplogPersistedEvent implements QueuedPersistentEvent {
+  
+  /**the bytes of the key for this entry */
+  protected byte[] keyBytes = null;
+  
+  public UnsortedHDFSQueuePersistedEvent(HDFSGatewayEventImpl in) throws IOException,
+  ClassNotFoundException {
+    super(in.getValue(), in.getOperation(), in.getValueIsObject(), in.getPossibleDuplicate(), 
+        in.getVersionTimeStamp() == 0 ? in.getCreationTime() : in.getVersionTimeStamp());
+    this.keyBytes = in.getSerializedKey();
+    
+  }
+
+  @Override
+  public void toData(DataOutput out) throws IOException {
+    super.toData(out);
+    DataSerializer.writeByteArray(this.keyBytes, out);
+  }
+
+  @Override
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    super.fromData(in);
+    this.keyBytes = DataSerializer.readByteArray(in);
+  }
+  
+  @Override
+  public void toHoplogEventBytes(DataOutput out) throws IOException {
+    super.toData(out);
+  }
+  
+  public byte[] getRawKey() {
+    return this.keyBytes;
+  }
+  
+  public static int getSizeInBytes(int keySize, int valueSize, VersionTag versionTag) {
+    
+    int size = UnsortedHoplogPersistedEvent.getSizeInBytes(keySize, valueSize, versionTag);
+    
+    size += keySize;
+    
+    return size;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/UnsortedHoplogPersistedEvent.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/UnsortedHoplogPersistedEvent.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/UnsortedHoplogPersistedEvent.java
new file mode 100644
index 0000000..9b9a04d
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/UnsortedHoplogPersistedEvent.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import com.gemstone.gemfire.DataSerializer;
+import com.gemstone.gemfire.cache.Operation;
+import com.gemstone.gemfire.internal.ByteArrayDataInput;
+import com.gemstone.gemfire.internal.cache.versions.VersionTag;
+
+/**
+ * A persisted event that is stored in an unsorted (sequential) hoplog. It
+ * does not carry a version tag, only a timestamp for the entry.
+ * 
+ * This class should only be serialized by calling toData directly, which
+ * is why it does not implement DataSerializable.
+ * 
+ *
+ */
+public class UnsortedHoplogPersistedEvent extends PersistedEventImpl {
+  long timestamp;
+  
+  
+
+  public UnsortedHoplogPersistedEvent() {
+    //for deserialization
+  }
+
+  public UnsortedHoplogPersistedEvent(Object value, Operation op,
+      byte valueIsObject, boolean isPossibleDuplicate, long timestamp) throws IOException,
+      ClassNotFoundException {
+    super(value, op, valueIsObject, isPossibleDuplicate, false/*hasVersionTag*/);
+    this.timestamp = timestamp;
+  }
+
+  @Override
+  public long getTimstamp() {
+    return timestamp;
+  }
+
+  @Override
+  public void toData(DataOutput out) throws IOException {
+    super.toData(out);
+    DataSerializer.writeLong(timestamp, out);
+  }
+
+  @Override
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    super.fromData(in);
+    this.timestamp = DataSerializer.readLong(in);
+  }
+  
+  public static UnsortedHoplogPersistedEvent fromBytes(byte[] val)
+      throws IOException, ClassNotFoundException {
+    ByteArrayDataInput in = new ByteArrayDataInput();
+    in.initialize(val, null);
+    UnsortedHoplogPersistedEvent event = new UnsortedHoplogPersistedEvent();
+    event.fromData(in);
+    return event;
+  }
+  
+  public void copy(PersistedEventImpl usersValue) {
+    super.copy(usersValue);
+    this.timestamp = ((UnsortedHoplogPersistedEvent) usersValue).timestamp;
+  }
+  
+  public static int getSizeInBytes(int keySize, int valueSize, VersionTag versionTag) {
+    int size = PersistedEventImpl.getSizeInBytes(keySize, valueSize, versionTag);
+    
+    // size of Timestamp
+    size += 8;
+    
+    return size;
+  }
+}
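
A worked size estimate tying the three getSizeInBytes methods together (the 16-byte key and 100-byte value are hypothetical):

    int base     = PersistedEventImpl.getSizeInBytes(16, 100, null);              // 100 + 2  = 102
    int unsorted = UnsortedHoplogPersistedEvent.getSizeInBytes(16, 100, null);    // 102 + 8  = 110
    int queued   = UnsortedHDFSQueuePersistedEvent.getSizeInBytes(16, 100, null); // 110 + 16 = 126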

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/AbstractHoplog.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/AbstractHoplog.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/AbstractHoplog.java
new file mode 100644
index 0000000..d2fdbe7
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/AbstractHoplog.java
@@ -0,0 +1,357 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.io.IOException;
+import java.util.regex.Matcher;
+
+import com.gemstone.gemfire.internal.hll.ICardinality;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.io.compress.GzipCodec;
+import org.apache.hadoop.io.compress.Lz4Codec;
+import org.apache.hadoop.io.compress.SnappyCodec;
+
+import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile;
+import com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.CompressionType;
+import com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.internal.Version;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+import org.apache.logging.log4j.Logger;
+
+/**
+ * Abstract class for {@link Hoplog} with common functionality
+ */
+public abstract class AbstractHoplog implements Hoplog {
+  protected final FSProvider fsProvider;
+  
+  // path of the oplog file
+  protected volatile Path path;
+  private volatile HoplogDescriptor hfd;
+  protected Configuration conf;
+  protected SortedOplogStatistics stats;
+  protected Long hoplogModificationTime;
+  protected Long hoplogSize;
+
+  protected HoplogReaderActivityListener readerListener;
+  
+  // logger instance
+  protected static final Logger logger = LogService.getLogger();
+  
+  protected static String logPrefix;
+  // THIS CONSTRUCTOR SHOULD BE USED FOR LONER ONLY
+  AbstractHoplog(FileSystem inputFS, Path filePath, SortedOplogStatistics stats)
+      throws IOException {
+    logPrefix = "<" + filePath.getName() + "> ";
+    this.fsProvider = new FSProvider(inputFS);
+    initialize(filePath, stats, inputFS);
+  }
+
+  public AbstractHoplog(HDFSStoreImpl store, Path filePath,
+      SortedOplogStatistics stats) throws IOException {
+    logPrefix = "<" + filePath.getName() + "> ";
+    this.fsProvider = new FSProvider(store);
+    initialize(filePath, stats, store.getFileSystem());
+  }
+
+  private void initialize(Path path, SortedOplogStatistics stats, FileSystem fs) {
+    this.conf = fs.getConf();
+    this.stats = stats;
+    this.path = fs.makeQualified(path);
+    this.hfd = new HoplogDescriptor(this.path.getName());
+  }
+  
+  @Override
+  public abstract void close() throws IOException; 
+  @Override
+  public abstract HoplogReader getReader() throws IOException;
+
+  @Override
+  public abstract HoplogWriter createWriter(int keys) throws IOException;
+
+  @Override
+  abstract public void close(boolean clearCache) throws IOException;
+
+  @Override
+  public void setReaderActivityListener(HoplogReaderActivityListener listener) {
+    this.readerListener = listener;
+  }
+  
+  @Override
+  public String getFileName() {
+    return this.hfd.getFileName();
+  }
+  
+  public final int compareTo(Hoplog o) {
+    return hfd.compareTo( ((AbstractHoplog)o).hfd);
+  }
+
+  @Override
+  public ICardinality getEntryCountEstimate() throws IOException {
+    return null;
+  }
+  
+  @Override
+  public synchronized void rename(String name) throws IOException {
+    if (logger.isDebugEnabled())
+      logger.debug("{}Renaming hoplog to " + name, logPrefix);
+    Path parent = path.getParent();
+    Path newPath = new Path(parent, name);
+    fsProvider.getFS().rename(path, new Path(parent, newPath));
+
+    // close the old reader and let the new one get created lazily
+    close();
+    
+    // update path to point to the new path
+    path = newPath;
+    this.hfd = new HoplogDescriptor(this.path.getName());
+    logPrefix = "<" + path.getName() + "> ";
+  }
+  
+  @Override
+  public synchronized void delete() throws IOException {
+    if (logger.isDebugEnabled())
+      logger.debug("{}Deleting hoplog", logPrefix);
+    close();
+    this.hoplogModificationTime = null;
+    this.hoplogSize = null;
+    fsProvider.getFS().delete(path, false);
+  }
+
+  @Override
+  public long getModificationTimeStamp() {
+    initHoplogSizeTimeInfo();
+
+    // modification time will not be null if this hoplog exists; otherwise
+    // invoking this method is invalid
+    if (hoplogModificationTime == null) {
+      throw new IllegalStateException();
+    }
+    
+    return hoplogModificationTime;
+  }
+
+  @Override
+  public long getSize() {
+    initHoplogSizeTimeInfo();
+    
+    // size will not be null if this hoplog exists; otherwise
+    // invoking this method is invalid
+    if (hoplogSize == null) {
+      throw new IllegalStateException();
+    }
+    
+    return hoplogSize;
+  }
+  
+  private synchronized void initHoplogSizeTimeInfo() {
+    if (hoplogSize != null && hoplogModificationTime != null) {
+      // time and size info is already initialized. no work needed here
+      return;
+    }
+
+    try {
+      FileStatus[] filesInfo = FSUtils.listStatus(fsProvider.getFS(), path, null);
+      if (filesInfo != null && filesInfo.length == 1) {
+        this.hoplogModificationTime = filesInfo[0].getModificationTime();
+        this.hoplogSize = filesInfo[0].getLen();
+      }
+      // TODO else condition may happen if user deletes hoplog from the file system.
+    } catch (IOException e) {
+      logger.error(LocalizedMessage.create(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE, path), e);
+      throw new HDFSIOException(
+          LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(path),e);
+    }
+  }
+  public static SequenceFile.Writer getSequenceFileWriter(Path path, 
+      Configuration conf, Logger logger) throws IOException {
+    return getSequenceFileWriter(path,conf, logger, null); 
+  }
+  
+  /**
+   * Creates a SequenceFile writer for the given hoplog path.
+   * @param path the file to create
+   * @param conf the Hadoop configuration to use
+   * @param logger logger for debug output
+   * @param version used only for testing; pass null for all other purposes
+   * @return SequenceFile.Writer 
+   * @throws IOException
+   */
+  public static SequenceFile.Writer getSequenceFileWriter(Path path, 
+    Configuration conf, Logger logger, Version version) throws IOException {
+    Option optPath = SequenceFile.Writer.file(path);
+    Option optKey = SequenceFile.Writer.keyClass(BytesWritable.class);
+    Option optVal = SequenceFile.Writer.valueClass(BytesWritable.class);
+    Option optCom = withCompression(logger);
+    if (logger.isDebugEnabled())
+      logger.debug("{}Started creating hoplog " + path, logPrefix);
+    
+    if (version == null)
+      version = Version.CURRENT;
+    //Create a metadata option with the gemfire version, for future versioning
+    //of the key and value format
+    SequenceFile.Metadata metadata = new SequenceFile.Metadata();
+    metadata.set(new Text(Meta.GEMFIRE_VERSION.name()), new Text(String.valueOf(version.ordinal())));
+    Option optMeta = SequenceFile.Writer.metadata(metadata);
+    
+    SequenceFile.Writer writer = SequenceFile.createWriter(conf, optPath, optKey, optVal, optCom, optMeta);
+    
+    return writer;
+  }
+  
+  private static Option withCompression(Logger logger) {
+    String prop = System.getProperty(HoplogConfig.COMPRESSION);
+    if (prop != null) {
+      CompressionCodec codec;
+      if (prop.equalsIgnoreCase("SNAPPY")) {
+        codec = new SnappyCodec();
+      } else if (prop.equalsIgnoreCase("LZ4")) {
+        codec = new Lz4Codec();
+      } else if (prop.equals("GZ")) {
+        codec = new GzipCodec();
+      } else {
+        throw new IllegalStateException("Unsupported codec: " + prop);
+      }
+      if (logger.isDebugEnabled())
+        logger.debug("{}Using compression codec " + codec, logPrefix);
+      return SequenceFile.Writer.compression(CompressionType.BLOCK, codec);
+    }
+    return SequenceFile.Writer.compression(CompressionType.NONE, null);
+  }
+  
+  public static final class HoplogDescriptor implements Comparable<HoplogDescriptor> {
+     private final String fileName;
+     private final String bucket;
+     private final int sequence;
+     private final long timestamp;
+     private final String extension;
+     
+     HoplogDescriptor(final String fileName) {
+       this.fileName = fileName;
+       final Matcher matcher = AbstractHoplogOrganizer.HOPLOG_NAME_PATTERN.matcher(fileName);
+       final boolean matched = matcher.find();
+       assert matched;
+       this.bucket = matcher.group(1);
+       this.sequence = Integer.valueOf(matcher.group(3));
+       this.timestamp = Long.valueOf(matcher.group(2)); 
+       this.extension = matcher.group(4);
+     }
+     
+     public final String getFileName() {
+       return fileName;
+     }
+     
+     @Override
+     public boolean equals(Object o) {
+       if (this == o) {
+         return true;
+       }
+       
+       if (!(o instanceof HoplogDescriptor)) {
+         return false;
+       }
+       
+       final HoplogDescriptor other = (HoplogDescriptor)o;
+       // the two files should belong to same bucket
+       assert this.bucket.equals(other.bucket);
+       
+       // compare sequence first
+       if (this.sequence != other.sequence) {
+         return false;
+       }
+       
+       // sequence is same, compare timestamps
+       if (this.timestamp != other.timestamp) {
+         return false;
+       }
+       
+       return extension.equals(other.extension);
+     }
+
+    @Override
+    public int compareTo(HoplogDescriptor o) {
+      if (this == o) {
+        return 0;
+      }
+      
+      // the two files should belong to same bucket
+      assert this.bucket.equals(o.bucket);
+      
+      // compare sequence first
+      if (sequence > o.sequence) {
+        return -1;
+      } else if (sequence < o.sequence) {
+        return 1;
+      }
+      
+      // sequence is same, compare timestamps
+      if(timestamp > o.timestamp) {
+        return -1; 
+      } else if (timestamp < o.timestamp) {
+        return 1;
+      }
+      
+      //timestamp is the same, compare the file extension. It's
+      //possible a major compaction and minor compaction could finish
+      //at the same time and create the same timestamp and sequence number
+      //it doesn't matter which file we look at first in that case.
+      return extension.compareTo(o.extension);
+    }
+     
+     
+  }
+  
+  protected static final class FSProvider {
+    final FileSystem fs;
+    final HDFSStoreImpl store;
+    
+    // THIS CONSTRUCTOR IS FOR TESTING ONLY
+    FSProvider(FileSystem fs) {
+      this.fs = fs;
+      this.store = null;
+    }
+    
+    FSProvider(HDFSStoreImpl store) {
+      this.store = store;
+      fs = null;
+    }
+    
+    public FileSystem getFS() throws IOException {
+      if (store != null) {
+        return store.getFileSystem();
+      }
+      return fs;
+    }
+
+    public FileSystem checkFileSystem() {
+      store.checkAndClearFileSystem();
+      return store.getCachedFileSystem();
+    }
+  }
+}
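
For reference, HoplogDescriptor expects file names of the form <bucket>-<timestamp>-<sequence>.<extension> (the pattern itself lives in AbstractHoplogOrganizer below). A small ordering sketch with made-up names, assuming same-package access since the constructor is package-private:

    AbstractHoplog.HoplogDescriptor older = new AbstractHoplog.HoplogDescriptor("0-1700000000000-12.hop");
    AbstractHoplog.HoplogDescriptor newer = new AbstractHoplog.HoplogDescriptor("0-1700000000000-13.ihop");
    // higher sequence sorts first, then newer timestamp, then the extension breaks ties
    assert older.compareTo(newer) > 0;
    assert newer.compareTo(older) < 0;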

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/AbstractHoplogOrganizer.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/AbstractHoplogOrganizer.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/AbstractHoplogOrganizer.java
new file mode 100644
index 0000000..4f078d8
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/AbstractHoplogOrganizer.java
@@ -0,0 +1,430 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+import com.gemstone.gemfire.cache.Operation;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.QueuedPersistentEvent;
+import com.gemstone.gemfire.cache.hdfs.internal.SortedHoplogPersistedEvent;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplog.HoplogDescriptor;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
+import com.gemstone.gemfire.internal.Assert;
+import com.gemstone.gemfire.internal.cache.BucketRegion;
+import com.gemstone.gemfire.internal.cache.ForceReattemptException;
+import com.gemstone.gemfire.internal.cache.PartitionedRegion;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.TrackedReference;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.internal.logging.LogService;
+import org.apache.logging.log4j.Logger;
+
+
+public abstract class AbstractHoplogOrganizer<T extends PersistedEventImpl> implements HoplogOrganizer<T> {
+
+  public static final String MINOR_HOPLOG_EXTENSION = ".ihop";
+  public static final String MAJOR_HOPLOG_EXTENSION = ".chop";
+  public static final String EXPIRED_HOPLOG_EXTENSION = ".exp";
+  public static final String TEMP_HOPLOG_EXTENSION = ".tmp";
+
+  public static final String FLUSH_HOPLOG_EXTENSION = ".hop";
+  public static final String SEQ_HOPLOG_EXTENSION = ".shop";
+
+  // all valid hoplog file names follow this pattern
+  public static final String HOPLOG_NAME_REGEX = "(.+?)-(\\d+?)-(\\d+?)";
+  public static final Pattern HOPLOG_NAME_PATTERN = Pattern.compile(HOPLOG_NAME_REGEX
+      + "\\.(.*)");
+  
+  public static boolean JUNIT_TEST_RUN = false; 
+
+  protected static final boolean ENABLE_INTEGRITY_CHECKS = Boolean
+      .getBoolean("gemfire.HdfsSortedOplogOrganizer.ENABLE_INTEGRITY_CHECKS")
+      || assertionsEnabled();
+
+  private static boolean assertionsEnabled() {
+    boolean enabled = false;
+    assert enabled = true;
+    return enabled;
+  }
+
+  protected HdfsRegionManager regionManager;
+  // name or id of bucket managed by this organizer
+  protected final String regionFolder;
+  protected final int bucketId;
+
+  // path of the region directory
+  protected final Path basePath;
+  // identifies path of directory containing a bucket's oplog files
+  protected final Path bucketPath;
+
+  protected final HDFSStoreImpl store;
+
+  // assigns a unique increasing number to each oplog file
+  protected AtomicInteger sequence;
+
+  //logger instance
+  protected static final Logger logger = LogService.getLogger();
+  protected final String logPrefix;
+
+  protected SortedOplogStatistics stats;
+  AtomicLong bucketDiskUsage = new AtomicLong(0);
+
+  // creation of new files and expiration of files will be synchronously
+  // notified to the listener
+  protected HoplogListener listener;
+
+  private volatile boolean closed = false;
+  
+  protected Object changePrimarylockObject = new Object();
+  
+  public AbstractHoplogOrganizer(HdfsRegionManager region, int bucketId) {
+
+    assert region != null;
+
+    this.regionManager = region;
+    this.regionFolder = region.getRegionFolder();
+    this.store = region.getStore();
+    this.listener = region.getListener();
+    this.stats = region.getHdfsStats();
+    
+    this.bucketId = bucketId;
+
+    this.basePath = new Path(store.getHomeDir());
+    this.bucketPath = new Path(basePath, regionFolder + "/" + bucketId);
+
+    this.logPrefix = "<" + getRegionBucketStr() + "> ";
+    
+  }
+
+  @Override
+  public boolean isClosed() {
+    return closed || regionManager.isClosed();
+  }
+  
+  @Override
+  public void close() throws IOException {
+    closed = true;
+    
+    // this bucket is closed and may be owned by a new node. So reduce the store
+    // usage stat, as the new owner adds the usage metric
+    incrementDiskUsage((-1) * bucketDiskUsage.get());
+  }
+
+  @Override
+  public abstract void flush(Iterator<? extends QueuedPersistentEvent> bufferIter,
+      int count) throws IOException, ForceReattemptException;
+
+  @Override
+  public abstract void clear() throws IOException;
+
+  protected abstract Hoplog getHoplog(Path hoplogPath) throws IOException;
+
+  @Override
+  public void hoplogCreated(String region, int bucketId, Hoplog... oplogs)
+      throws IOException {
+    throw new UnsupportedOperationException("Not supported for "
+        + this.getClass().getSimpleName());
+  }
+
+  @Override
+  public void hoplogDeleted(String region, int bucketId, Hoplog... oplogs)
+      throws IOException {
+    throw new UnsupportedOperationException("Not supported for "
+        + this.getClass().getSimpleName());
+  }
+
+  @Override
+  public void compactionCompleted(String region, int bucket, boolean isMajor) {
+    throw new UnsupportedOperationException("Not supported for "
+        + this.getClass().getSimpleName());
+  }
+  
+  @Override
+  public T read(byte[] key) throws IOException {
+    throw new UnsupportedOperationException("Not supported for "
+        + this.getClass().getSimpleName());
+  }
+
+  @Override
+  public HoplogIterator<byte[], T> scan() throws IOException {
+    throw new UnsupportedOperationException("Not supported for "
+        + this.getClass().getSimpleName());
+  }
+
+  @Override
+  public HoplogIterator<byte[], T> scan(byte[] from, byte[] to)
+      throws IOException {
+    throw new UnsupportedOperationException("Not supported for "
+        + this.getClass().getSimpleName());
+  }
+
+  @Override
+  public HoplogIterator<byte[], T> scan(byte[] from,
+      boolean fromInclusive, byte[] to, boolean toInclusive) throws IOException {
+    throw new UnsupportedOperationException("Not supported for "
+        + this.getClass().getSimpleName());
+  }
+
+  @Override
+  public long sizeEstimate() {
+    throw new UnsupportedOperationException("Not supported for "
+        + this.getClass().getSimpleName());
+  }
+
+  /**
+   * @return the oplog's full path, formed by prefixing the bucket path to the
+   *         file name
+   */
+  protected String getPathStr(Hoplog oplog) {
+    return bucketPath.toString() + "/" + oplog.getFileName();
+  }
+
+  protected String getRegionBucketStr() {
+    return regionFolder + "/" + bucketId;
+  }
+
+  protected SortedHoplogPersistedEvent deserializeValue(byte[] val) throws IOException {
+    try {
+      return SortedHoplogPersistedEvent.fromBytes(val);
+    } catch (ClassNotFoundException e) {
+      logger
+          .error(
+              LocalizedStrings.GetMessage_UNABLE_TO_DESERIALIZE_VALUE_CLASSNOTFOUNDEXCEPTION,
+              e);
+      return null;
+    }
+  }
+
+  /**
+   * @return true if the entry belongs to a destroy or invalidate event
+   */
+  protected boolean isDeletedEntry(byte[] value, int offset) throws IOException {
+    // Read only the first byte of PersistedEventImpl for the operation
+    assert value != null && value.length > 0 && offset >= 0 && offset < value.length;
+    Operation op = Operation.fromOrdinal(value[offset]);
+
+    if (op.isDestroy() || op.isInvalidate()) {
+      return true;
+    }
+    return false;
+  }
+
+  /**
+   * @param seqNum
+   *          desired sequence number of the hoplog. If null, the next number in
+   *          the sequence is chosen
+   * @param extension
+   *          file extension representing the type of file, e.g. ihop for
+   *          intermediate hoplog
+   * @return a new temporary file for a new sorted oplog. The name consists of
+   *         the bucket id, a timestamp, and a sequence number for ordering the
+   *         files
+   */
+  Hoplog getTmpSortedOplog(Integer seqNum, String extension) throws IOException {
+    if (seqNum == null) {
+      seqNum = sequence.incrementAndGet();
+    }
+    String name = bucketId + "-" + System.currentTimeMillis() + "-" + seqNum 
+        + extension;
+    Path soplogPath = new Path(bucketPath, name + TEMP_HOPLOG_EXTENSION);
+    return getHoplog(soplogPath);
+  }
+  
+  /**
+   * renames a temporary hoplog file to a legitimate name.
+   */
+  static void makeLegitimate(Hoplog so) throws IOException {
+    String name = so.getFileName();
+    assert name.endsWith(TEMP_HOPLOG_EXTENSION);
+
+    int index = name.lastIndexOf(TEMP_HOPLOG_EXTENSION);
+    name = name.substring(0, index);
+    so.rename(name);
+  }
+
+  /**
+   * creates an expiry marker for a file on the file system
+   * 
+   * @param hoplog
+   * @throws IOException
+   */
+  protected void addExpiryMarkerForAFile(Hoplog hoplog) throws IOException {
+    FileSystem fs = store.getFileSystem();
+
+    // TODO optimization needed here. Instead of creating an expired-marker
+    // file per file, create a single meta file. The main thing to worry about
+    // is compaction of the meta file itself
+    Path expiryMarker = getExpiryMarkerPath(hoplog.getFileName());
+
+    // uh-oh, why are we trying to expire an already expired file?
+    if (ENABLE_INTEGRITY_CHECKS) {
+      Assert.assertTrue(!fs.exists(expiryMarker),
+          "Expiry marker already exists: " + expiryMarker);
+    }
+
+    FSDataOutputStream expiryMarkerFile = fs.create(expiryMarker);
+    expiryMarkerFile.close();
+
+    if (logger.isDebugEnabled())
+      logger.debug("Hoplog marked expired: " + getPathStr(hoplog));
+  }
+
+  protected Path getExpiryMarkerPath(String name) {
+    return new Path(bucketPath, name + EXPIRED_HOPLOG_EXTENSION);
+  }
+  
+  protected String truncateExpiryExtension(String name) {
+    if (name.endsWith(EXPIRED_HOPLOG_EXTENSION)) {
+      return name.substring(0, name.length() - EXPIRED_HOPLOG_EXTENSION.length());
+    }
+    
+    return name;
+  }
+  
+  /**
+   * updates region stats and the local copy of the bucket-level store usage metric.
+   * 
+   * @param delta
+   */
+  protected void incrementDiskUsage(long delta) {
+    long newSize = bucketDiskUsage.addAndGet(delta);
+    if (newSize < 0 && delta < 0) {
+      if (logger.isDebugEnabled()){
+        logger.debug("{}Invalid diskUsage size:" + newSize + " caused by delta:"
+            + delta + ", parallel del & close?" + isClosed(), logPrefix);
+      }
+      if (isClosed()) {
+        // avoid corrupting disk usage size during close by reducing residue
+        // size only
+        delta = delta + (-1 * newSize);
+      }
+    }
+    stats.incStoreUsageBytes(delta);
+  }
+
+  /**
+   * Utility method to remove a file from the valid file list if an expired
+   * marker for the file exists
+   * 
+   * @param valid
+   *          list of valid files
+   * @param expired
+   *          list of expired file markers
+   * @return list of valid files that do not have an expired file marker
+   */
+  public static FileStatus[] filterValidHoplogs(FileStatus[] valid,
+      FileStatus[] expired) {
+    if (valid == null) {
+      return null;
+    }
+
+    if (expired == null) {
+      return valid;
+    }
+
+    ArrayList<FileStatus> result = new ArrayList<FileStatus>();
+    for (FileStatus vs : valid) {
+      boolean found = false;
+      for (FileStatus ex : expired) {
+        if (ex
+            .getPath()
+            .getName()
+            .equals(
+                vs.getPath().getName()
+                    + HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION)) {
+          found = true;
+        }
+      }
+      if (!found) {
+        result.add(vs);
+      }
+    }
+
+    return result.toArray(new FileStatus[result.size()]);
+  }
+
+  protected void pingSecondaries() throws ForceReattemptException {
+
+    if (JUNIT_TEST_RUN)
+      return;
+    BucketRegion br = ((PartitionedRegion)this.regionManager.getRegion()).getDataStore().getLocalBucketById(this.bucketId);
+    boolean secondariesPingable = false;
+    try {
+      secondariesPingable = br.areSecondariesPingable();
+    } catch (Throwable e) {
+      throw new ForceReattemptException("Failed to ping secondary servers of bucket: " + 
+          this.bucketId + ", region: " + ((PartitionedRegion)this.regionManager.getRegion()), e);
+    }
+    if (!secondariesPingable)
+      throw new ForceReattemptException("Failed to ping secondary servers of bucket: " + 
+          this.bucketId + ", region: " + ((PartitionedRegion)this.regionManager.getRegion()));
+  }
+
+  
+
+  
+  /**
+   * A comparator for ordering soplogs based on the file name. The file names
+   * are assigned incrementally and hint at the age of the file
+   */
+  public static final class HoplogComparator implements
+      Comparator<TrackedReference<Hoplog>> {
+    /**
+     * a file with a higher sequence number or timestamp is younger and hence
+     * compares as smaller
+     */
+    @Override
+    public int compare(TrackedReference<Hoplog> o1, TrackedReference<Hoplog> o2) {
+      return o1.get().compareTo(o2.get());
+    }
+
+    /**
+     * Compares the age of files based on their names and returns 1 if name1 is
+     * older, -1 if name1 is younger, and 0 if the two files are the same age
+     */
+    public static int compareByName(String name1, String name2) {
+      HoplogDescriptor hd1 = new HoplogDescriptor(name1);
+      HoplogDescriptor hd2 = new HoplogDescriptor(name2);
+      
+      return hd1.compareTo(hd2);
+    }
+  }
+
+  /**
+   * @param matcher
+   *          A preinitialized / matched regex pattern
+   * @return timestamp embedded in the hoplog file name (the second group of the matched name pattern)
+   */
+  public static long getHoplogTimestamp(Matcher matcher) {
+    return Long.valueOf(matcher.group(2));
+  }
+}
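
A minimal sketch (not part of the patch) of how a hoplog file name produced by
getTmpSortedOplog() maps onto HOPLOG_NAME_PATTERN; the file name used here is made up
for illustration. Group 1 is the bucket id, group 2 the timestamp (see
getHoplogTimestamp()), group 3 the sequence number, and group 4 the extension.

    import java.util.regex.Matcher;

    import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;

    public class HoplogNameDemo {
      public static void main(String[] args) {
        Matcher m = AbstractHoplogOrganizer.HOPLOG_NAME_PATTERN.matcher("7-1461797387000-12.hop");
        if (m.matches()) {
          System.out.println("bucket    = " + m.group(1));  // 7
          System.out.println("timestamp = " + m.group(2));  // 1461797387000
          System.out.println("sequence  = " + m.group(3));  // 12
          System.out.println("extension = " + m.group(4));  // hop
        }
      }
    }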

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BloomFilter.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BloomFilter.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BloomFilter.java
new file mode 100644
index 0000000..86e66a1
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BloomFilter.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+public interface BloomFilter {
+  /**
+   * Returns true if the bloom filter might contain the supplied key. The nature of the bloom filter
+   * is such that false positives are allowed, but false negatives cannot occur.
+   */
+  boolean mightContain(byte[] key);
+
+  /**
+   * Returns true if the bloom filter might contain the supplied key. The nature of the bloom filter
+   * is such that false positives are allowed, but false negatives cannot occur.
+   */
+  boolean mightContain(byte[] key, int keyOffset, int keyLength);
+
+  /**
+   * @return Size of the bloom, in bytes
+   */
+  long getBloomSize();
+}
\ No newline at end of file
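
A toy in-memory implementation of the BloomFilter interface above, offered only as a
sketch of its contract: false positives are possible, false negatives are not. The
class name, the single hash function, and the add() method are illustrative and not
part of this patch.

    import java.util.BitSet;

    import com.gemstone.gemfire.cache.hdfs.internal.hoplog.BloomFilter;

    public class ToyBloomFilter implements BloomFilter {
      private final BitSet bits;
      private final int numBits;

      public ToyBloomFilter(int numBits) {
        this.numBits = numBits;
        this.bits = new BitSet(numBits);
      }

      /** Records a key; afterwards mightContain() can never return false for it. */
      public void add(byte[] key) {
        bits.set(indexFor(key, 0, key.length));
      }

      @Override
      public boolean mightContain(byte[] key) {
        return mightContain(key, 0, key.length);
      }

      @Override
      public boolean mightContain(byte[] key, int keyOffset, int keyLength) {
        // a clear bit proves the key was never added; a set bit only means "maybe"
        return bits.get(indexFor(key, keyOffset, keyLength));
      }

      @Override
      public long getBloomSize() {
        return (numBits + 7) / 8;  // size of the bit array in bytes
      }

      private int indexFor(byte[] key, int off, int len) {
        int h = 17;
        for (int i = off; i < off + len; i++) {
          h = 31 * h + key[i];
        }
        return Math.abs(h % numBits);
      }
    }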

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CloseTmpHoplogsTimerTask.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CloseTmpHoplogsTimerTask.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CloseTmpHoplogsTimerTask.java
new file mode 100644
index 0000000..3f67de8
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CloseTmpHoplogsTimerTask.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.util.Collection;
+
+import org.apache.hadoop.fs.FileSystem;
+
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
+import com.gemstone.gemfire.internal.SystemTimer.SystemTimerTask;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.internal.logging.LogService;
+
+import org.apache.logging.log4j.Logger;
+
+/**
+ * In the streaming case, if bucket traffic drops off after only a few batches of
+ * data have been written, flush is never called and the file is left in a tmp
+ * state until flushing resumes. To avoid this, this timer task periodically
+ * iterates over the buckets and closes each writer whose rollover time has
+ * passed.
+ * 
+ * It is also responsible for fixing the sizes of files that were not closed
+ * properly last time.
+ *
+ *
+ */
+class CloseTmpHoplogsTimerTask extends SystemTimerTask {
+  
+  private HdfsRegionManager hdfsRegionManager;
+  private static final Logger logger = LogService.getLogger();
+  private FileSystem filesystem; 
+  
+  public CloseTmpHoplogsTimerTask(HdfsRegionManager hdfsRegionManager) {
+    this.hdfsRegionManager = hdfsRegionManager;
+    
+    // Create a new FileSystem for this task, for the following reason:
+    // For HDFS, if a file wasn't closed properly last time, calling
+    // FileSystem.append for that file invokes FSNamesystem.startFileInternal ->
+    // FSNamesystem.recoverLeaseInternal. That function throws
+    // AlreadyBeingCreatedException if there is an open handle to any other file
+    // created using the same FileSystem object. This is a bug and is tracked at:
+    // https://issues.apache.org/jira/browse/HDFS-3848?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
+    // 
+    // The fix for this bug is not yet part of Pivotal HD, so to work around it
+    // we create a separate file system for the timer task.
+    this.filesystem = this.hdfsRegionManager.getStore().createFileSystem();
+    if (logger.isDebugEnabled()) 
+      logger.debug("created a new file system specifically for timer task");
+  }
+
+  
+  /**
+   * Iterates over all the bucket organizers and closes their writer if the time for 
+   * rollover has passed. It also has the additional responsibility of fixing the tmp
+   * files that were left over in the last unsuccessful run. 
+   */
+  @Override
+  public void run2() {
+    Collection<HoplogOrganizer> organizers =  hdfsRegionManager.getBucketOrganizers();
+    if (logger.isDebugEnabled()) 
+      logger.debug("Starting the close temp logs run.");
+    
+    for (HoplogOrganizer organizer: organizers) {
+      
+      HDFSUnsortedHoplogOrganizer unsortedOrganizer = (HDFSUnsortedHoplogOrganizer)organizer;
+      long timeSinceLastFlush = (System.currentTimeMillis() - unsortedOrganizer.getLastFlushTime())/1000 ;
+      try {
+        this.hdfsRegionManager.getRegion().checkReadiness();
+      } catch (Exception e) {
+        break;
+      }
+      
+      try {
+        // the time since last flush has exceeded file rollover interval, roll over the 
+        // file. 
+        if (timeSinceLastFlush >= unsortedOrganizer.getfileRolloverInterval()) {
+          if (logger.isDebugEnabled()) 
+            logger.debug("Closing writer for bucket: " + unsortedOrganizer.bucketId);
+          unsortedOrganizer.synchronizedCloseWriter(false, timeSinceLastFlush, 0);
+        }
+        
+        // fix the tmp hoplogs, if any. Pass the new file system here. 
+        unsortedOrganizer.identifyAndFixTmpHoplogs(this.filesystem);
+        
+      } catch (Exception e) {
+        logger.warn(LocalizedStrings.HOPLOG_CLOSE_FAILED, e);
+      }
+    }
+    
+  }
+}
+
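
A minimal sketch (not part of the patch) of the rollover decision made in run2(): a
bucket's writer is closed once the time since its last flush, converted to seconds,
reaches the configured file rollover interval. The method and values below are
illustrative.

    public class RolloverCheckDemo {
      static boolean shouldRollover(long lastFlushTimeMillis, long rolloverIntervalSecs) {
        long timeSinceLastFlushSecs = (System.currentTimeMillis() - lastFlushTimeMillis) / 1000;
        return timeSinceLastFlushSecs >= rolloverIntervalSecs;
      }

      public static void main(String[] args) {
        long now = System.currentTimeMillis();
        // last flush 10 minutes ago, 5 minute rollover interval -> close the writer
        System.out.println(shouldRollover(now - 10 * 60 * 1000L, 5 * 60));  // true
        // last flush 1 minute ago -> leave the writer open
        System.out.println(shouldRollover(now - 60 * 1000L, 5 * 60));       // false
      }
    }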

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CompactionStatus.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CompactionStatus.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CompactionStatus.java
new file mode 100644
index 0000000..55d8f87
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CompactionStatus.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import com.gemstone.gemfire.internal.VersionedDataSerializable;
+import com.gemstone.gemfire.internal.Version;
+
+/**
+ * Status of the compaction task reported in the future
+ * 
+ */
+public class CompactionStatus implements VersionedDataSerializable {
+  /**MergeGemXDHDFSToGFE check and verify serializationversions **/
+ 
+  private static Version[] serializationVersions = new Version[]{ Version.GFE_81 };
+  private int bucketId;
+  private boolean status;
+
+  public CompactionStatus() {
+  }
+
+  public CompactionStatus(int bucketId, boolean status) {
+    this.bucketId = bucketId;
+    this.status = status;
+  }
+  public int getBucketId() {
+    return bucketId;
+  }
+  public boolean isStatus() {
+    return status;
+  }
+  @Override
+  public void toData(DataOutput out) throws IOException {
+    out.writeInt(bucketId);
+    out.writeBoolean(status);
+  }
+  @Override
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.bucketId = in.readInt();
+    this.status = in.readBoolean();
+  }
+  @Override
+  public Version[] getSerializationVersions() {
+    return serializationVersions;
+  }
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append(getClass().getCanonicalName()).append("@")
+    .append(System.identityHashCode(this)).append(" Bucket:")
+    .append(bucketId).append(" status:").append(status);
+    return sb.toString();
+  }
+}
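
A minimal sketch (not part of the patch) showing that CompactionStatus round-trips
through plain DataOutput/DataInput streams, which is all toData()/fromData() above
rely on.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;

    import com.gemstone.gemfire.cache.hdfs.internal.hoplog.CompactionStatus;

    public class CompactionStatusRoundTrip {
      public static void main(String[] args) throws Exception {
        CompactionStatus original = new CompactionStatus(7, true);

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.toData(new DataOutputStream(bytes));

        CompactionStatus copy = new CompactionStatus();
        copy.fromData(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

        System.out.println(copy.getBucketId());  // 7
        System.out.println(copy.isStatus());     // true
      }
    }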

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/FlushStatus.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/FlushStatus.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/FlushStatus.java
new file mode 100644
index 0000000..84beded
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/FlushStatus.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import com.gemstone.gemfire.internal.VersionedDataSerializable;
+import com.gemstone.gemfire.internal.Version;
+
+/**
+ * Reports the result of a flush request.
+ * 
+ */
+public class FlushStatus implements VersionedDataSerializable {
+  private static Version[] serializationVersions = new Version[]{ Version.GFE_81 };
+  private int bucketId;
+
+  private final static int LAST = -1;
+  
+  public FlushStatus() {
+  }
+
+  public static FlushStatus last() {
+    return new FlushStatus(LAST);
+  }
+  
+  public FlushStatus(int bucketId) {
+    this.bucketId = bucketId;
+  }
+  public int getBucketId() {
+    return bucketId;
+  }
+  public boolean isLast() {
+    return bucketId == LAST;
+  }
+  @Override
+  public void toData(DataOutput out) throws IOException {
+    out.writeInt(bucketId);
+  }
+  @Override
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.bucketId = in.readInt();
+  }
+  @Override
+  public Version[] getSerializationVersions() {
+    return serializationVersions;
+  }
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append(getClass().getCanonicalName()).append("@")
+    .append(System.identityHashCode(this)).append(" Bucket:")
+    .append(bucketId);
+    return sb.toString();
+  }
+}
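
A minimal sketch (not part of the patch): FlushStatus.last() returns a sentinel whose
isLast() is true, which a consumer of streamed flush results can presumably use to
detect the end of the stream.

    import com.gemstone.gemfire.cache.hdfs.internal.hoplog.FlushStatus;

    public class FlushStatusDemo {
      public static void main(String[] args) {
        FlushStatus bucketStatus = new FlushStatus(12);
        FlushStatus end = FlushStatus.last();

        System.out.println(bucketStatus.getBucketId());  // 12
        System.out.println(bucketStatus.isLast());       // false
        System.out.println(end.isLast());                // true
      }
    }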

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManager.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManager.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManager.java
new file mode 100644
index 0000000..ba191c2
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManager.java
@@ -0,0 +1,330 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.io.IOException;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Future;
+import java.util.concurrent.LinkedBlockingDeque;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.logging.log4j.Logger;
+
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer.Compactor;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
+
+/**
+ * A singleton which schedules compaction of hoplogs owned by this node as primary and manages
+ * the executor of ongoing compactions. Ideally the number of pending requests will not exceed the
+ * number of buckets on the node, as the hoplog organizer avoids creating a new request while
+ * compaction on the bucket is active. Separate queues for major and minor compactions are
+ * maintained so that long-running major compactions do not block minor compactions.
+ */
+public class HDFSCompactionManager {
+  /*
+   * Each HDFS store has its own concurrency configuration, which the compaction
+   * manager uses to manage threads. This member holds the hdfs-store to
+   * compaction-manager mapping
+   */
+  private static final ConcurrentHashMap<String, HDFSCompactionManager> storeToManagerMap = 
+                                        new ConcurrentHashMap<String, HDFSCompactionManager>();
+
+  // hdfs store configuration used to initialize this instance
+  HDFSStore storeConfig;
+  
+  // Executor for ordered execution of minor compaction requests.
+  private final CompactionExecutor minorCompactor;
+  // Executor for ordered execution of major compaction requests.
+  private final CompactionExecutor majorCompactor;
+
+  private static final Logger logger = LogService.getLogger();
+  protected final static String logPrefix = "<" + "HDFSCompactionManager" + "> ";
+  
+  private HDFSCompactionManager(HDFSStore config) {
+    this.storeConfig = config;
+    // configure hdfs compaction manager
+    int capacity = Integer.getInteger(HoplogConfig.COMPCATION_QUEUE_CAPACITY,
+        HoplogConfig.COMPCATION_QUEUE_CAPACITY_DEFAULT);
+
+    minorCompactor = new CompactionExecutor(config.getMinorCompactionThreads(), capacity, "MinorCompactor_"
+        + config.getName());
+
+    majorCompactor = new CompactionExecutor(config.getMajorCompactionThreads(), capacity, "MajorCompactor_"
+        + config.getName());
+
+    minorCompactor.allowCoreThreadTimeOut(true);
+    majorCompactor.allowCoreThreadTimeOut(true);
+  }
+
+  public static synchronized HDFSCompactionManager getInstance(HDFSStore config) {
+    HDFSCompactionManager instance = storeToManagerMap.get(config.getName());
+    if (instance == null) {
+      instance = new HDFSCompactionManager(config);
+      storeToManagerMap.put(config.getName(), instance);
+    }
+    
+    return instance;
+  }
+
+  /**
+   * Accepts compaction request for asynchronous compaction execution.
+   * 
+   * @param request
+   *          compaction request with region and bucket id
+   * @return a future for the compaction status if the request is accepted, or null if the
+   *         compactor is busy or the request is rejected
+   */
+  public synchronized Future<CompactionStatus> submitRequest(CompactionRequest request) {
+    if (!request.isForced && request.compactor.isBusy(request.isMajor)) {
+      if (logger.isDebugEnabled()) {
+        fineLog("Compactor is busy. Ignoring ", request);
+      }
+      return null;
+    }
+    
+    CompactionExecutor executor = request.isMajor ? majorCompactor : minorCompactor;
+    
+    try {
+      return executor.submit(request);
+    } catch (Throwable e) {
+      if (e instanceof CompactionIsDisabled) {
+        if (logger.isDebugEnabled()) {
+          fineLog("{}" +e.getMessage(), logPrefix);
+        }
+      } else {
+        logger.info(LocalizedMessage.create(LocalizedStrings.ONE_ARG, "Compaction request submission failed"), e);
+      }
+    }
+    return null;
+  }
+
+  /**
+   * Removes all pending compaction requests. Programmed for TESTING ONLY
+   */
+  public void reset() {
+    minorCompactor.shutdownNow();
+    majorCompactor.shutdownNow();
+    HDFSCompactionManager.storeToManagerMap.remove(storeConfig.getName());
+  }
+  
+  /**
+   * Returns minor compactor. Programmed for TESTING AND MONITORING ONLY  
+   */
+  public ThreadPoolExecutor getMinorCompactor() {
+    return minorCompactor;
+  }
+
+  /**
+   * Returns major compactor. Programmed for TESTING AND MONITORING ONLY  
+   */
+  public ThreadPoolExecutor getMajorCompactor() {
+    return majorCompactor;
+  }
+  
+  /**
+   * Contains important details needed for executing a compaction cycle.
+   */
+  public static class CompactionRequest implements Callable<CompactionStatus> {
+    String regionFolder;
+    int bucket;
+    Compactor compactor;
+    boolean isMajor;
+    final boolean isForced;
+    final boolean versionUpgrade;
+
+    public CompactionRequest(String regionFolder, int bucket, Compactor compactor, boolean major) {
+      this(regionFolder, bucket, compactor, major, false);
+    }
+
+    public CompactionRequest(String regionFolder, int bucket, Compactor compactor, boolean major, boolean isForced) {
+      this(regionFolder, bucket, compactor, major, isForced, false);
+    }
+
+    public CompactionRequest(String regionFolder, int bucket, Compactor compactor, boolean major, boolean isForced, boolean versionUpgrade) {
+      this.regionFolder = regionFolder;
+      this.bucket = bucket;
+      this.compactor = compactor;
+      this.isMajor = major;
+      this.isForced = isForced;
+      this.versionUpgrade = versionUpgrade;
+    }
+
+    @Override
+    public CompactionStatus call() throws Exception {
+      HDFSStore store = compactor.getHdfsStore();
+      if (!isForced) {
+        // this is an auto-generated compaction request. If auto compaction is
+        // disabled, ignore this call.
+        if (isMajor && !store.getMajorCompaction()) {
+          if (logger.isDebugEnabled()) {
+            logger.debug("{}Major compaction is disabled. Ignoring request",logPrefix);
+          }
+          return new CompactionStatus(bucket, false);
+        } else if (!isMajor && !store.getMinorCompaction()) {
+          if (logger.isDebugEnabled()) {
+            logger.debug("{}Minor compaction is disabled. Ignoring request", logPrefix);
+          }
+          return new CompactionStatus(bucket, false);
+        }
+      }
+
+      // all hurdles passed, execute compaction now
+      try {
+        boolean status = compactor.compact(isMajor, versionUpgrade);
+        return new CompactionStatus(bucket, status);
+      } catch (IOException e) {
+        logger.error(LocalizedMessage.create(LocalizedStrings.HOPLOG_HDFS_COMPACTION_ERROR, bucket), e);
+      }
+      return new CompactionStatus(bucket, false);
+    }
+
+    @Override
+    public int hashCode() {
+      final int prime = 31;
+      int result = 1;
+      result = prime * result + bucket;
+      result = prime * result
+          + ((regionFolder == null) ? 0 : regionFolder.hashCode());
+      return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (this == obj)
+        return true;
+      if (obj == null)
+        return false;
+      if (getClass() != obj.getClass())
+        return false;
+      CompactionRequest other = (CompactionRequest) obj;
+      if (bucket != other.bucket)
+        return false;
+      if (regionFolder == null) {
+        if (other.regionFolder != null)
+          return false;
+      } else if (!regionFolder.equals(other.regionFolder))
+        return false;
+      return true;
+    }
+
+    @Override
+    public String toString() {
+      return "CompactionRequest [regionFolder=" + regionFolder + ", bucket="
+          + bucket + ", isMajor=" + isMajor + ", isForced="+isForced+"]";
+    }
+  }
+
+  /**
+   * Helper class for creating named instances of compaction threads and managing the compaction
+   * executor. All threads wait infinitely
+   */
+  private class CompactionExecutor extends ThreadPoolExecutor implements ThreadFactory {
+    final AtomicInteger count = new AtomicInteger(1);
+    private String name;
+
+    CompactionExecutor(int max, int capacity, String name) {
+      super(max, max, 5, TimeUnit.SECONDS, new LinkedBlockingDeque<Runnable>(capacity));
+      allowCoreThreadTimeOut(true);
+      setThreadFactory(this);
+      this.name = name;
+    }
+    
+    private void throwIfStopped(CompactionRequest req, HDFSStore storeConfig) {
+      // check whether compaction is enabled every time; an alter operation may
+      // change this value, so the check is needed on every submission
+      boolean isEnabled = true;
+      isEnabled = storeConfig.getMinorCompaction();
+      if (req.isMajor) {
+        isEnabled = storeConfig.getMajorCompaction();
+      }
+      if (isEnabled || req.isForced) {
+        return;
+      }
+      throw new CompactionIsDisabled(name + " is disabled");
+    }
+
+    private void throwIfPoolSizeChanged(CompactionRequest task, HDFSStore config) {
+      int threadCount = config.getMinorCompactionThreads();
+      if (task.isMajor) {
+        threadCount = config.getMajorCompactionThreads();
+      }
+      
+      if (getCorePoolSize() < threadCount) {
+        setCorePoolSize(threadCount);
+      } else if (getCorePoolSize() > threadCount) {
+        setCorePoolSize(threadCount);
+      }
+      
+      if (!task.isForced && getActiveCount() > threadCount) {
+        // the number of active threads is more than the new max pool size. Throw an
+        // error if this is a system-generated compaction request
+        throw new CompactionIsDisabled(
+            "Rejecting to reduce the number of threads for " + name
+            + ", currently:" + getActiveCount() + " target:"
+            + threadCount);
+      }
+    }
+    
+    @Override
+    public <T> Future<T> submit(Callable<T> task) {
+      throwIfStopped((CompactionRequest) task, HDFSCompactionManager.this.storeConfig);
+      throwIfPoolSizeChanged((CompactionRequest) task, HDFSCompactionManager.this.storeConfig);
+      
+      if (logger.isDebugEnabled()) {
+        fineLog("New:", task, " pool:", getPoolSize(), " active:", getActiveCount());
+      }
+      return super.submit(task);
+    }
+
+    @Override
+    public Thread newThread(Runnable r) {
+      Thread thread = new Thread(r, name + ":" + count.getAndIncrement());
+      thread.setDaemon(true);
+      if (logger.isDebugEnabled()) {
+        fineLog("New thread:", name, " poolSize:", getPoolSize(),
+            " active:", getActiveCount());
+      }
+      return thread;
+    }
+  }
+  
+  public static class CompactionIsDisabled extends RejectedExecutionException {
+    private static final long serialVersionUID = 1L;
+    public CompactionIsDisabled(String name) {
+      super(name);
+    }
+  }
+  
+  
+  private void fineLog(Object... strings) {
+    if (logger.isDebugEnabled()) {
+      StringBuffer sb = new StringBuffer();
+      for (Object str : strings) {
+        sb.append(str.toString());
+      }
+      logger.debug("{}"+sb.toString(), logPrefix);
+    }
+  }
+}
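
A minimal usage sketch (not part of the patch): submitting a minor compaction request
and waiting on the returned Future, per the submitRequest() contract above. The
HDFSStore and Compactor arguments are assumed to be obtained from the surrounding HDFS
code (HDFSStoreImpl / HoplogOrganizer).

    import java.util.concurrent.Future;

    import com.gemstone.gemfire.cache.hdfs.HDFSStore;
    import com.gemstone.gemfire.cache.hdfs.internal.hoplog.CompactionStatus;
    import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSCompactionManager;
    import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSCompactionManager.CompactionRequest;
    import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer.Compactor;

    public class CompactionSubmitDemo {
      static void submitMinorCompaction(HDFSStore store, Compactor compactor,
          String regionFolder, int bucketId) throws Exception {
        HDFSCompactionManager manager = HDFSCompactionManager.getInstance(store);
        CompactionRequest request =
            new CompactionRequest(regionFolder, bucketId, compactor, false /* minor */);

        Future<CompactionStatus> future = manager.submitRequest(request);
        if (future == null) {
          return;  // compactor was busy or the request was rejected
        }
        CompactionStatus status = future.get();  // blocks until the compaction cycle finishes
        System.out.println("bucket " + status.getBucketId() + " compacted: " + status.isStatus());
      }
    }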

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSFlushQueueArgs.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSFlushQueueArgs.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSFlushQueueArgs.java
new file mode 100644
index 0000000..36e171b
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSFlushQueueArgs.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+import com.gemstone.gemfire.DataSerializer;
+import com.gemstone.gemfire.internal.VersionedDataSerializable;
+import com.gemstone.gemfire.internal.Version;
+
+/**
+ * Defines the arguments to the flush queue request.
+ * 
+ */
+@SuppressWarnings("serial")
+public class HDFSFlushQueueArgs implements VersionedDataSerializable {
+
+  private static Version[] serializationVersions = new Version[]{ Version.GFE_81 };
+
+  private HashSet<Integer> buckets;
+
+  private long maxWaitTimeMillis;
+
+  public HDFSFlushQueueArgs() {
+  }
+
+  public HDFSFlushQueueArgs(Set<Integer> buckets, long maxWaitTime) {
+    this.buckets = new HashSet<Integer>(buckets);
+    this.maxWaitTimeMillis = maxWaitTime;
+  }
+
+  @Override
+  public void toData(DataOutput out) throws IOException {
+    DataSerializer.writeHashSet(buckets, out);
+    out.writeLong(maxWaitTimeMillis);
+  }
+
+  @Override
+  public void fromData(DataInput in) throws IOException,
+      ClassNotFoundException {
+    this.buckets = DataSerializer.readHashSet(in);
+    this.maxWaitTimeMillis = in.readLong();
+  }
+
+  @Override
+  public Version[] getSerializationVersions() {
+    return serializationVersions;
+  }
+
+  public Set<Integer> getBuckets() {
+    return (Set<Integer>) buckets;
+  }
+
+  public void setBuckets(Set<Integer> buckets) {
+    this.buckets = new HashSet<Integer>(buckets);
+  }
+
+  public boolean isSynchronous() {
+    return maxWaitTimeMillis == 0;
+  }
+
+  public long getMaxWaitTime() {
+    return this.maxWaitTimeMillis;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append(getClass().getCanonicalName()).append("@")
+    .append(System.identityHashCode(this))
+    .append(" buckets:").append(buckets)
+    .append(" maxWaitTime:").append(maxWaitTimeMillis);
+    return sb.toString();
+  }
+}
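
A minimal sketch (not part of the patch): isSynchronous() simply reports whether the
maximum wait time was set to zero; the bucket ids and wait time below are illustrative.

    import java.util.HashSet;
    import java.util.Set;

    import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSFlushQueueArgs;

    public class FlushQueueArgsDemo {
      public static void main(String[] args) {
        Set<Integer> buckets = new HashSet<Integer>();
        buckets.add(3);
        buckets.add(11);

        HDFSFlushQueueArgs syncArgs = new HDFSFlushQueueArgs(buckets, 0L);
        HDFSFlushQueueArgs boundedArgs = new HDFSFlushQueueArgs(buckets, 30000L);

        System.out.println(syncArgs.isSynchronous());      // true
        System.out.println(boundedArgs.isSynchronous());   // false
        System.out.println(boundedArgs.getMaxWaitTime());  // 30000
      }
    }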


[24/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/FlushObserver.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/FlushObserver.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/FlushObserver.java
new file mode 100644
index 0000000..f69b3dc
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/FlushObserver.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Observes and reacts to flush events.
+ * 
+ */
+public interface FlushObserver {
+  public interface AsyncFlushResult {
+    /**
+     * Waits for the most recently enqueued batch to completely flush.
+     * 
+     * @param time the time to wait
+     * @param unit the time unit
+     * @return true if flushed before the timeout
+     * @throws InterruptedException interrupted while waiting
+     */
+    public boolean waitForFlush(long time, TimeUnit unit) throws InterruptedException;
+  }
+
+  /**
+   * Returns true when the queued events should be drained from the queue
+   * immediately.
+   * 
+   * @return true if draining
+   */
+  boolean shouldDrainImmediately();
+  
+  /**
+   * Begins flushing the queued events.
+   * 
+   * @return the async result
+   */
+  public AsyncFlushResult flush();
+}
+
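
A minimal usage sketch (not part of the patch): trigger a flush and bound the wait,
per the AsyncFlushResult contract above. The FlushObserver instance is assumed to be
supplied by the queue machinery elsewhere in this change.

    import java.util.concurrent.TimeUnit;

    import com.gemstone.gemfire.cache.hdfs.internal.FlushObserver;
    import com.gemstone.gemfire.cache.hdfs.internal.FlushObserver.AsyncFlushResult;

    public class FlushWaitDemo {
      /** Returns true if the most recently enqueued batch flushed within 30 seconds. */
      static boolean flushWithTimeout(FlushObserver observer) throws InterruptedException {
        AsyncFlushResult result = observer.flush();
        return result.waitForFlush(30, TimeUnit.SECONDS);
      }
    }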

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSBucketRegionQueue.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSBucketRegionQueue.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSBucketRegionQueue.java
new file mode 100644
index 0000000..9127e4d
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSBucketRegionQueue.java
@@ -0,0 +1,1232 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Deque;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NavigableSet;
+import java.util.NoSuchElementException;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.LinkedBlockingDeque;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+import com.gemstone.gemfire.InternalGemFireError;
+import com.gemstone.gemfire.cache.CacheWriterException;
+import com.gemstone.gemfire.cache.EntryNotFoundException;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionAttributes;
+import com.gemstone.gemfire.cache.TimeoutException;
+import com.gemstone.gemfire.cache.hdfs.internal.FlushObserver.AsyncFlushResult;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue.SortedEventBuffer.BufferIterator;
+import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
+import com.gemstone.gemfire.internal.Assert;
+import com.gemstone.gemfire.internal.cache.AbstractBucketRegionQueue;
+import com.gemstone.gemfire.internal.cache.EntryEventImpl;
+import com.gemstone.gemfire.internal.cache.ForceReattemptException;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.InternalRegionArguments;
+import com.gemstone.gemfire.internal.cache.LocalRegion;
+import com.gemstone.gemfire.internal.cache.RegionEventImpl;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.ByteComparator;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.CursorIterator;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
+import org.apache.hadoop.hbase.util.Bytes;
+
+
+/**
+ * This bucket region queue holds the sorted list of events required for HDFS persistence.
+ * 
+ * 
+ */
+public class HDFSBucketRegionQueue extends AbstractBucketRegionQueue {
+     private static final boolean VERBOSE = Boolean.getBoolean("hdfsBucketRegionQueue.VERBOSE");
+     private final int batchSize;
+     volatile HDFSEventQueue hdfsEventQueue = null;
+     
+     // set before releasing the primary lock. 
+     private final AtomicBoolean releasingPrimaryLock = new AtomicBoolean(true);
+     
+     // This is used to keep track of the current size of the queue in bytes. 
+     final AtomicLong queueSizeInBytes =  new AtomicLong(0);
+     public boolean isBucketSorted = true;
+     /**
+     * @param regionName
+     * @param attrs
+     * @param parentRegion
+     * @param cache
+     * @param internalRegionArgs
+     */
+    public HDFSBucketRegionQueue(String regionName, RegionAttributes attrs,
+        LocalRegion parentRegion, GemFireCacheImpl cache,
+        InternalRegionArguments internalRegionArgs) {
+      super(regionName, attrs, parentRegion, cache, internalRegionArgs);
+      
+      this.isBucketSorted = internalRegionArgs.getPartitionedRegion().getParallelGatewaySender().getBucketSorted();
+      if (isBucketSorted)
+        hdfsEventQueue = new MultiRegionSortedQueue();
+      else
+        hdfsEventQueue = new EventQueue();
+      
+      batchSize = internalRegionArgs.getPartitionedRegion().
+          getParallelGatewaySender().getBatchSize() * 1024 *1024;
+      this.keySet();
+    }
+    @Override
+    protected void initialize(InputStream snapshotInputStream,
+        InternalDistributedMember imageTarget,
+        InternalRegionArguments internalRegionArgs) throws TimeoutException,
+        IOException, ClassNotFoundException {
+
+      super.initialize(snapshotInputStream, imageTarget, internalRegionArgs);
+
+      loadEventsFromTempQueue();
+      
+      this.initialized = true;
+      notifyEventProcessor();
+    }
+
+    private TreeSet<Long> createSkipListFromMap(Set keySet) {
+      TreeSet<Long> sortedKeys = null;
+      if (!hdfsEventQueue.isEmpty())
+        return sortedKeys;
+      
+      if (!keySet.isEmpty()) {
+        sortedKeys = new TreeSet<Long>(keySet);
+        if (!sortedKeys.isEmpty())
+        {
+          for (Long key : sortedKeys) {
+            if (this.isBucketSorted) {
+              Object hdfsevent = getNoLRU(key, true, false, false);
+              if (hdfsevent == null) { // this can happen when tombstones are recovered. 
+                if (logger.isDebugEnabled() || VERBOSE) {
+                  logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Discarding key " + key + ", no event recovered"));
+                }
+              } else {
+                int eventSize = ((HDFSGatewayEventImpl)hdfsevent).
+                    getSizeOnHDFSInBytes(!this.isBucketSorted);
+                hdfsEventQueue.put(key,(HDFSGatewayEventImpl)hdfsevent, eventSize );
+                queueSizeInBytes.getAndAdd(eventSize);
+              }
+            }
+            else {
+              Object hdfsevent = getNoLRU(key, true, false, false);
+              if (hdfsevent != null) { // hdfs event can be null when tombstones are recovered.
+                queueSizeInBytes.getAndAdd(((HDFSGatewayEventImpl)hdfsevent).
+                    getSizeOnHDFSInBytes(!this.isBucketSorted));
+              }
+              ((EventQueue)hdfsEventQueue).put(key);
+            }
+              
+          }
+          getEventSeqNum().setIfGreater(sortedKeys.last());
+        }
+      
+      }
+      if (logger.isDebugEnabled() || VERBOSE) {
+        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG,
+            "For bucket " + getId() + ", total keys recovered are : " + keySet.size()
+                + " and the seqNo is " + getEventSeqNum()));
+      }
+      return sortedKeys;
+    }
+    
+    @Override
+    protected void basicClear(RegionEventImpl ev) {
+      super.basicClear(ev);
+      queueSizeInBytes.set(0);
+      if ( this.getBucketAdvisor().isPrimary()) {
+        this.hdfsEventQueue.clear();
+      }
+    }
+    
+    protected void clearQueues(){
+      queueSizeInBytes.set(0);
+      if ( this.getBucketAdvisor().isPrimary()) {
+        this.hdfsEventQueue.clear();
+      }
+    }
+   
+    @Override
+    protected void basicDestroy(final EntryEventImpl event,
+        final boolean cacheWrite, Object expectedOldValue)
+        throws EntryNotFoundException, CacheWriterException, TimeoutException {
+      super.basicDestroy(event, cacheWrite, expectedOldValue);
+    }
+    
+    ArrayList peekABatch() {
+      ArrayList result = new ArrayList();
+      hdfsEventQueue.peek(result);
+      return result;
+    }
+    
+    @Override
+    protected void addToEventQueue(Object key, boolean didPut, EntryEventImpl event, int sizeOfHDFSEvent) {
+      if (didPut &&  this.getBucketAdvisor().isPrimary()) {
+        HDFSGatewayEventImpl hdfsEvent = (HDFSGatewayEventImpl)event.getValue();
+        if (sizeOfHDFSEvent == -1) { 
+          try {
+            // the size is calculated only on primary before event is inserted in the bucket. 
+            // If this node became primary after size was calculated, sizeOfHDFSEvent will be -1. 
+            // Try to get the size. #50016
+            sizeOfHDFSEvent = hdfsEvent.getSizeOnHDFSInBytes(!((HDFSBucketRegionQueue)this).isBucketSorted);
+          } catch (Throwable e) {
+           //   Ignore any exception while fetching the size.
+            sizeOfHDFSEvent = 0;
+          }
+        }
+        queueSizeInBytes.getAndAdd(sizeOfHDFSEvent);
+        if (this.initialized) {
+          Long longKey = (Long)key;
+          this.hdfsEventQueue.put(longKey, hdfsEvent, sizeOfHDFSEvent);
+        }
+        if (logger.isDebugEnabled()) {
+          logger.debug("Put successfully in the queue : " + hdfsEvent + " . Queue initialized: " 
+              + this.initialized);
+        }
+      }
+    }
+    
+    /**
+     * Individual entries cannot be removed from an HDFSBucketRegionQueue, so this
+     * method always throws UnsupportedOperationException.
+     * 
+     * @throws ForceReattemptException
+     */
+    public Long remove() throws ForceReattemptException {
+      throw new UnsupportedOperationException("Individual entries cannot be removed in a HDFSBucketRegionQueue");
+    }
+    
+    /**
+     * take() cannot be called for individual entries in an HDFSBucketRegionQueue,
+     * so this method always throws UnsupportedOperationException.
+     * 
+     * @throws InterruptedException
+     * @throws ForceReattemptException
+     */
+    public Object take() throws InterruptedException, ForceReattemptException {
+      throw new UnsupportedOperationException("take() cannot be called for individual entries in a HDFSBucketRegionQueue");
+    }
+    
+    public void destroyKeys(ArrayList<HDFSGatewayEventImpl>  listToDestroy) {
+      
+      HashSet<Long> removedSeqNums = new HashSet<Long>();
+      
+      for (int index =0; index < listToDestroy.size(); index++) {
+        HDFSGatewayEventImpl entry = null;
+        if (this.isBucketSorted) {
+          // Remove the events in reverse order so that the events with higher sequence number
+          // are removed last to ensure consistency.
+          entry = listToDestroy.get(listToDestroy.size() - index -1);
+        } else {
+          entry = listToDestroy.get(index);
+        }
+       
+        try {
+          if (this.logger.isDebugEnabled())
+            logger.debug("destroying primary key " + entry.getShadowKey() + " bucket id: " + this.getId());
+          // removed from peeked list
+          boolean deleted = this.hdfsEventQueue.remove(entry);
+          if (deleted) {
+            // this is an onheap event so a call to size should be ok. 
+            long entrySize = entry.getSizeOnHDFSInBytes(!this.isBucketSorted);
+            destroyKey(entry.getShadowKey());
+            long queueSize = queueSizeInBytes.getAndAdd(-1*entrySize);
+            if (queueSize < 0) {
+              // In HA scenarios, queueSizeInBytes can go awry.
+              queueSizeInBytes.compareAndSet(queueSize, 0);
+            }
+            removedSeqNums.add(entry.getShadowKey());
+          }
+        }catch (ForceReattemptException e) {
+          if (logger.isDebugEnabled()) {
+            logger.debug("ParallelGatewaySenderQueue#remove->HDFSBucketRegionQueue#destroyKeys: " + "Got ForceReattemptException for " + this
+            + " for bucket = " + this.getId());
+          }
+        }
+        catch(EntryNotFoundException e) {
+          if (logger.isDebugEnabled()) {
+            logger.debug("ParallelGatewaySenderQueue#remove->HDFSBucketRegionQueue#destroyKeys: " + "Got EntryNotFoundException for " + this
+              + " for bucket = " + this.getId() + " and key " + entry.getShadowKey());
+          }
+        } finally {
+          entry.release();
+        }
+      }
+      
+      if (this.getBucketAdvisor().isPrimary()) {
+        hdfsEventQueue.handleRemainingElements(removedSeqNums);
+      }
+    }
+
+    
+    public boolean isReadyForPeek() {
+      return !this.isEmpty() && !this.hdfsEventQueue.isEmpty() && getBucketAdvisor().isPrimary();
+    }
+
+    public long getLastPeekTimeInMillis() {
+      return hdfsEventQueue.getLastPeekTimeInMillis();
+    }
+    
+    public long getQueueSizeInBytes() {
+      return queueSizeInBytes.get();
+    }
+    /*
+     * This function is called when the bucket takes on the role of primary.
+     */
+    @Override
+    public void beforeAcquiringPrimaryState() {
+      
+      queueSizeInBytes.set(0);
+      if (logger.isDebugEnabled() || VERBOSE) {
+        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG,
+            "This node has become primary for bucket " + this.getId() + ". " +
+            "Creating sorted data structure for the async queue."));
+      }
+      releasingPrimaryLock.set(false);
+      
+      // Clear the hdfs queue in case it still has elements left over from a time
+      // when this node was primary in the past.
+      hdfsEventQueue.clear();
+      if (isBucketSorted)
+        hdfsEventQueue = new MultiRegionSortedQueue();
+      else
+        hdfsEventQueue = new EventQueue();
+      
+      TreeSet<Long> sortedKeys = createSkipListFromMap(this.keySet());
+      
+      if (sortedKeys != null && sortedKeys.size() > 0) {    
+        // Mark up to one batch worth of events as possible duplicates.
+        // The batch size is estimated from the number and average size of the events
+        // currently in the queue; this is an approximation.
+        long batchSizeMB =  this.getPartitionedRegion().getParallelGatewaySender().getBatchSize();
+        long batchSizeInBytes = batchSizeMB*1024*1024;
+        long totalBucketSize = queueSizeInBytes.get();
+        totalBucketSize = totalBucketSize >  0 ? totalBucketSize: 1;
+        long totalEntriesInBucket = this.entryCount();
+        totalEntriesInBucket =  totalEntriesInBucket > 0 ? totalEntriesInBucket: 1;
+        
+        long perEntryApproxSize = totalBucketSize/totalEntriesInBucket;
+        perEntryApproxSize = perEntryApproxSize >  0 ? perEntryApproxSize: 1;
+        
+        int batchSize  = (int)(batchSizeInBytes/perEntryApproxSize);
+        
+        if (logger.isDebugEnabled() || VERBOSE) {
+          logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG,
+              "Calculating batch size " +  " batchSizeMB: " + batchSizeMB + " batchSizeInBytes: " + batchSizeInBytes + 
+              " totalBucketSize: " + totalBucketSize + " totalEntriesInBucket: " + totalEntriesInBucket + 
+              " perEntryApproxSize: " + perEntryApproxSize + " batchSize: " + batchSize ));
+        }
+        
+        markEventsAsDuplicate(batchSize, sortedKeys.iterator());
+      }
+    }
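+    
+    // Worked example of the estimate above (the numbers are illustrative only, not taken
+    // from any particular configuration): with batchSizeMB = 32, batchSizeInBytes is
+    // 32 * 1024 * 1024 = 33,554,432 bytes. If the bucket currently holds
+    // totalEntriesInBucket = 100,000 events totalling totalBucketSize = 50,000,000 bytes,
+    // then perEntryApproxSize is 500 bytes and batchSize = 33,554,432 / 500 = 67,108
+    // events are marked as possible duplicates by markEventsAsDuplicate().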
+    
+    @Override
+    public void beforeReleasingPrimaryLockDuringDemotion() {
+      queueSizeInBytes.set(0);
+      releasingPrimaryLock.set(true);
+      // release memory in case of a clean transition
+      hdfsEventQueue.clear();
+    }
+
+    /**
+     * Searches the skip list and the peeked skip list for a given region key.
+     * @param region the region the key belongs to
+     * @param regionKey the serialized key to look up
+     */
+    public HDFSGatewayEventImpl getObjectForRegionKey(Region region, byte[] regionKey) {
+      // get can only be called for a sorted queue.
+      // Calling get with Long.MIN_VALUE seq number ensures that 
+      // the list will return the key which has highest seq number. 
+      return hdfsEventQueue.get(region, regionKey, Long.MIN_VALUE);
+    }
+
+    /**
+     * Get an iterator on the queue, passing in the partitioned region
+     * we want to iterate over the events from.
+     */
+    public SortedEventQueueIterator iterator(Region region) {
+      return hdfsEventQueue.iterator(region);
+    }
+
+    public long totalEntries() {
+      return entryCount();
+    }
+    
+    /**
+     * Ideally this function should be called from a thread periodically to 
+     * rollover the skip list when it is above a certain size. 
+     * 
+     */
+    public void rolloverSkipList() {
+      // rollover can only be called for a sorted queue.
+      hdfsEventQueue.rollover();
+    }
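+    
+    /*
+     * Illustrative sketch only; the scheduler below is an assumption and is not part of
+     * this class. A periodic task could drive rolloverSkipList(), for example:
+     *
+     *   ScheduledExecutorService rolloverScheduler = Executors.newSingleThreadScheduledExecutor();
+     *   rolloverScheduler.scheduleWithFixedDelay(new Runnable() {
+     *     public void run() {
+     *       queue.rolloverSkipList();   // "queue" is a reference to this bucket region queue
+     *     }
+     *   }, 5, 5, TimeUnit.SECONDS);     // the 5 second period is an arbitrary example value
+     */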
+    
+    public boolean shouldDrainImmediately() {
+      return hdfsEventQueue.getFlushObserver().shouldDrainImmediately();
+    }
+
+    public AsyncFlushResult flush() {
+      if (logger.isDebugEnabled() || VERBOSE) {
+        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Flush requested"));
+      }
+      return hdfsEventQueue.getFlushObserver().flush();
+    }
+    
+    /**
+     * This class holds the region key and seqNum. Objects of this class are
+     * kept in a concurrent skip list. The ordering is based on the comparison of
+     * regionKey + seqNum, which allows the list to keep multiple updates for a
+     * single key (because each update has a different seq num).
+     */
+    static class KeyToSeqNumObject implements Comparable<KeyToSeqNumObject>
+    {
+      private byte[] regionkey; 
+      private Long seqNum;
+      
+      KeyToSeqNumObject(byte[] regionkey, Long seqNum){
+        this.regionkey = regionkey;
+        this.seqNum = seqNum;
+      }
+      
+      /**
+       * Compares the region key first; if the keys are the same, the seq num is compared.
+       * This comparison is central to the design: it ensures that the skip lists hold
+       * the elements in the desired order and that, for multiple updates on a key, a get
+       * fetches the most recent one. Currently seq numbers are compared, but this will
+       * have to change to version stamps.
+       * A list can hold elements in the following sequence:
+       * K1 Value1 version : 1 
+       * K2 Value2a version : 2
+       * K2 Value2 version : 1
+       * K3 Value3 version : 1
+       * For a get on K2, it should return K2 Value2a.
+       */
+      @Override
+      public int compareTo(KeyToSeqNumObject o) {
+        int compareOutput = ByteComparator.compareBytes(
+            this.getRegionkey(), 0, this.getRegionkey().length, o.getRegionkey(), 0, o.getRegionkey().length);
+        if (compareOutput != 0 )
+          return compareOutput;
+        
+        // If the keys are the same and this is the probe object with the dummy seq number,
+        // return -1 so that it sorts before every real entry with the same key; the
+        // ceiling function on a skip list will then return the entry with the highest
+        // seq number.
+        if (this.getSeqNum() == Long.MIN_VALUE) 
+          return -1;
+        
+        // Mirror of the case above, to keep the comparison symmetric.
+        if (o.getSeqNum() == Long.MIN_VALUE) 
+          return 1;
+       
+        // Negating the comparison pushes entries with lower seq numbers towards the end,
+        // preserving the order described above so that the entry with the highest seq
+        // number is fetched on a get.
+        return this.getSeqNum().compareTo(o.getSeqNum()) * -1;  
+      }
+      
+      @Override
+      public boolean equals(Object o) {
+        if (!(o instanceof KeyToSeqNumObject)) {
+          return false;
+        }
+        return this.compareTo((KeyToSeqNumObject) o) == 0;
+      }
+      
+      @Override
+      public int hashCode() {
+        assert false : "hashCode not designed";
+        return -1;
+      }
+      
+      byte[] getRegionkey() {
+        return regionkey;
+      }
+
+      public Long getSeqNum() {
+        return seqNum;
+      }
+
+      public void setSeqNum(Long seqNum) {
+        this.seqNum = seqNum;
+      }
+      
+      @Override
+      public String toString() {
+        return EntryEventImpl.deserialize(regionkey) + " {" + seqNum + "}";
+      }
+    }
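+    
+    /*
+     * Illustrative example of the ordering above (the values are made up): for a key "K2"
+     * that was updated twice, the skip list holds
+     *
+     *   (K2, seqNum = 2)   // newer update; sorts first because seq numbers compare in reverse
+     *   (K2, seqNum = 1)
+     *
+     * so ConcurrentSkipListMap.ceilingKey(new KeyToSeqNumObject(serializedK2, Long.MIN_VALUE))
+     * returns (K2, seqNum = 2), i.e. the most recent update. getObjectForRegionKey() and
+     * SortedEventBuffer.getFromQueueOrBuffer() rely on this behavior.
+     */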
+    
+    public interface HDFSEventQueue {
+      FlushObserver getFlushObserver();
+      
+      /** puts an event in the queue. */ 
+      public void put (long key, HDFSGatewayEventImpl event, int size);
+      
+      public SortedEventQueueIterator iterator(Region region);
+
+      public void rollover();
+
+      /** Gets a value from the queue.
+       * @throws InternalGemFireError if this queue implementation does not support get
+       **/
+      public HDFSGatewayEventImpl get(Region region, byte[] regionKey,
+          long minValue);
+
+      // Peeks a batch of the size specified by batchSize
+      // and adds the results to the given array list.
+      public void peek(ArrayList result);
+      
+      // Checks if there are elements to be peeked.
+      public boolean isEmpty();
+      
+      // removes the event if it has already been peeked. 
+      public boolean remove(HDFSGatewayEventImpl event);
+      
+      // Takes care of the elements that were peeked
+      // but were not removed after a batch dispatch
+      // due to concurrency effects.
+      public void handleRemainingElements(HashSet<Long> removedSeqNums);
+      
+      // clears the list. 
+      public void clear();
+      
+      // get the time when the last peek was done. 
+      public long getLastPeekTimeInMillis();
+    }
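+    
+    /*
+     * Sketch of the dispatch cycle the implementations below are written for, inferred
+     * from the callers in this class rather than stated as a formal contract:
+     *
+     *   queue.put(seqNum, event, size);                  // on each enqueue while this bucket is primary
+     *   queue.peek(batch);                               // peekABatch() collects one batch for the dispatcher
+     *   for each dispatched event: queue.remove(event);  // destroyKeys() after the batch reaches HDFS
+     *   queue.handleRemainingElements(removedSeqNums);   // return anything peeked but not removed
+     */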
+    
+    class MultiRegionSortedQueue implements HDFSEventQueue {
+      ConcurrentMap<String, SortedEventQueue> regionToEventQueue = new ConcurrentHashMap<String, SortedEventQueue>();
+      volatile Set<SortedEventQueue> peekedQueues = Collections.EMPTY_SET;
+      private final AtomicBoolean peeking = new AtomicBoolean(false);
+      long lastPeekTimeInMillis = System.currentTimeMillis();
+      
+      private final FlushObserver flush = new FlushObserver() {
+        @Override
+        public AsyncFlushResult flush() {
+          final Set<AsyncFlushResult> flushes = new HashSet<AsyncFlushResult>();
+          for (SortedEventQueue queue : regionToEventQueue.values()) {
+            flushes.add(queue.getFlushObserver().flush());
+          }
+          
+          return new AsyncFlushResult() {
+            @Override
+            public boolean waitForFlush(long timeout, TimeUnit unit) throws InterruptedException {
+              long start = System.nanoTime();
+              long remaining = unit.toNanos(timeout);
+              for (AsyncFlushResult afr : flushes) {
+                if (!afr.waitForFlush(remaining, TimeUnit.NANOSECONDS)) {
+                  return false;
+                }
+                // subtract only the time spent waiting in this iteration
+                long now = System.nanoTime();
+                remaining -= (now - start);
+                start = now;
+              }
+              return true;
+            }
+          };
+        }
+        
+        @Override
+        public boolean shouldDrainImmediately() {
+          for (SortedEventQueue queue : regionToEventQueue.values()) {
+            if (queue.getFlushObserver().shouldDrainImmediately()) {
+              return true;
+            }
+          }
+          return false;
+        }
+      };
+      
+      @Override
+      public FlushObserver getFlushObserver() {
+        return flush;
+      }
+
+      @Override
+      public void put(long key, HDFSGatewayEventImpl event, int size) {
+        
+        String region = event.getRegionPath();
+        SortedEventQueue regionQueue = regionToEventQueue.get(region);
+        if(regionQueue == null) {
+          regionToEventQueue.putIfAbsent(region, new SortedEventQueue());
+          regionQueue = regionToEventQueue.get(region);
+        }
+        regionQueue.put(key, event, size);
+      }
+
+      @Override
+      public void peek(ArrayList result) {
+        // The elements that were peeked last time have not been persisted to HDFS
+        // yet. The next batch cannot be taken out until that is done.
+        if (!peeking.compareAndSet(false, true)) {
+          if (logger.isTraceEnabled() || VERBOSE) {
+            logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Peek already in progress, aborting"));
+          }
+          return;
+        }
+        //Maintain a separate set of peeked queues.
+        //All of these queues are stateful and expect handleRemainingElements
+        //and clear to be called on them only if peek was called on them.
+        //However, new queues may be created in the meantime.
+        peekedQueues = Collections.newSetFromMap(new ConcurrentHashMap<SortedEventQueue, Boolean>(regionToEventQueue.size()));
+        
+        //Peek from all of the existing queues
+        for(SortedEventQueue queue : regionToEventQueue.values()) {
+          if(!queue.isEmpty()) {
+            queue.peek(result);
+            peekedQueues.add(queue);
+          }
+        }
+        if (result.isEmpty()) 
+          peeking.set(false);
+        
+        
+        this.lastPeekTimeInMillis = System.currentTimeMillis();
+      }
+
+      @Override
+      public boolean isEmpty() {
+        for(SortedEventQueue queue : regionToEventQueue.values()) {
+          if(!queue.isEmpty()) {
+            return false;
+          }
+        }
+        return true;
+      }
+
+      @Override
+      public boolean remove(HDFSGatewayEventImpl event) {
+        String region = event.getRegionPath();
+        SortedEventQueue regionQueue = regionToEventQueue.get(region);
+        return regionQueue.remove(event);
+      }
+
+      @Override
+      public void handleRemainingElements(HashSet<Long> removedSeqNums){
+        for(SortedEventQueue queue : peekedQueues) {
+          queue.handleRemainingElements(removedSeqNums);
+        }
+        peekedQueues.clear();
+        peeking.set(false);
+      }
+
+      @Override
+      public void clear() {
+        for(SortedEventQueue queue : regionToEventQueue.values()) {
+          queue.clear();
+        }
+        peekedQueues.clear();
+        peeking.set(false);
+      }
+
+      @Override
+      public long getLastPeekTimeInMillis() {
+        return this.lastPeekTimeInMillis;
+      }
+
+      @Override
+      public HDFSGatewayEventImpl get(Region region, byte[] regionKey,
+          long minValue) {
+        SortedEventQueue queue = regionToEventQueue.get(region.getFullPath());
+        if(queue == null) {
+          return null;
+        }
+        return queue.get(region, regionKey, minValue);
+      }
+
+      @Override
+      public SortedEventQueueIterator iterator(Region region) {
+        SortedEventQueue queue = regionToEventQueue.get(region.getFullPath());
+        if(queue == null) {
+          return new SortedEventQueueIterator(new LinkedBlockingDeque<SortedEventBuffer>());
+        }
+        return queue.iterator(region);
+      }
+
+      @Override
+      public void rollover() {
+        for(SortedEventQueue queue : regionToEventQueue.values()) {
+          queue.rollover();
+        }
+      }
+    }
+    
+    class EventQueue implements HDFSEventQueue {
+      private final SignalledFlushObserver flush = new SignalledFlushObserver();
+      private final BlockingQueue<Long> eventSeqNumQueue = new LinkedBlockingQueue<Long>();
+      private final BlockingQueue<Long> peekedEvents = new LinkedBlockingQueue<Long>();
+      private long lastPeekTimeInMillis = System.currentTimeMillis(); 
+      
+      public EventQueue() {
+        
+      }
+      
+      @Override
+      public FlushObserver getFlushObserver() {
+        return flush;
+      }
+
+      @Override
+      public void put(long key, HDFSGatewayEventImpl event, int size) {
+        put(key);
+      }
+      public void put (long key) {
+        eventSeqNumQueue.add(key);
+        flush.push();
+        incQueueSize();
+      }
+      
+      
+      @Override
+      public HDFSGatewayEventImpl get(Region region, byte[] regionKey,
+          long minValue) {
+        throw new InternalGemFireError("Get not supported on unsorted queue");
+      }
+      
+      @Override
+      public void peek(ArrayList peekedEntries) {
+        if (peekedEvents.size() != 0) {
+          return;
+        }
+        
+        for(int size=0; size < batchSize; ) {
+          Long seqNum = eventSeqNumQueue.peek();
+          if (seqNum == null) {
+            // queue is now empty, return
+            break;
+          }
+          Object object = getNoLRU(seqNum, true, false, false);
+          if (object != null) {
+            peekedEvents.add(seqNum);
+            size += ((HDFSGatewayEventImpl)object).getSizeOnHDFSInBytes(!isBucketSorted);
+            peekedEntries.add(object);
+
+          } else {
+            logger.debug("The entry corresponding to the sequence number " + 
+               seqNum +  " is missing. This can happen when an entry is already" +
+               "dispatched before a bucket moved.");
+            // event is being ignored. Decrease the queue size
+            decQueueSize();
+            flush.pop(1);
+           
+          }
+          eventSeqNumQueue.poll();
+          
+        }
+        this.lastPeekTimeInMillis  = System.currentTimeMillis();
+      }
+
+      @Override
+      public boolean isEmpty() {
+        return eventSeqNumQueue.isEmpty();
+      }
+
+      
+      @Override
+      public boolean remove(HDFSGatewayEventImpl event) {
+        boolean deleted = peekedEvents.remove(event.getShadowKey());
+        if (deleted)
+         decQueueSize();
+        return deleted;
+      }
+
+      @Override
+      // It looks like there is no need for this function
+      // in EventQueue.
+      public void handleRemainingElements(HashSet<Long> removedSeqNums) {
+        flush.pop(removedSeqNums.size());
+        eventSeqNumQueue.addAll(peekedEvents);
+        peekedEvents.clear();
+      }
+
+      @Override
+      public void clear() {
+        flush.clear();
+        decQueueSize(eventSeqNumQueue.size());
+        eventSeqNumQueue.clear();
+        decQueueSize(peekedEvents.size());
+        peekedEvents.clear();
+      }
+
+      @Override
+      public long getLastPeekTimeInMillis() {
+        return this.lastPeekTimeInMillis;
+      }
+      @Override
+      public SortedEventQueueIterator iterator(Region region) {
+        throw new InternalGemFireError("not supported on unsorted queue");
+      }
+      @Override
+      public void rollover() {
+        throw new InternalGemFireError("not supported on unsorted queue");
+      }
+    }
+    
+    class SortedEventQueue implements HDFSEventQueue {
+      private final SignalledFlushObserver flush = new SignalledFlushObserver();
+
+      // List of all the skip lists that hold the data
+      final Deque<SortedEventBuffer> queueOfLists = 
+          new LinkedBlockingDeque<SortedEventBuffer>();
+      
+      // This points to the tail of the queue
+      volatile SortedEventBuffer currentSkipList = new SortedEventBuffer();
+      
+      private final AtomicBoolean peeking = new AtomicBoolean(false);
+      
+      private long lastPeekTimeInMillis = System.currentTimeMillis(); 
+      
+      public SortedEventQueue() {
+        queueOfLists.add(currentSkipList);
+      }
+      
+      @Override
+      public FlushObserver getFlushObserver() {
+        return flush;
+      }
+
+      public boolean remove(HDFSGatewayEventImpl event) {
+        SortedEventBuffer eventBuffer = queueOfLists.peek();
+        if (eventBuffer != null) {
+          return eventBuffer.copyToBuffer(event);
+        }
+        else {
+          // This can happen when the queue is cleared because of bucket movement 
+          // before the remove is called. 
+          return true;
+        }
+      } 
+
+      public void clear() {
+        flush.clear();
+        for (SortedEventBuffer buf : queueOfLists) {
+          decQueueSize(buf.size());
+          buf.clear();
+        }
+        
+        queueOfLists.clear();
+        rollList(false);
+
+        peeking.set(false);
+      }
+
+      public boolean isEmpty() {
+        if (queueOfLists.size() == 1)
+          return queueOfLists.peek().isEmpty();
+        return false;
+      }
+
+      public void put(long key, HDFSGatewayEventImpl event, int eventSize) {
+        if (logger.isTraceEnabled() || VERBOSE) {
+          logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Inserting key " + event + " into list " + System.identityHashCode(currentSkipList)));
+        }
+        putInList(new KeyToSeqNumObject(((HDFSGatewayEventImpl)event).getSerializedKey(), key), 
+            eventSize);
+      }
+
+      private void putInList(KeyToSeqNumObject entry, int sizeInBytes) {
+        // It was observed during testing that peek can start peeking
+        // elements from a list to which a put is still happening. This happens
+        // when the peek changes the value of currentSkipList to a new list
+        // but the put continues to write to the older list.
+        // So there is a possibility that an element is added to a list
+        // that has already been peeked. To handle this case, the
+        // handleRemainingElements function re-adds the elements that were not dispatched.
+        if (currentSkipList.add(entry, sizeInBytes) == null) {
+          flush.push();
+          incQueueSize();
+        }
+      }
+
+      public void rollover(boolean forceRollover) {
+        if (currentSkipList.bufferSize() >= batchSize || forceRollover) {
+          rollList(forceRollover);
+        }
+      }
+      
+      /**
+       * Ideally this function should be called from a thread periodically to 
+       * rollover the skip list when it is above a certain size. 
+       * 
+       */
+      public void rollover() {
+        rollover(false);
+      }
+
+      public void peek(ArrayList peekedEntries) {
+        // The elements that were peeked last time have not been persisted to HDFS
+        // yet. The next batch cannot be taken out until that is done.
+        if (!peeking.compareAndSet(false, true)) {
+          if (logger.isTraceEnabled() || VERBOSE) {
+            logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Peek already in progress, aborting"));
+          }
+          return;
+        }
+
+        if (queueOfLists.size() == 1) {
+          rollList(false);
+        }
+        
+        Assert.assertTrue(queueOfLists.size() > 1, "Cannot peek from head of queue");
+        BufferIterator itr = queueOfLists.peek().iterator();
+        while (itr.hasNext()) {
+          KeyToSeqNumObject entry = itr.next();
+          if (logger.isTraceEnabled() || VERBOSE) {
+            logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Peeking key " + entry + " from list " + System.identityHashCode(queueOfLists.peek())));
+          }
+
+          HDFSGatewayEventImpl ev = itr.value();
+          ev.copyOffHeapValue();
+          peekedEntries.add(ev);
+        }
+        
+        // discard an empty batch as it is not processed and will plug up the
+        // queue
+        if (peekedEntries.isEmpty()) {
+          SortedEventBuffer empty = queueOfLists.remove();
+          if (logger.isTraceEnabled() || VERBOSE) {
+            logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Discarding empty batch " + empty));
+          }
+          peeking.set(false);
+        }
+        this.lastPeekTimeInMillis = System.currentTimeMillis();
+      }
+
+      public HDFSGatewayEventImpl get(Region region, byte[] regionKey, long key) {
+        KeyToSeqNumObject event = new KeyToSeqNumObject(regionKey, key);
+        Iterator<SortedEventBuffer> queueIterator = queueOfLists.descendingIterator();
+        while (queueIterator.hasNext()) {
+          HDFSGatewayEventImpl evt = queueIterator.next().getFromQueueOrBuffer(event);
+          if (evt != null) {
+            return evt;
+          }
+        }
+        return null;
+      }
+      
+      public void handleRemainingElements(HashSet<Long> removedSeqNums) {
+        if (!peeking.get()) {
+          if (logger.isTraceEnabled() || VERBOSE) {
+            logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Not peeked, just cleaning up empty batch; current list is " + currentSkipList));
+          }
+          return;
+        }
+
+        Assert.assertTrue(queueOfLists.size() > 1, "Cannot remove only event list");
+
+        // all done with the peeked elements, okay to throw away now
+        SortedEventBuffer buf = queueOfLists.remove();
+        SortedEventBuffer.BufferIterator bufIter = buf.iterator();
+        // Check if the removed buffer has any extra events. If so, check whether these extra
+        // events are still part of the region. If they are, reinsert them, as they were probably
+        // inserted into this list while it was being peeked.
+        while (bufIter.hasNext()) {
+          KeyToSeqNumObject key = bufIter.next();
+          if (!removedSeqNums.contains(key.getSeqNum())) {
+            HDFSGatewayEventImpl evt = (HDFSGatewayEventImpl) getNoLRU(key.getSeqNum(), true, false, false);
+            if (evt != null) {
+              flush.push();
+              incQueueSize();
+              queueOfLists.getFirst().add(key, evt.getSizeOnHDFSInBytes(!isBucketSorted));
+            }
+          }
+        }
+
+        decQueueSize(buf.size());
+        flush.pop(buf.size());
+        peeking.set(false);
+      }
+      
+      public long getLastPeekTimeInMillis(){
+        return this.lastPeekTimeInMillis;
+      }
+      
+      NavigableSet<KeyToSeqNumObject> getPeeked() {
+        assert peeking.get();
+        return queueOfLists.peek().keySet();
+      }
+      
+      private synchronized void rollList(boolean forceRollover) {
+        if (currentSkipList.bufferSize() < batchSize && queueOfLists.size() > 1 && !forceRollover)
+          return;
+        SortedEventBuffer tmp = new SortedEventBuffer();
+        queueOfLists.add(tmp);
+        if (logger.isTraceEnabled() || VERBOSE) {
+          logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Rolling over list from " + currentSkipList + " to list " + tmp));
+        }
+        currentSkipList = tmp;
+      }
+
+      @Override
+      public SortedEventQueueIterator iterator(Region region) {
+        return new SortedEventQueueIterator(queueOfLists);
+      }
+    }
+    
+    public class SortedEventBuffer {
+      private final HDFSGatewayEventImpl NULL = new HDFSGatewayEventImpl();
+  
+      private final ConcurrentSkipListMap<KeyToSeqNumObject, HDFSGatewayEventImpl> events = new ConcurrentSkipListMap<KeyToSeqNumObject, HDFSGatewayEventImpl>();
+      
+      private int bufferSize = 0;
+      
+      public boolean copyToBuffer(HDFSGatewayEventImpl event) {
+        KeyToSeqNumObject key = new KeyToSeqNumObject(event.getSerializedKey(), event.getShadowKey());
+        if (events.containsKey(key)) {
+          // After an event has been delivered in a batch, we copy it into the
+          // buffer so that it can be returned by an already in progress iterator.
+          // If we do not do this it is possible to miss events since the hoplog
+          // iterator uses a fixed set of files that are determined when the
+          // iterator is created.  The events will be GC'd once the buffer is no
+          // longer strongly referenced.
+          HDFSGatewayEventImpl oldVal = events.put(key, event);
+          assert oldVal == NULL;
+  
+          return true;
+        }
+        // If the primary lock is being relinquished, the events map is cleared and that is
+        // probably why we are here. Return true if the primary lock is being relinquished.
+        return releasingPrimaryLock.get();
+      }
+  
+      public HDFSGatewayEventImpl getFromQueueOrBuffer(KeyToSeqNumObject key) {
+        KeyToSeqNumObject result = events.ceilingKey(key);
+        if (result != null && Bytes.compareTo(key.getRegionkey(), result.getRegionkey()) == 0) {
+          
+          // first try to fetch the buffered event to make it fast. 
+          HDFSGatewayEventImpl evt = events.get(result);
+          if (evt != NULL) {
+            return evt;
+          }
+          // now try to fetch the event from the queue region
+          evt = (HDFSGatewayEventImpl) getNoLRU(result.getSeqNum(), true, false, false);
+          if (evt != null) {
+            return evt;
+          }
+          
+          // try to fetch again from the buffered events to avoid a race between 
+          // item deletion and the above two statements. 
+          evt = events.get(result);
+          if (evt != NULL) {
+            return evt;
+          }
+          
+        }
+        return null;
+      }
+  
+      public HDFSGatewayEventImpl add(KeyToSeqNumObject key, int sizeInBytes) {
+        bufferSize += sizeInBytes;
+        return events.put(key, NULL);
+      }
+  
+      public void clear() {
+        events.clear();
+      }
+  
+      public boolean isEmpty() {
+        return events.isEmpty();
+      }
+  
+      public int bufferSize() {
+        return bufferSize;
+      }
+      public int size() {
+        return events.size();
+      }
+      public NavigableSet<KeyToSeqNumObject> keySet() {
+        return events.keySet();
+      }
+  
+      public BufferIterator iterator() {
+        return new BufferIterator(events.keySet().iterator());
+      }
+  
+      public class BufferIterator implements Iterator<KeyToSeqNumObject> {
+        private final Iterator<KeyToSeqNumObject> src;
+
+        private KeyToSeqNumObject currentKey;
+        private HDFSGatewayEventImpl currentVal;
+
+        private KeyToSeqNumObject nextKey;
+        private HDFSGatewayEventImpl nextVal;
+        
+        public BufferIterator(Iterator<KeyToSeqNumObject> src) {
+          this.src = src;
+          moveNext();
+        }
+  
+        @Override
+        public void remove() {
+          throw new UnsupportedOperationException();
+        }
+        
+        @Override
+        public boolean hasNext() {
+          return nextVal != null;
+        }
+        
+        @Override
+        public KeyToSeqNumObject next() {
+          if (!hasNext()) {
+            throw new NoSuchElementException();
+          }
+          
+          currentKey = nextKey;
+          currentVal = nextVal;
+          
+          moveNext();
+          
+          return currentKey;
+        }
+  
+        public KeyToSeqNumObject key() {
+          assert currentKey != null;
+          return currentKey;
+        }
+        
+        public HDFSGatewayEventImpl value() {
+          assert currentVal != null;
+          return currentVal;
+        }
+        
+        private void moveNext() {
+          while (src.hasNext()) {
+            nextKey = src.next();
+            nextVal = getFromQueueOrBuffer(nextKey);
+            if (nextVal != null) {
+              return;
+            } else if (logger.isDebugEnabled() || VERBOSE) {
+              logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "The entry corresponding to"
+                  + " the sequence number " + nextKey.getSeqNum() 
+                  + " is missing. This can happen when an entry is already" 
+                  + " dispatched before a bucket moved."));
+            }
+          }
+          nextKey = null;
+          nextVal = null;
+        }
+      }
+    }
+  
+    public final class SortedEventQueueIterator implements CursorIterator<HDFSGatewayEventImpl> {
+      /** the iterators to merge */
+      private final List<SortedEventBuffer.BufferIterator> iters;
+  
+      /** the current iteration value */
+      private HDFSGatewayEventImpl value;
+  
+      public SortedEventQueueIterator(Deque<SortedEventBuffer> queueOfLists) {
+        iters = new ArrayList<SortedEventBuffer.BufferIterator>();
+        for (Iterator<SortedEventBuffer> iter = queueOfLists.descendingIterator(); iter.hasNext();) {
+          SortedEventBuffer.BufferIterator buf = iter.next().iterator();
+          if (buf.hasNext()) {
+            buf.next();
+            iters.add(buf);
+          }
+        }
+      }
+      
+      public void close() {
+        value = null;
+        iters.clear();
+      }
+
+      @Override
+      public boolean hasNext() {
+        return !iters.isEmpty();
+      }
+      
+      @Override
+      public HDFSGatewayEventImpl next() {
+        if (!hasNext()) {
+          throw new NoSuchElementException();
+        }
+        
+        int diff = 0;
+        KeyToSeqNumObject min = null;
+        SortedEventBuffer.BufferIterator cursor = null;
+        
+        for (Iterator<SortedEventBuffer.BufferIterator> merge = iters.iterator(); merge.hasNext(); ) {
+          SortedEventBuffer.BufferIterator buf = merge.next();
+          KeyToSeqNumObject tmp = buf.key();
+          if (min == null || (diff = Bytes.compareTo(tmp.regionkey, min.regionkey)) < 0) {
+            min = tmp;
+            cursor = buf;
+            
+          } else if (diff == 0 && !advance(buf, min)) {
+            merge.remove();
+          }
+        }
+        
+        value = cursor.value();
+        assert value != null;
+
+        if (!advance(cursor, min)) {
+          iters.remove(cursor);
+        }
+        return current();
+      }
+      
+      @Override
+      public final HDFSGatewayEventImpl current() {
+        return value;
+      }
+
+      @Override 
+      public void remove() {
+        throw new UnsupportedOperationException();
+      }
+      
+      private boolean advance(SortedEventBuffer.BufferIterator iter, KeyToSeqNumObject key) {
+        while (iter.hasNext()) {
+          if (Bytes.compareTo(iter.next().regionkey, key.regionkey) > 0) {
+            return true;
+          }
+        }
+        return false;
+      }
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSet.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSet.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSet.java
new file mode 100644
index 0000000..c8b7b28
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSet.java
@@ -0,0 +1,329 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.io.IOException;
+import java.lang.ref.ReferenceQueue;
+import java.lang.ref.WeakReference;
+import java.util.AbstractSet;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue.SortedEventQueueIterator;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetReader.HoplogIterator;
+import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
+import com.gemstone.gemfire.internal.cache.BucketRegion;
+import com.gemstone.gemfire.internal.cache.EntryEventImpl;
+import com.gemstone.gemfire.internal.cache.HDFSRegionMap;
+import com.gemstone.gemfire.internal.cache.KeyWithRegionContext;
+import com.gemstone.gemfire.internal.cache.LocalRegion.IteratorType;
+import com.gemstone.gemfire.internal.cache.PrimaryBucketException;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import org.apache.hadoop.hbase.util.Bytes;
+
+@SuppressWarnings("rawtypes")
+public class HDFSEntriesSet extends AbstractSet {
+  private final IteratorType type;
+
+  private final HoplogOrganizer hoplogs;
+  private final HDFSBucketRegionQueue brq;
+  
+  private final BucketRegion region; 
+  private final ReferenceQueue<HDFSIterator> refs;
+  
+  public HDFSEntriesSet(BucketRegion region, HDFSBucketRegionQueue brq, 
+      HoplogOrganizer hoplogs, IteratorType type, ReferenceQueue<HDFSIterator> refs) {
+    this.region = region;
+    this.brq = brq;
+    this.hoplogs = hoplogs;
+    this.type = type;
+    this.refs = refs;
+  }
+  
+  @Override
+  public HDFSIterator iterator() {
+    HDFSIterator iter = new HDFSIterator(type, region.getPartitionedRegion(), true);
+    if (refs != null) {
+      // we can't rely on an explicit close but we need to free resources
+      //
+      // This approach has the potential to cause excessive memory load and/or
+      // GC problems if an app holds an iterator ref too long. A lease-based
+      // approach where iterators are automatically closed after X secs of inactivity is
+      // a potential alternative (but may require tuning for certain
+      // applications)
+      new WeakReference<HDFSEntriesSet.HDFSIterator>(iter, refs);
+    }
+    return iter;
+  }
+
+  @Override
+  public int size() {
+    // TODO this is the tortoise version, need a fast version for estimation
+    // note: more than 2^31-1 records will cause this counter to wrap
+    int size = 0;
+    HDFSIterator iter = new HDFSIterator(null, region.getPartitionedRegion(), false);
+    try {
+      while (iter.hasNext()) {
+        if (includeEntry(iter.next())) {
+          size++;
+        }
+      }
+    } finally {
+      iter.close();
+    }
+    return size;
+  }
+
+  @Override
+  public boolean isEmpty() {
+    HDFSIterator iter = new HDFSIterator(null, region.getPartitionedRegion(), false);
+    try {
+      while (iter.hasNext()) {
+        if (includeEntry(iter.next())) {
+          return false;
+        }
+      }
+    } finally {
+      iter.close();
+    }
+    return true;
+  }
+
+  private boolean includeEntry(Object val) {
+    if (val instanceof HDFSGatewayEventImpl) {
+      HDFSGatewayEventImpl evt = (HDFSGatewayEventImpl) val;
+      if (evt.getOperation().isDestroy()) {
+        return false;
+      }
+    } else if (val instanceof PersistedEventImpl) {
+      PersistedEventImpl evt = (PersistedEventImpl) val;
+      if (evt.getOperation().isDestroy()) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  public class HDFSIterator implements Iterator {
+    private final IteratorType type;
+    private final boolean deserialize;
+    
+    private final SortedEventQueueIterator queue;
+    private final HoplogIterator<byte[], SortedHoplogPersistedEvent> hdfs;
+    private Iterator txCreatedEntryIterator;
+    
+    private boolean queueNext;
+    private boolean hdfsNext;
+    private boolean forUpdate;
+    private boolean hasTxEntry;
+
+    private byte[] currentHdfsKey;
+
+    public HDFSIterator(IteratorType type, Region region, boolean deserialize) {
+      this.type = type;
+      this.deserialize = deserialize;
+
+      // Check whether the queue has become primary here.
+      // There can be a delay between the bucket becoming primary
+      // and the underlying queue becoming primary, so isPrimaryWithWait()
+      // waits for some time for the queue to become primary on this member.
+      if (!brq.getBucketAdvisor().isPrimaryWithWait()) {
+        InternalDistributedMember primaryHolder = brq.getBucketAdvisor()
+            .basicGetPrimaryMember();
+        throw new PrimaryBucketException("Bucket " + brq.getName()
+            + " is not primary. Current primary holder is " + primaryHolder);
+      }
+      // We are deliberately NOT sync'ing while creating the iterators.  If done
+      // in the correct order, we may get duplicates (due to an in-progress
+      // flush) but we won't miss any entries.  The dupes will be eliminated
+      // during iteration.
+      queue = brq.iterator(region);
+      advanceQueue();
+      
+      HoplogIterator<byte[], SortedHoplogPersistedEvent> tmp = null;
+      try {
+        tmp = hoplogs.scan();
+      } catch (IOException e) {
+        HDFSEntriesSet.this.region.checkForPrimary();
+        throw new HDFSIOException(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(e.getMessage()), e);
+      }
+      
+      hdfs = tmp;
+      if (hdfs != null) {
+        advanceHdfs();
+      }
+    }
+    
+    @Override
+    public boolean hasNext() {
+      boolean nonTxHasNext = hdfsNext || queueNext;
+      if (!nonTxHasNext && this.txCreatedEntryIterator != null) {
+        this.hasTxEntry = this.txCreatedEntryIterator.hasNext();
+        return this.hasTxEntry;
+      }
+      return nonTxHasNext;
+    }
+    
+    @Override
+    public Object next() {
+      if (!hasNext()) {
+        throw new NoSuchElementException();
+      }
+      if (hasTxEntry) {
+        hasTxEntry = false;
+        return this.txCreatedEntryIterator.next();
+      }
+
+      Object val;
+      if (!queueNext) {
+        val = getFromHdfs();
+        advanceHdfs();
+        
+      } else if (!hdfsNext) {
+        val = getFromQueue();
+        advanceQueue();
+        
+      } else {
+        byte[] qKey = queue.current().getSerializedKey();
+        byte[] hKey = this.currentHdfsKey;
+        
+        int diff = Bytes.compareTo(qKey, hKey);
+        if (diff < 0) {
+          val = getFromQueue();
+          advanceQueue();
+          
+        } else if (diff == 0) {
+          val = getFromQueue();
+          advanceQueue();
+
+          // ignore the duplicate
+          advanceHdfs();
+
+        } else {
+          val = getFromHdfs();
+          advanceHdfs();
+        }
+      }
+      return val;
+    }
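+    
+    /*
+     * Illustrative walk-through of the merge above (the keys are made up): if the queue
+     * iterator yields serialized keys {A, C} and the hoplog iterator yields {B, C}, next()
+     * returns A from the queue, B from HDFS, and then C from the queue, while the HDFS
+     * copy of C is skipped as a duplicate.
+     */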
+    
+    @Override
+    public void remove() {
+      throw new UnsupportedOperationException();
+    }
+    
+    public void close() {
+      if (queueNext) {
+        queue.close();
+      }
+
+      if (hdfsNext) {
+        hdfs.close();
+      }
+    }
+
+    private Object getFromQueue() {
+      HDFSGatewayEventImpl evt = queue.current();
+      if (type == null) {
+        return evt;
+      }
+      
+      switch (type) {
+      case KEYS:
+        byte[] key = evt.getSerializedKey();
+        return deserialize ? EntryEventImpl.deserialize(key) : key;
+        
+      case VALUES:
+        return evt.getValue();
+        
+      default:
+        Object keyObj = EntryEventImpl.deserialize(evt.getSerializedKey());
+        if(keyObj instanceof KeyWithRegionContext) {
+          ((KeyWithRegionContext)keyObj).setRegionContext(region.getPartitionedRegion());
+        }
+        return ((HDFSRegionMap) region.getRegionMap()).getDelegate().getEntryFromEvent(keyObj, evt, true, forUpdate);
+      }
+    }
+
+    private Object getFromHdfs() {
+      if (type == null) {
+        return hdfs.getValue();
+      }
+      
+      switch (type) {
+      case KEYS:
+        byte[] key = this.currentHdfsKey;
+        return deserialize ? EntryEventImpl.deserialize(key) : key;
+        
+      case VALUES:
+        PersistedEventImpl evt = hdfs.getValue();
+        return evt.getValue();
+        
+      default:
+        Object keyObj = EntryEventImpl.deserialize(this.currentHdfsKey);
+        if(keyObj instanceof KeyWithRegionContext) {
+          ((KeyWithRegionContext)keyObj).setRegionContext(region.getPartitionedRegion());
+        }
+        return ((HDFSRegionMap) region.getRegionMap()).getDelegate().getEntryFromEvent(keyObj, hdfs.getValue(), true, forUpdate);
+      }
+    }
+    
+    private void advanceHdfs() {
+      if (hdfsNext = hdfs.hasNext()) {
+        try {
+          this.currentHdfsKey = hdfs.next();
+        } catch (IOException e) {
+          region.checkForPrimary();
+          throw new HDFSIOException(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(e.getMessage()), e);
+        }
+      } else {
+        this.currentHdfsKey = null;
+        hdfs.close();
+      }
+    }
+    
+    private void advanceQueue() {
+      if (queueNext = queue.hasNext()) {
+        queue.next();
+      } else {
+        brq.checkForPrimary();
+        queue.close();
+      }
+    }
+    
+    public void setForUpdate(){
+      this.forUpdate = true;
+    }
+    
+    /** MergeGemXDHDFSToGFE: not sure if this function is required */ 
+    /*public void setTXState(TXState txState) {
+      TXRegionState txr = txState.getTXRegionState(region);
+      if (txr != null) {
+        txr.lock();
+        try {
+          this.txCreatedEntryIterator = txr.getCreatedEntryKeys().iterator();
+        }
+        finally{
+          txr.unlock();
+        }
+      }
+    }*/
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEventListener.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEventListener.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEventListener.java
new file mode 100644
index 0000000..607650f
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEventListener.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import com.gemstone.gemfire.cache.CacheClosedException;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.asyncqueue.AsyncEvent;
+import com.gemstone.gemfire.cache.asyncqueue.AsyncEventListener;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer;
+import com.gemstone.gemfire.i18n.LogWriterI18n;
+import com.gemstone.gemfire.i18n.StringId;
+import com.gemstone.gemfire.internal.cache.BucketRegion;
+import com.gemstone.gemfire.internal.cache.ForceReattemptException;
+import com.gemstone.gemfire.internal.cache.PartitionedRegion;
+import com.gemstone.gemfire.internal.cache.PrimaryBucketException;
+import com.gemstone.gemfire.internal.cache.execute.BucketMovedException;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+
+
+/**
+ * Listener that persists events to HDFS
+ *
+ */
+public class HDFSEventListener implements AsyncEventListener {
+  private final LogWriterI18n logger;
+  private volatile boolean senderStopped = false;
+
+  private final FailureTracker failureTracker = new FailureTracker(10L, 60 * 1000L, 1.5f);
+
+  public HDFSEventListener(LogWriterI18n logger) {
+    this.logger = logger;
+  }
+  
+  @Override
+  public void close() {
+    senderStopped = true;
+  }
+  
+  @Override
+  public boolean processEvents(List<AsyncEvent> events) {
+    if (Hoplog.NOP_WRITE) {
+      return true;
+    }
+    
+    // The list of events that the async queue receives is sorted at the
+    // bucket level. Events for multiple regions are concatenated together,
+    // and events for multiple buckets are concatenated one after the
+    // other, e.g.:
+    //
+    // <Region1, Key1, bucket1>, <Region1, Key19, bucket1>, 
+    // <Region1, Key4, bucket2>, <Region1, Key6, bucket2>
+    // <Region2, Key1, bucket1>, <Region2, Key4, bucket1>
+    // ..
+    
+    Region previousRegion = null;
+    int prevBucketId = -1;
+    ArrayList<QueuedPersistentEvent> list = null;
+    boolean success = false;
+    try {
+      //Back off if we are experiencing failures
+      failureTracker.sleepIfRetry();
+      
+      HoplogOrganizer bucketOrganizer = null; 
+      for (AsyncEvent asyncEvent : events) {
+        if (senderStopped){
+          failureTracker.failure();
+          if (logger.fineEnabled()) {
+            logger.fine("HDFSEventListener.processEvents: Cache is closing down. Ignoring the batch of data.");
+          }
+          return false;
+        }
+        HDFSGatewayEventImpl hdfsEvent = (HDFSGatewayEventImpl)asyncEvent;
+        Region region = hdfsEvent.getRegion();
+        
+        if (prevBucketId != hdfsEvent.getBucketId() || region != previousRegion){
+          if (prevBucketId != -1) {
+            bucketOrganizer.flush(list.iterator(), list.size());
+            success=true;
+            if (logger.fineEnabled()) {
+              logger.fine("Batch written to HDFS of size " + list.size() + " for region " + previousRegion);
+            }
+          }
+          bucketOrganizer = getOrganizer((PartitionedRegion) region, hdfsEvent.getBucketId());
+          // Bucket organizer can be null only when the bucket has moved. throw an exception so that the 
+          // batch is discarded. 
+          if (bucketOrganizer == null)
+            throw new BucketMovedException("Bucket moved. BucketId: " + hdfsEvent.getBucketId() +  " HDFSRegion: " + region.getName());
+          list = new  ArrayList<QueuedPersistentEvent>();
+        }
+        try {
+          //TODO:HDFS check if there is any serialization overhead
+          list.add(new SortedHDFSQueuePersistedEvent(hdfsEvent));
+        } catch (ClassNotFoundException e) {
+          //TODO:HDFS add localized string
+          logger.warning(new StringId(0, "Error while converting HDFSGatewayEvent to PersistedEventImpl."), e);
+          return false;
+        }
+        prevBucketId = hdfsEvent.getBucketId();
+        previousRegion = region;
+        
+      }
+      if (bucketOrganizer != null) {
+        bucketOrganizer.flush(list.iterator(), list.size());
+        success = true;
+        
+        if (logger.fineEnabled()) {
+          logger.fine("Batch written to HDFS of size " + list.size() + " for region " + previousRegion);
+        }
+      }
+    } catch (IOException e) {
+      logger.warning(LocalizedStrings.HOPLOG_FLUSH_FOR_BATCH_FAILED, e);
+      return false;
+    }
+    catch (ForceReattemptException e) {
+      if (logger.fineEnabled())
+        logger.fine(e);
+      return false;
+    }
+    catch(PrimaryBucketException e) {
+      //do nothing, the bucket is no longer primary so we shouldn't get the same
+      //batch next time.
+      if (logger.fineEnabled())
+        logger.fine(e);
+      return false;
+    }
+    catch(BucketMovedException e) {
+      //do nothing, the bucket is no longer primary so we shouldn't get the same
+      //batch next time.
+      if (logger.fineEnabled())
+        logger.fine(e);
+      return false;
+    }
+    catch (CacheClosedException e) {
+      if (logger.fineEnabled())
+        logger.fine(e);
+      // exit silently
+      return false;
+    } catch (InterruptedException e1) {
+      if (logger.fineEnabled())
+        logger.fine(e1);
+      return false;
+    } finally {
+      failureTracker.record(success);
+    }
+
+    return true;
+  }
+  
+  private HoplogOrganizer getOrganizer(PartitionedRegion region, int bucketId) {
+    BucketRegion br = region.getDataStore().getLocalBucketById(bucketId);
+    if (br == null) {
+      // got rebalanced or something
+      throw new PrimaryBucketException("Bucket region is no longer available " + bucketId + " " + region);
+    }
+
+    return br.getHoplogOrganizer();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEventQueueFilter.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEventQueueFilter.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEventQueueFilter.java
new file mode 100644
index 0000000..0860e75
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEventQueueFilter.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import com.gemstone.gemfire.cache.Operation;
+import com.gemstone.gemfire.cache.wan.GatewayEventFilter;
+import com.gemstone.gemfire.cache.wan.GatewayQueueEvent;
+import com.gemstone.gemfire.i18n.LogWriterI18n;
+
+/**
+ * Current use of this class is limited to ignoring the Bulk DML operations. 
+ * 
+ *
+ */
+public class HDFSEventQueueFilter implements GatewayEventFilter{
+  private LogWriterI18n logger;
+  
+  public HDFSEventQueueFilter(LogWriterI18n logger) {
+    this.logger = logger; 
+  }
+  @Override
+  public void close() {
+    
+  }
+
+  @Override
+  public boolean beforeEnqueue(GatewayQueueEvent event) {
+    Operation op = event.getOperation();
+    
+    
+    /* MergeGemXDHDFSToGFE - Disabled as it is gemxd specific 
+    if (op == Operation.BULK_DML_OP) {
+     // On accessors there are no parallel queues, so with the 
+     // current logic, isSerialWanEnabled function in LocalRegion 
+     // always returns true on an accessor. So when a bulk dml 
+     // op is fired on accessor, this behavior results in distribution 
+     // of the bulk dml operation to other members. To avoid putting 
+     // of this bulk dml in parallel queues, added this filter. This 
+     // is not the efficient way as the filters are used before inserting 
+     // in the queue. The bulk dmls should be blocked before they are distributed.
+     if (logger.fineEnabled())
+       logger.fine( "HDFSEventQueueFilter:beforeEnqueue: Disallowing insertion of a bulk DML in HDFS queue.");
+      return false;
+    }*/
+    
+    return true;
+  }
+
+  @Override
+  public boolean beforeTransmit(GatewayQueueEvent event) {
+   // No op
+   return true;
+  }
+
+  @Override
+  public void afterAcknowledgement(GatewayQueueEvent event) {
+    // No op
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSGatewayEventImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSGatewayEventImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSGatewayEventImpl.java
new file mode 100644
index 0000000..db99e7e
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSGatewayEventImpl.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import com.gemstone.gemfire.DataSerializer;
+import com.gemstone.gemfire.cache.EntryEvent;
+import com.gemstone.gemfire.internal.InternalDataSerializer;
+import com.gemstone.gemfire.internal.cache.EntryEventImpl;
+import com.gemstone.gemfire.internal.cache.EnumListenerEvent;
+import com.gemstone.gemfire.internal.cache.LocalRegion;
+import com.gemstone.gemfire.internal.cache.lru.Sizeable;
+import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerHelper;
+import com.gemstone.gemfire.internal.cache.versions.VersionTag;
+import com.gemstone.gemfire.internal.cache.wan.GatewaySenderEventImpl;
+import com.gemstone.gemfire.internal.offheap.StoredObject;
+import com.gemstone.gemfire.internal.offheap.annotations.Retained;
+import com.gemstone.gemfire.internal.util.BlobHelper;
+
+
+/**
+ * Gateway event extended for HDFS functionality.
+ */
+public class HDFSGatewayEventImpl extends GatewaySenderEventImpl {
+  
+  private static final long serialVersionUID = 4642852957292192406L;
+  protected transient boolean keyIsSerialized = false;
+  protected byte[] serializedKey = null; 
+  protected VersionTag versionTag; 
+  
+  public HDFSGatewayEventImpl(){
+  }
+  
+  @Retained
+  public HDFSGatewayEventImpl(EnumListenerEvent operation, EntryEvent event,
+      Object substituteValue)
+      throws IOException {
+    super(operation, event, substituteValue);
+    initializeHDFSGatewayEventObject(event);
+  }
+
+  @Retained
+  public HDFSGatewayEventImpl(EnumListenerEvent operation, EntryEvent event,
+      Object substituteValue, boolean initialize, int bucketId) throws IOException {
+    super(operation, event,substituteValue, initialize, bucketId);
+    initializeHDFSGatewayEventObject(event);
+  }
+
+  @Retained
+  public HDFSGatewayEventImpl(EnumListenerEvent operation, EntryEvent event,
+      Object substituteValue, boolean initialize) throws IOException {
+    super(operation, event, substituteValue, initialize);
+    initializeHDFSGatewayEventObject(event);
+  }
+
+  protected HDFSGatewayEventImpl(HDFSGatewayEventImpl offHeapEvent) {
+    super(offHeapEvent);
+    this.keyIsSerialized = offHeapEvent.keyIsSerialized;
+    this.serializedKey = offHeapEvent.serializedKey;
+    this.versionTag = offHeapEvent.versionTag;
+  }
+  
+  @Override
+  protected GatewaySenderEventImpl makeCopy() {
+    return new HDFSGatewayEventImpl(this);
+  }
+
+  private void initializeHDFSGatewayEventObject(EntryEvent event)
+      throws IOException {
+
+    serializeKey();
+    versionTag = ((EntryEventImpl)event).getVersionTag();
+    if (versionTag != null && versionTag.getMemberID() == null) {
+      versionTag.setMemberID(((LocalRegion)getRegion()).getVersionMember());
+    }
+  }
+
+  private void serializeKey() throws IOException {
+    if (!keyIsSerialized && isInitialized()) {
+      this.serializedKey = CacheServerHelper.serialize(this.key);
+      keyIsSerialized = true;
+    }
+  }
+  /** MergeGemXDHDFSToGFE: this method needs to be enabled if similar functionality is added to GatewaySenderEventImpl. */
+  /*@Override
+  protected StoredObject obtainOffHeapValueBasedOnOp(EntryEventImpl event,
+      boolean hasNonWanDispatcher) {
+    return  event.getOffHeapNewValue();
+  }*/
+  
+  /** MergeGemXDHDFSToGFE: this method needs to be enabled if similar functionality is added to GatewaySenderEventImpl. */
+  /*@Override
+  protected Object obtainHeapValueBasedOnOp(EntryEventImpl event,
+      boolean hasNonWanDispatcher) {
+    return   event.getRawNewValue(shouldApplyDelta());
+  }*/
+  
+  @Override
+  protected boolean shouldApplyDelta() {
+    return true;
+  }
+
+  @Override
+  public void toData(DataOutput out) throws IOException {
+    super.toData(out);
+    DataSerializer.writeObject(this.versionTag, out);
+  }
+  
+  @Override
+  protected void serializeKey(DataOutput out) throws IOException {
+    DataSerializer.writeByteArray((byte[])this.serializedKey, out);
+  }
+  
+  @Override
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    super.fromData(in);
+    this.versionTag = (VersionTag)DataSerializer.readObject(in);
+  }
+  
+  @Override
+  protected void deserializeKey(DataInput in) throws IOException,
+    ClassNotFoundException {
+    this.serializedKey = DataSerializer.readByteArray(in);
+    this.key = BlobHelper.deserializeBlob(this.serializedKey,
+        InternalDataSerializer.getVersionForDataStreamOrNull(in), null);
+    keyIsSerialized = true;
+  }
+
+  @Override
+  public int getDSFID() {
+    return HDFS_GATEWAY_EVENT_IMPL;
+  }
+
+  public byte[] getSerializedKey() {
+    return this.serializedKey;
+  }
+
+  public VersionTag getVersionTag() {
+    return this.versionTag;
+  }
+  
+  /**
+   * Returns the size in bytes this event will occupy on HDFS.
+   * @param writeOnly true to size the event for the write-only (unsorted) format,
+   *        false for the sorted (read/write) format
+   */
+  public int getSizeOnHDFSInBytes(boolean writeOnly) {
+    if (writeOnly)
+      return UnsortedHDFSQueuePersistedEvent.getSizeInBytes(this.serializedKey.length,
+          getSerializedValueSize(), this.versionTag);
+    else
+      return SortedHDFSQueuePersistedEvent.getSizeInBytes(this.serializedKey.length,
+          getSerializedValueSize(), this.versionTag);
+  }
+}
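
The class above carries the region key in pre-serialized form and rebuilds it lazily on the
receiving side. A condensed sketch of that round trip, using only the helpers already
referenced in this file (the wrapper class and key value are hypothetical):

    import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerHelper;
    import com.gemstone.gemfire.internal.util.BlobHelper;

    class KeyRoundTripSketch {
      static Object roundTrip(Object key) throws java.io.IOException, ClassNotFoundException {
        byte[] blob = CacheServerHelper.serialize(key);       // what serializeKey() stores
        return BlobHelper.deserializeBlob(blob, null, null);  // what deserializeKey() rebuilds
      }
    }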

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSIntegrationUtil.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSIntegrationUtil.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSIntegrationUtil.java
new file mode 100644
index 0000000..740a607
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSIntegrationUtil.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionAttributes;
+import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueue;
+import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueueFactory;
+import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueFactoryImpl;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.i18n.LogWriterI18n;
+import com.gemstone.gemfire.internal.Assert;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+
+/**
+ * Contains utility functions for creating and registering the HDFS async event queues.
+ */
+public class HDFSIntegrationUtil {
+
+  public static <K, V> AsyncEventQueue createDefaultAsyncQueueForHDFS(Cache cache, boolean writeOnly, String regionPath) {
+    return createAsyncQueueForHDFS(cache, regionPath, writeOnly, null);
+  }
+
+  private static AsyncEventQueue createAsyncQueueForHDFS(Cache cache, String regionPath, boolean writeOnly,
+      HDFSStore configView) {
+    LogWriterI18n logger = cache.getLoggerI18n();
+    String defaultAsyncQueueName = HDFSStoreFactoryImpl.getEventQueueName(regionPath);
+
+    if (configView == null) {
+      configView = new HDFSStoreFactoryImpl(cache).getConfigView();
+    }
+    
+
+    AsyncEventQueueFactory factory = cache.createAsyncEventQueueFactory();
+    factory.setBatchSize(configView.getBatchSize());
+    factory.setPersistent(configView.getBufferPersistent());
+    factory.setDiskStoreName(configView.getDiskStoreName());
+    factory.setMaximumQueueMemory(configView.getMaxMemory());
+    factory.setBatchTimeInterval(configView.getBatchInterval());
+    factory.setDiskSynchronous(configView.getSynchronousDiskWrite());
+    factory.setDispatcherThreads(configView.getDispatcherThreads());
+    factory.setParallel(true);
+    factory.addGatewayEventFilter(new HDFSEventQueueFilter(logger));
+    ((AsyncEventQueueFactoryImpl) factory).setBucketSorted(!writeOnly);
+    ((AsyncEventQueueFactoryImpl) factory).setIsHDFSQueue(true);
+
+    AsyncEventQueue asyncQ = null;
+
+    if (!writeOnly)
+      asyncQ = factory.create(defaultAsyncQueueName, new HDFSEventListener(cache.getLoggerI18n()));
+    else
+      asyncQ = factory.create(defaultAsyncQueueName, new HDFSWriteOnlyStoreEventListener(cache.getLoggerI18n()));
+
+    logger.fine("HDFS: async queue created for HDFS. Id: " + asyncQ.getId() + ". Disk store: "
+        + asyncQ.getDiskStoreName() + ". Batch size: " + asyncQ.getBatchSize() + ". bucket sorted:  " + !writeOnly);
+    return asyncQ;
+
+  }
+
+  public static void createAndAddAsyncQueue(String regionPath, RegionAttributes regionAttributes, Cache cache) {
+    if (!regionAttributes.getDataPolicy().withHDFS()) {
+      return;
+    }
+
+    String leaderRegionPath = getLeaderRegionPath(regionPath, regionAttributes, cache);
+
+    String defaultAsyncQueueName = HDFSStoreFactoryImpl.getEventQueueName(leaderRegionPath);
+    if (cache.getAsyncEventQueue(defaultAsyncQueueName) == null) {
+      if (regionAttributes.getHDFSStoreName() != null && regionAttributes.getPartitionAttributes() != null
+          && regionAttributes.getPartitionAttributes().getLocalMaxMemory() != 0) {
+        HDFSStore store = ((GemFireCacheImpl) cache).findHDFSStore(regionAttributes.getHDFSStoreName());
+        if (store == null) {
+          throw new IllegalStateException(
+              LocalizedStrings.HOPLOG_HDFS_STORE_NOT_FOUND.toLocalizedString(regionAttributes.getHDFSStoreName()));
+        }
+        HDFSIntegrationUtil
+            .createAsyncQueueForHDFS(cache, leaderRegionPath, regionAttributes.getHDFSWriteOnly(), store);
+      }
+    }
+  }
+
+  private static String getLeaderRegionPath(String regionPath, RegionAttributes regionAttributes, Cache cache) {
+    String colocated;
+    while (regionAttributes.getPartitionAttributes() != null
+        && (colocated = regionAttributes.getPartitionAttributes().getColocatedWith()) != null) {
+      // Do not waitOnInitialization() for PR
+      GemFireCacheImpl gfc = (GemFireCacheImpl) cache;
+      Region colocatedRegion = gfc.getPartitionedRegion(colocated, false);
+      if (colocatedRegion == null) {
+        Assert.fail("Could not find parent region " + colocated + " for " + regionPath);
+      }
+      regionAttributes = colocatedRegion.getAttributes();
+      regionPath = colocatedRegion.getFullPath();
+    }
+    return regionPath;
+  }
+
+}
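
For readers tracing how a region ends up with its HDFS queue: createAndAddAsyncQueue above is
the entry point used during region creation. A hedged sketch of the call pattern (someRegion
and cache are assumed placeholders for an existing region and cache):

    // No-op unless the attributes carry an HDFS-enabled data policy; throws
    // IllegalStateException if the named HDFS store has not been created.
    RegionAttributes<?, ?> attrs = someRegion.getAttributes();
    HDFSIntegrationUtil.createAndAddAsyncQueue(someRegion.getFullPath(), attrs, cache);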


[06/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManagerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManagerJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManagerJUnitTest.java
new file mode 100644
index 0000000..011d82b
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManagerJUnitTest.java
@@ -0,0 +1,449 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
+import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSCompactionManager.CompactionRequest;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer.Compactor;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+@Category({IntegrationTest.class, HoplogTest.class})
+public class HDFSCompactionManagerJUnitTest extends BaseHoplogTestCase {
+  /**
+   * Tests queueing of major and minor compaction requests in respective queues
+   */
+  public void testMinMajCompactionIsolation() throws Exception {
+    // no-op compactor
+    Compactor compactor = new AbstractCompactor() {
+      Object minor = new Object();
+      Object major = new Object();
+      public boolean compact(boolean isMajor, boolean isForced) throws IOException {
+        try {
+          if (isMajor) {
+            synchronized (major) {
+              major.wait();
+            }
+          } else {
+            synchronized (minor) {
+              minor.wait();
+            }
+          }
+        } catch (InterruptedException e) {
+          e.printStackTrace();
+        }
+        return true;
+      }
+    };
+
+    // compaction is disabled. all requests will wait in queue
+    HDFSCompactionManager instance = HDFSCompactionManager.getInstance(hdfsStore);
+    alterMinorCompaction(hdfsStore, true);
+    alterMajorCompaction(hdfsStore, true);
+    
+    assertEquals(0, instance.getMinorCompactor().getActiveCount());
+    assertEquals(0, instance.getMajorCompactor().getActiveCount());
+    
+    //minor request
+    CompactionRequest cr = new CompactionRequest("region", 0, compactor, false);
+    HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
+    //major request
+    cr = new CompactionRequest("region", 0, compactor, true);
+    HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
+    
+    //wait for requests to get in queue
+    TimeUnit.MILLISECONDS.sleep(50);
+    assertEquals(1, instance.getMinorCompactor().getActiveCount());
+    assertEquals(1, instance.getMajorCompactor().getActiveCount());
+  }
+
+  /**
+   * Tests compaction pause. Once compaction is stopped, requests will 
+   * start getting rejected
+   */
+  public void testAlterAutoMinorCompaction() throws Exception {
+    // each compaction execution increments the counter by 1, so we can track how many tasks have actually executed
+    final AtomicInteger totalExecuted = new AtomicInteger(0);
+    Compactor compactor = new AbstractCompactor() {
+      public boolean compact(boolean isMajor, boolean isForced) throws IOException {
+        totalExecuted.incrementAndGet();
+        return true;
+      }
+    };
+
+    // compaction is enabled. submit requests; after a short wait both should have executed
+    alterMinorCompaction(hdfsStore, true);
+    CompactionRequest cr = new CompactionRequest("region", 0, compactor, false);
+    HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
+    cr = new CompactionRequest("region", 1, compactor, false);
+    HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
+
+    int totalWait = 20;
+    while (totalWait > 0 && 2 != totalExecuted.get()) {
+      // wait for operations to complete. The execution will terminate as soon as possible
+      System.out.println("waiting one small cycle for dummy request to complete");
+      TimeUnit.MILLISECONDS.sleep(50);
+      totalWait--;
+    }
+    assertEquals(2, totalExecuted.get());
+
+    // so compaction works. now disable compaction and submit a large number of requests until one is rejected;
+    // the execution counter should not increase
+    alterMinorCompaction(hdfsStore, false);
+    boolean success = false;
+    int i = 0;
+    do {
+      cr = new CompactionRequest("region", ++i, compactor, false);
+      success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
+    } while (success);
+
+    TimeUnit.MILLISECONDS.sleep(500);
+    assertEquals(2, totalExecuted.get());
+  }
+  public void testAlterAutoMajorCompaction() throws Exception {
+    // each compaction execution increments the counter by 1, so we can track how many tasks have actually executed
+    final AtomicInteger totalExecuted = new AtomicInteger(0);
+    Compactor compactor = new AbstractCompactor() {
+      public boolean compact(boolean isMajor, boolean isForced) throws IOException {
+        totalExecuted.incrementAndGet();
+        return true;
+      }
+    };
+    
+    // compaction is enabled. submit requests; after a short wait both should have executed
+    alterMajorCompaction(hdfsStore, true);
+    CompactionRequest cr = new CompactionRequest("region", 0, compactor, true);
+    HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
+    cr = new CompactionRequest("region", 1, compactor, true);
+    HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
+    
+    int totalWait = 20;
+    while (totalWait > 0 && 2 != totalExecuted.get()) {
+      // wait for operations to complete. The execution will terminate as soon as possible
+      System.out.println("waiting one small cycle for dummy request to complete");
+      TimeUnit.MILLISECONDS.sleep(50);
+      totalWait--;
+    }
+    assertEquals(2, totalExecuted.get());
+    
+    // so compaction works. now disable compaction and submit a large number of requests until one is rejected;
+    // the execution counter should not increase
+    alterMajorCompaction(hdfsStore, false);
+    boolean success = false;
+    int i = 0;
+    do {
+      cr = new CompactionRequest("region", ++i, compactor, true);
+      success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
+      System.out.println("success: " + success);
+    } while (success);
+    
+    TimeUnit.MILLISECONDS.sleep(500);
+    assertEquals(2, totalExecuted.get());
+  }
+  
+  /**
+   * Tests duplicate compaction requests do not cause rejection
+   */
+   public void testDuplicateRequests() throws Exception {
+    final AtomicBoolean barrierOpen = new AtomicBoolean(false);
+    class TestCompactor extends AbstractCompactor {
+      AtomicBoolean busy = new AtomicBoolean(false);
+      public boolean compact(boolean isMajor, boolean isForced) throws IOException {
+        synchronized (barrierOpen) {
+          busy.set(true);
+          if (barrierOpen.get()) {
+            return false;
+          }
+          try {
+            barrierOpen.wait();
+          } catch (InterruptedException e) {
+            return false;
+          }
+          busy.set(false);
+        }
+        return true;
+      }
+      public boolean isBusy(boolean isMajor) {return busy.get();}
+    };
+    
+    System.setProperty(HoplogConfig.COMPCATION_QUEUE_CAPACITY, "10");
+
+    alterMinorCompaction(hdfsStore, true);
+    alterMajorCompaction(hdfsStore, true);
+    // capacity is 10 and the thread count is 2, so only the first 12 requests will be
+    // submitted
+    for (int i = 0; i < 15; i++) {
+      CompactionRequest cr = new CompactionRequest("region", i, new TestCompactor(), true);
+      boolean success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
+      if (success) {
+        assertTrue("failed for " + i, i < 12);
+      } else {
+        assertTrue("failed for " + i, i >= 12);
+      }
+    }
+    
+    synchronized (barrierOpen) {
+      barrierOpen.set(true);
+      barrierOpen.notifyAll();
+    }
+    TimeUnit.MILLISECONDS.sleep(100);
+    barrierOpen.set(false);
+    
+    HDFSCompactionManager.getInstance(hdfsStore).reset();
+    TestCompactor compactor = new TestCompactor();
+    for (int i = 0; i < 10; i++) {
+      TimeUnit.MILLISECONDS.sleep(20);
+      CompactionRequest cr = new CompactionRequest("region", 0, compactor, true);
+      boolean success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
+      if (success) {
+        assertTrue("failed for " + i, i < 2);
+      } else {
+        assertTrue("failed for " + i, i > 0);
+      }
+    }
+  }
+
+  public void testForceCompactionWithAutoDisabled() throws Exception {
+    HoplogOrganizer<? extends PersistedEventImpl> organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    items.add(new TestEvent(("1"), ("1-1")));
+    organizer.flush(items.iterator(), items.size());
+
+    items.clear();
+    items.add(new TestEvent(("2"), ("2-1")));
+    organizer.flush(items.iterator(), items.size());
+    
+    FileStatus[] files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+    assertEquals(2, files.length);
+    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION);
+    assertEquals(0, files.length);
+    
+    CompactionRequest cr = new CompactionRequest(getName(), 0, organizer.getCompactor(), true);
+    HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
+    TimeUnit.MILLISECONDS.sleep(500);
+
+    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION);
+    assertEquals(0, files.length);
+    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
+    assertEquals(0, files.length);
+    
+    organizer.forceCompaction(true);
+    TimeUnit.MILLISECONDS.sleep(500);
+    
+    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION);
+    assertEquals(1, files.length);
+    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
+    assertEquals(2, files.length);
+  }
+
+  /**
+   * Test force major compaction completes on version upgrade even when there is only one hoplog
+   */
+  public void testForceCompaction() throws Exception {
+    HoplogOrganizer<? extends PersistedEventImpl> organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    items.add(new TestEvent(("1"), ("1-1")));
+    organizer.flush(items.iterator(), items.size());
+
+    items.clear();
+    items.add(new TestEvent(("2"), ("2-1")));
+    organizer.flush(items.iterator(), items.size());
+    
+    FileStatus[] files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
+    assertEquals(2, files.length);
+    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION);
+    assertEquals(0, files.length);
+    
+    // isForced is true for user submitted compaction requests (through system procedure)
+    // we do not want to compact an already compacted file
+    CompactionRequest cr = new CompactionRequest(getName(), 0, organizer.getCompactor(), true, true/*isForced*/);
+    Future<CompactionStatus> status = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
+    status.get(); // wait for the forced compaction to complete
+
+    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION);
+    assertEquals(1, files.length);
+    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
+    assertEquals(2, files.length);
+
+    // second request to force compact does not do anything
+    status = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
+    status.get(); // wait; this duplicate forced request is expected to be a no-op
+    
+    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION);
+    assertEquals(1, files.length);
+    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
+    assertEquals(2, files.length);
+
+    // upon version upgrade force compaction is allowed
+    cr = new CompactionRequest(getName(), 0, organizer.getCompactor(), true, true, true);
+    status = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
+    status.get(); // wait for the upgrade-forced compaction to complete
+    
+    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION);
+    assertEquals(2, files.length);
+    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
+    assertEquals(3, files.length); // + 1 for old major hoplog
+  }
+
+  /**
+   * Test successful sequential submission
+   */
+  public void testSameBucketSeqRequest() throws Exception {
+    final AtomicInteger counter = new AtomicInteger(0);
+    Compactor compactor = new AbstractCompactor() {
+      public boolean compact(boolean isMajor, boolean isForced) throws IOException {
+        counter.set(1);
+        return true;
+      }
+    };
+
+    HDFSCompactionManager.getInstance(hdfsStore).reset();
+    alterMinorCompaction(hdfsStore, true);
+    alterMajorCompaction(hdfsStore, true);
+    CompactionRequest cr = new CompactionRequest("region", 0, compactor, false);
+    assertEquals(0, counter.get());
+    boolean success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
+    assertEquals(true, success);
+    while (!counter.compareAndSet(1, 0)) {
+      TimeUnit.MILLISECONDS.sleep(20);
+    }
+    
+    assertEquals(0, counter.get());
+    success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
+    assertEquals(true, success);
+    for (int i = 0; i < 10; i++) {
+      TimeUnit.MILLISECONDS.sleep(20);
+      if (counter.get() == 1) {
+        break;
+      }
+    }
+    assertEquals(1, counter.get());
+  }
+  
+  public void testAlterMinorThreadsIncrease() throws Exception {
+    doAlterCompactionThreads(false, false);
+  }
+  public void testAlterMinorThreadsDecrease() throws Exception {
+    doAlterCompactionThreads(false, true);
+  }
+  public void testAlterMajorThreadsIncrease() throws Exception {
+    doAlterCompactionThreads(true, false);
+  }
+  public void testAlterMajorThreadsDecrease() throws Exception {
+    doAlterCompactionThreads(true, true);
+  }
+  
+  public void doAlterCompactionThreads(final boolean testMajor, boolean decrease) throws Exception {
+    final AtomicBoolean barrierOpen = new AtomicBoolean(false);
+    final AtomicInteger counter = new AtomicInteger(0);
+    class TestCompactor extends AbstractCompactor {
+      public boolean compact(boolean isMajor, boolean isForced) throws IOException {
+        synchronized (barrierOpen) {
+          if ((testMajor && !isMajor)  || (!testMajor && isMajor)) {
+            return true;
+          }
+          if (barrierOpen.get()) {
+            return false;
+          }
+          try {
+            barrierOpen.wait();
+          } catch (InterruptedException e) {
+            return false;
+          }
+          counter.incrementAndGet();
+        }
+        return true;
+      }
+    };
+    
+    System.setProperty(HoplogConfig.COMPCATION_QUEUE_CAPACITY, "1");
+
+    HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
+    int defaultThreadCount = 10;
+    if (testMajor) {
+      alterMajorCompaction(hdfsStore, true);
+      defaultThreadCount = 2;
+      mutator.setMajorCompactionThreads(15);
+      if (decrease) {
+        mutator.setMajorCompactionThreads(1);
+      }
+    } else {
+      alterMinorCompaction(hdfsStore, true);
+      mutator.setMinorCompactionThreads(15);
+      if (decrease) {
+        mutator.setMinorCompactionThreads(1);
+      }
+    }
+    
+    // capacity is 1 and the thread count is 10 or 2, so only the first 11 or 3 requests will be
+    // submitted
+    cache.getLogger().info("<ExpectedException action=add>java.util.concurrent.RejectedExecutionException</ExpectedException>");
+    for (int i = 0; i < 15; i++) {
+      CompactionRequest cr = new CompactionRequest("region", i, new TestCompactor(), testMajor);
+      boolean success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
+      if (success) {
+        assertTrue("failed for " + i, i <= defaultThreadCount);
+      } else {
+        assertTrue("failed for " + i, i > defaultThreadCount);
+      }
+    }
+    
+    TimeUnit.MILLISECONDS.sleep(500);
+    assertEquals(0, counter.get());
+    synchronized (barrierOpen) {
+      barrierOpen.set(true);
+      barrierOpen.notifyAll();
+    }
+    TimeUnit.MILLISECONDS.sleep(500);
+    assertEquals(defaultThreadCount, counter.get());
+    
+    hdfsStore.alter(mutator);
+
+    counter.set(0);
+    barrierOpen.set(false);
+    for (int i = 0; i < 15; i++) {
+      TimeUnit.MILLISECONDS.sleep(100);
+      CompactionRequest cr = new CompactionRequest("region", i, new TestCompactor(), testMajor);
+      boolean success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
+      if (decrease) {
+        if (i > 3) {
+          assertFalse("failed for " + i, success);
+        }
+      } else {
+        assertTrue("failed for " + i, success);
+      }
+    }
+    TimeUnit.MILLISECONDS.sleep(500);
+    synchronized (barrierOpen) {
+      barrierOpen.set(true);
+      barrierOpen.notifyAll();
+    }
+    TimeUnit.MILLISECONDS.sleep(500);
+    if (decrease) {
+      assertTrue(counter.get() < 4);
+    } else {
+      assertEquals(15, counter.get());
+    }
+
+    cache.getLogger().info("<ExpectedException action=remove>java.util.concurrent.RejectedExecutionException</ExpectedException>");
+  }
+}
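
The tests above repeat a single submit-and-check idiom against the compaction manager.
Condensed, and assuming `hdfsStore` and `compactor` are set up as in the test fixture:

    // false selects the minor-compaction pool; submitRequest returns null when the request is rejected
    CompactionRequest request = new CompactionRequest("region", 0, compactor, false);
    boolean accepted = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(request) != null;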

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSRegionDirectorJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSRegionDirectorJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSRegionDirectorJUnitTest.java
new file mode 100644
index 0000000..dc7b987
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSRegionDirectorJUnitTest.java
@@ -0,0 +1,97 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.hdfs.internal.HoplogListenerForRegion;
+import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
+import com.gemstone.gemfire.internal.cache.LocalRegion;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+
+@Category({IntegrationTest.class, HoplogTest.class})
+public class HDFSRegionDirectorJUnitTest extends BaseHoplogTestCase {
+  public void testDirector() throws Exception {
+    int bucketId = 0;
+
+    HdfsRegionManager mgr = regionManager;
+    
+    // no buckets have been created so far.
+    assertEquals(0, director.getBucketCount("/" + getName()));
+
+    // one bucket created
+    mgr.create(bucketId);
+    assertEquals(1, director.getBucketCount("/" + getName()));
+
+    // close bucket test
+    mgr.close(bucketId);
+    
+    // all buckets have been closed.
+    assertEquals(0, director.getBucketCount("/" + getName()));
+
+    mgr.create(bucketId);
+    assertEquals(1, director.getBucketCount("/" + getName()));
+    director.clear("/" + getName());
+    try {
+      assertEquals(0, director.getBucketCount("/" + getName()));
+      fail("The region is no longer managed, hence an exception is expected");
+    } catch (IllegalStateException e) {
+      // exception expected as the region is no longer managed
+    }
+  }
+  
+  public void testCompactionEvents() throws Exception {
+    final AtomicInteger counter = new AtomicInteger(0);
+    HoplogListener myListener = new HoplogListener() {
+      public void hoplogDeleted(String regionFolder, int bucketId, Hoplog... oplogs)
+          throws IOException {
+      }
+      public void hoplogCreated(String regionFolder, int bucketId, Hoplog... oplogs)
+          throws IOException {
+      }
+      public void compactionCompleted(String region, int bucket, boolean isMajor) {
+        counter.incrementAndGet();
+      }
+    };
+
+    HoplogListenerForRegion listenerManager = ((LocalRegion)region).getHoplogListener();
+    listenerManager.addListener(myListener);
+    
+    HoplogOrganizer bucket = regionManager.create(0);
+    // #1
+    ArrayList<PersistedEventImpl> items = new ArrayList<PersistedEventImpl>();
+    items.add(new TestEvent("1", "1"));
+    bucket.flush(items.iterator(), items.size());
+
+    // #2
+    items.clear();
+    items.add(new TestEvent("2", "1"));
+    bucket.flush(items.iterator(), items.size());
+
+    // #3
+    items.clear();
+    items.add(new TestEvent("3", "1"));
+    bucket.flush(items.iterator(), items.size());
+    
+    // #4
+    items.clear();
+    items.add(new TestEvent("4", "1"));
+    bucket.flush(items.iterator(), items.size());
+    
+    bucket.getCompactor().compact(false, false);
+    assertEquals(1, counter.get());
+  }
+}
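
testCompactionEvents above shows how compaction completion is observed. The registration step,
reduced to its essentials (the `region` variable is assumed to be an HDFS-backed region as in
the fixture):

    HoplogListenerForRegion listeners = ((LocalRegion) region).getHoplogListener();
    listeners.addListener(new HoplogListener() {
      public void hoplogCreated(String regionFolder, int bucketId, Hoplog... oplogs) {
        // not needed for this sketch
      }
      public void hoplogDeleted(String regionFolder, int bucketId, Hoplog... oplogs) {
        // not needed for this sketch
      }
      public void compactionCompleted(String region, int bucket, boolean isMajor) {
        // react to a finished minor or major compaction here
      }
    });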

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSStatsJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSStatsJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSStatsJUnitTest.java
new file mode 100644
index 0000000..1d17232
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSStatsJUnitTest.java
@@ -0,0 +1,250 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.util.ArrayList;
+
+import org.apache.hadoop.fs.Path;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.SortedHoplogPersistedEvent;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetReader.HoplogIterator;
+import com.gemstone.gemfire.internal.util.BlobHelper;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+@Category({IntegrationTest.class, HoplogTest.class})
+public class HDFSStatsJUnitTest extends BaseHoplogTestCase {
+  public void testStoreUsageStats() throws Exception {
+    HoplogOrganizer bucket = regionManager.create(0);
+    
+    long oldUsage = 0;
+    assertEquals(oldUsage, stats.getStoreUsageBytes());
+
+    for (int j = 0; j < 5; j++) {
+      ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+      for (int i = 0; i < 100; i++) {
+        String key = ("key-" + (j * 100 + i));
+        String value = ("value-" + System.nanoTime());
+        items.add(new TestEvent(key, value));
+      }
+      bucket.flush(items.iterator(), 100);
+    }
+    
+    assertTrue(0 < stats.getStoreUsageBytes());
+    oldUsage = stats.getStoreUsageBytes();
+    
+    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
+    assertEquals(2, stats.getStoreUsageBytes() / oldUsage);
+    
+    organizer.close();
+    assertEquals(1, stats.getStoreUsageBytes() / oldUsage);
+  }
+  
+  public void testWriteStats() throws Exception {
+    HoplogOrganizer bucket = regionManager.create(0);
+
+    // validate flush stats
+    // flush and create many hoplogs and execute one compaction cycle also
+    // 5 hoplogs, total 500 keys
+    assertEquals(0, stats.getFlush().getCount());
+    assertEquals(0, stats.getFlush().getBytes());
+    assertEquals(0, stats.getActiveFileCount());
+    int bytesSent = 0;
+    for (int j = 0; j < 5; j++) {
+      ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+      for (int i = 0; i < 100; i++) {
+        String key = ("key-" + (j * 100 + i));
+        String value = ("value-" + System.nanoTime());
+        items.add(new TestEvent(key, value));
+        bytesSent += (key.getBytes().length + value.getBytes().length);
+      }
+      bucket.flush(items.iterator(), 100);
+
+      // verify the flush and file-count stats reflect this batch
+      assertEquals(j + 1, stats.getFlush().getCount());
+      assertTrue(stats.getFlush().getBytes() > bytesSent);
+      assertEquals(j + 1, stats.getActiveFileCount());
+    }
+
+    // verify compaction stats
+    assertEquals(0, stats.getMinorCompaction().getCount());
+    assertEquals(0, stats.getMinorCompaction().getBytes());
+    assertEquals(0, stats.getInactiveFileCount());
+    bucket.getCompactor().compact(false, false);
+    assertEquals(1, stats.getMinorCompaction().getCount());
+    assertEquals(1, stats.getActiveFileCount());
+    assertEquals(0, stats.getInactiveFileCount());
+    assertEquals(stats.getMinorCompaction().getBytes(), stats.getFlush()
+        .getBytes());
+  }
+  
+  public void testInactiveFileStats() throws Exception {
+    // steps:
+    // create files -> validate active and inactive file count
+    // -> increment reference count by opening a scanner -> compact -> verify active and inactive file count
+    HoplogOrganizer bucket = regionManager.create(0);
+    assertEquals(0, stats.getActiveFileCount());
+    assertEquals(0, stats.getInactiveFileCount());
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    for (int j = 0; j < 5; j++) {
+      items.clear();
+      for (int i = 0; i < 100; i++) {
+        String key = ("key-" + (j * 100 + i));
+        String value = ("value-" + System.nanoTime());
+        items.add(new TestEvent(key, value));
+      }
+      bucket.flush(items.iterator(), 100);
+    }
+    
+    assertEquals(5, stats.getActiveFileCount());
+    assertEquals(0, stats.getInactiveFileCount());
+    
+    HoplogIterator<byte[], PersistedEventImpl> scanner = bucket.scan();
+    bucket.getCompactor().compact(true, false);
+    assertEquals(1, stats.getActiveFileCount());
+    assertEquals(5, stats.getInactiveFileCount());
+    
+    scanner.close();
+    assertEquals(1, stats.getActiveFileCount());
+    assertEquals(0, stats.getInactiveFileCount());
+  }
+
+  public void testReadStats() throws Exception {
+    HoplogOrganizer<SortedHoplogPersistedEvent> bucket = regionManager.create(0);
+
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    for (int i = 0; i < 100; i++) {
+      items.add(new TestEvent("key-" + i, "value-" + System.nanoTime()));
+    }
+    bucket.flush(items.iterator(), 100);
+    
+    // validate read stats
+    assertEquals(0, stats.getRead().getCount());
+    assertEquals(0, stats.getRead().getBytes());
+    // number of bytes read must be greater than size of key and value and must be increasing
+    int bytesRead = "key-1".getBytes().length + "value=1233232".getBytes().length;
+    for (int i = 0; i < 5; i++) {
+      long previousRead = stats.getRead().getBytes();
+      PersistedEventImpl e = bucket.read(BlobHelper.serializeToBlob("key-" + i));
+      assertNotNull(e);
+      assertEquals(i + 1, stats.getRead().getCount());
+      assertTrue( (bytesRead + previousRead) < stats.getRead().getBytes());
+    }
+    
+    //Make sure the block cache stats are being updated.
+//    assertTrue(storeStats.getBlockCache().getMisses() > 0);
+//    assertTrue(storeStats.getBlockCache().getBytesCached() > 0);
+//    assertTrue(storeStats.getBlockCache().getCached() > 0);
+    
+    //Do a duplicate read to make sure we get a hit in the cache
+//    bucket.read(BlobHelper.serializeToBlob("key-" + 0));
+//    assertTrue(storeStats.getBlockCache().getHits() > 0);
+  }
+
+  public void testBloomStats() throws Exception {
+    HoplogOrganizer bucket = regionManager.create(0);
+
+    // create 10 hoplogs
+    for (int j = 0; j < 5; j++) {
+      ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+      for (int i = 0; i < 100; i++) {
+        String key = ("key-" + (j * 100 + i));
+        String value = ("value-" + System.nanoTime());
+        items.add(new TestEvent(key, value));
+      }
+      bucket.flush(items.iterator(), 100);
+    }
+
+    // initially bloom stat will be zero
+    // reading key in first hop will increase bloom hit by 1 (key 0 to 99)
+    // reading key in 5 hoplog will increase bloom hit by 5 (key 400 to 499)
+    assertEquals(0, stats.getBloom().getCount());
+    bucket.read(BlobHelper.serializeToBlob("key-450"));
+    assertEquals(1, stats.getBloom().getCount());
+    bucket.read(BlobHelper.serializeToBlob("key-50"));
+    assertEquals(6, stats.getBloom().getCount());
+  }
+  
+  public void testScanStats() throws Exception {
+    HFileSortedOplog hoplog = new HFileSortedOplog(hdfsStore, new Path(
+          testDataDir, "H-1-1.hop"),blockCache, stats, storeStats);
+    createHoplog(5, hoplog);
+    
+    // initially scan stats will be zero. creating a scanner should increase
+    // scan iteration stats and bytes. On scanner close scan count should be
+    // incremented
+    assertEquals(0, stats.getScan().getCount());
+    assertEquals(0, stats.getScan().getBytes());
+    assertEquals(0, stats.getScan().getTime());
+    assertEquals(0, stats.getScan().getIterations());
+    assertEquals(0, stats.getScan().getIterationTime());
+    
+    HoplogIterator<byte[], byte[]> scanner = hoplog.getReader().scan();
+    assertEquals(0, stats.getScan().getCount());
+    int count = 0;
+    for (byte[] bs = null; scanner.hasNext(); ) {
+      bs = scanner.next();
+      count += bs.length + scanner.getValue().length;
+    }
+    assertEquals(count, stats.getScan().getBytes());
+    assertEquals(5, stats.getScan().getIterations());
+    assertTrue(0 < stats.getScan().getIterationTime());
+    // getCount() will still be 0 because scanner.close() has not been called yet
+    assertEquals(0, stats.getScan().getCount());
+    assertEquals(0, stats.getScan().getTime());
+    assertEquals(1, stats.getScan().getInProgress());
+    
+    scanner.close();
+    assertEquals(1, stats.getScan().getCount());
+    assertTrue(0 < stats.getScan().getTime());
+    assertTrue(stats.getScan().getIterationTime() <= stats.getScan().getTime());
+  }
+  
+  /**
+   * Validates two buckets belonging to same region update the same stats
+   */
+  public void testRegionBucketShareStats() throws Exception {
+    HoplogOrganizer bucket1 = regionManager.create(0);
+    HoplogOrganizer bucket2 = regionManager.create(1);
+
+    // validate flush stats
+    assertEquals(0, stats.getFlush().getCount());
+    assertEquals(0, stats.getActiveFileCount());
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    for (int i = 0; i < 100; i++) {
+      items.add(new TestEvent("key-" + i, "value-" + System.nanoTime()));
+    }
+    bucket1.flush(items.iterator(), 100);
+    assertEquals(1, stats.getFlush().getCount());
+    assertEquals(1, stats.getActiveFileCount());
+    items.clear();
+
+    for (int i = 0; i < 100; i++) {
+      items.add(new TestEvent("key-" + i, "value-" + System.nanoTime()));
+    }
+    bucket2.flush(items.iterator(), 100);
+    assertEquals(2, stats.getFlush().getCount());
+    assertEquals(2, stats.getActiveFileCount());
+  }
+
+  @Override
+  protected Cache createCache() {
+    CacheFactory cf = new CacheFactory().set("mcast-port", "0")
+        .set("log-level", "info")
+        .set("enable-time-statistics", "true")
+//        .set("statistic-archive-file", "statArchive.gfs")
+        ;
+    cache = cf.create();
+
+    return cache;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizerJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizerJUnitTest.java
new file mode 100644
index 0000000..ab1ccac
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizerJUnitTest.java
@@ -0,0 +1,297 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
+import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.UnsortedHoplogPersistedEvent;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetReader.HoplogIterator;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.SequenceFileHoplog.SequenceFileIterator;
+import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerHelper;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+/**
+ * Tests hoplog functionality for streaming (write-only) ingest.
+ *
+ * @author hemantb
+ */
+@Category({IntegrationTest.class, HoplogTest.class})
+public class HDFSUnsortedHoplogOrganizerJUnitTest extends BaseHoplogTestCase {
+ 
+  /**
+   * Tests flush operation
+   */
+  public void testFlush() throws Exception {
+    int count = 10;
+    int bucketId = (int) System.nanoTime();
+    HDFSUnsortedHoplogOrganizer organizer = new HDFSUnsortedHoplogOrganizer(regionManager, bucketId);
+
+    // flush and create hoplog
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    for (int i = 0; i < count; i++) {
+      items.add(new TestEvent(("key-" + i), ("value-" + System.nanoTime())));
+    }
+    
+    organizer.flush(items.iterator(), count);
+    organizer.closeCurrentWriter();
+    
+    // check file existence in bucket directory
+    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/" + bucketId, 
+                      HdfsSortedOplogOrganizer.SEQ_HOPLOG_EXTENSION);
+
+    // only one hoplog should exist
+    assertEquals(1, hoplogs.length);
+    readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 0);
+  }
+  
+  public void testAlterRollOverInterval() throws Exception {
+    HDFSUnsortedHoplogOrganizer organizer = new HDFSUnsortedHoplogOrganizer(regionManager, 0);
+    
+    // flush 3 times with small delays. Only one seq file should be created
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    for (int j = 0; j < 3; j++) {
+      items.clear();
+      for (int i = 0; i < 10; i++) {
+        items.add(new TestEvent(("key-" + (i + 10 * j)), ("value-" + System.nanoTime())));
+      }
+      organizer.flush(items.iterator(), 10);
+      TimeUnit.MILLISECONDS.sleep(1100);
+    }
+    organizer.closeCurrentWriter();
+    
+    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/" + 0,
+        HdfsSortedOplogOrganizer.SEQ_HOPLOG_EXTENSION);
+    
+    // only one hoplog should exist
+    assertEquals(1, hoplogs.length);
+    readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 0);
+    
+    HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
+    mutator.setWriteOnlyFileRolloverInterval(1);
+    hdfsStore.alter(mutator);
+    
+    TimeUnit.MILLISECONDS.sleep(1100);
+    for (int j = 0; j < 2; j++) {
+      items.clear();
+      for (int i = 0; i < 10; i++) {
+        items.add(new TestEvent(("key-" + (i + 10 * j)), ("value-" + System.nanoTime())));
+      }
+      organizer.flush(items.iterator(), 10);
+      TimeUnit.MILLISECONDS.sleep(1100);
+    }
+    organizer.closeCurrentWriter();
+    hoplogs = getBucketHoplogs(getName() + "/" + 0,
+        HdfsSortedOplogOrganizer.SEQ_HOPLOG_EXTENSION);
+    assertEquals(3, hoplogs.length);
+  }
+  
+  public void testSequenceFileScan() throws Exception {
+    int count = 10000;
+    int bucketId = (int) System.nanoTime();
+    HDFSUnsortedHoplogOrganizer organizer = new HDFSUnsortedHoplogOrganizer(regionManager, bucketId);
+
+    // flush and create hoplog
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    for (int i = 0; i < count; i++) {
+      items.add(new TestEvent(("key-" + i), ("value-" + System.nanoTime())));
+    }
+    
+    organizer.flush(items.iterator(), count);
+    organizer.closeCurrentWriter();
+    
+    // check file existence in bucket directory
+    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/" + bucketId, 
+                      HdfsSortedOplogOrganizer.SEQ_HOPLOG_EXTENSION);
+
+    // only one hoplog should exist
+    assertEquals(1, hoplogs.length);
+    
+    SequenceFileDetails sfd = getSequenceFileDetails(hdfsStore.getFileSystem(), hoplogs[0].getPath());
+    
+    // End position is before a sync. Should read until sync.
+    readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 0, sfd.indexOfKeyBeforeSecondSync ,
+        0, sfd.posBeforeSecondSync);
+    
+    // Start position is inside header. Should start from first key and go to next sync point. 
+    readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 0, sfd.indexOfKeyBeforeSecondSync, 
+        10, sfd.posAfterFirstSync);
+    
+    // Start and end position are between two sync markers. Should not read any keys.    
+    readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 29, 28, 
+        sfd.posAfterFirstSync, sfd.posBeforeSecondSync - sfd.posAfterFirstSync);
+    
+    // Start position is after a sync and End position is beyond the file size. 
+    //Should read all the records after the next sync.
+    readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), sfd.indexOfKeyAfterFirstSync, 9999, 
+        sfd.posBeforeFirstSync, 10000000);
+    
+    // Should read all the records. 
+    readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 0, 9999, 0, -1);
+  }
+  
+  class SequenceFileDetails {
+    public int posBeforeFirstSync;
+    public int indexOfKeyBeforeFirstSync;
+    
+    public int posAfterFirstSync;
+    public int indexOfKeyAfterFirstSync; 
+    
+    public int posBeforeSecondSync;
+    public int indexOfKeyBeforeSecondSync;
+  }
+  
+  public SequenceFileDetails getSequenceFileDetails(FileSystem inputFS, Path sequenceFileName) throws Exception {
+    SequenceFileDetails fd = new SequenceFileDetails();
+    SequenceFileHoplog hoplog = new SequenceFileHoplog(inputFS, sequenceFileName, null);
+      
+    SequenceFileIterator iter = (SequenceFileIterator) hoplog.getReader().scan();
+    int currentkeyStartPos = 0;
+    int cursorPos = 0;
+    String currentKey = null;
+    boolean firstSyncSeen = false; 
+    try {
+      while (iter.hasNext()) {
+        iter.next();
+        currentkeyStartPos = cursorPos;
+        currentKey = ((String)CacheServerHelper.deserialize(iter.getKey()));
+        cursorPos = (int)iter.getPosition();
+        if (iter.syncSeen()){
+          if (firstSyncSeen) {
+            
+            fd.posBeforeSecondSync = currentkeyStartPos;
+            fd.indexOfKeyBeforeSecondSync = Integer.parseInt(currentKey.substring(4));
+            break;
+          } else {
+            fd.posBeforeFirstSync = currentkeyStartPos;
+            fd.indexOfKeyBeforeFirstSync = Integer.parseInt(currentKey.substring(4));
+            
+            fd.posAfterFirstSync = cursorPos;
+            fd.indexOfKeyAfterFirstSync = Integer.parseInt(currentKey.substring(4)) + 1;
+            firstSyncSeen = true;
+          }
+        }
+      }
+
+    } catch (Exception e) {
+      fail(e.toString());
+    }
+    iter.close();
+    hoplog.close();
+    return fd;
+  }
+  
+  public void testClear() throws Exception {
+    int count = 10;
+    int bucketId = (int) System.nanoTime();
+    HDFSUnsortedHoplogOrganizer organizer = new HDFSUnsortedHoplogOrganizer(regionManager, bucketId);
+
+    // flush and create hoplog
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    for (int i = 0; i < count; i++) {
+      items.add(new TestEvent(("key-" + i), ("value-" + System.nanoTime())));
+    }
+    organizer.flush(items.iterator(), count);
+    organizer.closeCurrentWriter();
+    // check file existence in bucket directory
+    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/" + bucketId, 
+                      AbstractHoplogOrganizer.SEQ_HOPLOG_EXTENSION);
+    assertEquals(1, hoplogs.length);
+    readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 0);
+    
+    
+    // write another batch but do not close the data. 
+    organizer.flush(items.iterator(), count);
+    
+    organizer.clear();
+    
+    hoplogs = getBucketHoplogs(getName() + "/" + bucketId, 
+        AbstractHoplogOrganizer.SEQ_HOPLOG_EXTENSION);
+    // check file existence in bucket directory
+    FileStatus[] expiredhoplogs = getBucketHoplogs(getName() + "/" + bucketId, 
+                      AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
+    
+    // two expired hoplogs should exist
+    assertEquals(2, expiredhoplogs.length);
+    assertEquals(2, hoplogs.length);
+    // the expired hoplog names should correspond to the live hoplog names (with the expired extension appended)
+    assertTrue(expiredhoplogs[0].getPath().getName().equals(hoplogs[0].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) || 
+        expiredhoplogs[1].getPath().getName().equals(hoplogs[0].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) );
+    assertTrue(expiredhoplogs[0].getPath().getName().equals(hoplogs[1].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) || 
+        expiredhoplogs[1].getPath().getName().equals(hoplogs[1].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) );
+    
+    // Test that second time clear should be harmless and should not result in extra files. 
+    organizer.clear();
+    hoplogs = getBucketHoplogs(getName() + "/" + bucketId, 
+        AbstractHoplogOrganizer.SEQ_HOPLOG_EXTENSION);
+    // check file existence in bucket directory
+    expiredhoplogs = getBucketHoplogs(getName() + "/" + bucketId, 
+                      AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
+    
+    // two expired hoplogs should exist
+    assertEquals(2, expiredhoplogs.length);
+    assertEquals(2, hoplogs.length);
+    // check that the expired marker names match the hoplog names
+    assertTrue(expiredhoplogs[0].getPath().getName().equals(hoplogs[0].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) || 
+        expiredhoplogs[1].getPath().getName().equals(hoplogs[0].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) );
+    assertTrue(expiredhoplogs[0].getPath().getName().equals(hoplogs[1].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) || 
+        expiredhoplogs[1].getPath().getName().equals(hoplogs[1].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) );
+    
+    
+    readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 0);
+    readSequenceFile(hdfsStore.getFileSystem(), hoplogs[1].getPath(), 0);
+  }
+  
+  public void readSequenceFile(FileSystem inputFS, Path sequenceFileName, int index)  throws IOException{
+    readSequenceFile(inputFS, sequenceFileName, index, -1, 0, -1);
+  }
+  /**
+   * Reads the sequence file and asserts that its keys start at the given index
+   * (and, when endIndex is not -1, end at endIndex). If length is not -1, only
+   * the byte range starting at startoffset with the given length is scanned.
+   */
+  public void readSequenceFile(FileSystem inputFS, Path sequenceFileName, int index, int endIndex,
+      int startoffset, int length) throws IOException {
+    SequenceFileHoplog hoplog = new SequenceFileHoplog(inputFS, sequenceFileName, null);
+    
+    HoplogIterator<byte[], byte[]> iter = null;
+    if (length == -1){
+      iter = hoplog.getReader().scan();
+    }
+    else {
+      iter = hoplog.getReader().scan(startoffset, length);
+    }
+    
+    try {
+      while (iter.hasNext()) {
+        iter.next();
+        PersistedEventImpl te = UnsortedHoplogPersistedEvent.fromBytes(iter.getValue());
+        String stringkey = ((String)CacheServerHelper.deserialize(iter.getKey()));
+        assertTrue("Expected key: key-" + index + ". Actual key: " + stringkey, stringkey.equals("key-" + index));
+        index++;
+      }
+      if (endIndex != -1)
+        assertTrue("The keys should have gone up to key-" + endIndex + " but they only go up to key-" + (index - 1), index == endIndex + 1);
+    } catch (Exception e) {
+      assertTrue(e.toString(), false);
+    }
+    iter.close();
+    hoplog.close();
+ }
+
+}



[07/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSTestBase.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSTestBase.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSTestBase.java
new file mode 100644
index 0000000..3ba7086
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSTestBase.java
@@ -0,0 +1,719 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import static com.gemstone.gemfire.test.dunit.Wait.waitForCriterion;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueStats;
+import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetReader.HoplogIterator;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.SequenceFileHoplog;
+import com.gemstone.gemfire.cache30.CacheTestCase;
+import com.gemstone.gemfire.internal.FileUtil;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.PartitionedRegion;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
+import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerHelper;
+
+import com.gemstone.gemfire.test.dunit.AsyncInvocation;
+import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.SerializableCallable;
+import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
+
+@SuppressWarnings({"serial", "rawtypes", "unchecked"})
+public abstract class RegionWithHDFSTestBase extends CacheTestCase {
+
+  protected String tmpDir;
+
+  public static String homeDir = null;
+
+  protected abstract void checkWithGetAll(String uniqueName, ArrayList arrayl);
+
+  protected abstract void checkWithGet(String uniqueName, int start,
+      int end, boolean expectValue);
+
+  protected abstract void doDestroys(final String uniqueName, int start, int end);
+
+  protected abstract void doPutAll(final String uniqueName, Map map);
+
+  protected abstract void doPuts(final String uniqueName, int start, int end);
+
+  protected abstract SerializableCallable getCreateRegionCallable(final int totalnumOfBuckets, final int batchSizeMB,
+      final int maximumEntries, final String folderPath, final String uniqueName, final int batchInterval, final boolean queuePersistent, 
+      final boolean writeonly, final long timeForRollover, final long maxFileSize);
+  
+  protected abstract void verifyHDFSData(VM vm, String uniqueName) throws Exception ;
+  
+  protected abstract AsyncInvocation doAsyncPuts(VM vm, final String regionName,
+                                                 final int start, final int end, final String suffix) throws Exception;
+  
+  public RegionWithHDFSTestBase(String name) {
+    super(name);
+  }
+
+  @Override
+  public void preTearDownCacheTestCase() throws Exception {
+    super.preTearDownCacheTestCase();
+    for (int h = 0; h < Host.getHostCount(); h++) {
+      Host host = Host.getHost(h);
+      SerializableCallable cleanUp = cleanUpStoresAndDisconnect();
+      for (int v = 0; v < host.getVMCount(); v++) {
+        VM vm = host.getVM(v);
+        // This store will be deleted by the first VM itself. Invocations from
+        // subsequent VMs will be no-op.
+        vm.invoke(cleanUp);
+      }
+    }
+  }
+
+  public SerializableCallable cleanUpStoresAndDisconnect() throws Exception {
+    SerializableCallable cleanUp = new SerializableCallable("cleanUpStoresAndDisconnect") {
+      public Object call() throws Exception {
+        disconnectFromDS();
+        File file;
+        if (homeDir != null) {
+          file = new File(homeDir);
+          FileUtil.delete(file);
+          homeDir = null;
+        }
+        file = new File(tmpDir);
+        FileUtil.delete(file);
+        return 0;
+      }
+    };
+    return cleanUp;
+  }
+
+  @Override
+  public void preSetUp() throws Exception {
+    super.preSetUp();
+    tmpDir = /*System.getProperty("java.io.tmpdir") + "/" +*/ "RegionWithHDFSBasicDUnitTest_" + System.nanoTime();
+  }
+  
+  int createServerRegion(VM vm, final int totalnumOfBuckets, 
+      final int batchSize, final int maximumEntries, final String folderPath, 
+      final String uniqueName, final int batchInterval) {
+    return createServerRegion(vm, totalnumOfBuckets, 
+        batchSize, maximumEntries, folderPath, 
+        uniqueName, batchInterval, false, false);
+  }
+
+  protected int createServerRegion(VM vm, final int totalnumOfBuckets, 
+      final int batchSizeMB, final int maximumEntries, final String folderPath, 
+      final String uniqueName, final int batchInterval, final boolean writeonly,
+      final boolean queuePersistent) {
+    return createServerRegion(vm, totalnumOfBuckets, 
+        batchSizeMB, maximumEntries, folderPath, 
+        uniqueName, batchInterval, writeonly, queuePersistent, -1, -1);
+  }
+  protected int createServerRegion(VM vm, final int totalnumOfBuckets, 
+      final int batchSizeMB, final int maximumEntries, final String folderPath, 
+      final String uniqueName, final int batchInterval, final boolean writeonly,
+      final boolean queuePersistent, final long timeForRollover, final long maxFileSize) {
+    SerializableCallable createRegion = getCreateRegionCallable(
+        totalnumOfBuckets, batchSizeMB, maximumEntries, folderPath, uniqueName,
+        batchInterval, queuePersistent, writeonly, timeForRollover, maxFileSize);
+
+    return (Integer) vm.invoke(createRegion);
+  }
+  protected AsyncInvocation createServerRegionAsync(VM vm, final int totalnumOfBuckets, 
+      final int batchSizeMB, final int maximumEntries, final String folderPath, 
+      final String uniqueName, final int batchInterval, final boolean writeonly,
+      final boolean queuePersistent) {
+    SerializableCallable createRegion = getCreateRegionCallable(
+        totalnumOfBuckets, batchSizeMB, maximumEntries, folderPath, uniqueName,
+        batchInterval, queuePersistent, writeonly, -1, -1);
+
+    return vm.invokeAsync(createRegion);
+  }
+  protected AsyncInvocation createServerRegionAsync(VM vm, final int totalnumOfBuckets, 
+      final int batchSizeMB, final int maximumEntries, final String folderPath, 
+      final String uniqueName, final int batchInterval, final boolean writeonly,
+      final boolean queuePersistent, final long timeForRollover, final long maxFileSize) {
+    SerializableCallable createRegion = getCreateRegionCallable(
+        totalnumOfBuckets, batchSizeMB, maximumEntries, folderPath, uniqueName,
+        batchInterval, queuePersistent, writeonly, timeForRollover, maxFileSize);
+
+    return vm.invokeAsync(createRegion);
+  }
+  
+  /**
+   * Does puts, gets, destroys and getAll. Because there are many updates,
+   * the data is usually not found in memory or in the queue and has to be
+   * fetched from HDFS.
+   * @throws Throwable 
+   */
+  public void testGetFromHDFS() throws Throwable {
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+    final String uniqueName = getName();
+    final String homeDir = "../../testGetFromHDFS";
+    
+    createServerRegion(vm0, 7, 1, 50, homeDir, uniqueName, 50, false, true);
+    createServerRegion(vm1, 7, 1, 50, homeDir, uniqueName, 50, false, true);
+    
+    // Do some puts
+    vm0.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        doPuts(uniqueName, 0, 40);
+        return null;
+      }
+    });
+    
+    // Do some puts and destroys 
+    // some order manipulation has been done because of an issue: 
+    // " a higher version update on a key can be batched and 
+    // sent to HDFS before a lower version update on the same key 
+    // is batched and sent to HDFS. This will cause the latest 
+    // update on a key in an older file. Hence, a fetch from HDFS 
+    // will return an older update from a newer file."
+    
+    vm1.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        doPuts(uniqueName, 40, 50);
+        doDestroys(uniqueName, 40, 50);
+        doPuts(uniqueName, 50, 100);
+        doPuts(uniqueName, 30, 40);
+        return null;
+      }
+    });
+    
+    // do some more puts and destroy 
+    // some order manipulation has been done because of an issue: 
+    // " a higher version update on a key can be batched and 
+    // sent to HDFS before a lower version update on the same key 
+    // is batched and sent to HDFS. This will cause the latest 
+    // update on a key in an older file. Hence, a fetch from HDFS 
+    // will return an older update from a newer file."
+    vm1.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        doPuts(uniqueName, 80, 90);
+        doDestroys(uniqueName, 80, 90);
+        doPuts(uniqueName, 110, 200);
+        doPuts(uniqueName, 90, 110);
+        return null;
+      }
+      
+    });
+    
+    // get and getall the values and compare them. 
+    SerializableCallable checkData = new SerializableCallable() {
+      public Object call() throws Exception {
+        checkWithGet(uniqueName, 0, 40, true);
+        checkWithGet(uniqueName, 40, 50, false);
+        checkWithGet(uniqueName, 50, 80, true);
+        checkWithGet(uniqueName, 80, 90, false);
+        checkWithGet(uniqueName, 90, 200, true);
+        checkWithGet(uniqueName, 200, 201, false);
+        
+        ArrayList arrayl = new ArrayList();
+        for (int i =0; i< 200; i++) {
+          String k = "K" + i;
+          if ( !((40 <= i && i < 50) ||   (80 <= i && i < 90)))
+            arrayl.add(k);
+        }
+        checkWithGetAll(uniqueName, arrayl);
+        
+        return null;
+      }
+    };
+    vm1.invoke(checkData);
+    
+    //Restart the members and verify that we can still get the data
+    closeCache(vm0);
+    closeCache(vm1);
+    AsyncInvocation async0 = createServerRegionAsync(vm0, 7, 1, 50, homeDir, uniqueName, 50, false, true);
+    AsyncInvocation async1 = createServerRegionAsync(vm1, 7, 1, 50, homeDir, uniqueName, 50, false, true);
+    
+    async0.getResult();
+    async1.getResult();
+    
+    
+    // get and getall the values and compare them.
+    vm1.invoke(checkData);
+  
+    //TODO:HDFS we are just reading the files here. Need to verify 
+    // once the folder structure is finalized. 
+    dumpFiles(vm1, uniqueName);
+    
+  }
+
+  /**
+   * Puts a few entries (keys with multiple updates) and gets them immediately.
+   * There is a high probability that the values are served from the async queue.
+   */
+  public void testGetForAsyncQueue() {
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+    
+    final String uniqueName = getName();
+    final String homeDir = "../../testGetForAsyncQueue";
+    
+    createServerRegion(vm0, 2, 5, 1, homeDir, uniqueName, 10000);
+    createServerRegion(vm1, 2, 5, 1, homeDir, uniqueName, 10000);
+    
+    vm0.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        doPuts(uniqueName, 0, 4);
+        return null;
+      }
+    });
+    vm1.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        doPuts(uniqueName, 0, 2);
+        doDestroys(uniqueName, 2, 3);
+        doPuts(uniqueName, 3, 7);
+        
+        checkWithGet(uniqueName, 0, 2, true);
+        checkWithGet(uniqueName, 2, 3, false);
+        checkWithGet(uniqueName, 3, 7, true);
+        return null;
+      }
+    });
+  }
+
+  /**
+   * Puts a few entries (keys with multiple updates) and calls getAll immediately.
+   * There is a high probability that the values are served from the async queue.
+   */
+  public void testGetAllForAsyncQueue() {
+    
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+    
+    final String uniqueName = getName();
+    createServerRegion(vm0, 2, 5, 2, uniqueName, uniqueName, 10000);
+    createServerRegion(vm1, 2, 5, 2, uniqueName, uniqueName, 10000);
+    
+    vm0.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        doPuts(uniqueName, 0, 4);
+        return null;
+      }
+    });
+    vm1.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        doPuts(uniqueName, 1, 5);
+  
+        ArrayList arrayl = new ArrayList();
+        for (int i =0; i< 5; i++) {
+          String k = "K" + i;
+          arrayl.add(k);
+        }
+        checkWithGetAll(uniqueName, arrayl);
+        return null;
+      }
+    });
+  }
+
+  /**
+   * Does putAll with a few entries (keys with multiple updates) and gets them immediately.
+   * There is a high probability that the values are served from the async queue.
+   */
+  public void testPutAllForAsyncQueue() {
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+    
+    final String uniqueName = getName();
+    final String homeDir = "../../testPutAllForAsyncQueue";
+    createServerRegion(vm0, 2, 5, 2, homeDir, uniqueName, 10000);
+    createServerRegion(vm1, 2, 5, 2, homeDir, uniqueName, 10000);
+    
+    vm0.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        HashMap putAllmap = new HashMap();
+        for (int i =0; i< 4; i++)
+          putAllmap.put("K" + i, "V"+ i );
+        doPutAll(uniqueName, putAllmap);
+        return null;
+      }
+    });
+    vm1.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        HashMap putAllmap = new HashMap();
+        for (int i =1; i< 5; i++)
+          putAllmap.put("K" + i, "V"+ i );
+        doPutAll(uniqueName, putAllmap);
+        checkWithGet(uniqueName, 0, 5, true);
+        return null;
+      }
+    });
+  }
+
+  /**
+   * Does putAll and get. Because there are many updates, the data is usually
+   * not found in memory or in the queue and has to be fetched from HDFS.
+   */
+  public void _testPutAllAndGetFromHDFS() {
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+    
+    final String uniqueName = getName();
+    final String homeDir = "../../testPutAllAndGetFromHDFS";
+    createServerRegion(vm0, 7, 1, 500, homeDir, uniqueName, 500);
+    createServerRegion(vm1, 7, 1, 500, homeDir, uniqueName, 500);
+    
+    // Do some puts
+    vm0.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+          
+        HashMap putAllmap = new HashMap();
+        
+        for (int i =0; i< 500; i++)
+          putAllmap.put("K" + i, "V"+ i );
+        doPutAll(uniqueName, putAllmap);
+        return null;
+      }
+    });
+    
+    // Do putAll and some  destroys 
+    vm1.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        HashMap putAllmap = new HashMap();
+        for (int i = 500; i< 1000; i++)
+          putAllmap.put("K" + i, "V"+ i );
+        doPutAll(uniqueName, putAllmap);
+        return null;
+      }
+    });
+    
+    // do some more puts 
+    // some order manipulation has been done because of an issue: 
+    // " a higher version update on a key can be batched and 
+    // sent to HDFS before a lower version update on the same key 
+    // is batched and sent to HDFS. This will cause the latest 
+    // update on a key in an older file. Hence, a fetch from HDFS 
+    // will return an older update from a newer file."
+    vm1.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        HashMap putAllmap = new HashMap();
+        for (int i =1100; i< 2000; i++)
+          putAllmap.put("K" + i, "V"+ i );
+        doPutAll(uniqueName, putAllmap);
+        putAllmap = new HashMap();
+        for (int i = 900; i< 1100; i++)
+          putAllmap.put("K" + i, "V"+ i );
+        doPutAll(uniqueName, putAllmap);
+        return null;
+      }
+      
+    });
+    
+    // get and getall the values and compare them. 
+    vm1.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        checkWithGet(uniqueName, 0, 2000, true);
+        checkWithGet(uniqueName, 2000,  2001, false);
+        
+        ArrayList arrayl = new ArrayList();
+        for (int i =0; i< 2000; i++) {
+          String k = "K" + i;
+          arrayl.add(k);
+        }
+        checkWithGetAll(uniqueName, arrayl);
+        return null;
+      }
+    });
+    
+  }
+
+  public void _testWObasicClose() throws Throwable{
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+    VM vm2 = host.getVM(2);
+    VM vm3 = host.getVM(3);
+    
+    String homeDir = "../../testWObasicClose";
+    final String uniqueName = getName();
+
+    createServerRegion(vm0, 11, 1,  500, homeDir, uniqueName, 500, true, false);
+    createServerRegion(vm1, 11, 1,  500, homeDir, uniqueName, 500, true, false);
+    createServerRegion(vm2, 11, 1,  500, homeDir, uniqueName, 500, true, false);
+    createServerRegion(vm3, 11, 1,  500, homeDir, uniqueName, 500, true, false);
+    
+    AsyncInvocation a1 = doAsyncPuts(vm0, uniqueName, 1, 50, "vm0");
+    AsyncInvocation a2 = doAsyncPuts(vm1, uniqueName, 40, 100, "vm1");
+    AsyncInvocation a3 = doAsyncPuts(vm2, uniqueName, 40, 100, "vm2");
+    AsyncInvocation a4 = doAsyncPuts(vm3, uniqueName, 90, 150, "vm3");
+    
+    a1.join();
+    a2.join();
+    a3.join();
+    a4.join();
+   
+    Thread.sleep(5000); 
+    cacheClose (vm0, false);
+    cacheClose (vm1, false);
+    cacheClose (vm2, false);
+    cacheClose (vm3, false);
+    
+    AsyncInvocation async1 = createServerRegionAsync(vm0, 11, 1,  500, homeDir, uniqueName, 500, true, false);
+    AsyncInvocation async2 = createServerRegionAsync(vm1, 11, 1,  500, homeDir, uniqueName, 500, true, false);
+    AsyncInvocation async3 = createServerRegionAsync(vm2, 11, 1,  500, homeDir, uniqueName, 500, true, false);
+    AsyncInvocation async4 = createServerRegionAsync(vm3, 11, 1,  500, homeDir, uniqueName, 500, true, false);
+    async1.getResult();
+    async2.getResult();
+    async3.getResult();
+    async4.getResult();
+    
+    verifyHDFSData(vm0, uniqueName); 
+    
+    cacheClose (vm0, false);
+    cacheClose (vm1, false);
+    cacheClose (vm2, false);
+    cacheClose (vm3, false);
+  }
+  
+  
+  protected void cacheClose(VM vm, final boolean sleep){
+    vm.invoke( new SerializableCallable() {
+      public Object call() throws Exception {
+        if (sleep)
+          Thread.sleep(2000);
+        getCache().getLogger().info("Cache close in progress "); 
+        getCache().close();
+        getCache().getLogger().info("Cache closed");
+        return null;
+      }
+    });
+    
+  }
+  
+  protected void verifyInEntriesMap (HashMap<String, String> entriesMap, int start, int end, String suffix) {
+    for (int i =start; i< end; i++) {
+      String k = "K" + i;
+      String v = "V"+ i + suffix;
+      Object s = entriesMap.get(v);
+      assertTrue("Expected key " + k + " for value " + v + " but found " + s, k.equals(s));
+    }
+  }
+  
+  /**
+   * Reads all the sequence files and returns the key/value pairs persisted in
+   * each file. The pairs are returned as <value, key> tuples because a single
+   * key can have multiple persisted values.
+   * @throws Exception
+   */
+  protected HashMap<String, HashMap<String, String>>  createFilesAndEntriesMap(VM vm0, final String uniqueName, final String regionName) throws Exception {
+    HashMap<String, HashMap<String, String>> entriesToFileMap = (HashMap<String, HashMap<String, String>>) 
+    vm0.invoke( new SerializableCallable() {
+      public Object call() throws Exception {
+        HashMap<String, HashMap<String, String>> entriesToFileMap = new HashMap<String, HashMap<String, String>>();
+        HDFSStoreImpl hdfsStore = (HDFSStoreImpl) ((GemFireCacheImpl)getCache()).findHDFSStore(uniqueName);
+        FileSystem fs = hdfsStore.getFileSystem();
+        System.err.println("dumping file names in HDFS directory: " + hdfsStore.getHomeDir());
+        try {
+          Path basePath = new Path(hdfsStore.getHomeDir());
+          Path regionPath = new Path(basePath, regionName);
+          RemoteIterator<LocatedFileStatus> files = fs.listFiles(regionPath, true);
+          
+          while(files.hasNext()) {
+            HashMap<String, String> entriesMap = new HashMap<String, String>();
+            LocatedFileStatus next = files.next();
+            /* MergeGemXDHDFSToGFE - Disabled as I am not pulling in DunitEnv */
+            // System.err.println(DUnitEnv.get().getPid() + " - " + next.getPath());
+            System.err.println(" - " + next.getPath());
+            readSequenceFile(fs, next.getPath(), entriesMap);
+            entriesToFileMap.put(next.getPath().getName(), entriesMap);
+          }
+        } catch (FileNotFoundException e) {
+          // log and continue; the entries collected so far are still returned
+          e.printStackTrace();
+        } catch (IOException e) {
+          // log and continue; the entries collected so far are still returned
+          e.printStackTrace();
+        }
+        
+        return entriesToFileMap;
+      }
+      @SuppressWarnings("deprecation")
+      public void readSequenceFile(FileSystem inputFS, Path sequenceFileName,  
+          HashMap<String, String> entriesMap) throws IOException {
+        SequenceFileHoplog hoplog = new SequenceFileHoplog(inputFS, sequenceFileName, null);
+        HoplogIterator<byte[], byte[]> iter = hoplog.getReader().scan();
+        try {
+          while (iter.hasNext()) {
+            iter.next();
+            PersistedEventImpl te = UnsortedHoplogPersistedEvent.fromBytes(iter.getValue());
+            String stringkey = ((String)CacheServerHelper.deserialize(iter.getKey()));
+            String value = (String) te.getDeserializedValue();
+            entriesMap.put(value, stringkey);
+            if (getCache().getLoggerI18n().fineEnabled())
+              getCache().getLoggerI18n().fine("Key: " + stringkey + " value: " + value  + " path " + sequenceFileName.getName());
+          }
+        } catch (Exception e) {
+          assertTrue(e.toString(), false);
+        }
+        iter.close();
+        hoplog.close();
+     }
+    });
+    return entriesToFileMap;
+  }
+ protected SerializableCallable validateEmpty(VM vm0, final int numEntries, final String uniqueName) {
+    SerializableCallable validateEmpty = new SerializableCallable("validateEmpty") {
+      public Object call() throws Exception {
+        Region r = getRootRegion(uniqueName);
+        
+        assertTrue(r.isEmpty());
+        
+        //validate region is empty on peer as well
+        assertFalse(r.entrySet().iterator().hasNext());
+        //Make sure the region is empty
+        for (int i =0; i< numEntries; i++) {
+          assertEquals("failure on key K" + i , null, r.get("K" + i));
+        }
+        
+        return null;
+      }
+    };
+    
+    vm0.invoke(validateEmpty);
+    return validateEmpty;
+  }
+
+  protected void closeCache(VM vm0) {
+    //Restart and validate still empty.
+    SerializableRunnable closeCache = new SerializableRunnable("close cache") {
+      @Override
+      public void run() {
+        getCache().close();
+        disconnectFromDS();
+      }
+    };
+    
+    vm0.invoke(closeCache);
+  }
+
+  protected void verifyDataInHDFS(VM vm0, final String uniqueName, final boolean shouldHaveData,
+      final boolean wait, final boolean waitForQueueToDrain, final int numEntries) {
+        vm0.invoke(new SerializableCallable("check for data in hdfs") {
+          @Override
+          public Object call() throws Exception {
+            
+            HDFSRegionDirector director = HDFSRegionDirector.getInstance();
+            final SortedOplogStatistics stats = director.getHdfsRegionStats("/" + uniqueName);
+            waitForCriterion(new WaitCriterion() {
+              @Override
+              public boolean done() {
+                return stats.getActiveFileCount() > 0 == shouldHaveData;
+              }
+              
+              @Override
+              public String description() {
+                return "Waiting for active file count to be greater than 0: " + stats.getActiveFileCount() + " stats=" + System.identityHashCode(stats);
+              }
+            }, 30000, 100, true);
+            
+            if(waitForQueueToDrain) {
+              PartitionedRegion region = (PartitionedRegion) getCache().getRegion(uniqueName);
+              final AsyncEventQueueStats queueStats = region.getHDFSEventQueueStats();
+              waitForCriterion(new WaitCriterion() {
+                @Override
+                public boolean done() {
+                  return queueStats.getEventQueueSize() <= 0;
+                }
+                
+                @Override
+                public String description() {
+                  return "Waiting for queue stats to reach 0: " + queueStats.getEventQueueSize();
+                }
+              }, 30000, 100, true);
+            }
+            return null;
+          }
+        });
+      }
+
+  protected void doPuts(VM vm0, final String uniqueName, final int numEntries) {
+    // Do some puts
+    vm0.invoke(new SerializableCallable("do puts") {
+      public Object call() throws Exception {
+        Region r = getRootRegion(uniqueName);
+        for (int i =0; i< numEntries; i++)
+          r.put("K" + i, "V"+ i );
+        return null;
+      }
+    });
+  }
+
+  protected void validate(VM vm1, final String uniqueName, final int numEntries) {
+    SerializableCallable validate = new SerializableCallable("validate") {
+      public Object call() throws Exception {
+        Region r = getRootRegion(uniqueName);
+        
+        for (int i =0; i< numEntries; i++) {
+          assertEquals("failure on key K" + i , "V"+ i, r.get("K" + i));
+        }
+        
+        return null;
+      }
+    };
+    vm1.invoke(validate);
+  }
+
+  protected void dumpFiles(VM vm0, final String uniqueName) {
+    vm0.invoke(new SerializableRunnable() {
+  
+      @Override
+      public void run() {
+        HDFSStoreImpl hdfsStore = (HDFSStoreImpl) ((GemFireCacheImpl)getCache()).findHDFSStore(uniqueName);
+        FileSystem fs;
+        try {
+          fs = hdfsStore.getFileSystem();
+        } catch (IOException e1) {
+          throw new HDFSIOException(e1.getMessage(), e1);
+        }
+        System.err.println("dumping file names in HDFS directory: " + hdfsStore.getHomeDir());
+        try {
+          RemoteIterator<LocatedFileStatus> files = fs.listFiles(new Path(hdfsStore.getHomeDir()), true);
+          
+          while(files.hasNext()) {
+            LocatedFileStatus next = files.next();
+            /* MergeGemXDHDFSToGFE - Disabled as I am not pulling in DunitEnv */
+            // System.err.println(DUnitEnv.get().getPid() + " - " + next.getPath());
+            System.err.println(" - " + next.getPath());
+          }
+        } catch (FileNotFoundException e) {
+          // log and continue; the listing is best-effort
+          e.printStackTrace();
+        } catch (IOException e) {
+          // log and continue; the listing is best-effort
+          e.printStackTrace();
+        }
+        
+      }
+      
+    });
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserverJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserverJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserverJUnitTest.java
new file mode 100644
index 0000000..26c7094
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserverJUnitTest.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.junit.experimental.categories.Category;
+
+import junit.framework.TestCase;
+
+import com.gemstone.gemfire.cache.hdfs.internal.FlushObserver.AsyncFlushResult;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+@Category({IntegrationTest.class, HoplogTest.class})
+public class SignalledFlushObserverJUnitTest extends TestCase {
+  private AtomicInteger events;
+  private AtomicInteger delivered;
+  
+  private SignalledFlushObserver sfo;
+  
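+  // As exercised below: push() registers a queued event, pop(n) marks n events as
+  // delivered, and flush() returns an AsyncFlushResult that completes once every
+  // event pushed before the flush has been popped.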
+  public void testEmpty() throws InterruptedException {
+    assertFalse(sfo.shouldDrainImmediately());
+    assertTrue(sfo.flush().waitForFlush(0, TimeUnit.NANOSECONDS));
+    assertFalse(sfo.shouldDrainImmediately());
+  }
+  
+  public void testSingle() throws InterruptedException {
+    sfo.push();
+    AsyncFlushResult result = sfo.flush();
+
+    assertTrue(sfo.shouldDrainImmediately());
+    sfo.pop(1);
+    
+    assertTrue(result.waitForFlush(0, TimeUnit.MILLISECONDS));
+    assertFalse(sfo.shouldDrainImmediately());
+  }
+
+  public void testDouble() throws InterruptedException {
+    sfo.push();
+    sfo.push();
+
+    AsyncFlushResult result = sfo.flush();
+    assertTrue(sfo.shouldDrainImmediately());
+
+    sfo.pop(1);
+    assertFalse(result.waitForFlush(0, TimeUnit.MILLISECONDS));
+
+    sfo.pop(1);
+    assertTrue(result.waitForFlush(0, TimeUnit.MILLISECONDS));
+    assertFalse(sfo.shouldDrainImmediately());
+  }
+
+  public void testTimeout() throws InterruptedException {
+    sfo.push();
+    AsyncFlushResult result = sfo.flush();
+
+    assertTrue(sfo.shouldDrainImmediately());
+    assertFalse(result.waitForFlush(100, TimeUnit.MILLISECONDS));
+    sfo.pop(1);
+    
+    assertTrue(result.waitForFlush(0, TimeUnit.MILLISECONDS));
+    assertFalse(sfo.shouldDrainImmediately());
+  }
+  
+  @Override
+  protected void setUp() {
+    events = new AtomicInteger(0);
+    delivered = new AtomicInteger(0);
+    sfo = new SignalledFlushObserver();
+    AbstractHoplogOrganizer.JUNIT_TEST_RUN = true;
+  }
+  
+  private int push() {
+    return events.incrementAndGet();
+  }
+  
+  private int pop() {
+    return delivered.incrementAndGet();
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SortedListForAsyncQueueJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SortedListForAsyncQueueJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SortedListForAsyncQueueJUnitTest.java
new file mode 100644
index 0000000..8a7fb34
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SortedListForAsyncQueueJUnitTest.java
@@ -0,0 +1,565 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.ConcurrentSkipListSet;
+
+import org.junit.experimental.categories.Category;
+
+import junit.framework.TestCase;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.Operation;
+import com.gemstone.gemfire.cache.PartitionAttributesFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.cache.asyncqueue.internal.ParallelAsyncEventQueueImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue.KeyToSeqNumObject;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue.MultiRegionSortedQueue;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue.SortedEventQueue;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.internal.cache.EntryEventImpl;
+import com.gemstone.gemfire.internal.cache.EnumListenerEvent;
+import com.gemstone.gemfire.internal.cache.EventID;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.LocalRegion;
+import com.gemstone.gemfire.internal.cache.PartitionedRegion;
+import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerHelper;
+import com.gemstone.gemfire.internal.cache.wan.GatewaySenderAttributes;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+/**
+ * A test class for the functionality of the sorted async queue.
+ */
+@Category({IntegrationTest.class, HoplogTest.class})
+public class SortedListForAsyncQueueJUnitTest extends TestCase {
+  
+  public SortedListForAsyncQueueJUnitTest() {
+    super();
+  }
+
+  private GemFireCacheImpl c;
+
+  @Override
+  public void setUp() {
+    
+    System.setProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP, "true");
+    // make it a loner
+    this.c = createCache();
+    AbstractHoplogOrganizer.JUNIT_TEST_RUN = true;
+  }
+
+  protected GemFireCacheImpl createCache() {
+    return (GemFireCacheImpl) new CacheFactory().set("mcast-port", "0").set("log-level", "warning")
+        .create();
+  }
+
+  @Override
+  public void tearDown() {
+    this.c.close();
+  }
+  
+  public void testHopQueueWithOneBucket() throws Exception {
+    this.c.close();
+    this.c = createCache();
+    PartitionAttributesFactory paf = new PartitionAttributesFactory();
+    paf.setTotalNumBuckets(1);
+    
+    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
+    PartitionedRegion r1 = (PartitionedRegion) rf1.setPartitionAttributes(paf.create()).create("r1");
+    r1.put("K9", "x1");
+    r1.put("K8", "x2");
+    // hack to get the queue. 
+    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
+    HDFSBucketRegionQueue hdfsBQ = (HDFSBucketRegionQueue)((PartitionedRegion)hopqueue.getRegion()).getDataStore().getLocalBucketById(0);
+    
+    EntryEventImpl ev1 = EntryEventImpl.create((LocalRegion)r1, Operation.CREATE,
+        (Object)"K1", (Object)"V1", null,
+        false, (DistributedMember)c.getMyId());
+    // put some keys with multiple updates.
+    hopqueue.put(getNewEvent("K2", "V2", r1, 0, 2) );
+    hopqueue.put(getNewEvent("K3", "V3a", r1, 0, 8) );
+    hopqueue.put(getNewEvent("K3", "V3", r1, 0, 7) );
+    hopqueue.put(getNewEvent("K1", "V1", r1, 0, 3) );
+    hopqueue.put(getNewEvent("K2", "V2a", r1, 0, 6) );
+    hopqueue.put(getNewEvent("K3", "V3b", r1, 0, 9) );
+    
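+    // All six events should still be in the skip list (nothing has been peeked or
+    // conflated yet), and a get() for a key should resolve to the event with the
+    // highest sequence number, e.g. V2a for K2 and V3b for K3.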
+    assertTrue(" skip list size should be  6 ", getSortedEventQueue(hdfsBQ).currentSkipList.size() == 6);
+    
+    
+    // peek a key; it should be the lowest
+    Object[] l = hopqueue.peek(1, 0).toArray();
+    
+    assertTrue("First key should be K1 but is " + ((HDFSGatewayEventImpl)l[0]).getKey(), ((HDFSGatewayEventImpl)l[0]).getKey().equals("K1"));
+    assertTrue(" Peeked skip list size should be  6 ", getSortedEventQueue(hdfsBQ).getPeeked().size() == 6);
+    assertTrue(" skip list size should be  0 ", getSortedEventQueue(hdfsBQ).currentSkipList.size() == 0);
+    
+    // try to fetch the key. it would be in peeked skip list but still available
+    Object o = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);
+    assertTrue("First key should be K1", ((HDFSGatewayEventImpl)o).getKey().equals("K1"));
+    
+    assertTrue(" skip lists size should be  6"  , ( getSortedEventQueue(hdfsBQ).getPeeked().size() + getSortedEventQueue(hdfsBQ).currentSkipList.size() ) == 6);
+    
+    o = hopqueue.get(r1, CacheServerHelper.serialize("K2"), 0);
+    Object v = ((HDFSGatewayEventImpl)o).getDeserializedValue();
+    assertTrue(" key should K2 with value V2a but the value was " + v , ((String)v).equals("V2a"));
+    
+    o = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);
+    v = ((HDFSGatewayEventImpl)o).getDeserializedValue();
+    assertTrue(" key should K3 with value V3b but the value was " + v , ((String)v).equals("V3b"));
+  }
+
+  protected SortedEventQueue getSortedEventQueue(HDFSBucketRegionQueue hdfsBQ) {
+    MultiRegionSortedQueue multiQueue = (MultiRegionSortedQueue)(hdfsBQ.hdfsEventQueue);
+    return multiQueue.regionToEventQueue.values().iterator().next();
+  }
+  
+  public void testPeekABatch() throws Exception {
+    this.c.close();
+    this.c = createCache();
+    PartitionAttributesFactory paf = new PartitionAttributesFactory();
+    paf.setTotalNumBuckets(1);
+    
+    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
+    PartitionedRegion r1 = (PartitionedRegion) rf1.setPartitionAttributes(paf.create()).create("r1");
+    r1.put("K9", "x1");
+    r1.put("K8", "x2");
+    // hack to get the queue. 
+    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
+    HDFSBucketRegionQueue hdfsBQ = (HDFSBucketRegionQueue)((PartitionedRegion)hopqueue.getRegion()).getDataStore().getLocalBucketById(0);
+    
+    
+    // put some keys with multiple updates.
+    hopqueue.put(getNewEvent("K2", "V2", r1, 0, 2) );
+    hopqueue.put(getNewEvent("K3", "V3a", r1, 0, 8) );
+    hopqueue.put(getNewEvent("K3", "V3", r1, 0, 7) );
+    hopqueue.put(getNewEvent("K1", "V1", r1, 0, 3) );
+    hopqueue.put(getNewEvent("K2", "V2a", r1, 0, 6) );
+    hopqueue.put(getNewEvent("K3", "V3b", r1, 0, 9) );
+    
+    getSortedEventQueue(hdfsBQ).rollover(true);
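+    // The rollover moves the first six events into queueOfLists; the next three
+    // puts go into a fresh skip list.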
+    
+    hopqueue.put(getNewEvent("K1", "V12", r1, 0, 11) );
+    hopqueue.put(getNewEvent("K5", "V3a", r1, 0, 12) );
+    hopqueue.put(getNewEvent("K5", "V3b", r1, 0, 13) );
+    
+    assertTrue(" skip list size should be  3 but is " + getSortedEventQueue(hdfsBQ).currentSkipList.size(), getSortedEventQueue(hdfsBQ).currentSkipList.size() == 3);
+    assertTrue(" head of queueOfLists should have size 6 but is " + getSortedEventQueue(hdfsBQ).queueOfLists.peek().size(), getSortedEventQueue(hdfsBQ).queueOfLists.peek().size() == 6);
+    
+    Object o1 = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);
+    Object o2 = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);
+    Object v1 = ((HDFSGatewayEventImpl)o1).getDeserializedValue();
+    Object v2 = ((HDFSGatewayEventImpl)o2).getDeserializedValue();
+    assertTrue(" key should K3 with value V3b but the value was " + v1 , ((String)v1).equals("V3b"));
+    assertTrue(" key should K1 with value V12 but the value was " + v2 , ((String)v2).equals("V12"));
+    
+    
+    ArrayList a = hdfsBQ.peekABatch();
+    assertTrue("First key should be K1 but is " + ((HDFSGatewayEventImpl)a.get(0)).getKey(), ((HDFSGatewayEventImpl)a.get(0)).getKey().equals("K1"));
+    assertTrue("Second key should be K2 but is " + ((HDFSGatewayEventImpl)a.get(1)).getKey(), ((HDFSGatewayEventImpl)a.get(1)).getKey().equals("K2"));
+    assertTrue("Third key should be K2 but is " + ((HDFSGatewayEventImpl)a.get(2)).getKey(), ((HDFSGatewayEventImpl)a.get(2)).getKey().equals("K2"));
+    
+    
+    assertTrue(" Peeked skip list size should be 6 ", getSortedEventQueue(hdfsBQ).getPeeked().size() == 6);
+    assertTrue(" queueOfLists size should be  2 ", getSortedEventQueue(hdfsBQ).queueOfLists.size() == 2);
+    
+    assertTrue(" skip list size should be  3 ", getSortedEventQueue(hdfsBQ).currentSkipList.size() == 3);
+    
+    o1 = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);
+    o2 = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);
+    v1 = ((HDFSGatewayEventImpl)o1).getDeserializedValue();
+    v2 = ((HDFSGatewayEventImpl)o2).getDeserializedValue();
+    assertTrue(" key should K3 with value V3b but the value was " + v1 , ((String)v1).equals("V3b"));
+    assertTrue(" key should K1 with value V12 but the value was " + v2 , ((String)v2).equals("V12"));
+    
+    
+    java.util.Iterator<KeyToSeqNumObject> iter1 = getSortedEventQueue(hdfsBQ).getPeeked().iterator();
+    assertTrue("seq num in peeked list should be 3 ", iter1.next().getSeqNum() == 3);
+    assertTrue("seq num in peeked list should be 6 ", iter1.next().getSeqNum() == 6);
+    assertTrue("seq num in peeked list should be 2 ", iter1.next().getSeqNum() == 2);
+    assertTrue("seq num in peeked list should be 9 ", iter1.next().getSeqNum() == 9);
+    assertTrue("seq num in peeked list should be 8 ", iter1.next().getSeqNum() == 8);
+    assertTrue("seq num in peeked list should be 7 ", iter1.next().getSeqNum() == 7);
+    assertTrue(" Peeked list should not have any more elements. ", iter1.hasNext() == false);
+    
+    
+    java.util.Iterator<KeyToSeqNumObject> iter2 = getSortedEventQueue(hdfsBQ).currentSkipList.iterator();
+    assertTrue("seq num in skip list should be 11", iter2.next().getSeqNum() == 11);
+    assertTrue("seq num in skip list should be 13", iter2.next().getSeqNum() == 13);
+    assertTrue("seq num in skip list should be 12", iter2.next().getSeqNum() == 12);
+    
+    iter2 = getSortedEventQueue(hdfsBQ).currentSkipList.iterator();
+    HashSet<Long> hs = new HashSet<Long>();
+    hs.add((long) 11);
+    hs.add((long) 13);
+    hs.add((long) 12);
+    hs.add((long) 3);
+    hs.add((long) 6);
+    hs.add((long) 2);
+    hs.add((long) 9);
+    hs.add((long) 8);
+    hs.add((long) 7);
+    
+    hdfsBQ.hdfsEventQueue.handleRemainingElements(hs);
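+    // After the remaining sequence numbers are handed back, the next peek should
+    // return the rolled-over batch (K1, K5, K5) and K3 should no longer be
+    // resolvable through get().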
+    
+    ArrayList a1 = hdfsBQ.peekABatch();
+    o1 = hopqueue.get(r1, CacheServerHelper.serialize("K3"), 0);
+    o2 = hopqueue.get(r1, CacheServerHelper.serialize("K1"), 0);
+    v2 = ((HDFSGatewayEventImpl)o2).getDeserializedValue();
+    assertTrue(" key should K3 should not have been found ",  o1 ==null);
+    assertTrue(" key should K1 with value V12 but the value was " + v2 , ((String)v2).equals("V12"));
+    
+    assertTrue("First key should be K1 but is " + ((HDFSGatewayEventImpl)a1.get(0)).getKey(), ((HDFSGatewayEventImpl)a1.get(0)).getKey().equals("K1"));
+    assertTrue("Second key should be K5 but is " + ((HDFSGatewayEventImpl)a1.get(1)).getKey(), ((HDFSGatewayEventImpl)a1.get(1)).getKey().equals("K5"));
+    assertTrue("Third key should be K5 but is " + ((HDFSGatewayEventImpl)a1.get(2)).getKey(), ((HDFSGatewayEventImpl)a1.get(2)).getKey().equals("K5"));
+    
+    assertTrue(" Peeked skip list size should be  3 ", getSortedEventQueue(hdfsBQ).getPeeked().size() == 3);
+    assertTrue(" skip list size should be  0 but is " + getSortedEventQueue(hdfsBQ).currentSkipList.size(), getSortedEventQueue(hdfsBQ).currentSkipList.size() == 0);
+    assertTrue(" head of queueOfLists should have size 3 but is " + getSortedEventQueue(hdfsBQ).queueOfLists.peek().size(), getSortedEventQueue(hdfsBQ).queueOfLists.peek().size() == 3);
+    assertTrue(" queueOfLists size should be 2 but is " + getSortedEventQueue(hdfsBQ).queueOfLists.size(), getSortedEventQueue(hdfsBQ).queueOfLists.size() == 2);
+    
+  }
+  
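+  /**
+   * Builds an HDFSGatewayEventImpl for the given key/value; tailKey is stored as
+   * the event's shadow key, which is the sequence number the queue orders on.
+   */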
+  private HDFSGatewayEventImpl getNewEvent(Object key, Object value, Region r1, int bid, int tailKey) throws Exception {
+    EntryEventImpl ev1 = EntryEventImpl.create((LocalRegion)r1, Operation.CREATE,
+        key, value, null,
+        false, (DistributedMember)c.getMyId());
+    ev1.setEventId(new EventID(this.c.getDistributedSystem()));
+    HDFSGatewayEventImpl event = null;
+    event = new HDFSGatewayEventImpl(EnumListenerEvent.AFTER_CREATE, ev1, null , true, bid);
+    event.setShadowKey((long)tailKey);
+    return event;
+  }
+  
+  /**
+   * Creates the HDFS Queue instance for a region (this skips the creation of 
+   * event processor)
+   */
+  private HDFSParallelGatewaySenderQueue getHDFSQueue(Region region, Cache c) {
+    GatewaySenderAttributes gattrs = new GatewaySenderAttributes();
+    gattrs.isHDFSQueue = true;
+    gattrs.id = "SortedListForAsyncQueueJUnitTest_test";
+    ParallelAsyncEventQueueImpl gatewaySender = new ParallelAsyncEventQueueImpl(c, gattrs);
+    HashSet<Region> set = new HashSet<Region>();
+    set.add(region);
+    HDFSParallelGatewaySenderQueue queue = new HDFSParallelGatewaySenderQueue(gatewaySender, set, 0, 1);
+    queue.start();
+    return queue;
+  }
+  
+  // Tests that the KeyToSeqNumObject compare function keeps entries in the expected order.
+  public void testIfTheKeyToSeqNumIsKeptSortedWithoutConflation() throws Exception {
+    byte[] k1 = new byte[] { 1};
+    byte[] k2 = new byte[] { 2};
+    byte[] k3 = new byte[] { 3};
+    byte[] k4 = new byte[] { 4};
+    
+    KeyToSeqNumObject keyToSeq1 = new KeyToSeqNumObject(k1, new Long(2));
+    KeyToSeqNumObject keyToSeq2 = new KeyToSeqNumObject(k1, new Long(5));
+    KeyToSeqNumObject keyToSeq3 = new KeyToSeqNumObject(k1, new Long(8));
+    KeyToSeqNumObject keyToSeq4 = new KeyToSeqNumObject(k2, new Long(3));
+    KeyToSeqNumObject keyToSeq5 = new KeyToSeqNumObject(k2, new Long(7));
+    
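+    // Expected ordering: entries sort by region key first, and for the same key
+    // the higher sequence number comes first, so (k1, 8) polls before (k1, 5) and
+    // (k1, 2), and all k1 entries come before the k2 entries.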
+    ConcurrentSkipListSet<KeyToSeqNumObject> list = new ConcurrentSkipListSet<HDFSBucketRegionQueue.KeyToSeqNumObject>();
+    list.add(keyToSeq4);
+    list.add(keyToSeq3);
+    list.add(keyToSeq5);
+    list.add(keyToSeq1);
+    list.add(keyToSeq2);
+    list.add(keyToSeq5);
+    KeyToSeqNumObject k = list.pollFirst();
+    this.c.getLoggerI18n().fine(" KeyToSeqNumObject  byte: " + k.getRegionkey()[0] + " seq num: " + k.getSeqNum());
+    assertTrue ("Order of elements in Concurrent list is not correct ", k.equals(keyToSeq3));
+    list.remove(k);
+    
+    k = list.pollFirst();
+    this.c.getLoggerI18n().fine(" KeyToSeqNumObject  byte: " + k.getRegionkey()[0] + " seq num: " + k.getSeqNum());
+    assertTrue ("Order of elements in Concurrent list is not correct ", k.equals(keyToSeq2));
+    list.remove(k);
+    
+    k = list.pollFirst();
+    this.c.getLoggerI18n().fine(" KeyToSeqNumObject  byte: " + k.getRegionkey()[0] + " seq num: " + k.getSeqNum());
+    assertTrue ("Order of elements in Concurrent list is not correct ", k.equals(keyToSeq1));
+    list.remove(k);
+    
+    list.add(keyToSeq4);
+    list.add(keyToSeq3);
+    list.add(keyToSeq5);
+    list.add(keyToSeq1);
+    k = list.pollFirst();
+    this.c.getLoggerI18n().fine(" KeyToSeqNumObject  byte: " + k.getRegionkey()[0] + " seq num: " + k.getSeqNum());
+    assertTrue ("Order of elements in Concurrent list is not correct ", k.equals(keyToSeq3));
+    list.remove(k);
+    
+    k = list.pollFirst();
+    this.c.getLoggerI18n().fine(" KeyToSeqNumObject  byte: " + k.getRegionkey()[0] + " seq num: " + k.getSeqNum());
+    assertTrue ("Order of elements in Concurrent list is not correct ", k.equals(keyToSeq1));
+    list.remove(k);
+    
+    k = list.pollFirst();
+    this.c.getLoggerI18n().fine(" KeyToSeqNumObject  byte: " + k.getRegionkey()[0] + " seq num: " + k.getSeqNum());
+    assertTrue ("Order of elements in Concurrent list is not correct ", k.equals(keyToSeq5));
+    list.remove(k);
+    
+    k = list.pollFirst();
+    this.c.getLoggerI18n().fine(" KeyToSeqNumObject  byte: " + k.getRegionkey()[0] + " seq num: " + k.getSeqNum());
+    assertTrue ("Order of elements in Concurrent list is not correct ", k.equals(keyToSeq4));
+    
+    list.remove(k);
+  }
+  
+  public void testSingleGet() throws Exception {
+    checkQueueGet("K1", new KeyValue("K1", "V1"), "K1-V1");
+  }
+  
+  public void testMissingGet() throws Exception {
+    checkQueueGet("K1", null, 
+        "K0-V0",
+        "K2-V2");
+  }
+
+  public void testMultipleGet() throws Exception {
+    checkQueueGet("K1", new KeyValue("K1", "V1"), 
+        "K0-V0",
+        "K1-V1",
+        "K2-V2");
+  }
+
+  public void testDuplicateGet() throws Exception {
+    checkQueueGet("K1", new KeyValue("K1", "V1.4"), 
+        "K0-V0",
+        "K1-V1.0",
+        "K1-V1.1",
+        "K1-V1.2",
+        "K1-V1.3",
+        "K1-V1.4",
+        "K2-V2");
+  }
+
+  public void testEmptyIterator() throws Exception {
+    checkQueueIteration(Collections.<KeyValue>emptyList());
+  }
+  
+  public void testSingleIterator() throws Exception {
+    checkQueueIteration(getExpected(), 
+        "K0-V0",
+        "K1-V1",
+        "K2-V2",
+        "K3-V3",
+        "K4-V4",
+        "K5-V5",
+        "K6-V6",
+        "K7-V7",
+        "K8-V8",
+        "K9-V9"
+        );
+  }
+
+  public void testMultipleIterator() throws Exception {
+    checkQueueIteration(getExpected(), 
+        "K0-V0",
+        "K1-V1",
+        "K2-V2",
+        "roll",
+        "K3-V3",
+        "K4-V4",
+        "K5-V5",
+        "K6-V6",
+        "roll",
+        "K7-V7",
+        "K8-V8",
+        "K9-V9"
+        );
+  }
+
+  public void testMixedUpIterator() throws Exception {
+    checkQueueIteration(getExpected(), 
+        "K0-V0",
+        "K5-V5",
+        "K9-V9",
+        "roll",
+        "K3-V3",
+        "K2-V2",
+        "K6-V6",
+        "roll",
+        "K4-V4",
+        "K7-V7",
+        "K8-V8",
+        "K1-V1"
+        );
+  }
+
+  public void testMixedUpIterator2() throws Exception {
+    List<KeyValue> expected = new ArrayList<KeyValue>();
+    expected.add(new KeyValue("K0", "V0"));
+    expected.add(new KeyValue("K1", "V1.2"));
+    expected.add(new KeyValue("K2", "V2.1"));
+    expected.add(new KeyValue("K3", "V3.1"));
+    expected.add(new KeyValue("K4", "V4.2"));
+    expected.add(new KeyValue("K5", "V5.2"));
+    expected.add(new KeyValue("K6", "V6"));
+    expected.add(new KeyValue("K7", "V7"));
+    expected.add(new KeyValue("K8", "V8"));
+    expected.add(new KeyValue("K9", "V9"));
+    
+    checkQueueIteration(expected, 
+        "K1-V1.0",
+        "K2-V2.0",
+        "K3-V3.0",
+        "K4-V4.0",
+        "roll",
+        "K2-V2.1",
+        "K4-V4.1",
+        "K6-V6",
+        "K8-V8",
+        "roll",
+        "K1-V1.1",
+        "K3-V3.1",
+        "K5-V5.0",
+        "K7-V7",
+        "K9-V9",
+        "roll",
+        "K0-V0",
+        "K1-V1.2",
+        "K4-V4.2",
+        "K5-V5.1",
+        "K5-V5.2"
+        );
+  }
+
+  private List<KeyValue> getExpected() {
+    List<KeyValue> expected = new ArrayList<KeyValue>();
+    expected.add(new KeyValue("K0", "V0"));
+    expected.add(new KeyValue("K1", "V1"));
+    expected.add(new KeyValue("K2", "V2"));
+    expected.add(new KeyValue("K3", "V3"));
+    expected.add(new KeyValue("K4", "V4"));
+    expected.add(new KeyValue("K5", "V5"));
+    expected.add(new KeyValue("K6", "V6"));
+    expected.add(new KeyValue("K7", "V7"));
+    expected.add(new KeyValue("K8", "V8"));
+    expected.add(new KeyValue("K9", "V9"));
+    
+    return expected;
+  }
+  
+  private void checkQueueGet(String key, KeyValue expected, String... entries) throws Exception {
+    PartitionAttributesFactory paf = new PartitionAttributesFactory();
+    paf.setTotalNumBuckets(1);
+    
+    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
+    PartitionedRegion r1 = (PartitionedRegion) rf1.setPartitionAttributes(paf.create()).create("r1");
+
+    // create the buckets
+    r1.put("blah", "blah");
+
+    // hack to get the queue. 
+    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
+    HDFSBucketRegionQueue brq = (HDFSBucketRegionQueue)((PartitionedRegion)hopqueue.getRegion()).getDataStore().getLocalBucketById(0);
+
+    
+    int seq = 0;
+    for (String s : entries) {
+      if (s.equals("roll")) {
+        brq.rolloverSkipList();
+      } else {
+        String[] kv = s.split("-");
+        hopqueue.put(getNewEvent(kv[0], kv[1], r1, 0, seq++));
+      }
+    }
+
+    byte[] bkey = EntryEventImpl.serialize(key);
+    HDFSGatewayEventImpl evt = hopqueue.get(r1, bkey, 0);
+    if (expected == null) {
+      assertNull(evt);
+      
+    } else {
+      assertEquals(expected.key, evt.getKey());
+      assertEquals(expected.value, evt.getDeserializedValue());
+    }
+  }
+  
+  private void checkQueueIteration(List<KeyValue> expected, String... entries) throws Exception {
+    PartitionAttributesFactory paf = new PartitionAttributesFactory();
+    paf.setTotalNumBuckets(1);
+    
+    RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
+    Region r1 = rf1.setPartitionAttributes(paf.create()).create("r1");
+
+    // create the buckets
+    r1.put("blah", "blah");
+
+    HDFSParallelGatewaySenderQueue hopqueue = getHDFSQueue(r1, this.c);
+    HDFSBucketRegionQueue brq = (HDFSBucketRegionQueue)((PartitionedRegion)hopqueue.getRegion()).getDataStore().getLocalBucketById(0);
+    
+    int seq = 0;
+    for (String s : entries) {
+      if (s.equals("roll")) {
+        brq.rolloverSkipList();
+      } else {
+        String[] kv = s.split("-");
+        hopqueue.put(getNewEvent(kv[0], kv[1], r1, 0, seq++));
+        getSortedEventQueue(brq).rollover(true);
+      }
+    }
+    
+    Iterator<HDFSGatewayEventImpl> iter = brq.iterator(r1);
+    List<KeyValue> actual = new ArrayList<KeyValue>();
+    while (iter.hasNext()) {
+      HDFSGatewayEventImpl evt = iter.next();
+      actual.add(new KeyValue((String) evt.getKey(), (String) evt.getDeserializedValue()));
+    }
+    
+    assertEquals(expected, actual);
+  }
+  
+  public static class KeyValue {
+    public final String key;
+    public final String value;
+    
+    public KeyValue(String key, String value) {
+      this.key = key;
+      this.value = value;
+    }
+    
+    @Override
+    public boolean equals(Object o) {
+      if (!(o instanceof KeyValue)) {
+        return false;
+      }
+
+      KeyValue obj = (KeyValue) o;
+      return key.equals(obj.key) && value.equals(obj.value);
+    }
+    
+    @Override
+    public String toString() {
+      return key + "=" + value;
+    }
+  }
+}
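
(Editorial sketch, not part of the patch: the checkQueueIteration helper above
splits each "key-value" entry string on "-" and treats the literal token "roll"
as a request to roll over the bucket's skip list, so an additional case that
rolls over between every entry could be written as follows, reusing getExpected().)

  public void testRollBetweenEachEntry() throws Exception {
    checkQueueIteration(getExpected(),
        "K0-V0", "roll", "K1-V1", "roll", "K2-V2", "roll", "K3-V3", "roll",
        "K4-V4", "roll", "K5-V5", "roll", "K6-V6", "roll", "K7-V7", "roll",
        "K8-V8", "roll", "K9-V9");
  }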

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
new file mode 100644
index 0000000..c8c15d5
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
@@ -0,0 +1,394 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.net.URI;
+import java.util.HashSet;
+import java.util.Random;
+import java.util.Set;
+import java.util.TreeMap;
+
+import junit.framework.TestCase;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.Operation;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.cache.SerializedCacheValue;
+import com.gemstone.gemfire.cache.TransactionId;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.SortedHDFSQueuePersistedEvent;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl.FileSystemFactory;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog.HoplogWriter;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer.Compactor;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.internal.cache.LocalRegion;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.HFileStoreStatistics;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
+import com.gemstone.gemfire.internal.cache.versions.DiskVersionTag;
+import com.gemstone.gemfire.internal.util.BlobHelper;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
+
+import com.gemstone.gemfire.test.dunit.AsyncInvocation;
+import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.SerializableCallable;
+import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
+
+public abstract class BaseHoplogTestCase extends TestCase {
+  public static final String HDFS_STORE_NAME = "hdfs";
+  public static final Random rand = new Random(System.currentTimeMillis());
+  protected Path testDataDir;
+  protected Cache cache;
+  
+  protected HDFSRegionDirector director; 
+  protected HdfsRegionManager regionManager;
+  protected HDFSStoreFactory hsf;
+  protected HDFSStoreImpl hdfsStore;
+  protected RegionFactory<Object, Object> regionfactory;
+  protected Region<Object, Object> region;
+  protected SortedOplogStatistics stats;
+  protected HFileStoreStatistics storeStats;
+  protected BlockCache blockCache;
+  
+  Set<IgnoredException> exceptions = new HashSet<IgnoredException>();
+  @Override
+  protected void setUp() throws Exception {
+    super.setUp();
+    System.setProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP, "true");
+    
+    //This is logged by HDFS when it is stopped.
+    exceptions.add(IgnoredException.addIgnoredException("sleep interrupted"));
+    exceptions.add(IgnoredException.addIgnoredException("java.io.InterruptedIOException"));
+    
+    testDataDir = new Path("test-case");
+
+    cache = createCache();
+    
+    configureHdfsStoreFactory();
+    hdfsStore = (HDFSStoreImpl) hsf.create(HDFS_STORE_NAME);
+
+    regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+    regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
+    region = regionfactory.create(getName());
+    
+    // disable compaction by default and clear existing queues
+    HDFSCompactionManager compactionManager = HDFSCompactionManager.getInstance(hdfsStore);
+    compactionManager.reset();
+    
+    director = HDFSRegionDirector.getInstance();
+    director.setCache(cache);
+    regionManager = ((LocalRegion)region).getHdfsRegionManager();
+    stats = director.getHdfsRegionStats("/" + getName());
+    storeStats = hdfsStore.getStats();
+    blockCache = hdfsStore.getBlockCache();
+    AbstractHoplogOrganizer.JUNIT_TEST_RUN = true;
+  }
+
+  protected void configureHdfsStoreFactory() throws Exception {
+    hsf = this.cache.createHDFSStoreFactory();
+    hsf.setHomeDir(testDataDir.toString());
+    hsf.setMinorCompaction(false);
+    hsf.setMajorCompaction(false);
+  }
+
+  protected Cache createCache() {
+    CacheFactory cf = new CacheFactory().set("mcast-port", "0")
+        .set("log-level", "info")
+        ;
+    cache = cf.create();
+    return cache;
+  }
+
+  @Override
+  protected void tearDown() throws Exception {
+    if (region != null) {
+      region.destroyRegion();
+    }
+    
+    if (hdfsStore != null) {
+      hdfsStore.getFileSystem().delete(testDataDir, true);
+      hdfsStore.destroy();
+    }
+    
+    if (cache != null) {
+      cache.close();
+    }
+    super.tearDown();
+    for (IgnoredException ex: exceptions) {
+      ex.remove();
+    }
+  }
+
+  /**
+   * Creates a hoplog file with numKeys records. Keys follow the key-X pattern,
+   * where X ranges from 0 to numKeys - 1 (offset by 100000 when numKeys > 10),
+   * and values follow the value-<nanoTime> pattern.
+   * 
+   * @return the sorted map of inserted KVs
+   */
+  protected TreeMap<String, String> createHoplog(int numKeys, Hoplog oplog) throws IOException {
+    int offset = (numKeys > 10 ? 100000 : 0);
+    
+    HoplogWriter writer = oplog.createWriter(numKeys);
+    TreeMap<String, String> map = new TreeMap<String, String>();
+    for (int i = offset; i < (numKeys + offset); i++) {
+      String key = ("key-" + i);
+      String value = ("value-" + System.nanoTime());
+      writer.append(key.getBytes(), value.getBytes());
+      map.put(key, value);
+    }
+    writer.close();
+    return map;
+  }
+  
+  protected FileStatus[] getBucketHoplogs(String regionAndBucket, final String type)
+      throws IOException {
+    return getBucketHoplogs(hdfsStore.getFileSystem(), regionAndBucket, type);
+  }
+  
+  protected FileStatus[] getBucketHoplogs(FileSystem fs, String regionAndBucket, final String type)
+      throws IOException {
+    FileStatus[] hoplogs = fs.listStatus(
+        new Path(testDataDir, regionAndBucket), new PathFilter() {
+          @Override
+          public boolean accept(Path file) {
+            return file.getName().endsWith(type);
+          }
+        });
+    return hoplogs;
+  }
+
+  protected String getRandomHoplogName() {
+    String hoplogName = "hoplog-" + System.nanoTime() + "-" + rand.nextInt(10000) + ".hop";
+    return hoplogName;
+  }
+  
+//  public static MiniDFSCluster initMiniCluster(int port, int numDN) throws Exception {
+//    HashMap<String, String> map = new HashMap<String, String>();
+//    map.put(DFSConfigKeys.DFS_REPLICATION_KEY, "1");
+//    return initMiniCluster(port, numDN, map);
+//  }
+//
+//  public static MiniDFSCluster initMiniCluster(int port, int numDN, HashMap<String, String> map) throws Exception {
+//    System.setProperty("test.build.data", "hdfs-test-cluster");
+//    Configuration hconf = new HdfsConfiguration();
+//    for (Entry<String, String> entry : map.entrySet()) {
+//      hconf.set(entry.getKey(), entry.getValue());
+//    }
+//
+//    hconf.set("dfs.namenode.fs-limits.min-block-size", "1024");
+//    
+//    Builder builder = new MiniDFSCluster.Builder(hconf);
+//    builder.numDataNodes(numDN);
+//    builder.nameNodePort(port);
+//    MiniDFSCluster cluster = builder.build();
+//    return cluster;
+//  }
+
+  public static void setConfigFile(HDFSStoreFactory factory, File configFile, String config)
+      throws Exception {
+    BufferedWriter bw = new BufferedWriter(new FileWriter(configFile));
+    bw.write(config);
+    bw.close();
+    factory.setHDFSClientConfigFile(configFile.getName());
+  }
+  
+  public static void alterMajorCompaction(HDFSStoreImpl store, boolean enable) {
+    HDFSStoreMutator mutator = store.createHdfsStoreMutator();
+    mutator.setMajorCompaction(enable);
+    store.alter(mutator);
+  }
+  
+  public static void alterMinorCompaction(HDFSStoreImpl store, boolean enable) {
+    HDFSStoreMutator mutator = store.createHdfsStoreMutator();
+    mutator.setMinorCompaction(enable);
+    store.alter(mutator);
+  }
+  
+  public void deleteMiniClusterDir() throws Exception {
+    File clusterDir = new File("hdfs-test-cluster");
+    if (clusterDir.exists()) {
+      FileUtils.deleteDirectory(clusterDir);
+    }
+  }
+  
+  public static class TestEvent extends SortedHDFSQueuePersistedEvent {
+    Object key;
+    
+    public TestEvent(String k, String v) throws Exception {
+      this(k, v, Operation.PUT_IF_ABSENT);
+    }
+
+    public TestEvent(String k, String v, Operation op) throws Exception {
+      super(v, op, (byte) 0x02, false, new DiskVersionTag(), BlobHelper.serializeToBlob(k), 0);
+      this.key = k; 
+    }
+
+    public Object getKey() {
+      return key;
+      
+    }
+
+    public Object getNewValue() {
+      return valueObject;
+    }
+
+    public Operation getOperation() {
+      return op;
+    }
+    
+    public Region<Object, Object> getRegion() {
+      return null;
+    }
+
+    public Object getCallbackArgument() {
+      return null;
+    }
+
+    public boolean isCallbackArgumentAvailable() {
+      return false;
+    }
+
+    public boolean isOriginRemote() {
+      return false;
+    }
+
+    public DistributedMember getDistributedMember() {
+      return null;
+    }
+
+    public boolean isExpiration() {
+      return false;
+    }
+
+    public boolean isDistributed() {
+      return false;
+    }
+
+    public Object getOldValue() {
+      return null;
+    }
+
+    public SerializedCacheValue<Object> getSerializedOldValue() {
+      return null;
+    }
+
+    public SerializedCacheValue<Object> getSerializedNewValue() {
+      return null;
+    }
+
+    public boolean isLocalLoad() {
+      return false;
+    }
+
+    public boolean isNetLoad() {
+      return false;
+    }
+
+    public boolean isLoad() {
+      return false;
+    }
+
+    public boolean isNetSearch() {
+      return false;
+    }
+
+    public TransactionId getTransactionId() {
+      return null;
+    }
+
+    public boolean isBridgeEvent() {
+      return false;
+    }
+
+    public boolean hasClientOrigin() {
+      return false;
+    }
+
+    public boolean isOldValueAvailable() {
+      return false;
+    }
+  }
+  
+  public abstract class AbstractCompactor implements Compactor {
+    @Override
+    public HDFSStore getHdfsStore() {
+      return hdfsStore;
+    }
+
+    public void suspend() {
+    }
+
+    public void resume() {
+    }
+
+    public boolean isBusy(boolean isMajor) {
+      return false;
+    }
+  }
+  
+  public HDFSStoreFactoryImpl getCloseableLocalHdfsStoreFactory() {
+    final FileSystemFactory fsFactory = new FileSystemFactory() {
+      // By default, closing a LocalFileSystem instance does not disable it;
+      // hence this customization.
+      class CustomFileSystem extends LocalFileSystem {
+        boolean isClosed = false;
+
+        public void close() throws IOException {
+          isClosed = true;
+          super.close();
+        }
+
+        public FileStatus getFileStatus(Path f) throws IOException {
+          if (isClosed) {
+            throw new IOException();
+          }
+          return super.getFileStatus(f);
+        }
+      }
+
+      public FileSystem create(URI namenode, Configuration conf, boolean forceNew) throws IOException {
+        CustomFileSystem fs = new CustomFileSystem();
+        fs.initialize(namenode, conf);
+        return fs;
+      }
+    };
+
+    HDFSStoreFactoryImpl storeFactory = new HDFSStoreFactoryImpl(cache) {
+      public HDFSStore create(String name) {
+        return new HDFSStoreImpl(name, this.configHolder) {
+          public FileSystemFactory getFileSystemFactory() {
+            return fsFactory;
+          }
+        };
+      }
+    };
+    return storeFactory;
+  }
+}
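
(Editorial sketch, assuming HDFSStoreFactoryImpl exposes the same setHomeDir
used by configureHdfsStoreFactory above: a subclass test can obtain a store
whose local file system actually honors close(), as provided by the
CustomFileSystem defined in getCloseableLocalHdfsStoreFactory.)

  HDFSStoreFactoryImpl factory = getCloseableLocalHdfsStoreFactory();
  factory.setHomeDir(testDataDir.toString());
  HDFSStoreImpl store = (HDFSStoreImpl) factory.create("closeable-hdfs");
  FileSystem fs = store.getFileSystem();
  fs.close();
  // getFileStatus() on the closed instance now throws IOException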

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CardinalityEstimatorJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CardinalityEstimatorJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CardinalityEstimatorJUnitTest.java
new file mode 100644
index 0000000..db050b3
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CardinalityEstimatorJUnitTest.java
@@ -0,0 +1,188 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.Operation;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+
+@Category({IntegrationTest.class, HoplogTest.class})
+public class CardinalityEstimatorJUnitTest extends BaseHoplogTestCase {
+
+  public void testSingleHoplogCardinality() throws Exception {
+    int count = 10;
+    int bucketId = (int) System.nanoTime();
+    HoplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
+
+    // flush and create hoplog
+    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
+    for (int i = 0; i < count; i++) {
+      items.add(new TestEvent(("key-" + i), ("value-" + System.nanoTime())));
+    }
+    // assert that size is 0 before flush begins
+    assertEquals(0, organizer.sizeEstimate());
+    organizer.flush(items.iterator(), count);
+
+    assertEquals(count, organizer.sizeEstimate());
+    assertEquals(0, stats.getActiveReaderCount());
+    
+    organizer.close();
+    organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
+    assertEquals(count, organizer.sizeEstimate());
+    assertEquals(1, stats.getActiveReaderCount());
+  }
+
+  public void testSingleHoplogCardinalityWithDuplicates() throws Exception {
+    int bucketId = (int) System.nanoTime();
+    HoplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
+
+    List<TestEvent> items = new ArrayList<TestEvent>();
+    items.add(new TestEvent("key-0", "value-0"));
+    items.add(new TestEvent("key-0", "value-0"));
+    items.add(new TestEvent("key-1", "value-1"));
+    items.add(new TestEvent("key-2", "value-2"));
+    items.add(new TestEvent("key-3", "value-3"));
+    items.add(new TestEvent("key-3", "value-3"));
+    items.add(new TestEvent("key-4", "value-4"));
+
+    organizer.flush(items.iterator(), 7);
+    assertEquals(5, organizer.sizeEstimate());
+  }
+
+  public void testMultipleHoplogCardinality() throws Exception {
+    int bucketId = (int) System.nanoTime();
+    HoplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
+
+    List<TestEvent> items = new ArrayList<TestEvent>();
+    items.add(new TestEvent("key-0", "value-0"));
+    items.add(new TestEvent("key-1", "value-1"));
+    items.add(new TestEvent("key-2", "value-2"));
+    items.add(new TestEvent("key-3", "value-3"));
+    items.add(new TestEvent("key-4", "value-4"));
+
+    organizer.flush(items.iterator(), 5);
+    assertEquals(5, organizer.sizeEstimate());
+
+    items.clear();
+    items.add(new TestEvent("key-1", "value-0"));
+    items.add(new TestEvent("key-5", "value-5"));
+    items.add(new TestEvent("key-6", "value-6"));
+    items.add(new TestEvent("key-7", "value-7"));
+    items.add(new TestEvent("key-8", "value-8"));
+    items.add(new TestEvent("key-9", "value-9"));
+
+    organizer.flush(items.iterator(), 6);
+    assertEquals(10, organizer.sizeEstimate());
+  }
+
+  public void testCardinalityAfterRestart() throws Exception {
+    int bucketId = (int) System.nanoTime();
+    HoplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
+
+    List<TestEvent> items = new ArrayList<TestEvent>();
+    items.add(new TestEvent("key-0", "value-0"));
+    items.add(new TestEvent("key-1", "value-1"));
+    items.add(new TestEvent("key-2", "value-2"));
+    items.add(new TestEvent("key-3", "value-3"));
+    items.add(new TestEvent("key-4", "value-4"));
+
+    assertEquals(0, organizer.sizeEstimate());
+    organizer.flush(items.iterator(), 5);
+    assertEquals(5, organizer.sizeEstimate());
+
+    // restart
+    organizer.close();
+    organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
+    assertEquals(5, organizer.sizeEstimate());
+    
+    items.clear();
+    items.add(new TestEvent("key-1", "value-0"));
+    items.add(new TestEvent("key-5", "value-5"));
+    items.add(new TestEvent("key-6", "value-6"));
+    items.add(new TestEvent("key-7", "value-7"));
+    items.add(new TestEvent("key-8", "value-8"));
+    items.add(new TestEvent("key-9", "value-9"));
+
+    organizer.flush(items.iterator(), 6);
+    assertEquals(10, organizer.sizeEstimate());
+
+    // restart - make sure that HLL from the youngest file is read
+    organizer.close();
+    organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
+    assertEquals(10, organizer.sizeEstimate());
+    
+    items.clear();
+    items.add(new TestEvent("key-1", "value-1"));
+    items.add(new TestEvent("key-5", "value-5"));
+    items.add(new TestEvent("key-10", "value-10"));
+    items.add(new TestEvent("key-11", "value-11"));
+    items.add(new TestEvent("key-12", "value-12"));
+    items.add(new TestEvent("key-13", "value-13"));
+    items.add(new TestEvent("key-14", "value-14"));
+
+    organizer.flush(items.iterator(), 7);
+    assertEquals(15, organizer.sizeEstimate());
+  }
+
+  public void testCardinalityAfterMajorCompaction() throws Exception {
+    doCardinalityAfterCompactionWork(true);
+  }
+
+  public void testCardinalityAfterMinorCompaction() throws Exception {
+    doCardinalityAfterCompactionWork(false);
+  }
+
+  private void doCardinalityAfterCompactionWork(boolean isMajor) throws Exception {
+    int bucketId = (int) System.nanoTime();
+    HoplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
+
+    List<TestEvent> items = new ArrayList<TestEvent>();
+    items.add(new TestEvent("key-0", "value-0"));
+    items.add(new TestEvent("key-1", "value-1"));
+    items.add(new TestEvent("key-2", "value-2"));
+    items.add(new TestEvent("key-3", "value-3"));
+    items.add(new TestEvent("key-4", "value-4"));
+
+    organizer.flush(items.iterator(), 5);
+    assertEquals(5, organizer.sizeEstimate());
+
+    items.clear();
+    items.add(new TestEvent("key-0", "value-0"));
+    items.add(new TestEvent("key-1", "value-5", Operation.DESTROY));
+    items.add(new TestEvent("key-2", "value-6", Operation.INVALIDATE));
+    items.add(new TestEvent("key-5", "value-5"));
+
+    organizer.flush(items.iterator(), 4);
+    assertEquals(6, organizer.sizeEstimate());
+
+    items.clear();
+    items.add(new TestEvent("key-3", "value-5", Operation.DESTROY));
+    items.add(new TestEvent("key-4", "value-6", Operation.INVALIDATE));
+    items.add(new TestEvent("key-5", "value-0"));
+    items.add(new TestEvent("key-6", "value-5"));
+
+    organizer.flush(items.iterator(), 4);
+    
+    items.add(new TestEvent("key-5", "value-0"));
+    items.add(new TestEvent("key-6", "value-5"));
+    
+    items.clear();
+    organizer.flush(items.iterator(), items.size());
+    assertEquals(7, organizer.sizeEstimate());
+
+    organizer.getCompactor().compact(isMajor, false);
+    assertEquals(3, organizer.sizeEstimate());
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCacheLoaderJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCacheLoaderJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCacheLoaderJUnitTest.java
new file mode 100644
index 0000000..67dcddf
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCacheLoaderJUnitTest.java
@@ -0,0 +1,106 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.util.List;
+
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.AttributesMutator;
+import com.gemstone.gemfire.cache.CacheLoader;
+import com.gemstone.gemfire.cache.CacheLoaderException;
+import com.gemstone.gemfire.cache.LoaderHelper;
+import com.gemstone.gemfire.cache.asyncqueue.AsyncEventListener;
+import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueFactoryImpl;
+import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueImpl;
+import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueStats;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+/**
+ * Tests that entries loaded from a cache loader are inserted into the HDFS queue.
+ * 
+ * @author hemantb
+ */
+@Category({IntegrationTest.class, HoplogTest.class})
+public class HDFSCacheLoaderJUnitTest extends BaseHoplogTestCase {
+
+  private static int totalEventsReceived = 0;
+  protected void configureHdfsStoreFactory() throws Exception {
+    hsf = this.cache.createHDFSStoreFactory();
+    hsf.setHomeDir(testDataDir.toString());
+    hsf.setBatchInterval(100000000);
+    hsf.setBatchSize(10000);
+  }
+
+  /**
+   * Tests that entries loaded from a cache loader are inserted into the HDFS queue
+   * but are not inserted into async event queues.
+   * @throws Exception
+   */
+  public void testCacheLoaderForAsyncQAndHDFS() throws Exception {
+    
+    final AsyncEventQueueStats hdfsQueuestatistics = ((AsyncEventQueueImpl)cache.
+        getAsyncEventQueues().toArray()[0]).getStatistics();
+    
+    AttributesMutator am = this.region.getAttributesMutator();
+    am.setCacheLoader(new CacheLoader() {
+      private int i = 0;
+      public Object load(LoaderHelper helper)
+      throws CacheLoaderException {
+        return new Integer(i++);
+      }
+      
+      public void close() { }
+    });
+    
+    
+    
+    String asyncQueueName = "myQueue";
+    new AsyncEventQueueFactoryImpl(cache).setBatchTimeInterval(1).
+    create(asyncQueueName, new AsyncEventListener() {
+      
+      @Override
+      public void close() {
+        // TODO Auto-generated method stub
+        
+      }
+
+      @Override
+      public boolean processEvents(List events) {
+        totalEventsReceived += events.size();
+        return true;
+      }
+    });
+    am.addAsyncEventQueueId(asyncQueueName);
+    
+    region.put(1, new Integer(100));
+    region.destroy(1);
+    region.get(1);
+    region.destroy(1);
+    
+    assertTrue("HDFS queue should have received four events. But it received " + 
+        hdfsQueuestatistics.getEventQueueSize(), 4 == hdfsQueuestatistics.getEventQueueSize());
+    assertTrue("HDFS queue should have received four events. But it received " + 
+        hdfsQueuestatistics.getEventsReceived(), 4 == hdfsQueuestatistics.getEventsReceived());
+    
+    region.get(1);
+    Thread.sleep(2000);
+    
+    assertTrue("Async queue should have received only 5 events. But it received " + 
+        totalEventsReceived, totalEventsReceived == 5);
+    assertTrue("HDFS queue should have received 5 events. But it received " + 
+        hdfsQueuestatistics.getEventQueueSize(), 5 == hdfsQueuestatistics.getEventQueueSize());
+    assertTrue("HDFS queue should have received 5 events. But it received " + 
+        hdfsQueuestatistics.getEventsReceived(), 5 == hdfsQueuestatistics.getEventsReceived());
+    
+    
+  }
+  
+}


[16/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/org/apache/hadoop/io/SequenceFile.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/org/apache/hadoop/io/SequenceFile.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/org/apache/hadoop/io/SequenceFile.java
new file mode 100644
index 0000000..b13f499
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/org/apache/hadoop/io/SequenceFile.java
@@ -0,0 +1,3726 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io;
+
+import java.io.*;
+import java.util.*;
+import java.rmi.server.UID;
+import java.security.MessageDigest;
+import org.apache.commons.logging.*;
+import org.apache.hadoop.util.Options;
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.Options.CreateOpts;
+import org.apache.hadoop.io.compress.CodecPool;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.io.compress.CompressionInputStream;
+import org.apache.hadoop.io.compress.CompressionOutputStream;
+import org.apache.hadoop.io.compress.Compressor;
+import org.apache.hadoop.io.compress.Decompressor;
+import org.apache.hadoop.io.compress.DefaultCodec;
+import org.apache.hadoop.io.compress.GzipCodec;
+import org.apache.hadoop.io.compress.zlib.ZlibFactory;
+import org.apache.hadoop.io.serializer.Deserializer;
+import org.apache.hadoop.io.serializer.Serializer;
+import org.apache.hadoop.io.serializer.SerializationFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.*;
+import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.Progress;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.NativeCodeLoader;
+import org.apache.hadoop.util.MergeSort;
+import org.apache.hadoop.util.PriorityQueue;
+import org.apache.hadoop.util.Time;
+// ** Pivotal Changes Begin
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.RawComparator;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.UTF8;
+import org.apache.hadoop.io.VersionMismatchException;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.io.WritableComparator;
+import org.apache.hadoop.io.WritableName;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
+//** Pivotal Changes End
+
+/** 
+ * <code>SequenceFile</code>s are flat files consisting of binary key/value 
+ * pairs.
+ * 
+ * <p><code>SequenceFile</code> provides {@link Writer}, {@link Reader} and
+ * {@link Sorter} classes for writing, reading and sorting respectively.</p>
+ * 
+ * There are three <code>SequenceFile</code> <code>Writer</code>s based on the 
+ * {@link CompressionType} used to compress key/value pairs:
+ * <ol>
+ *   <li>
+ *   <code>Writer</code> : Uncompressed records.
+ *   </li>
+ *   <li>
+ *   <code>RecordCompressWriter</code> : Record-compressed files, only compress 
+ *                                       values.
+ *   </li>
+ *   <li>
+ *   <code>BlockCompressWriter</code> : Block-compressed files, both keys & 
+ *                                      values are collected in 'blocks' 
+ *                                      separately and compressed. The size of 
+ *                                      the 'block' is configurable.
+ *                                      the 'block' is configurable.
+ *   </li>
+ * </ol>
+ * <p>The actual compression algorithm used to compress key and/or values can be
+ * specified by using the appropriate {@link CompressionCodec}.</p>
+ * 
+ * <p>The recommended way is to use the static <tt>createWriter</tt> methods
+ * provided by the <code>SequenceFile</code> to choose the preferred format
+ * (an illustrative usage sketch follows the option-based <tt>createWriter</tt>
+ * below).</p>
+ *
+ * <p>The {@link Reader} acts as the bridge and can read any of the above 
+ * <code>SequenceFile</code> formats.</p>
+ *
+ * <h4 id="Formats">SequenceFile Formats</h4>
+ * 
+ * <p>Essentially there are 3 different formats for <code>SequenceFile</code>s
+ * depending on the <code>CompressionType</code> specified. All of them share a
+ * <a href="#Header">common header</a> described below.
+ * 
+ * <h5 id="Header">SequenceFile Header</h5>
+ * <ul>
+ *   <li>
+ *   version - 3 bytes of magic header <b>SEQ</b>, followed by 1 byte of actual 
+ *             version number (e.g. SEQ4 or SEQ6)
+ *   </li>
+ *   <li>
+ *   keyClassName - key class
+ *   </li>
+ *   <li>
+ *   valueClassName - value class
+ *   </li>
+ *   <li>
+ *   compression - A boolean which specifies if compression is turned on for 
+ *                 keys/values in this file.
+ *   </li>
+ *   <li>
+ *   blockCompression - A boolean which specifies if block-compression is 
+ *                      turned on for keys/values in this file.
+ *   </li>
+ *   <li>
+ *   compression codec - <code>CompressionCodec</code> class which is used for  
+ *                       compression of keys and/or values (if compression is 
+ *                       enabled).
+ *   </li>
+ *   <li>
+ *   metadata - {@link Metadata} for this file.
+ *   </li>
+ *   <li>
+ *   sync - A sync marker to denote end of the header.
+ *   </li>
+ * </ul>
+ * 
+ * <h5 id="#UncompressedFormat">Uncompressed SequenceFile Format</h5>
+ * <ul>
+ * <li>
+ * <a href="#Header">Header</a>
+ * </li>
+ * <li>
+ * Record
+ *   <ul>
+ *     <li>Record length</li>
+ *     <li>Key length</li>
+ *     <li>Key</li>
+ *     <li>Value</li>
+ *   </ul>
+ * </li>
+ * <li>
+ * A sync-marker every few <code>100</code> bytes or so.
+ * </li>
+ * </ul>
+ *
+ * <h5 id="#RecordCompressedFormat">Record-Compressed SequenceFile Format</h5>
+ * <ul>
+ * <li>
+ * <a href="#Header">Header</a>
+ * </li>
+ * <li>
+ * Record
+ *   <ul>
+ *     <li>Record length</li>
+ *     <li>Key length</li>
+ *     <li>Key</li>
+ *     <li><i>Compressed</i> Value</li>
+ *   </ul>
+ * </li>
+ * <li>
+ * A sync-marker every few <code>100</code> bytes or so.
+ * </li>
+ * </ul>
+ * 
+ * <h5 id="#BlockCompressedFormat">Block-Compressed SequenceFile Format</h5>
+ * <ul>
+ * <li>
+ * <a href="#Header">Header</a>
+ * </li>
+ * <li>
+ * Record <i>Block</i>
+ *   <ul>
+ *     <li>Uncompressed number of records in the block</li>
+ *     <li>Compressed key-lengths block-size</li>
+ *     <li>Compressed key-lengths block</li>
+ *     <li>Compressed keys block-size</li>
+ *     <li>Compressed keys block</li>
+ *     <li>Compressed value-lengths block-size</li>
+ *     <li>Compressed value-lengths block</li>
+ *     <li>Compressed values block-size</li>
+ *     <li>Compressed values block</li>
+ *   </ul>
+ * </li>
+ * <li>
+ * A sync-marker every block.
+ * </li>
+ * </ul>
+ * 
+ * <p>The compressed blocks of key lengths and value lengths consist of the 
+ * actual lengths of individual keys/values encoded in ZeroCompressedInteger 
+ * format.</p>
+ * 
+ * @see CompressionCodec
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class SequenceFile {
+  private static final Log LOG = LogFactory.getLog(SequenceFile.class);
+
+  private SequenceFile() {}                         // no public ctor
+
+  private static final byte BLOCK_COMPRESS_VERSION = (byte)4;
+  private static final byte CUSTOM_COMPRESS_VERSION = (byte)5;
+  private static final byte VERSION_WITH_METADATA = (byte)6;
+  private static byte[] VERSION = new byte[] {
+    (byte)'S', (byte)'E', (byte)'Q', VERSION_WITH_METADATA
+  };
+
+  private static final int SYNC_ESCAPE = -1;      // "length" of sync entries
+  private static final int SYNC_HASH_SIZE = 16;   // number of bytes in hash 
+  private static final int SYNC_SIZE = 4+SYNC_HASH_SIZE; // escape + hash
+
+  /** The number of bytes between sync points.*/
+  public static final int SYNC_INTERVAL = 100*SYNC_SIZE; 
+
+  /** 
+   * The compression type used to compress key/value pairs in the 
+   * {@link SequenceFile}.
+   * 
+   * @see SequenceFile.Writer
+   */
+  public static enum CompressionType {
+    /** Do not compress records. */
+    NONE, 
+    /** Compress values only, each separately. */
+    RECORD,
+    /** Compress sequences of records together in blocks. */
+    BLOCK
+  }
+
+  /**
+   * Get the compression type for the reduce outputs
+   * @param job the job config to look in
+   * @return the kind of compression to use
+   */
+  static public CompressionType getDefaultCompressionType(Configuration job) {
+    String name = job.get("io.seqfile.compression.type");
+    return name == null ? CompressionType.RECORD : 
+      CompressionType.valueOf(name);
+  }
+  
+  /**
+   * Set the default compression type for sequence files.
+   * @param job the configuration to modify
+   * @param val the new compression type (none, block, record)
+   */
+  static public void setDefaultCompressionType(Configuration job, 
+                                               CompressionType val) {
+    job.set("io.seqfile.compression.type", val.toString());
+  }
+
+  /**
+   * Create a new Writer with the given options.
+   * @param conf the configuration to use
+   * @param opts the options to create the file with
+   * @return a new Writer
+   * @throws IOException
+   */
+  public static Writer createWriter(Configuration conf, Writer.Option... opts
+                                    ) throws IOException {
+    Writer.CompressionOption compressionOption = 
+      Options.getOption(Writer.CompressionOption.class, opts);
+    CompressionType kind;
+    if (compressionOption != null) {
+      kind = compressionOption.getValue();
+    } else {
+      kind = getDefaultCompressionType(conf);
+      opts = Options.prependOptions(opts, Writer.compression(kind));
+    }
+    switch (kind) {
+      default:
+      case NONE:
+        return new Writer(conf, opts);
+      case RECORD:
+        return new RecordCompressWriter(conf, opts);
+      case BLOCK:
+        return new BlockCompressWriter(conf, opts);
+    }
+  }
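+
+  /*
+   * Illustrative usage sketch (editorial example, not part of the original
+   * class): writing a small block-compressed file via the option-based
+   * factory above. The path and key/value choices are arbitrary.
+   *
+   *   Configuration conf = new Configuration();
+   *   SequenceFile.Writer writer = SequenceFile.createWriter(conf,
+   *       SequenceFile.Writer.file(new Path("example.seq")),
+   *       SequenceFile.Writer.keyClass(Text.class),
+   *       SequenceFile.Writer.valueClass(IntWritable.class),
+   *       SequenceFile.Writer.compression(CompressionType.BLOCK));
+   *   try {
+   *     writer.append(new Text("key-0"), new IntWritable(0));
+   *   } finally {
+   *     writer.close();
+   *   }
+   */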
+
+  /**
+   * Construct the preferred type of SequenceFile Writer.
+   * @param fs The configured filesystem. 
+   * @param conf The configuration.
+   * @param name The name of the file. 
+   * @param keyClass The 'key' type.
+   * @param valClass The 'value' type.
+   * @return Returns the handle to the constructed SequenceFile Writer.
+   * @throws IOException
+   * @deprecated Use {@link #createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)}
+   *     instead.
+   */
+  @Deprecated
+  public static Writer 
+    createWriter(FileSystem fs, Configuration conf, Path name, 
+                 Class keyClass, Class valClass) throws IOException {
+    return createWriter(conf, Writer.filesystem(fs),
+                        Writer.file(name), Writer.keyClass(keyClass),
+                        Writer.valueClass(valClass));
+  }
+  
+  /**
+   * Construct the preferred type of SequenceFile Writer.
+   * @param fs The configured filesystem. 
+   * @param conf The configuration.
+   * @param name The name of the file. 
+   * @param keyClass The 'key' type.
+   * @param valClass The 'value' type.
+   * @param compressionType The compression type.
+   * @return Returns the handle to the constructed SequenceFile Writer.
+   * @throws IOException
+   * @deprecated Use {@link #createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)}
+   *     instead.
+   */
+  @Deprecated
+  public static Writer 
+    createWriter(FileSystem fs, Configuration conf, Path name, 
+                 Class keyClass, Class valClass, 
+                 CompressionType compressionType) throws IOException {
+    return createWriter(conf, Writer.filesystem(fs),
+                        Writer.file(name), Writer.keyClass(keyClass),
+                        Writer.valueClass(valClass), 
+                        Writer.compression(compressionType));
+  }
+  
+  /**
+   * Construct the preferred type of SequenceFile Writer.
+   * @param fs The configured filesystem. 
+   * @param conf The configuration.
+   * @param name The name of the file. 
+   * @param keyClass The 'key' type.
+   * @param valClass The 'value' type.
+   * @param compressionType The compression type.
+   * @param progress The Progressable object to track progress.
+   * @return Returns the handle to the constructed SequenceFile Writer.
+   * @throws IOException
+   * @deprecated Use {@link #createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)}
+   *     instead.
+   */
+  @Deprecated
+  public static Writer
+    createWriter(FileSystem fs, Configuration conf, Path name, 
+                 Class keyClass, Class valClass, CompressionType compressionType,
+                 Progressable progress) throws IOException {
+    return createWriter(conf, Writer.file(name),
+                        Writer.filesystem(fs),
+                        Writer.keyClass(keyClass),
+                        Writer.valueClass(valClass), 
+                        Writer.compression(compressionType),
+                        Writer.progressable(progress));
+  }
+
+  /**
+   * Construct the preferred type of SequenceFile Writer.
+   * @param fs The configured filesystem. 
+   * @param conf The configuration.
+   * @param name The name of the file. 
+   * @param keyClass The 'key' type.
+   * @param valClass The 'value' type.
+   * @param compressionType The compression type.
+   * @param codec The compression codec.
+   * @return Returns the handle to the constructed SequenceFile Writer.
+   * @throws IOException
+   * @deprecated Use {@link #createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)}
+   *     instead.
+   */
+  @Deprecated
+  public static Writer 
+    createWriter(FileSystem fs, Configuration conf, Path name, 
+                 Class keyClass, Class valClass, CompressionType compressionType, 
+                 CompressionCodec codec) throws IOException {
+    return createWriter(conf, Writer.file(name),
+                        Writer.filesystem(fs),
+                        Writer.keyClass(keyClass),
+                        Writer.valueClass(valClass), 
+                        Writer.compression(compressionType, codec));
+  }
+  
+  /**
+   * Construct the preferred type of SequenceFile Writer.
+   * @param fs The configured filesystem. 
+   * @param conf The configuration.
+   * @param name The name of the file. 
+   * @param keyClass The 'key' type.
+   * @param valClass The 'value' type.
+   * @param compressionType The compression type.
+   * @param codec The compression codec.
+   * @param progress The Progressable object to track progress.
+   * @param metadata The metadata of the file.
+   * @return Returns the handle to the constructed SequenceFile Writer.
+   * @throws IOException
+   * @deprecated Use {@link #createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)}
+   *     instead.
+   */
+  @Deprecated
+  public static Writer
+    createWriter(FileSystem fs, Configuration conf, Path name, 
+                 Class keyClass, Class valClass, 
+                 CompressionType compressionType, CompressionCodec codec,
+                 Progressable progress, Metadata metadata) throws IOException {
+    return createWriter(conf, Writer.file(name),
+                        Writer.filesystem(fs),
+                        Writer.keyClass(keyClass),
+                        Writer.valueClass(valClass),
+                        Writer.compression(compressionType, codec),
+                        Writer.progressable(progress),
+                        Writer.metadata(metadata));
+  }
+
+  /**
+   * Construct the preferred type of SequenceFile Writer.
+   * @param fs The configured filesystem.
+   * @param conf The configuration.
+   * @param name The name of the file.
+   * @param keyClass The 'key' type.
+   * @param valClass The 'value' type.
+   * @param bufferSize buffer size for the underlying output stream.
+   * @param replication replication factor for the file.
+   * @param blockSize block size for the file.
+   * @param compressionType The compression type.
+   * @param codec The compression codec.
+   * @param progress The Progressable object to track progress.
+   * @param metadata The metadata of the file.
+   * @return Returns the handle to the constructed SequenceFile Writer.
+   * @throws IOException
+   * @deprecated Use {@link #createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)}
+   *     instead.
+   */
+  @Deprecated
+  public static Writer
+    createWriter(FileSystem fs, Configuration conf, Path name,
+                 Class keyClass, Class valClass, int bufferSize,
+                 short replication, long blockSize,
+                 CompressionType compressionType, CompressionCodec codec,
+                 Progressable progress, Metadata metadata) throws IOException {
+    return createWriter(conf, Writer.file(name),
+                        Writer.filesystem(fs),
+                        Writer.keyClass(keyClass),
+                        Writer.valueClass(valClass), 
+                        Writer.bufferSize(bufferSize), 
+                        Writer.replication(replication),
+                        Writer.blockSize(blockSize),
+                        Writer.compression(compressionType, codec),
+                        Writer.progressable(progress),
+                        Writer.metadata(metadata));
+  }
+
+  /**
+   * Construct the preferred type of SequenceFile Writer.
+   * @param fs The configured filesystem.
+   * @param conf The configuration.
+   * @param name The name of the file.
+   * @param keyClass The 'key' type.
+   * @param valClass The 'value' type.
+   * @param bufferSize buffer size for the underlying output stream.
+   * @param replication replication factor for the file.
+   * @param blockSize block size for the file.
+   * @param createParent create parent directory if non-existent
+   * @param compressionType The compression type.
+   * @param codec The compression codec.
+   * @param metadata The metadata of the file.
+   * @return Returns the handle to the constructed SequenceFile Writer.
+   * @throws IOException
+   */
+  @Deprecated
+  public static Writer
+  createWriter(FileSystem fs, Configuration conf, Path name,
+               Class keyClass, Class valClass, int bufferSize,
+               short replication, long blockSize, boolean createParent,
+               CompressionType compressionType, CompressionCodec codec,
+               Metadata metadata) throws IOException {
+    return createWriter(FileContext.getFileContext(fs.getUri(), conf),
+        conf, name, keyClass, valClass, compressionType, codec,
+        metadata, EnumSet.of(CreateFlag.CREATE,CreateFlag.OVERWRITE),
+        CreateOpts.bufferSize(bufferSize),
+        createParent ? CreateOpts.createParent()
+                     : CreateOpts.donotCreateParent(),
+        CreateOpts.repFac(replication),
+        CreateOpts.blockSize(blockSize)
+      );
+  }
+
+  /**
+   * Construct the preferred type of SequenceFile Writer.
+   * @param fc The context for the specified file.
+   * @param conf The configuration.
+   * @param name The name of the file.
+   * @param keyClass The 'key' type.
+   * @param valClass The 'value' type.
+   * @param compressionType The compression type.
+   * @param codec The compression codec.
+   * @param metadata The metadata of the file.
+   * @param createFlag gives the semantics of create: overwrite, append etc.
+   * @param opts file creation options; see {@link CreateOpts}.
+   * @return Returns the handle to the constructed SequenceFile Writer.
+   * @throws IOException
+   */
+  public static Writer
+  createWriter(FileContext fc, Configuration conf, Path name,
+               Class keyClass, Class valClass,
+               CompressionType compressionType, CompressionCodec codec,
+               Metadata metadata,
+               final EnumSet<CreateFlag> createFlag, CreateOpts... opts)
+               throws IOException {
+    return createWriter(conf, fc.create(name, createFlag, opts),
+          keyClass, valClass, compressionType, codec, metadata).ownStream();
+  }
+
+  /**
+   * Construct the preferred type of SequenceFile Writer.
+   * @param fs The configured filesystem. 
+   * @param conf The configuration.
+   * @param name The name of the file. 
+   * @param keyClass The 'key' type.
+   * @param valClass The 'value' type.
+   * @param compressionType The compression type.
+   * @param codec The compression codec.
+   * @param progress The Progressable object to track progress.
+   * @return Returns the handle to the constructed SequenceFile Writer.
+   * @throws IOException
+   * @deprecated Use {@link #createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)}
+   *     instead.
+   */
+  @Deprecated
+  public static Writer
+    createWriter(FileSystem fs, Configuration conf, Path name, 
+                 Class keyClass, Class valClass, 
+                 CompressionType compressionType, CompressionCodec codec,
+                 Progressable progress) throws IOException {
+    return createWriter(conf, Writer.file(name),
+                        Writer.filesystem(fs),
+                        Writer.keyClass(keyClass),
+                        Writer.valueClass(valClass),
+                        Writer.compression(compressionType, codec),
+                        Writer.progressable(progress));
+  }
+
+  /**
+   * Construct the preferred type of 'raw' SequenceFile Writer.
+   * @param conf The configuration.
+   * @param out The stream on top which the writer is to be constructed.
+   * @param keyClass The 'key' type.
+   * @param valClass The 'value' type.
+   * @param compressionType The compression type.
+   * @param codec The compression codec.
+   * @param metadata The metadata of the file.
+   * @return Returns the handle to the constructed SequenceFile Writer.
+   * @throws IOException
+   * @deprecated Use {@link #createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)}
+   *     instead.
+   */
+  @Deprecated
+  public static Writer
+    createWriter(Configuration conf, FSDataOutputStream out, 
+                 Class keyClass, Class valClass,
+                 CompressionType compressionType,
+                 CompressionCodec codec, Metadata metadata) throws IOException {
+    return createWriter(conf, Writer.stream(out), Writer.keyClass(keyClass),
+                        Writer.valueClass(valClass), 
+                        Writer.compression(compressionType, codec),
+                        Writer.metadata(metadata));
+  }
+  
+  /**
+   * Construct the preferred type of 'raw' SequenceFile Writer.
+   * @param conf The configuration.
+   * @param out The stream on top which the writer is to be constructed.
+   * @param keyClass The 'key' type.
+   * @param valClass The 'value' type.
+   * @param compressionType The compression type.
+   * @param codec The compression codec.
+   * @return Returns the handle to the constructed SequenceFile Writer.
+   * @throws IOException
+   * @deprecated Use {@link #createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)}
+   *     instead.
+   */
+  @Deprecated
+  public static Writer
+    createWriter(Configuration conf, FSDataOutputStream out, 
+                 Class keyClass, Class valClass, CompressionType compressionType,
+                 CompressionCodec codec) throws IOException {
+    return createWriter(conf, Writer.stream(out), Writer.keyClass(keyClass),
+                        Writer.valueClass(valClass),
+                        Writer.compression(compressionType, codec));
+  }
+  
+
+  /** The interface to 'raw' values of SequenceFiles. */
+  public static interface ValueBytes {
+
+    /** Writes the uncompressed bytes to the outStream.
+     * @param outStream : Stream to write uncompressed bytes into.
+     * @throws IOException
+     */
+    public void writeUncompressedBytes(DataOutputStream outStream)
+      throws IOException;
+
+    /** Write compressed bytes to outStream. 
+     * Note that it will NOT compress the bytes if they are not already compressed.
+     * @param outStream : Stream to write compressed bytes into.
+     */
+    public void writeCompressedBytes(DataOutputStream outStream) 
+      throws IllegalArgumentException, IOException;
+
+    /**
+     * Size of stored data.
+     */
+    public int getSize();
+  }
+  
+  private static class UncompressedBytes implements ValueBytes {
+    private int dataSize;
+    private byte[] data;
+    
+    private UncompressedBytes() {
+      data = null;
+      dataSize = 0;
+    }
+    
+    private void reset(DataInputStream in, int length) throws IOException {
+      if (data == null) {
+        data = new byte[length];
+      } else if (length > data.length) {
+        data = new byte[Math.max(length, data.length * 2)];
+      }
+      dataSize = -1;
+      in.readFully(data, 0, length);
+      dataSize = length;
+    }
+    
+    @Override
+    public int getSize() {
+      return dataSize;
+    }
+    
+    @Override
+    public void writeUncompressedBytes(DataOutputStream outStream)
+      throws IOException {
+      outStream.write(data, 0, dataSize);
+    }
+
+    @Override
+    public void writeCompressedBytes(DataOutputStream outStream) 
+      throws IllegalArgumentException, IOException {
+      throw 
+        new IllegalArgumentException("UncompressedBytes cannot be compressed!");
+    }
+
+  } // UncompressedBytes
+  
+  private static class CompressedBytes implements ValueBytes {
+    private int dataSize;
+    private byte[] data;
+    DataInputBuffer rawData = null;
+    CompressionCodec codec = null;
+    CompressionInputStream decompressedStream = null;
+
+    private CompressedBytes(CompressionCodec codec) {
+      data = null;
+      dataSize = 0;
+      this.codec = codec;
+    }
+
+    private void reset(DataInputStream in, int length) throws IOException {
+      if (data == null) {
+        data = new byte[length];
+      } else if (length > data.length) {
+        data = new byte[Math.max(length, data.length * 2)];
+      } 
+      dataSize = -1;
+      in.readFully(data, 0, length);
+      dataSize = length;
+    }
+    
+    @Override
+    public int getSize() {
+      return dataSize;
+    }
+    
+    @Override
+    public void writeUncompressedBytes(DataOutputStream outStream)
+      throws IOException {
+      if (decompressedStream == null) {
+        rawData = new DataInputBuffer();
+        decompressedStream = codec.createInputStream(rawData);
+      } else {
+        decompressedStream.resetState();
+      }
+      rawData.reset(data, 0, dataSize);
+
+      byte[] buffer = new byte[8192];
+      int bytesRead = 0;
+      while ((bytesRead = decompressedStream.read(buffer, 0, 8192)) != -1) {
+        outStream.write(buffer, 0, bytesRead);
+      }
+    }
+
+    @Override
+    public void writeCompressedBytes(DataOutputStream outStream) 
+      throws IllegalArgumentException, IOException {
+      outStream.write(data, 0, dataSize);
+    }
+
+  } // CompressedBytes
+  
+  /**
+   * The class encapsulating the metadata of a file.
+   * The metadata of a file is a list of attribute name/value
+   * pairs of Text type.
+   *
+   */
+  public static class Metadata implements Writable {
+
+    private TreeMap<Text, Text> theMetadata;
+    
+    public Metadata() {
+      this(new TreeMap<Text, Text>());
+    }
+    
+    public Metadata(TreeMap<Text, Text> arg) {
+      if (arg == null) {
+        this.theMetadata = new TreeMap<Text, Text>();
+      } else {
+        this.theMetadata = arg;
+      }
+    }
+    
+    public Text get(Text name) {
+      return this.theMetadata.get(name);
+    }
+    
+    public void set(Text name, Text value) {
+      this.theMetadata.put(name, value);
+    }
+    
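+    /** Returns a copy of the attribute name/value map. */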
+    public TreeMap<Text, Text> getMetadata() {
+      return new TreeMap<Text, Text>(this.theMetadata);
+    }
+    
+    @Override
+    public void write(DataOutput out) throws IOException {
+      out.writeInt(this.theMetadata.size());
+      Iterator<Map.Entry<Text, Text>> iter =
+        this.theMetadata.entrySet().iterator();
+      while (iter.hasNext()) {
+        Map.Entry<Text, Text> en = iter.next();
+        en.getKey().write(out);
+        en.getValue().write(out);
+      }
+    }
+
+    @Override
+    public void readFields(DataInput in) throws IOException {
+      int sz = in.readInt();
+      if (sz < 0) throw new IOException("Invalid size: " + sz + " for file metadata object");
+      this.theMetadata = new TreeMap<Text, Text>();
+      for (int i = 0; i < sz; i++) {
+        Text key = new Text();
+        Text val = new Text();
+        key.readFields(in);
+        val.readFields(in);
+        this.theMetadata.put(key, val);
+      }    
+    }
+
+    @Override
+    public boolean equals(Object other) {
+      if (other == null) {
+        return false;
+      }
+      if (other.getClass() != this.getClass()) {
+        return false;
+      } else {
+        return equals((Metadata)other);
+      }
+    }
+    
+    public boolean equals(Metadata other) {
+      if (other == null) return false;
+      if (this.theMetadata.size() != other.theMetadata.size()) {
+        return false;
+      }
+      Iterator<Map.Entry<Text, Text>> iter1 =
+        this.theMetadata.entrySet().iterator();
+      Iterator<Map.Entry<Text, Text>> iter2 =
+        other.theMetadata.entrySet().iterator();
+      while (iter1.hasNext() && iter2.hasNext()) {
+        Map.Entry<Text, Text> en1 = iter1.next();
+        Map.Entry<Text, Text> en2 = iter2.next();
+        if (!en1.getKey().equals(en2.getKey())) {
+          return false;
+        }
+        if (!en1.getValue().equals(en2.getValue())) {
+          return false;
+        }
+      }
+      if (iter1.hasNext() || iter2.hasNext()) {
+        return false;
+      }
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      assert false : "hashCode not designed";
+      return 42; // any arbitrary constant will do 
+    }
+    
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder();
+      sb.append("size: ").append(this.theMetadata.size()).append("\n");
+      Iterator<Map.Entry<Text, Text>> iter =
+        this.theMetadata.entrySet().iterator();
+      while (iter.hasNext()) {
+        Map.Entry<Text, Text> en = iter.next();
+        sb.append("\t").append(en.getKey().toString()).append("\t").append(en.getValue().toString());
+        sb.append("\n");
+      }
+      return sb.toString();
+    }
+  }
+  
+  /** Write key/value pairs to a sequence-format file. */
+  public static class Writer implements java.io.Closeable, Syncable {
+    private Configuration conf;
+    FSDataOutputStream out;
+    boolean ownOutputStream = true;
+    DataOutputBuffer buffer = new DataOutputBuffer();
+
+    Class keyClass;
+    Class valClass;
+
+    private final CompressionType compress;
+    CompressionCodec codec = null;
+    CompressionOutputStream deflateFilter = null;
+    DataOutputStream deflateOut = null;
+    Metadata metadata = null;
+    Compressor compressor = null;
+    
+    protected Serializer keySerializer;
+    protected Serializer uncompressedValSerializer;
+    protected Serializer compressedValSerializer;
+    
+    // Insert a globally unique 16-byte value every few entries, so that one
+    // can seek into the middle of a file and then synchronize with record
+    // starts and ends by scanning for this value.
+    long lastSyncPos;                     // position of last sync
+    byte[] sync;                          // 16 random bytes
+    {
+      try {                                       
+        MessageDigest digester = MessageDigest.getInstance("MD5");
+        long time = Time.now();
+        digester.update((new UID()+"@"+time).getBytes());
+        sync = digester.digest();
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+    }
+
+    public static interface Option {}
+    
+    static class FileOption extends Options.PathOption 
+                                    implements Option {
+      FileOption(Path path) {
+        super(path);
+      }
+    }
+
+    /**
+     * @deprecated only used for backwards-compatibility in the createWriter methods
+     * that take FileSystem.
+     */
+    @Deprecated
+    private static class FileSystemOption implements Option {
+      private final FileSystem value;
+      protected FileSystemOption(FileSystem value) {
+        this.value = value;
+      }
+      public FileSystem getValue() {
+        return value;
+      }
+    }
+
+    static class StreamOption extends Options.FSDataOutputStreamOption 
+                              implements Option {
+      StreamOption(FSDataOutputStream stream) {
+        super(stream);
+      }
+    }
+
+    static class BufferSizeOption extends Options.IntegerOption
+                                  implements Option {
+      BufferSizeOption(int value) {
+        super(value);
+      }
+    }
+    
+    static class BlockSizeOption extends Options.LongOption implements Option {
+      BlockSizeOption(long value) {
+        super(value);
+      }
+    }
+
+    static class ReplicationOption extends Options.IntegerOption
+                                   implements Option {
+      ReplicationOption(int value) {
+        super(value);
+      }
+    }
+
+    static class KeyClassOption extends Options.ClassOption implements Option {
+      KeyClassOption(Class<?> value) {
+        super(value);
+      }
+    }
+
+    static class ValueClassOption extends Options.ClassOption
+                                          implements Option {
+      ValueClassOption(Class<?> value) {
+        super(value);
+      }
+    }
+
+    static class MetadataOption implements Option {
+      private final Metadata value;
+      MetadataOption(Metadata value) {
+        this.value = value;
+      }
+      Metadata getValue() {
+        return value;
+      }
+    }
+
+    static class ProgressableOption extends Options.ProgressableOption
+                                    implements Option {
+      ProgressableOption(Progressable value) {
+        super(value);
+      }
+    }
+
+    private static class CompressionOption implements Option {
+      private final CompressionType value;
+      private final CompressionCodec codec;
+      CompressionOption(CompressionType value) {
+        this(value, null);
+      }
+      CompressionOption(CompressionType value, CompressionCodec codec) {
+        this.value = value;
+        this.codec = (CompressionType.NONE != value && null == codec)
+          ? new DefaultCodec()
+          : codec;
+      }
+      CompressionType getValue() {
+        return value;
+      }
+      CompressionCodec getCodec() {
+        return codec;
+      }
+    }
+    
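+    /**
+     * Create an option to specify the output path of the sequence file.
+     * @param value the path to write
+     * @return a new option
+     */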
+    public static Option file(Path value) {
+      return new FileOption(value);
+    }
+
+    /**
+     * @deprecated only used for backwards-compatibility in the createWriter methods
+     * that take FileSystem.
+     */
+    @Deprecated
+    private static Option filesystem(FileSystem fs) {
+      return new SequenceFile.Writer.FileSystemOption(fs);
+    }
+    
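+    /**
+     * Create an option to specify the buffer size used when writing the file.
+     * @param value the number of bytes to buffer
+     * @return a new option
+     */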
+    public static Option bufferSize(int value) {
+      return new BufferSizeOption(value);
+    }
+    
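+    /**
+     * Create an option to write to an already opened output stream.
+     * @param value the stream to write to
+     * @return a new option
+     */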
+    public static Option stream(FSDataOutputStream value) {
+      return new StreamOption(value);
+    }
+    
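+    /**
+     * Create an option to specify the replication factor of the created file.
+     * @param value the number of replicas
+     * @return a new option
+     */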
+    public static Option replication(short value) {
+      return new ReplicationOption(value);
+    }
+    
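+    /**
+     * Create an option to specify the block size of the created file.
+     * @param value the block size in bytes
+     * @return a new option
+     */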
+    public static Option blockSize(long value) {
+      return new BlockSizeOption(value);
+    }
+    
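+    /**
+     * Create an option to specify a progress reporter passed to the file
+     * system when the file is created.
+     * @param value the progress reporter
+     * @return a new option
+     */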
+    public static Option progressable(Progressable value) {
+      return new ProgressableOption(value);
+    }
+
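+    /**
+     * Create an option to specify the key class of the records.
+     * @param value the key class
+     * @return a new option
+     */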
+    public static Option keyClass(Class<?> value) {
+      return new KeyClassOption(value);
+    }
+    
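+    /**
+     * Create an option to specify the value class of the records.
+     * @param value the value class
+     * @return a new option
+     */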
+    public static Option valueClass(Class<?> value) {
+      return new ValueClassOption(value);
+    }
+    
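+    /**
+     * Create an option to specify the metadata written into the file header.
+     * @param value the metadata
+     * @return a new option
+     */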
+    public static Option metadata(Metadata value) {
+      return new MetadataOption(value);
+    }
+
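+    /**
+     * Create an option to specify the compression type; a DefaultCodec is
+     * substituted when the type is not NONE and no codec is given.
+     * @param value the compression type
+     * @return a new option
+     */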
+    public static Option compression(CompressionType value) {
+      return new CompressionOption(value);
+    }
+
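+    /**
+     * Create an option to specify both the compression type and the codec.
+     * @param value the compression type
+     * @param codec the compression codec
+     * @return a new option
+     */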
+    public static Option compression(CompressionType value,
+        CompressionCodec codec) {
+      return new CompressionOption(value, codec);
+    }
+    
+    /**
+     * Construct a uncompressed writer from a set of options.
+     * @param conf the configuration to use
+     * @param opts the options used when creating the writer
+     * @throws IOException if it fails
+     */
+    Writer(Configuration conf, 
+           Option... opts) throws IOException {
+      BlockSizeOption blockSizeOption = 
+        Options.getOption(BlockSizeOption.class, opts);
+      BufferSizeOption bufferSizeOption = 
+        Options.getOption(BufferSizeOption.class, opts);
+      ReplicationOption replicationOption = 
+        Options.getOption(ReplicationOption.class, opts);
+      ProgressableOption progressOption = 
+        Options.getOption(ProgressableOption.class, opts);
+      FileOption fileOption = Options.getOption(FileOption.class, opts);
+      FileSystemOption fsOption = Options.getOption(FileSystemOption.class, opts);
+      StreamOption streamOption = Options.getOption(StreamOption.class, opts);
+      KeyClassOption keyClassOption = 
+        Options.getOption(KeyClassOption.class, opts);
+      ValueClassOption valueClassOption = 
+        Options.getOption(ValueClassOption.class, opts);
+      MetadataOption metadataOption = 
+        Options.getOption(MetadataOption.class, opts);
+      CompressionOption compressionTypeOption =
+        Options.getOption(CompressionOption.class, opts);
+      // check consistency of options
+      if ((fileOption == null) == (streamOption == null)) {
+        throw new IllegalArgumentException("file or stream must be specified");
+      }
+      if (fileOption == null && (blockSizeOption != null ||
+                                 bufferSizeOption != null ||
+                                 replicationOption != null ||
+                                 progressOption != null)) {
+        throw new IllegalArgumentException("file modifier options not " +
+                                           "compatible with stream");
+      }
+
+      FSDataOutputStream out;
+      boolean ownStream = fileOption != null;
+      if (ownStream) {
+        Path p = fileOption.getValue();
+        FileSystem fs;
+        if (fsOption != null) {
+          fs = fsOption.getValue();
+        } else {
+          fs = p.getFileSystem(conf);
+        }
+        int bufferSize = bufferSizeOption == null ? getBufferSize(conf) :
+          bufferSizeOption.getValue();
+        short replication = replicationOption == null ? 
+          fs.getDefaultReplication(p) :
+          (short) replicationOption.getValue();
+        long blockSize = blockSizeOption == null ? fs.getDefaultBlockSize(p) :
+          blockSizeOption.getValue();
+        Progressable progress = progressOption == null ? null :
+          progressOption.getValue();
+        out = fs.create(p, true, bufferSize, replication, blockSize, progress);
+      } else {
+        out = streamOption.getValue();
+      }
+      Class<?> keyClass = keyClassOption == null ?
+          Object.class : keyClassOption.getValue();
+      Class<?> valueClass = valueClassOption == null ?
+          Object.class : valueClassOption.getValue();
+      Metadata metadata = metadataOption == null ?
+          new Metadata() : metadataOption.getValue();
+      this.compress = compressionTypeOption.getValue();
+      final CompressionCodec codec = compressionTypeOption.getCodec();
+      if (codec != null &&
+          (codec instanceof GzipCodec) &&
+          !NativeCodeLoader.isNativeCodeLoaded() &&
+          !ZlibFactory.isNativeZlibLoaded(conf)) {
+        throw new IllegalArgumentException("SequenceFile doesn't work with " +
+                                           "GzipCodec without native-hadoop " +
+                                           "code!");
+      }
+      init(conf, out, ownStream, keyClass, valueClass, codec, metadata);
+    }
+
+    /** Create the named file.
+     * @deprecated Use 
+     *   {@link SequenceFile#createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)} 
+     *   instead.
+     */
+    @Deprecated
+    public Writer(FileSystem fs, Configuration conf, Path name, 
+                  Class keyClass, Class valClass) throws IOException {
+      this.compress = CompressionType.NONE;
+      init(conf, fs.create(name), true, keyClass, valClass, null, 
+           new Metadata());
+    }
+    
+    /** Create the named file with write-progress reporter.
+     * @deprecated Use 
+     *   {@link SequenceFile#createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)} 
+     *   instead.
+     */
+    @Deprecated
+    public Writer(FileSystem fs, Configuration conf, Path name, 
+                  Class keyClass, Class valClass,
+                  Progressable progress, Metadata metadata) throws IOException {
+      this.compress = CompressionType.NONE;
+      init(conf, fs.create(name, progress), true, keyClass, valClass,
+           null, metadata);
+    }
+    
+    /** Create the named file with write-progress reporter. 
+     * @deprecated Use 
+     *   {@link SequenceFile#createWriter(Configuration, com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Writer.Option...)} 
+     *   instead.
+     */
+    @Deprecated
+    public Writer(FileSystem fs, Configuration conf, Path name,
+                  Class keyClass, Class valClass,
+                  int bufferSize, short replication, long blockSize,
+                  Progressable progress, Metadata metadata) throws IOException {
+      this.compress = CompressionType.NONE;
+      init(conf,
+           fs.create(name, true, bufferSize, replication, blockSize, progress),
+           true, keyClass, valClass, null, metadata);
+    }
+
+    boolean isCompressed() { return compress != CompressionType.NONE; }
+    boolean isBlockCompressed() { return compress == CompressionType.BLOCK; }
+    
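+    /** Mark this Writer as owning the underlying stream, so that
+     * {@link #close()} closes it rather than merely flushing it. */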
+    Writer ownStream() { this.ownOutputStream = true; return this;  }
+
+    /** Write and flush the file header. */
+    private void writeFileHeader() 
+      throws IOException {
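+      // Header layout written below: the VERSION block (three magic bytes plus
+      // a version byte), the key and value class names, two booleans
+      // (record-compressed, block-compressed), the codec class name when
+      // compressed, the file Metadata, and finally the 16-byte sync marker.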
+      out.write(VERSION);
+      Text.writeString(out, keyClass.getName());
+      Text.writeString(out, valClass.getName());
+      
+      out.writeBoolean(this.isCompressed());
+      out.writeBoolean(this.isBlockCompressed());
+      
+      if (this.isCompressed()) {
+        Text.writeString(out, (codec.getClass()).getName());
+      }
+      this.metadata.write(out);
+      out.write(sync);                       // write the sync bytes
+      out.flush();                           // flush header
+    }
+    
+    /** Initialize. */
+    @SuppressWarnings("unchecked")
+    void init(Configuration conf, FSDataOutputStream out, boolean ownStream,
+              Class keyClass, Class valClass,
+              CompressionCodec codec, Metadata metadata) 
+      throws IOException {
+      this.conf = conf;
+      this.out = out;
+      this.ownOutputStream = ownStream;
+      this.keyClass = keyClass;
+      this.valClass = valClass;
+      this.codec = codec;
+      this.metadata = metadata;
+      SerializationFactory serializationFactory = new SerializationFactory(conf);
+      this.keySerializer = serializationFactory.getSerializer(keyClass);
+      if (this.keySerializer == null) {
+        throw new IOException(
+            "Could not find a serializer for the Key class: '"
+                + keyClass.getCanonicalName() + "'. "
+                + "Please ensure that the configuration '" +
+                CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
+                + "properly configured, if you're using"
+                + "custom serialization.");
+      }
+      this.keySerializer.open(buffer);
+      this.uncompressedValSerializer = serializationFactory.getSerializer(valClass);
+      if (this.uncompressedValSerializer == null) {
+        throw new IOException(
+            "Could not find a serializer for the Value class: '"
+                + valClass.getCanonicalName() + "'. "
+                + "Please ensure that the configuration '" +
+                CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
+                + "properly configured, if you're using"
+                + "custom serialization.");
+      }
+      this.uncompressedValSerializer.open(buffer);
+      if (this.codec != null) {
+        ReflectionUtils.setConf(this.codec, this.conf);
+        this.compressor = CodecPool.getCompressor(this.codec);
+        this.deflateFilter = this.codec.createOutputStream(buffer, compressor);
+        this.deflateOut = 
+          new DataOutputStream(new BufferedOutputStream(deflateFilter));
+        this.compressedValSerializer = serializationFactory.getSerializer(valClass);
+        if (this.compressedValSerializer == null) {
+          throw new IOException(
+              "Could not find a serializer for the Value class: '"
+                  + valClass.getCanonicalName() + "'. "
+                  + "Please ensure that the configuration '" +
+                  CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
+                  + "properly configured, if you're using"
+                  + "custom serialization.");
+        }
+        this.compressedValSerializer.open(deflateOut);
+      }
+      writeFileHeader();
+    }
+    
+    /** Returns the class of keys in this file. */
+    public Class getKeyClass() { return keyClass; }
+
+    /** Returns the class of values in this file. */
+    public Class getValueClass() { return valClass; }
+
+    /** Returns the compression codec of data in this file. */
+    public CompressionCodec getCompressionCodec() { return codec; }
+    
+    /** create a sync point */
+    public void sync() throws IOException {
+      if (sync != null && lastSyncPos != out.getPos()) {
+        out.writeInt(SYNC_ESCAPE);                // mark the start of the sync
+        out.write(sync);                          // write sync
+        lastSyncPos = out.getPos();               // update lastSyncPos
+      }
+    }
+
+    /**
+     * flush all currently written data to the file system
+     * @deprecated Use {@link #hsync()} or {@link #hflush()} instead
+     */
+    @Deprecated
+    public void syncFs() throws IOException {
+      if (out != null) {
+        out.sync();                               // flush contents to file system
+      }
+    }
+
+    @Override
+    public void hsync() throws IOException {
+      if (out != null) {
+        out.hsync();
+      }
+    }
+    // Pivotal changes begin
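+    /**
+     * Pivotal addition: like {@link #hsync()}, but when the underlying stream
+     * is an HdfsDataOutputStream it also asks the NameNode to update the
+     * visible file length (SyncFlag.UPDATE_LENGTH), falling back to a plain
+     * hsync() on Hadoop versions without that overload.
+     */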
+    public void hsyncWithSizeUpdate() throws IOException {
+      if (out != null) {
+        if (out instanceof HdfsDataOutputStream) {
+          try {
+            ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
+          } catch (NoSuchMethodError e){
+            // We are probably running against older Hadoop jars that do not provide
+            // hsync(SyncFlag); fall back to the hsync variant that does not update the size.
+            out.hsync();
+          }
+        } else {
+          out.hsync();
+        }
+      }
+    }
+    // Pivotal changes end
+    @Override
+    public void hflush() throws IOException {
+      if (out != null) {
+        out.hflush();
+      }
+    }
+    
+    /** Returns the configuration of this file. */
+    Configuration getConf() { return conf; }
+    
+    /** Close the file. */
+    @Override
+    public synchronized void close() throws IOException {
+      keySerializer.close();
+      uncompressedValSerializer.close();
+      if (compressedValSerializer != null) {
+        compressedValSerializer.close();
+      }
+
+      CodecPool.returnCompressor(compressor);
+      compressor = null;
+      
+      if (out != null) {
+        
+        // Close the underlying stream iff we own it...
+        if (ownOutputStream) {
+          out.close();
+        } else {
+          out.flush();
+        }
+        out = null;
+      }
+    }
+
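+    /** Emit a sync marker if at least SYNC_INTERVAL bytes have been written
+     * since the last one. */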
+    synchronized void checkAndWriteSync() throws IOException {
+      if (sync != null &&
+          out.getPos() >= lastSyncPos+SYNC_INTERVAL) { // time to emit sync
+        sync();
+      }
+    }
+
+    /** Append a key/value pair. */
+    public void append(Writable key, Writable val)
+      throws IOException {
+      append((Object) key, (Object) val);
+    }
+
+    /** Append a key/value pair. */
+    @SuppressWarnings("unchecked")
+    public synchronized void append(Object key, Object val)
+      throws IOException {
+      if (key.getClass() != keyClass)
+        throw new IOException("wrong key class: "+key.getClass().getName()
+                              +" is not "+keyClass);
+      if (val.getClass() != valClass)
+        throw new IOException("wrong value class: "+val.getClass().getName()
+                              +" is not "+valClass);
+
+      buffer.reset();
+
+      // Append the 'key'
+      keySerializer.serialize(key);
+      int keyLength = buffer.getLength();
+      if (keyLength < 0)
+        throw new IOException("negative length keys not allowed: " + key);
+
+      // Append the 'value'
+      if (compress == CompressionType.RECORD) {
+        deflateFilter.resetState();
+        compressedValSerializer.serialize(val);
+        deflateOut.flush();
+        deflateFilter.finish();
+      } else {
+        uncompressedValSerializer.serialize(val);
+      }
+
+      // Write the record out
+      checkAndWriteSync();                                // sync
+      out.writeInt(buffer.getLength());                   // total record length
+      out.writeInt(keyLength);                            // key portion length
+      out.write(buffer.getData(), 0, buffer.getLength()); // data
+    }
+
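+    /** Append a record whose key is supplied as raw bytes and whose value is
+     * supplied as {@link ValueBytes}; the value is written uncompressed. */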
+    public synchronized void appendRaw(byte[] keyData, int keyOffset,
+        int keyLength, ValueBytes val) throws IOException {
+      if (keyLength < 0)
+        throw new IOException("negative length keys not allowed: " + keyLength);
+
+      int valLength = val.getSize();
+
+      checkAndWriteSync();
+      
+      out.writeInt(keyLength+valLength);          // total record length
+      out.writeInt(keyLength);                    // key portion length
+      out.write(keyData, keyOffset, keyLength);   // key
+      val.writeUncompressedBytes(out);            // value
+    }
+
+    /** Returns the current length of the output file.
+     *
+     * <p>This always returns a synchronized position.  In other words,
+     * immediately after calling {@link SequenceFile.Reader#seek(long)} with a position
+     * returned by this method, {@link SequenceFile.Reader#next(Writable)} may be called.  However
+     * the key may be earlier in the file than the key last written when this
+     * method was called (e.g., with block-compression, it may be the first key
+     * in the block that was being written when this method was called).
+     */
+    public synchronized long getLength() throws IOException {
+      return out.getPos();
+    }
+
+  } // class Writer
+
+  /** Write key/compressed-value pairs to a sequence-format file. */
+  static class RecordCompressWriter extends Writer {
+    
+    RecordCompressWriter(Configuration conf, 
+                         Option... options) throws IOException {
+      super(conf, options);
+    }
+
+    /** Append a key/value pair. */
+    @Override
+    @SuppressWarnings("unchecked")
+    public synchronized void append(Object key, Object val)
+      throws IOException {
+      if (key.getClass() != keyClass)
+        throw new IOException("wrong key class: "+key.getClass().getName()
+                              +" is not "+keyClass);
+      if (val.getClass() != valClass)
+        throw new IOException("wrong value class: "+val.getClass().getName()
+                              +" is not "+valClass);
+
+      buffer.reset();
+
+      // Append the 'key'
+      keySerializer.serialize(key);
+      int keyLength = buffer.getLength();
+      if (keyLength < 0)
+        throw new IOException("negative length keys not allowed: " + key);
+
+      // Compress 'value' and append it
+      deflateFilter.resetState();
+      compressedValSerializer.serialize(val);
+      deflateOut.flush();
+      deflateFilter.finish();
+
+      // Write the record out
+      checkAndWriteSync();                                // sync
+      out.writeInt(buffer.getLength());                   // total record length
+      out.writeInt(keyLength);                            // key portion length
+      out.write(buffer.getData(), 0, buffer.getLength()); // data
+    }
+
+    /** Append a key/value pair. */
+    @Override
+    public synchronized void appendRaw(byte[] keyData, int keyOffset,
+        int keyLength, ValueBytes val) throws IOException {
+
+      if (keyLength < 0)
+        throw new IOException("negative length keys not allowed: " + keyLength);
+
+      int valLength = val.getSize();
+      
+      checkAndWriteSync();                        // sync
+      out.writeInt(keyLength+valLength);          // total record length
+      out.writeInt(keyLength);                    // key portion length
+      out.write(keyData, keyOffset, keyLength);   // 'key' data
+      val.writeCompressedBytes(out);              // 'value' data
+    }
+    
+  } // RecordCompressWriter
+
+  /** Write compressed key/value blocks to a sequence-format file. */
+  static class BlockCompressWriter extends Writer {
+    
+    private int noBufferedRecords = 0;
+    
+    private DataOutputBuffer keyLenBuffer = new DataOutputBuffer();
+    private DataOutputBuffer keyBuffer = new DataOutputBuffer();
+
+    private DataOutputBuffer valLenBuffer = new DataOutputBuffer();
+    private DataOutputBuffer valBuffer = new DataOutputBuffer();
+
+    private final int compressionBlockSize;
+    
+    BlockCompressWriter(Configuration conf,
+                        Option... options) throws IOException {
+      super(conf, options);
+      compressionBlockSize = 
+        conf.getInt("io.seqfile.compress.blocksize", 1000000);
+      keySerializer.close();
+      keySerializer.open(keyBuffer);
+      uncompressedValSerializer.close();
+      uncompressedValSerializer.open(valBuffer);
+    }
+
+    /** Workhorse to check and write out compressed data/lengths */
+    private synchronized 
+      void writeBuffer(DataOutputBuffer uncompressedDataBuffer) 
+      throws IOException {
+      deflateFilter.resetState();
+      buffer.reset();
+      deflateOut.write(uncompressedDataBuffer.getData(), 0, 
+                       uncompressedDataBuffer.getLength());
+      deflateOut.flush();
+      deflateFilter.finish();
+      
+      WritableUtils.writeVInt(out, buffer.getLength());
+      out.write(buffer.getData(), 0, buffer.getLength());
+    }
+    
+    /** Compress and flush contents to dfs */
+    @Override
+    public synchronized void sync() throws IOException {
+      if (noBufferedRecords > 0) {
+        super.sync();
+        
+        // No. of records
+        WritableUtils.writeVInt(out, noBufferedRecords);
+        
+        // Write 'keys' and lengths
+        writeBuffer(keyLenBuffer);
+        writeBuffer(keyBuffer);
+        
+        // Write 'values' and lengths
+        writeBuffer(valLenBuffer);
+        writeBuffer(valBuffer);
+        
+        // Flush the file-stream
+        out.flush();
+        
+        // Reset internal states
+        keyLenBuffer.reset();
+        keyBuffer.reset();
+        valLenBuffer.reset();
+        valBuffer.reset();
+        noBufferedRecords = 0;
+      }
+      
+    }
+    
+    /** Close the file. */
+    @Override
+    public synchronized void close() throws IOException {
+      if (out != null) {
+        sync();
+      }
+      super.close();
+    }
+
+    /** Append a key/value pair. */
+    @Override
+    @SuppressWarnings("unchecked")
+    public synchronized void append(Object key, Object val)
+      throws IOException {
+      if (key.getClass() != keyClass)
+        throw new IOException("wrong key class: "+key+" is not "+keyClass);
+      if (val.getClass() != valClass)
+        throw new IOException("wrong value class: "+val+" is not "+valClass);
+
+      // Save key/value into respective buffers 
+      int oldKeyLength = keyBuffer.getLength();
+      keySerializer.serialize(key);
+      int keyLength = keyBuffer.getLength() - oldKeyLength;
+      if (keyLength < 0)
+        throw new IOException("negative length keys not allowed: " + key);
+      WritableUtils.writeVInt(keyLenBuffer, keyLength);
+
+      int oldValLength = valBuffer.getLength();
+      uncompressedValSerializer.serialize(val);
+      int valLength = valBuffer.getLength() - oldValLength;
+      WritableUtils.writeVInt(valLenBuffer, valLength);
+      
+      // Added another key/value pair
+      ++noBufferedRecords;
+      
+      // Compress and flush?
+      int currentBlockSize = keyBuffer.getLength() + valBuffer.getLength();
+      if (currentBlockSize >= compressionBlockSize) {
+        sync();
+      }
+    }
+    
+    /** Append a key/value pair. */
+    @Override
+    public synchronized void appendRaw(byte[] keyData, int keyOffset,
+        int keyLength, ValueBytes val) throws IOException {
+      
+      if (keyLength < 0)
+        throw new IOException("negative length keys not allowed");
+
+      int valLength = val.getSize();
+      
+      // Save key/value data in relevant buffers
+      WritableUtils.writeVInt(keyLenBuffer, keyLength);
+      keyBuffer.write(keyData, keyOffset, keyLength);
+      WritableUtils.writeVInt(valLenBuffer, valLength);
+      val.writeUncompressedBytes(valBuffer);
+
+      // Added another key/value pair
+      ++noBufferedRecords;
+
+      // Compress and flush?
+      int currentBlockSize = keyBuffer.getLength() + valBuffer.getLength(); 
+      if (currentBlockSize >= compressionBlockSize) {
+        sync();
+      }
+    }
+  
+  } // BlockCompressWriter
+
+  /** Get the configured buffer size */
+  private static int getBufferSize(Configuration conf) {
+    return conf.getInt("io.file.buffer.size", 4096);
+  }
+
+  /** Reads key/value pairs from a sequence-format file. */
+  public static class Reader implements java.io.Closeable {
+    private String filename;
+    private FSDataInputStream in;
+    private DataOutputBuffer outBuf = new DataOutputBuffer();
+
+    private byte version;
+
+    private String keyClassName;
+    private String valClassName;
+    private Class keyClass;
+    private Class valClass;
+
+    private CompressionCodec codec = null;
+    private Metadata metadata = null;
+    
+    private byte[] sync = new byte[SYNC_HASH_SIZE];
+    private byte[] syncCheck = new byte[SYNC_HASH_SIZE];
+    private boolean syncSeen;
+
+    private long headerEnd;
+    private long end;
+    private int keyLength;
+    private int recordLength;
+
+    private boolean decompress;
+    private boolean blockCompressed;
+    
+    private Configuration conf;
+
+    private int noBufferedRecords = 0;
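+    // With block compression, the value buffers of a block are decompressed
+    // lazily, i.e. only when a value is actually requested (see
+    // seekToCurrentValue()); keys are always read eagerly.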
+    private boolean lazyDecompress = true;
+    private boolean valuesDecompressed = true;
+    
+    private int noBufferedKeys = 0;
+    private int noBufferedValues = 0;
+    
+    private DataInputBuffer keyLenBuffer = null;
+    private CompressionInputStream keyLenInFilter = null;
+    private DataInputStream keyLenIn = null;
+    private Decompressor keyLenDecompressor = null;
+    private DataInputBuffer keyBuffer = null;
+    private CompressionInputStream keyInFilter = null;
+    private DataInputStream keyIn = null;
+    private Decompressor keyDecompressor = null;
+
+    private DataInputBuffer valLenBuffer = null;
+    private CompressionInputStream valLenInFilter = null;
+    private DataInputStream valLenIn = null;
+    private Decompressor valLenDecompressor = null;
+    private DataInputBuffer valBuffer = null;
+    private CompressionInputStream valInFilter = null;
+    private DataInputStream valIn = null;
+    private Decompressor valDecompressor = null;
+    
+    private Deserializer keyDeserializer;
+    private Deserializer valDeserializer;
+
+    /**
+     * A tag interface for all of the Reader options
+     */
+    public static interface Option {}
+    
+    /**
+     * Create an option to specify the path name of the sequence file.
+     * @param value the path to read
+     * @return a new option
+     */
+    public static Option file(Path value) {
+      return new FileOption(value);
+    }
+    
+    /**
+     * Create an option to specify the stream with the sequence file.
+     * @param value the stream to read.
+     * @return a new option
+     */
+    public static Option stream(FSDataInputStream value) {
+      return new InputStreamOption(value);
+    }
+    
+    /**
+     * Create an option to specify the starting byte to read.
+     * @param value the number of bytes to skip over
+     * @return a new option
+     */
+    public static Option start(long value) {
+      return new StartOption(value);
+    }
+    
+    /**
+     * Create an option to specify the number of bytes to read.
+     * @param value the number of bytes to read
+     * @return a new option
+     */
+    public static Option length(long value) {
+      return new LengthOption(value);
+    }
+    
+    /**
+     * Create an option with the buffer size for reading the given pathname.
+     * @param value the number of bytes to buffer
+     * @return a new option
+     */
+    public static Option bufferSize(int value) {
+      return new BufferSizeOption(value);
+    }
+
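+    // Illustrative sketch of reading with the Option-based constructor (the
+    // path and key/value types are arbitrary examples):
+    //
+    //   SequenceFile.Reader reader =
+    //       new SequenceFile.Reader(conf, Reader.file(new Path("/tmp/data.seq")));
+    //   Text key = new Text();
+    //   Text value = new Text();
+    //   while (reader.next(key, value)) {
+    //     ... // process the current key/value pair
+    //   }
+    //   reader.close();
+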
+    private static class FileOption extends Options.PathOption 
+                                    implements Option {
+      private FileOption(Path value) {
+        super(value);
+      }
+    }
+    
+    private static class InputStreamOption
+        extends Options.FSDataInputStreamOption 
+        implements Option {
+      private InputStreamOption(FSDataInputStream value) {
+        super(value);
+      }
+    }
+
+    private static class StartOption extends Options.LongOption
+                                     implements Option {
+      private StartOption(long value) {
+        super(value);
+      }
+    }
+
+    private static class LengthOption extends Options.LongOption
+                                      implements Option {
+      private LengthOption(long value) {
+        super(value);
+      }
+    }
+
+    private static class BufferSizeOption extends Options.IntegerOption
+                                      implements Option {
+      private BufferSizeOption(int value) {
+        super(value);
+      }
+    }
+
+    // only constructed internally; there is no public factory method for this option
+    private static class OnlyHeaderOption extends Options.BooleanOption 
+                                          implements Option {
+      private OnlyHeaderOption() {
+        super(true);
+      }
+    }
+
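+    /**
+     * Construct a Reader from a set of {@link Option}s; exactly one of
+     * {@link #file(Path)} and {@link #stream(FSDataInputStream)} must be
+     * supplied.
+     */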
+    public Reader(Configuration conf, Option... opts) throws IOException {
+      // Look up the options, these are null if not set
+      FileOption fileOpt = Options.getOption(FileOption.class, opts);
+      InputStreamOption streamOpt = 
+        Options.getOption(InputStreamOption.class, opts);
+      StartOption startOpt = Options.getOption(StartOption.class, opts);
+      LengthOption lenOpt = Options.getOption(LengthOption.class, opts);
+      BufferSizeOption bufOpt = Options.getOption(BufferSizeOption.class,opts);
+      OnlyHeaderOption headerOnly = 
+        Options.getOption(OnlyHeaderOption.class, opts);
+      // check for consistency
+      if ((fileOpt == null) == (streamOpt == null)) {
+        throw new 
+          IllegalArgumentException("Exactly one of the file or stream options must be specified");
+      }
+      if (fileOpt == null && bufOpt != null) {
+        throw new IllegalArgumentException("buffer size can only be set when" +
+                                           " a file is specified.");
+      }
+      // figure out the real values
+      Path filename = null;
+      FSDataInputStream file;
+      final long len;
+      if (fileOpt != null) {
+        filename = fileOpt.getValue();
+        FileSystem fs = filename.getFileSystem(conf);
+        int bufSize = bufOpt == null ? getBufferSize(conf): bufOpt.getValue();
+        len = null == lenOpt
+          ? fs.getFileStatus(filename).getLen()
+          : lenOpt.getValue();
+        file = openFile(fs, filename, bufSize, len);
+      } else {
+        len = null == lenOpt ? Long.MAX_VALUE : lenOpt.getValue();
+        file = streamOpt.getValue();
+      }
+      long start = startOpt == null ? 0 : startOpt.getValue();
+      // really set up
+      initialize(filename, file, start, len, conf, headerOnly != null);
+    }
+
+    /**
+     * Construct a reader by opening a file from the given file system.
+     * @param fs The file system used to open the file.
+     * @param file The file being read.
+     * @param conf Configuration
+     * @throws IOException
+     * @deprecated Use Reader(Configuration, Option...) instead.
+     */
+    @Deprecated
+    public Reader(FileSystem fs, Path file, 
+                  Configuration conf) throws IOException {
+      this(conf, file(file.makeQualified(fs)));
+    }
+
+    /**
+     * Construct a reader by the given input stream.
+     * @param in An input stream.
+     * @param buffersize unused
+     * @param start The starting position.
+     * @param length The length being read.
+     * @param conf Configuration
+     * @throws IOException
+     * @deprecated Use Reader(Configuration, Reader.Option...) instead.
+     */
+    @Deprecated
+    public Reader(FSDataInputStream in, int buffersize,
+        long start, long length, Configuration conf) throws IOException {
+      this(conf, stream(in), start(start), length(length));
+    }
+
+    /** Common work of the constructors. */
+    private void initialize(Path filename, FSDataInputStream in,
+                            long start, long length, Configuration conf,
+                            boolean tempReader) throws IOException {
+      if (in == null) {
+        throw new IllegalArgumentException("in == null");
+      }
+      this.filename = filename == null ? "<unknown>" : filename.toString();
+      this.in = in;
+      this.conf = conf;
+      boolean succeeded = false;
+      try {
+        seek(start);
+        this.end = this.in.getPos() + length;
+        // if it wrapped around, use the max
+        if (end < length) {
+          end = Long.MAX_VALUE;
+        }
+        init(tempReader);
+        succeeded = true;
+      } finally {
+        if (!succeeded) {
+          IOUtils.cleanup(LOG, this.in);
+        }
+      }
+    }
+
+    /**
+     * Override this method to specialize the type of
+     * {@link FSDataInputStream} returned.
+     * @param fs The file system used to open the file.
+     * @param file The file being read.
+     * @param bufferSize The buffer size used to read the file.
+     * @param length The length being read if it is >= 0.  Otherwise,
+     *               the length is not available.
+     * @return The opened stream.
+     * @throws IOException
+     */
+    protected FSDataInputStream openFile(FileSystem fs, Path file,
+        int bufferSize, long length) throws IOException {
+      return fs.open(file, bufferSize);
+    }
+    
+    /**
+     * Initialize the {@link Reader}
+     * @param tempReader <code>true</code> if we are constructing a temporary
+     *                  reader {@link SequenceFile.Sorter#cloneFileAttributes}, 
+     *                  and hence do not initialize every component; 
+     *                  <code>false</code> otherwise.
+     * @throws IOException
+     */
+    private void init(boolean tempReader) throws IOException {
+      byte[] versionBlock = new byte[VERSION.length];
+      in.readFully(versionBlock);
+
+      if ((versionBlock[0] != VERSION[0]) ||
+          (versionBlock[1] != VERSION[1]) ||
+          (versionBlock[2] != VERSION[2]))
+        throw new IOException(this + " not a SequenceFile");
+
+      // Set 'version'
+      version = versionBlock[3];
+      if (version > VERSION[3])
+        throw new VersionMismatchException(VERSION[3], version);
+
+      if (version < BLOCK_COMPRESS_VERSION) {
+        UTF8 className = new UTF8();
+
+        className.readFields(in);
+        keyClassName = className.toString(); // key class name
+
+        className.readFields(in);
+        valClassName = className.toString(); // val class name
+      } else {
+        keyClassName = Text.readString(in);
+        valClassName = Text.readString(in);
+      }
+
+      if (version > 2) {                          // if version > 2
+        this.decompress = in.readBoolean();       // is compressed?
+      } else {
+        decompress = false;
+      }
+
+      if (version >= BLOCK_COMPRESS_VERSION) {    // if version >= 4
+        this.blockCompressed = in.readBoolean();  // is block-compressed?
+      } else {
+        blockCompressed = false;
+      }
+      
+      // if version >= 5
+      // setup the compression codec
+      if (decompress) {
+        if (version >= CUSTOM_COMPRESS_VERSION) {
+          String codecClassname = Text.readString(in);
+          try {
+            Class<? extends CompressionCodec> codecClass
+              = conf.getClassByName(codecClassname).asSubclass(CompressionCodec.class);
+            this.codec = ReflectionUtils.newInstance(codecClass, conf);
+          } catch (ClassNotFoundException cnfe) {
+            throw new IllegalArgumentException("Unknown codec: " + 
+                                               codecClassname, cnfe);
+          }
+        } else {
+          codec = new DefaultCodec();
+          ((Configurable)codec).setConf(conf);
+        }
+      }
+      
+      this.metadata = new Metadata();
+      if (version >= VERSION_WITH_METADATA) {    // if version >= 6
+        this.metadata.readFields(in);
+      }
+      
+      if (version > 1) {                          // if version > 1
+        in.readFully(sync);                       // read sync bytes
+        headerEnd = in.getPos();                  // record end of header
+      }
+      
+      // Initialize... *not* if we are constructing a temporary Reader
+      if (!tempReader) {
+        valBuffer = new DataInputBuffer();
+        if (decompress) {
+          valDecompressor = CodecPool.getDecompressor(codec);
+          valInFilter = codec.createInputStream(valBuffer, valDecompressor);
+          valIn = new DataInputStream(valInFilter);
+        } else {
+          valIn = valBuffer;
+        }
+
+        if (blockCompressed) {
+          keyLenBuffer = new DataInputBuffer();
+          keyBuffer = new DataInputBuffer();
+          valLenBuffer = new DataInputBuffer();
+
+          keyLenDecompressor = CodecPool.getDecompressor(codec);
+          keyLenInFilter = codec.createInputStream(keyLenBuffer, 
+                                                   keyLenDecompressor);
+          keyLenIn = new DataInputStream(keyLenInFilter);
+
+          keyDecompressor = CodecPool.getDecompressor(codec);
+          keyInFilter = codec.createInputStream(keyBuffer, keyDecompressor);
+          keyIn = new DataInputStream(keyInFilter);
+
+          valLenDecompressor = CodecPool.getDecompressor(codec);
+          valLenInFilter = codec.createInputStream(valLenBuffer, 
+                                                   valLenDecompressor);
+          valLenIn = new DataInputStream(valLenInFilter);
+        }
+        
+        SerializationFactory serializationFactory =
+          new SerializationFactory(conf);
+        this.keyDeserializer =
+          getDeserializer(serializationFactory, getKeyClass());
+        if (this.keyDeserializer == null) {
+          throw new IOException(
+              "Could not find a deserializer for the Key class: '"
+                  + getKeyClass().getCanonicalName() + "'. "
+                  + "Please ensure that the configuration '" +
+                  CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
+                  + "properly configured, if you're using "
+                  + "custom serialization.");
+        }
+        if (!blockCompressed) {
+          this.keyDeserializer.open(valBuffer);
+        } else {
+          this.keyDeserializer.open(keyIn);
+        }
+        this.valDeserializer =
+          getDeserializer(serializationFactory, getValueClass());
+        if (this.valDeserializer == null) {
+          throw new IOException(
+              "Could not find a deserializer for the Value class: '"
+                  + getValueClass().getCanonicalName() + "'. "
+                  + "Please ensure that the configuration '" +
+                  CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
+                  + "properly configured, if you're using "
+                  + "custom serialization.");
+        }
+        this.valDeserializer.open(valIn);
+      }
+    }
+    
+    @SuppressWarnings("unchecked")
+    private Deserializer getDeserializer(SerializationFactory sf, Class c) {
+      return sf.getDeserializer(c);
+    }
+    
+    /** Close the file. */
+    @Override
+    public synchronized void close() throws IOException {
+      // Return the decompressors to the pool
+      CodecPool.returnDecompressor(keyLenDecompressor);
+      CodecPool.returnDecompressor(keyDecompressor);
+      CodecPool.returnDecompressor(valLenDecompressor);
+      CodecPool.returnDecompressor(valDecompressor);
+      keyLenDecompressor = keyDecompressor = null;
+      valLenDecompressor = valDecompressor = null;
+      
+      if (keyDeserializer != null) {
+        keyDeserializer.close();
+      }
+      if (valDeserializer != null) {
+        valDeserializer.close();
+      }
+      
+      // Close the input-stream
+      in.close();
+    }
+
+    /** Returns the name of the key class. */
+    public String getKeyClassName() {
+      return keyClassName;
+    }
+
+    /** Returns the class of keys in this file. */
+    public synchronized Class<?> getKeyClass() {
+      if (null == keyClass) {
+        try {
+          keyClass = WritableName.getClass(getKeyClassName(), conf);
+        } catch (IOException e) {
+          throw new RuntimeException(e);
+        }
+      }
+      return keyClass;
+    }
+
+    /** Returns the name of the value class. */
+    public String getValueClassName() {
+      return valClassName;
+    }
+
+    /** Returns the class of values in this file. */
+    public synchronized Class<?> getValueClass() {
+      if (null == valClass) {
+        try {
+          valClass = WritableName.getClass(getValueClassName(), conf);
+        } catch (IOException e) {
+          throw new RuntimeException(e);
+        }
+      }
+      return valClass;
+    }
+
+    /** Returns true if values are compressed. */
+    public boolean isCompressed() { return decompress; }
+    
+    /** Returns true if records are block-compressed. */
+    public boolean isBlockCompressed() { return blockCompressed; }
+    
+    /** Returns the compression codec of data in this file. */
+    public CompressionCodec getCompressionCodec() { return codec; }
+    
+    /**
+     * Get the compression type for this file.
+     * @return the compression type
+     */
+    public CompressionType getCompressionType() {
+      if (decompress) {
+        return blockCompressed ? CompressionType.BLOCK : CompressionType.RECORD;
+      } else {
+        return CompressionType.NONE;
+      }
+    }
+
+    /** Returns the metadata object of the file */
+    public Metadata getMetadata() {
+      return this.metadata;
+    }
+    
+    /** Returns the configuration used for this file. */
+    Configuration getConf() { return conf; }
+    
+    /** Read a compressed buffer */
+    private synchronized void readBuffer(DataInputBuffer buffer, 
+                                         CompressionInputStream filter) throws IOException {
+      // Read data into a temporary buffer
+      DataOutputBuffer dataBuffer = new DataOutputBuffer();
+
+      try {
+        int dataBufferLength = WritableUtils.readVInt(in);
+        dataBuffer.write(in, dataBufferLength);
+      
+        // Set up 'buffer' connected to the input-stream
+        buffer.reset(dataBuffer.getData(), 0, dataBuffer.getLength());
+      } finally {
+        dataBuffer.close();
+      }
+
+      // Reset the codec
+      filter.resetState();
+    }
+    
+    /** Read the next 'compressed' block */
+    private synchronized void readBlock() throws IOException {
+      // Check if we need to throw away a whole block of 
+      // 'values' due to 'lazy decompression' 
+      if (lazyDecompress && !valuesDecompressed) {
+        in.seek(WritableUtils.readVInt(in)+in.getPos());
+        in.seek(WritableUtils.readVInt(in)+in.getPos());
+      }
+      
+      // Reset internal states
+      noBufferedKeys = 0; noBufferedValues = 0; noBufferedRecords = 0;
+      valuesDecompressed = false;
+
+      //Process sync
+      if (sync != null) {
+        in.readInt();
+        in.readFully(syncCheck);                // read syncCheck
+        if (!Arrays.equals(sync, syncCheck))    // check it
+          throw new IOException("File is corrupt!");
+      }
+      syncSeen = true;
+
+      // Read number of records in this block
+      noBufferedRecords = WritableUtils.readVInt(in);
+      
+      // Read key lengths and keys
+      readBuffer(keyLenBuffer, keyLenInFilter);
+      readBuffer(keyBuffer, keyInFilter);
+      noBufferedKeys = noBufferedRecords;
+      
+      // Read value lengths and values
+      if (!lazyDecompress) {
+        readBuffer(valLenBuffer, valLenInFilter);
+        readBuffer(valBuffer, valInFilter);
+        noBufferedValues = noBufferedRecords;
+        valuesDecompressed = true;
+      }
+    }
+
+    /** 
+     * Position valLenIn/valIn to the 'value' 
+     * corresponding to the 'current' key 
+     */
+    private synchronized void seekToCurrentValue() throws IOException {
+      if (!blockCompressed) {
+        if (decompress) {
+          valInFilter.resetState();
+        }
+        valBuffer.reset();
+      } else {
+        // Check if this is the first value in the 'block' to be read
+        if (lazyDecompress && !valuesDecompressed) {
+          // Read the value lengths and values
+          readBuffer(valLenBuffer, valLenInFilter);
+          readBuffer(valBuffer, valInFilter);
+          noBufferedValues = noBufferedRecords;
+          valuesDecompressed = true;
+        }
+        
+        // Calculate the no. of bytes to skip
+        // Note: 'current' key has already been read!
+        int skipValBytes = 0;
+        int currentKey = noBufferedKeys + 1;          
+        for (int i=noBufferedValues; i > currentKey; --i) {
+          skipValBytes += WritableUtils.readVInt(valLenIn);
+          --noBufferedValues;
+        }
+        
+        // Skip to the 'val' corresponding to 'current' key
+        if (skipValBytes > 0) {
+          if (valIn.skipBytes(skipValBytes) != skipValBytes) {
+            throw new IOException("Failed to seek to " + currentKey + 
+                                  "(th) value!");
+          }
+        }
+      }
+    }
+
+    /**
+     * Get the 'value' corresponding to the last read 'key'.
+     * @param val : The 'value' to be read.
+     * @throws IOException
+     */
+    public synchronized void getCurrentValue(Writable val) 
+      throws IOException {
+      if (val instanceof Configurable) {
+        ((Configurable) val).setConf(this.conf);
+      }
+
+      // Position stream to 'current' value
+      seekToCurrentValue();
+
+      if (!blockCompressed) {
+        val.readFields(valIn);
+        
+        if (valIn.read() > 0) {
+          LOG.info("available bytes: " + valIn.available());
+          throw new IOException(val+" read "+(valBuffer.getPosition()-keyLength)
+                                + " bytes, should read " +
+                                (valBuffer.getLength()-keyLength));
+        }
+      } else {
+        // Get the value
+        int valLength = WritableUtils.readVInt(valLenIn);
+        val.readFields(valIn);
+        
+        // Read another compressed 'value'
+        --noBufferedValues;
+        
+        // Sanity check
+        if ((valLength < 0) && LOG.isDebugEnabled()) {
+          LOG.debug(val + " is a zero-length value");
+        }
+      }
+
+    }
+    
+    /**
+     * Get the 'value' corresponding to the last read 'key'.
+     * @param val : The 'value' to be read.
+     * @throws IOException
+     */
+    public synchronized Object getCurrentValue(Object val) 
+      throws IOException {
+      if (val instanceof Configurable) {
+        ((Configurable) val).setConf(this.conf);
+      }
+
+      // Position stream to 'current' value
+      seekToCurrentValue();
+
+      if (!blockCompressed) {
+        val = deserializeValue(val);
+        
+        if (valIn.read() > 0) {
+          LOG.info("available bytes: " + valIn.available());
+          throw new IOException(val+" read "+(valBuffer.getPosition()-keyLength)
+                                + " bytes, should read " +
+                                (valBuffer.getLength()-keyLength));
+        }
+      } else {
+        // Get the value
+        int valLength = WritableUtils.readVInt(valLenIn);
+        val = deserializeValue(val);
+        
+        // Read another compressed 'value'
+        --noBufferedValues;
+        
+        // Sanity check
+        if ((valLength < 0) && LOG.isDebugEnabled()) {
+          LOG.debug(val + " is a zero-length value");
+        }
+      }
+      return val;
+
+    }
+
+    @SuppressWarnings("unchecked")
+    private Object deserializeValue(Object val) throws IOException {
+      return valDeserializer.deserialize(val);
+    }
+    
+    /** Read the next key in the file into <code>key</code>, skipping its
+     * value.  Returns true if another entry exists, and false at end of file. */
+    public synchronized boolean next(Writable key) throws IOException {
+      if (key.getClass() != getKeyClass())
+        throw new IOException("wrong key class: "+key.getClass().getName()
+                              +" is not "+keyClass);
+
+      if (!blockCompressed) {
+        outBuf.reset();
+        
+        keyLength = next(outBuf);
+        if (keyLength < 0)
+          return false;
+        
+        valBuffer.reset(outBuf.getData(), outBuf.getLength());
+        
+        key.readFields(valBuffer);
+        valBuffer.mark(0);
+        if (valBuffer.getPosition() != keyLength)
+          throw new IOException(key + " read " + valBuffer.getPosition()
+                                + " bytes, should read " + keyLength);
+      } else {
+        //Reset syncSeen
+        syncSeen = false;
+        
+        if (noBufferedKeys == 0) {
+          try {
+            readBlock();
+          } catch (EOFException eof) {
+            return false;
+          }
+        }
+        
+        int keyLength = WritableUtils.readVInt(keyLenIn);
+        
+        // Sanity check
+        if (keyLength < 0) {
+          return false;
+        }
+        
+        //Read another compressed 'key'
+        key.readFields(keyIn);
+        --noBufferedKeys;
+      }
+
+      return true;
+    }
+
+    /** Read the next key/value pair in the file into <code>key</code> and
+     * <code>val</code>.  Returns true if such a pair exists and false when at
+     * end of file */
+    public synchronized boolean next(Writable key, Writable val)
+      throws IOException {
+      if (val.getClass() != getValueClass())
+        throw new IOException("wrong value class: "+val+" is not "+valClass);
+
+      boolean more = next(key);
+      
+      if (more) {
+        getCurrentValue(val);
+      }
+
+      return more;
+    }
+    
+    /**
+     * Read and return the next record length, potentially skipping over 
+     * a sync block.
+     * @return the length of the next record or -1 if there is no next record
+     * @throws IOException
+     */
+    private synchronized int readRecordLength() throws IOException {
+      if (in.getPos() >= end) {
+        return -1;
+      }      
+      int length = in.readInt();
+      if (version > 1 && sync != null &&
+          length == SYNC_ESCAPE) {              // process a sync entry
+        in.readFully(syncCheck);                // read syncCheck
+        if (!Arrays.equals(sync, syncCheck))    // check it
+          throw new IOException("File is corrupt!");
+        syncSeen = true;
+        if (in.getPos() >= end) {
+          return -1;
+        }
+        length = in.readInt();                  // re-read length
+      } else {
+        syncSeen = false;
+      }
+      
+      return length;
+    }
+    
+    /** Read the next key/value pair in the file into <code>buffer</code>.
+     * Returns the length of the key read, or -1 if at end of file.  The length
+     * of the value may be computed by calling buffer.getLength() before and
+     * after calls to this method. */
+    /** @deprecated Call {@link #nextRaw(DataOutputBuffer,SequenceFile.ValueBytes)}. */
+    @Deprecated
+    synchronized int next(DataOutputBuffer buffer) throws IOException {
+      // Unsupported for block-compressed sequence files
+      if (blockCompressed) {
+        throw new IOException("Unsupported call for block-compressed" +
+                              " SequenceFiles - use SequenceFile.Reader.next(DataOutputStream, ValueBytes)");
+      }
+      try {
+        int length = readRecordLength();
+        if (length == -1) {
+          return -1;
+        }
+        int keyLength = in.readInt();
+        buffer.write(in, length);
+        return keyLength;
+      } catch (ChecksumException e) {             // checksum failure
+        handleChecksumException(e);
+        return next(buffer);
+      }
+    }
+
+    public ValueBytes createValueBytes() {
+      ValueBytes val = null;
+      if (!decompress || blockCompressed) {
+        val = new UncompressedBytes();
+      } else {
+        val = new CompressedBytes(codec);
+      }
+      return val;
+    }
+
+    /**
+     * Read 'raw' records.
+     * @param key - The buffer into which the key is read
+     * @param val - The 'raw' value
+     * @return Returns the total record length or -1 for end of file
+     * @throws IOException
+     */
+    public synchronized int nextRaw(DataOutputBuffer key, ValueBytes val) 
+      throws IOException {
+      if (!blockCompressed) {
+        int length = readRecordLength();
+        if (length == -1) {
+          return -1;
+        }
+        int keyLength = in.readInt();
+        int valLength = length

<TRUNCATED>
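For orientation, the reader methods in the hunk above (next, seekToCurrentValue, getCurrentValue) follow the standard Hadoop SequenceFile access pattern. A minimal read-loop sketch follows; the Reader(FileSystem, Path, Configuration) constructor, close(), and ReflectionUtils helper are stock Hadoop API and are assumed to be preserved in this bundled copy, and the path is illustrative:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Writable;
    import org.apache.hadoop.util.ReflectionUtils;

    // Sketch: iterate a sequence file using the reader API shown above.
    static void dump(Configuration conf, Path path) throws IOException {
      FileSystem fs = FileSystem.get(conf);
      SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
      try {
        Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
        Writable val = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
        while (reader.next(key)) {        // reads the next key, skipping its value
          reader.getCurrentValue(val);    // seeks to and reads the value for that key
          System.out.println(key + " -> " + val);
        }
      } finally {
        reader.close();
      }
    }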


[25/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
GEODE-10: Reinstating HDFS persistence code

The HDFS persistence-related code was removed from develop in several
pieces, first removing the API and then the underlying internals.

This change reverts those commits and adds back all of the HDFS code on
this branch.

Revert "GEODE-1072: Removing HDFS related code"
This reverts commit 46535f28e4740ed9b6da87bbb27c39d0c13b3da4.
Revert "GEODE-429: Remove api for setting HdfsStore in Attributes"
This reverts commit 07d55bda1c1c9d641ca16b3b6804994ecb53bf9d.
Revert "GEODE-429: Remove HDFS persistence DataPolicy"
This reverts commit 1b4fd2fe872af1520027b8e0a84ffe84b9613f27.
Revert "GEODE-429: Remove HdfsStore parser in cache xml"
This reverts commit 12318e9cf862795e46540fdf72836fd8cbba262d.
Revert "GEODE-429: Remove hdfsStore gfsh commands"
This reverts commit 7f251978c9730c403534a62fb385e922eecc8e5b.
Revert "GEODE-429: Remove test category HoplogTests"
This reverts commit 8fb5edd349ac388fec2d5f665119f26244343703.
Revert "GEODE-429: Remove Cache.createHdfsStoreFactory method"
This reverts commit f2390a1ada2acbcabac28dd4226a67f7baf924ae.
Revert "GEODE-429: Remove HdfsStore Junit and Dunits"
This reverts commit 74c3156aaa0d29ccc4ec0b4c9a53659d2c9eb003.
Revert "GEODE-429: Remove RegionFactory.setHdfsStore"
This reverts commit 7bcc1e44cb7f0f69381c06d583b058926ca85331.
Revert "GEODE-429: Remove HDFS RegionShortcuts"
This reverts commit b3f838ea6a0b0eb150dcb92b7f6e46e5ee9db1e4.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/9f3f10fd
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/9f3f10fd
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/9f3f10fd

Branch: refs/heads/feature/GEODE-10
Commit: 9f3f10fd2c2fc3d80d67f23e126480e853055b8b
Parents: 46535f2
Author: Dan Smith <up...@apache.org>
Authored: Wed Apr 27 11:30:36 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Wed Apr 27 13:47:20 2016 -0700

----------------------------------------------------------------------
 geode-core/build.gradle                         |   22 +
 .../gemfire/cache/AttributesFactory.java        |   89 +
 .../gemfire/cache/AttributesMutator.java        |   14 +
 .../java/com/gemstone/gemfire/cache/Cache.java  |    2 +
 .../gemfire/cache/CustomEvictionAttributes.java |   78 +
 .../com/gemstone/gemfire/cache/DataPolicy.java  |   26 +-
 .../gemfire/cache/EvictionCriteria.java         |   57 +
 .../gemstone/gemfire/cache/GemFireCache.java    |   18 +
 .../com/gemstone/gemfire/cache/Operation.java   |   13 +
 .../gemfire/cache/RegionAttributes.java         |   23 +
 .../gemstone/gemfire/cache/RegionFactory.java   |   49 +
 .../gemstone/gemfire/cache/RegionShortcut.java  |   50 +
 .../internal/AsyncEventQueueFactoryImpl.java    |    5 +
 .../gemfire/cache/hdfs/HDFSIOException.java     |   52 +
 .../gemstone/gemfire/cache/hdfs/HDFSStore.java  |  341 ++
 .../gemfire/cache/hdfs/HDFSStoreFactory.java    |  203 +
 .../gemfire/cache/hdfs/HDFSStoreMutator.java    |  196 +
 .../cache/hdfs/StoreExistsException.java        |   32 +
 .../cache/hdfs/internal/FailureTracker.java     |   96 +
 .../cache/hdfs/internal/FlushObserver.java      |   53 +
 .../hdfs/internal/HDFSBucketRegionQueue.java    | 1232 ++++++
 .../cache/hdfs/internal/HDFSEntriesSet.java     |  329 ++
 .../cache/hdfs/internal/HDFSEventListener.java  |  179 +
 .../hdfs/internal/HDFSEventQueueFilter.java     |   73 +
 .../hdfs/internal/HDFSGatewayEventImpl.java     |  180 +
 .../hdfs/internal/HDFSIntegrationUtil.java      |  117 +
 .../HDFSParallelGatewaySenderQueue.java         |  471 +++
 .../hdfs/internal/HDFSStoreConfigHolder.java    |  559 +++
 .../cache/hdfs/internal/HDFSStoreCreation.java  |  198 +
 .../hdfs/internal/HDFSStoreFactoryImpl.java     |   77 +
 .../cache/hdfs/internal/HDFSStoreImpl.java      |  638 +++
 .../hdfs/internal/HDFSStoreMutatorImpl.java     |  200 +
 .../HDFSWriteOnlyStoreEventListener.java        |  184 +
 .../hdfs/internal/HoplogListenerForRegion.java  |   72 +
 .../cache/hdfs/internal/PersistedEventImpl.java |  202 +
 .../hdfs/internal/QueuedPersistentEvent.java    |   27 +
 .../hdfs/internal/SignalledFlushObserver.java   |  122 +
 .../internal/SortedHDFSQueuePersistedEvent.java |   86 +
 .../internal/SortedHoplogPersistedEvent.java    |  114 +
 .../UnsortedHDFSQueuePersistedEvent.java        |   76 +
 .../internal/UnsortedHoplogPersistedEvent.java  |   92 +
 .../hdfs/internal/hoplog/AbstractHoplog.java    |  357 ++
 .../hoplog/AbstractHoplogOrganizer.java         |  430 ++
 .../cache/hdfs/internal/hoplog/BloomFilter.java |   36 +
 .../hoplog/CloseTmpHoplogsTimerTask.java        |  108 +
 .../hdfs/internal/hoplog/CompactionStatus.java  |   72 +
 .../cache/hdfs/internal/hoplog/FlushStatus.java |   72 +
 .../internal/hoplog/HDFSCompactionManager.java  |  330 ++
 .../internal/hoplog/HDFSFlushQueueArgs.java     |   93 +
 .../internal/hoplog/HDFSFlushQueueFunction.java |  287 ++
 .../hoplog/HDFSForceCompactionArgs.java         |  107 +
 .../hoplog/HDFSForceCompactionFunction.java     |  129 +
 .../HDFSForceCompactionResultCollector.java     |  131 +
 .../hoplog/HDFSLastCompactionTimeFunction.java  |   56 +
 .../internal/hoplog/HDFSRegionDirector.java     |  480 +++
 .../hdfs/internal/hoplog/HDFSStoreDirector.java |   78 +
 .../hoplog/HDFSUnsortedHoplogOrganizer.java     |  447 +++
 .../hdfs/internal/hoplog/HFileSortedOplog.java  |  853 ++++
 .../hoplog/HdfsSortedOplogOrganizer.java        | 2004 ++++++++++
 .../cache/hdfs/internal/hoplog/Hoplog.java      |  263 ++
 .../hdfs/internal/hoplog/HoplogConfig.java      |   74 +
 .../hdfs/internal/hoplog/HoplogListener.java    |   47 +
 .../hdfs/internal/hoplog/HoplogOrganizer.java   |  123 +
 .../hdfs/internal/hoplog/HoplogSetIterator.java |  166 +
 .../hdfs/internal/hoplog/HoplogSetReader.java   |  114 +
 .../internal/hoplog/SequenceFileHoplog.java     |  395 ++
 .../hoplog/mapred/AbstractGFRecordReader.java   |  106 +
 .../internal/hoplog/mapred/GFInputFormat.java   |   95 +
 .../internal/hoplog/mapred/GFOutputFormat.java  |   75 +
 .../mapreduce/AbstractGFRecordReader.java       |  140 +
 .../hoplog/mapreduce/GFInputFormat.java         |  124 +
 .../hdfs/internal/hoplog/mapreduce/GFKey.java   |   72 +
 .../hoplog/mapreduce/GFOutputFormat.java        |  198 +
 .../hoplog/mapreduce/HDFSSplitIterator.java     |  197 +
 .../internal/hoplog/mapreduce/HoplogUtil.java   |  463 +++
 .../hoplog/mapreduce/RWSplitIterator.java       |   48 +
 .../hoplog/mapreduce/StreamSplitIterator.java   |   46 +
 .../org/apache/hadoop/io/SequenceFile.java      | 3726 ++++++++++++++++++
 .../gemfire/cache/wan/GatewaySender.java        |    2 +
 .../gemstone/gemfire/internal/DSFIDFactory.java |    3 +
 .../internal/DataSerializableFixedID.java       |    1 +
 .../admin/remote/RemoteRegionAttributes.java    |   25 +
 .../cache/AbstractBucketRegionQueue.java        |   18 +-
 .../gemfire/internal/cache/AbstractRegion.java  |  147 +
 .../internal/cache/AbstractRegionEntry.java     |   30 +-
 .../internal/cache/AbstractRegionMap.java       |   86 +-
 .../gemfire/internal/cache/BucketAdvisor.java   |    1 +
 .../gemfire/internal/cache/BucketRegion.java    |  209 +-
 .../internal/cache/BucketRegionQueue.java       |    2 +-
 .../cache/CacheDistributionAdvisor.java         |   22 +-
 .../gemfire/internal/cache/CachePerfStats.java  |   75 +
 .../internal/cache/ColocationHelper.java        |    3 +
 .../cache/CustomEvictionAttributesImpl.java     |   35 +
 .../gemfire/internal/cache/DistTXState.java     |    2 +-
 .../cache/DistributedCacheOperation.java        |    3 +
 .../cache/DistributedPutAllOperation.java       |   20 +-
 .../internal/cache/DistributedRegion.java       |   31 +-
 .../gemfire/internal/cache/EntryEventImpl.java  |   31 +
 .../gemfire/internal/cache/EvictorService.java  |  284 ++
 .../internal/cache/GemFireCacheImpl.java        |  147 +
 .../gemfire/internal/cache/HARegion.java        |   15 +-
 .../internal/cache/HDFSLRURegionMap.java        |  111 +
 .../gemfire/internal/cache/HDFSRegionMap.java   |   32 +
 .../internal/cache/HDFSRegionMapDelegate.java   |  540 +++
 .../internal/cache/HDFSRegionMapImpl.java       |   74 +
 .../gemfire/internal/cache/InternalCache.java   |    4 +
 .../internal/cache/InternalDataView.java        |   28 +-
 .../internal/cache/InternalRegionArguments.java |   16 +
 .../gemfire/internal/cache/LocalRegion.java     |  226 +-
 .../internal/cache/LocalRegionDataView.java     |   35 +-
 .../internal/cache/NonLocalRegionEntry.java     |   20 +
 .../gemstone/gemfire/internal/cache/Oplog.java  |   13 +
 .../internal/cache/PartitionedRegion.java       |  482 ++-
 .../cache/PartitionedRegionDataStore.java       |   49 +-
 .../cache/PartitionedRegionDataView.java        |   27 +-
 .../internal/cache/PartitionedRegionHelper.java |    2 +
 .../gemfire/internal/cache/ProxyRegionMap.java  |   21 +
 .../gemfire/internal/cache/RegionEntry.java     |   20 +
 .../internal/cache/RegionMapFactory.java        |    6 +
 .../internal/cache/RemoteGetMessage.java        |    2 +-
 .../gemfire/internal/cache/TXEntry.java         |    3 +-
 .../gemfire/internal/cache/TXState.java         |   38 +-
 .../internal/cache/TXStateInterface.java        |   10 +-
 .../internal/cache/TXStateProxyImpl.java        |   30 +-
 .../gemfire/internal/cache/TXStateStub.java     |   32 +-
 .../cache/UserSpecifiedRegionAttributes.java    |   24 +-
 .../internal/cache/ValidatingDiskRegion.java    |   13 +
 .../partitioned/FetchBulkEntriesMessage.java    |    2 +-
 .../internal/cache/partitioned/GetMessage.java  |   22 +-
 .../cache/partitioned/PutAllPRMessage.java      |   16 +-
 .../internal/cache/partitioned/PutMessage.java  |    9 +
 .../persistence/soplog/ByteComparator.java      |   55 +
 .../persistence/soplog/CursorIterator.java      |   81 +
 .../soplog/DelegatingSerializedComparator.java  |   37 +
 .../soplog/HFileStoreStatistics.java            |  205 +
 .../persistence/soplog/KeyValueIterator.java    |   42 +
 .../soplog/SortedOplogStatistics.java           |  505 +++
 .../cache/persistence/soplog/SortedReader.java  |  255 ++
 .../persistence/soplog/TrackedReference.java    |  153 +
 .../cache/tier/sockets/BaseCommand.java         |    8 +-
 .../cache/tier/sockets/command/Get70.java       |    3 +-
 .../cache/tier/sockets/command/Request.java     |    2 +-
 .../internal/cache/tx/ClientTXRegionStub.java   |    4 +-
 .../cache/tx/DistributedTXRegionStub.java       |   14 +-
 .../cache/tx/PartitionedTXRegionStub.java       |    8 +-
 .../gemfire/internal/cache/tx/TXRegionStub.java |    4 +-
 .../cache/wan/AbstractGatewaySender.java        |   22 +-
 .../cache/wan/GatewaySenderAttributes.java      |    5 +
 ...rentParallelGatewaySenderEventProcessor.java |    3 +
 .../ConcurrentParallelGatewaySenderQueue.java   |   12 +
 .../ParallelGatewaySenderEventProcessor.java    |   22 +-
 .../parallel/ParallelGatewaySenderQueue.java    |   20 +-
 .../cache/xmlcache/AsyncEventQueueCreation.java |    9 +
 .../internal/cache/xmlcache/CacheCreation.java  |   50 +-
 .../internal/cache/xmlcache/CacheXml.java       |   31 +
 .../cache/xmlcache/CacheXmlGenerator.java       |    4 +
 .../internal/cache/xmlcache/CacheXmlParser.java |  176 +
 .../xmlcache/RegionAttributesCreation.java      |   55 +-
 .../gemfire/internal/i18n/LocalizedStrings.java |   30 +
 .../gemfire/internal/redis/RegionProvider.java  |    2 +-
 .../management/DistributedRegionMXBean.java     |   11 +
 .../management/DistributedSystemMXBean.java     |    8 +
 .../gemfire/management/MemberMXBean.java        |    7 +
 .../gemfire/management/RegionMXBean.java        |   10 +
 .../gemfire/management/cli/ConverterHint.java   |    1 +
 .../internal/beans/DistributedRegionBridge.java |    5 +
 .../internal/beans/DistributedRegionMBean.java  |    5 +
 .../internal/beans/DistributedSystemBridge.java |   19 +
 .../internal/beans/DistributedSystemMBean.java  |    7 +
 .../internal/beans/HDFSRegionBridge.java        |  173 +
 .../management/internal/beans/MemberMBean.java  |    5 +
 .../internal/beans/MemberMBeanBridge.java       |   27 +
 .../internal/beans/PartitionedRegionBridge.java |   13 +-
 .../management/internal/beans/RegionMBean.java  |    5 +
 .../internal/beans/RegionMBeanBridge.java       |    5 +
 .../beans/stats/RegionClusterStatsMonitor.java  |    7 +
 .../CreateAlterDestroyRegionCommands.java       |   12 +-
 .../cli/commands/HDFSStoreCommands.java         |  695 ++++
 .../cli/converters/HdfsStoreNameConverter.java  |   88 +
 .../cli/domain/RegionAttributesInfo.java        |   21 +-
 .../cli/functions/AlterHDFSStoreFunction.java   |  228 ++
 .../cli/functions/CreateHDFSStoreFunction.java  |  124 +
 .../functions/DescribeHDFSStoreFunction.java    |   86 +
 .../cli/functions/DestroyHDFSStoreFunction.java |  100 +
 .../cli/functions/ListHDFSStoresFunction.java   |  102 +
 .../cli/functions/RegionCreateFunction.java     |    8 +
 .../cli/functions/RegionFunctionArgs.java       |   67 +-
 .../internal/cli/i18n/CliStrings.java           |  112 +
 .../cli/util/HDFSStoreNotFoundException.java    |   47 +
 .../cli/util/RegionAttributesNames.java         |    4 +-
 .../HDFSStoreCommandsController.java            |  229 ++
 .../controllers/ShellCommandsController.java    |   39 +-
 .../support/MemberMXBeanAdapter.java            |    5 +
 .../geode.apache.org/schema/cache/cache-1.0.xsd |   31 +
 .../ColocatedRegionWithHDFSDUnitTest.java       |  188 +
 .../hdfs/internal/HDFSConfigJUnitTest.java      |  520 +++
 .../hdfs/internal/HDFSEntriesSetJUnitTest.java  |  227 ++
 .../internal/HdfsStoreMutatorJUnitTest.java     |  191 +
 .../hdfs/internal/RegionRecoveryDUnitTest.java  |  415 ++
 .../internal/RegionWithHDFSBasicDUnitTest.java  | 1600 ++++++++
 .../RegionWithHDFSOffHeapBasicDUnitTest.java    |  115 +
 ...RegionWithHDFSPersistenceBasicDUnitTest.java |   77 +
 .../hdfs/internal/RegionWithHDFSTestBase.java   |  719 ++++
 .../SignalledFlushObserverJUnitTest.java        |   98 +
 .../SortedListForAsyncQueueJUnitTest.java       |  565 +++
 .../internal/hoplog/BaseHoplogTestCase.java     |  394 ++
 .../hoplog/CardinalityEstimatorJUnitTest.java   |  188 +
 .../hoplog/HDFSCacheLoaderJUnitTest.java        |  106 +
 .../hoplog/HDFSCompactionManagerJUnitTest.java  |  449 +++
 .../hoplog/HDFSRegionDirectorJUnitTest.java     |   97 +
 .../internal/hoplog/HDFSStatsJUnitTest.java     |  250 ++
 .../HDFSUnsortedHoplogOrganizerJUnitTest.java   |  297 ++
 .../HdfsSortedOplogOrganizerJUnitTest.java      | 1044 +++++
 .../hoplog/HfileSortedOplogJUnitTest.java       |  540 +++
 .../hoplog/SortedOplogListIterJUnitTest.java    |  178 +
 .../hoplog/TieredCompactionJUnitTest.java       |  904 +++++
 .../hoplog/mapreduce/GFKeyJUnitTest.java        |   50 +
 .../mapreduce/HDFSSplitIteratorJUnitTest.java   |  265 ++
 .../hoplog/mapreduce/HoplogUtilJUnitTest.java   |  305 ++
 .../gemfire/cache30/Bug38741DUnitTest.java      |    2 +-
 .../HDFSQueueRegionOperationsJUnitTest.java     |   33 +
 ...FSQueueRegionOperationsOffHeapJUnitTest.java |   54 +
 .../cache/HDFSRegionOperationsJUnitTest.java    |  542 +++
 .../HDFSRegionOperationsOffHeapJUnitTest.java   |   78 +
 .../ParallelGatewaySenderQueueJUnitTest.java    |    2 +-
 .../HDFSRegionMBeanAttributeJUnitTest.java      |  169 +
 .../commands/HDFSStoreCommandsJUnitTest.java    |  838 ++++
 .../AlterHDFSStoreFunctionJUnitTest.java        |  324 ++
 .../CreateHDFSStoreFunctionJUnitTest.java       |  307 ++
 .../DescribeHDFSStoreFunctionJUnitTest.java     |  364 ++
 .../DestroyHDFSStoreFunctionJUnitTest.java      |  305 ++
 .../ListHDFSStoresFunctionJUnitTest.java        |  319 ++
 .../domain/CacheElementJUnitTest.java           |    1 +
 .../internal/JUnit4DistributedTestCase.java     |    3 +
 .../test/junit/categories/HoplogTest.java       |    7 +
 .../sanctionedDataSerializables.txt             |   92 +-
 .../codeAnalysis/sanctionedSerializables.txt    |   27 +-
 geode-lucene/build.gradle                       |    4 +
 .../tools/pulse/internal/data/Cluster.java      |    9 +
 .../pulse/internal/data/PulseConstants.java     |    1 +
 .../internal/service/ClusterRegionService.java  |   11 +
 .../internal/service/ClusterRegionsService.java |   11 +
 .../service/ClusterSelectedRegionService.java   |    6 +
 .../scripts/pulsescript/PulseCallbacks.js       |    2 +
 .../webapp/scripts/pulsescript/clusterDetail.js |    7 +-
 .../controllers/PulseControllerJUnitTest.java   |    3 +
 .../gemfire/tools/pulse/tests/Region.java       |    9 +-
 geode-pulse/src/test/resources/test.properties  |    6 +-
 geode-rebalancer/build.gradle                   |    7 +
 .../cache/wan/GatewaySenderFactoryImpl.java     |    4 +
 .../internal/cache/UpdateVersionDUnitTest.java  |    6 +-
 251 files changed, 39287 insertions(+), 459 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/build.gradle
----------------------------------------------------------------------
diff --git a/geode-core/build.gradle b/geode-core/build.gradle
index fedd63e..2206018 100755
--- a/geode-core/build.gradle
+++ b/geode-core/build.gradle
@@ -62,6 +62,27 @@ dependencies {
     ext.optional = true;
   }
   compile ('net.java.dev.jna:jna:' + project.'jna.version')
+  provided ('org.apache.hadoop:hadoop-common:' + project.'hadoop.version') {
+    transitive=false
+  }
+  provided ('org.apache.hadoop:hadoop-annotations:' + project.'hadoop.version') {
+    transitive=false
+  }
+  provided ('org.apache.hadoop:hadoop-hdfs:' + project.'hadoop.version') {
+    transitive=false
+  }
+  provided ('org.apache.hadoop:hadoop-mapreduce-client-core:' + project.'hadoop.version') {
+    transitive=false
+  }
+  provided ('org.apache.hbase:hbase:' + project.'hbase.version') {
+    transitive=false
+  }
+
+  compile ('com.google.guava:guava:' + project.'guava.version') {
+    ext.optional = true
+  }
+  //jsr305 is included only to prevent javadoc warnings about missing annotations in the guava jar
+  provided 'com.google.code.findbugs:jsr305:' + project.'jsr305.version'
 
   compile 'org.apache.logging.log4j:log4j-api:' + project.'log4j.version'
   compile 'org.apache.logging.log4j:log4j-core:' + project.'log4j.version'
@@ -110,6 +131,7 @@ dependencies {
   // External
   testCompile 'org.apache.bcel:bcel:' + project.'bcel.version'
   testRuntime 'org.apache.derby:derby:' + project.'derby.version'
+  testRuntime 'org.apache.hadoop:hadoop-auth:' + project.'hadoop.version'
   testCompile 'org.mockito:mockito-core:' + project.'mockito-core.version'
   testRuntime 'commons-collections:commons-collections:' + project.'commons-collections.version'
   testRuntime 'commons-configuration:commons-configuration:' + project.'commons-configuration.version'

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/AttributesFactory.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/AttributesFactory.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/AttributesFactory.java
index 34eafb9..08e919d 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/AttributesFactory.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/AttributesFactory.java
@@ -29,8 +29,10 @@ import com.gemstone.gemfire.GemFireIOException;
 import com.gemstone.gemfire.cache.client.ClientCache;
 import com.gemstone.gemfire.cache.client.ClientRegionShortcut;
 import com.gemstone.gemfire.cache.client.PoolManager;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
 import com.gemstone.gemfire.compression.Compressor;
 import com.gemstone.gemfire.internal.cache.AbstractRegion;
+import com.gemstone.gemfire.internal.cache.CustomEvictionAttributesImpl;
 import com.gemstone.gemfire.internal.cache.DiskStoreFactoryImpl;
 import com.gemstone.gemfire.internal.cache.DiskWriteAttributesImpl;
 import com.gemstone.gemfire.internal.cache.EvictionAttributesImpl;
@@ -447,6 +449,8 @@ public class AttributesFactory<K,V> {
         .getPartitionAttributes();
     this.regionAttributes.evictionAttributes = (EvictionAttributesImpl)regionAttributes
         .getEvictionAttributes();
+    this.regionAttributes.customEvictionAttributes = regionAttributes
+        .getCustomEvictionAttributes();
 
     this.regionAttributes.membershipAttributes = regionAttributes.getMembershipAttributes();
     this.regionAttributes.subscriptionAttributes = regionAttributes.getSubscriptionAttributes();
@@ -460,6 +464,7 @@ public class AttributesFactory<K,V> {
     this.regionAttributes.multicastEnabled = regionAttributes.getMulticastEnabled();
     this.regionAttributes.gatewaySenderIds = new CopyOnWriteArraySet<String>(regionAttributes.getGatewaySenderIds());
     this.regionAttributes.asyncEventQueueIds = new CopyOnWriteArraySet<String>(regionAttributes.getAsyncEventQueueIds());
+    this.regionAttributes.hdfsStoreName = regionAttributes.getHDFSStoreName();
     this.regionAttributes.isLockGrantor = regionAttributes.isLockGrantor(); // fix for bug 47067
     if (regionAttributes instanceof UserSpecifiedRegionAttributes) {
       this.regionAttributes.setIndexes(((UserSpecifiedRegionAttributes<K,V>) regionAttributes).getIndexes());
@@ -486,6 +491,10 @@ public class AttributesFactory<K,V> {
     }
     
     this.regionAttributes.compressor = regionAttributes.getCompressor();
+    this.regionAttributes.hdfsWriteOnly = regionAttributes.getHDFSWriteOnly();
+    if (regionAttributes instanceof UserSpecifiedRegionAttributes) {
+      this.regionAttributes.setHasHDFSWriteOnly(((UserSpecifiedRegionAttributes<K,V>) regionAttributes).hasHDFSWriteOnly());
+    }
     this.regionAttributes.offHeap = regionAttributes.getOffHeap();
   }
 
@@ -720,6 +729,32 @@ public class AttributesFactory<K,V> {
      this.regionAttributes.setHasEvictionAttributes(true);
    }
 
+  /**
+   * Set custom {@link EvictionCriteria} for the region with start time and
+   * frequency of evictor task to be run in milliseconds, or evict incoming rows
+   * in case both start and frequency are specified as zero.
+   * 
+   * @param criteria
+   *          an {@link EvictionCriteria} to be used for eviction for HDFS
+   *          persistent regions
+   * @param start
+   *          the start time at which periodic evictor task should be first
+   *          fired to apply the provided {@link EvictionCriteria}; if this is
+   *          zero then current time is used for the first invocation of evictor
+   * @param interval
+   *          the periodic frequency at which to run the evictor task after the
+   *          initial start; if both start and interval are zero then the
+   *          {@link EvictionCriteria} is applied to each incoming insert/update
+   *          to determine whether the entry is to be retained
+   */
+  public void setCustomEvictionAttributes(EvictionCriteria<K, V> criteria,
+      long start, long interval) {
+    this.regionAttributes.customEvictionAttributes =
+        new CustomEvictionAttributesImpl(criteria, start, interval,
+            start == 0 && interval == 0);
+    this.regionAttributes.setHasCustomEviction(true);
+  }
+
    /** Sets the mirror type for the next <code>RegionAttributes</code> created.
    * @param mirrorType The type of mirroring to use for the region
    * @throws IllegalArgumentException if mirrorType is null
@@ -1261,6 +1296,31 @@ public class AttributesFactory<K,V> {
   }
   
   /**
+   * Sets the HDFSStore name attribute.
+   * This causes the region to use the {@link HDFSStore}.
+   * @param name the name of the HDFSStore
+   */
+  public void setHDFSStoreName(String name) {
+    //TODO:HDFS throw an exception if the region is already configured for a disk store and 
+    // vice versa
+    this.regionAttributes.hdfsStoreName = name;
+    this.regionAttributes.setHasHDFSStoreName(true);
+  }
+  
+  /**
+   * Sets the HDFS write-only attribute. If the region
+   * is configured to be write-only to HDFS, events that have
+   * been evicted from memory cannot be read back from HDFS.
+   * Events are written to HDFS in the order in which they occurred.
+   */
+  public void setHDFSWriteOnly(boolean writeOnly) {
+    //TODO:HDFS throw an exception if the region is already configured for a disk store and 
+    // vice versa
+    this.regionAttributes.hdfsWriteOnly = writeOnly;
+    this.regionAttributes.setHasHDFSWriteOnly(true);
+  }
+  
+  /**
    * Sets this region's compressor for compressing entry values.
    * @since 8.0
    * @param compressor a compressor.
@@ -1436,6 +1496,12 @@ public class AttributesFactory<K,V> {
       }
     }
     
+    if (attrs.getHDFSStoreName() != null) {
+      if (!attrs.getDataPolicy().withHDFS() && (attrs.getPartitionAttributes() == null || attrs.getPartitionAttributes().getLocalMaxMemory() != 0)) {
+        throw new IllegalStateException(LocalizedStrings.HDFSSTORE_IS_USED_IN_NONHDFS_REGION.toLocalizedString());        
+      }
+    }
+
     if (!attrs.getStatisticsEnabled() &&
           (attrs.getRegionTimeToLive().getTimeout() != 0 ||
            attrs.getRegionIdleTimeout().getTimeout() != 0 ||
@@ -1598,8 +1664,11 @@ public class AttributesFactory<K,V> {
     SubscriptionAttributes subscriptionAttributes = new SubscriptionAttributes();
     boolean multicastEnabled = false;
     EvictionAttributesImpl evictionAttributes = new EvictionAttributesImpl();  // TODO need to determine the constructor
+    transient CustomEvictionAttributes customEvictionAttributes;
     String poolName = null;
     String diskStoreName = null;
+    String hdfsStoreName = null;
+    private boolean hdfsWriteOnly = false;
     boolean diskSynchronous = DEFAULT_DISK_SYNCHRONOUS;
     protected boolean isBucketRegion = false;
     private boolean isCloningEnabled = false;
@@ -1658,6 +1727,8 @@ public class AttributesFactory<K,V> {
       } else {
         buf.append("; diskStoreName=").append(diskStoreName);
       }
+      buf.append("; hdfsStoreName=").append(hdfsStoreName);
+      buf.append("; hdfsWriteOnly=").append(hdfsWriteOnly);
       buf.append("; GatewaySenderIds=").append(gatewaySenderIds);
       buf.append("; AsyncEventQueueIds=").append(asyncEventQueueIds);
       buf.append("; compressor=").append(compressor == null ? null : compressor.getClass().getName());
@@ -1932,6 +2003,14 @@ public class AttributesFactory<K,V> {
     }
 
     /**
+     * {@inheritDoc}
+     */
+    @Override
+    public CustomEvictionAttributes getCustomEvictionAttributes() {
+      return this.customEvictionAttributes;
+    }
+
+    /**
      * @deprecated this API is scheduled to be removed
      */
     public MembershipAttributes getMembershipAttributes() {
@@ -1989,6 +2068,16 @@ public class AttributesFactory<K,V> {
     }
 
     @Override
+    public String getHDFSStoreName() {
+      return hdfsStoreName;
+    }
+    
+    @Override
+    public boolean getHDFSWriteOnly() {
+      return hdfsWriteOnly;
+    }
+
+    @Override
     public Compressor getCompressor() {
       return this.compressor;
     }
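
The hunk above reinstates the HDFS store name, write-only flag, and custom eviction attributes on AttributesFactory. A hedged sketch of building RegionAttributes with them, assuming a Cache named cache already exists and an HDFSStore named "hdfsStore" has been defined (the names and String types are illustrative, not part of this commit):

    AttributesFactory<String, String> af = new AttributesFactory<String, String>();
    af.setDataPolicy(DataPolicy.HDFS_PARTITION);   // added later in this commit
    af.setPartitionAttributes(new PartitionAttributesFactory<String, String>().create());
    af.setHDFSStoreName("hdfsStore");              // region persists to this HDFSStore
    af.setHDFSWriteOnly(false);                    // evicted entries remain readable from HDFS
    RegionAttributes<String, String> attrs = af.create();
    Region<String, String> orders = cache.createRegion("orders", attrs);

Note that the validateAttributes change shown above rejects a non-null HDFS store name when the data policy does not report withHDFS(), unless the region is an accessor with local-max-memory 0, so the sketch uses one of the new HDFS data policies.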

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/AttributesMutator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/AttributesMutator.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/AttributesMutator.java
index 0a69437..eb46433 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/AttributesMutator.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/AttributesMutator.java
@@ -166,6 +166,20 @@ public interface AttributesMutator<K,V> {
   public EvictionAttributesMutator getEvictionAttributesMutator();
 
   /**
+   * Changes the evictor frequency for custom eviction attributes.
+   * 
+   * @param newStart
+   *          the new start time in millis since epoch for the evictor task
+   * 
+   * @param newInterval
+   *          the new interval between evictor task invocations in millis
+   * 
+   * @return the updated {@link CustomEvictionAttributes}
+   */
+  public CustomEvictionAttributes setCustomEvictionAttributes(long newStart,
+      long newInterval);
+
+  /**
    * Sets cloning on region
    * @param cloningEnable
    * @since 6.1
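
The new mutator method allows the evictor schedule to be changed on a live region. A small sketch, assuming the region was created with custom eviction attributes (the timings below are illustrative):

    AttributesMutator<String, String> mutator = region.getAttributesMutator();
    // Fire the evictor one minute from now, then every five minutes thereafter.
    CustomEvictionAttributes updated = mutator.setCustomEvictionAttributes(
        System.currentTimeMillis() + 60000L, 5 * 60000L);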

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/Cache.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/Cache.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/Cache.java
index 03874b3..742bcc5 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/Cache.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/Cache.java
@@ -25,6 +25,7 @@ import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueue;
 import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueueFactory;
 import com.gemstone.gemfire.cache.client.ClientCache;
 import com.gemstone.gemfire.cache.client.Pool;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
 import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.cache.snapshot.CacheSnapshotService;
 import com.gemstone.gemfire.cache.util.GatewayConflictResolver;
@@ -35,6 +36,7 @@ import com.gemstone.gemfire.cache.wan.GatewaySenderFactory;
 import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.i18n.LogWriterI18n;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
 
 
 /** 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/CustomEvictionAttributes.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/CustomEvictionAttributes.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/CustomEvictionAttributes.java
new file mode 100644
index 0000000..c2bc41b
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/CustomEvictionAttributes.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache;
+
+/**
+ * Custom eviction attributes including {@link EvictionCriteria} and evictor
+ * start time and frequency, if any.
+ * 
+ * @since gfxd 1.0
+ */
+public abstract class CustomEvictionAttributes {
+
+  private final EvictionCriteria<?, ?> criteria;
+
+  private final long evictorStartTime;
+  private final long evictorInterval;
+
+  private final boolean evictIncoming;
+
+  protected CustomEvictionAttributes(EvictionCriteria<?, ?> criteria,
+      long startTime, long interval, boolean evictIncoming) {
+    this.criteria = criteria;
+    this.evictorStartTime = startTime;
+    this.evictorInterval = interval;
+    this.evictIncoming = evictIncoming;
+  }
+
+  /**
+   * Get the {@link EvictionCriteria} for this custom eviction. The criteria
+   * will be applied to the region entries either periodically as per
+   * {@link #getEvictorStartTime()} and {@link #getEvictorInterval()}, or on
+   * incoming puts if {@link #isEvictIncoming()} is true.
+   */
+  @SuppressWarnings({ "rawtypes", "unchecked" })
+  public <K, V> EvictionCriteria<K, V> getCriteria() {
+    return (EvictionCriteria)this.criteria;
+  }
+
+  /**
+   * The absolute start time in milliseconds (as returned by
+   * {@link System#currentTimeMillis()}) when the evictor will be first fired.
+   * Thereafter the evictor will be fired periodically every
+   * {@link #getEvictorInterval()} milliseconds.
+   */
+  public final long getEvictorStartTime() {
+    return this.evictorStartTime;
+  }
+
+  /**
+   * The intervals at which the periodic evictor task is fired and
+   * {@link EvictionCriteria} evaluated to evict entries.
+   */
+  public final long getEvictorInterval() {
+    return this.evictorInterval;
+  }
+
+  /**
+   * If this returns true, then the criteria should always be applied to
+   * incoming entries and never as a periodic task.
+   */
+  public final boolean isEvictIncoming() {
+    return this.evictIncoming;
+  }
+}
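
Once configured, these attributes can be read back from the region configuration. A brief sketch using the RegionAttributes accessor added later in this commit (the region variable is illustrative):

    CustomEvictionAttributes custom = region.getAttributes().getCustomEvictionAttributes();
    if (custom != null && !custom.isEvictIncoming()) {
      long startMillis = custom.getEvictorStartTime();    // absolute epoch time of the first run
      long intervalMillis = custom.getEvictorInterval();  // period between evictor runs
      EvictionCriteria<String, String> criteria = custom.getCriteria();
    }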

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/DataPolicy.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/DataPolicy.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/DataPolicy.java
index 80918d9..db75e10 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/DataPolicy.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/DataPolicy.java
@@ -31,6 +31,7 @@ import java.io.*;
  * <li><code>PERSISTENT_PARTITION</code> in addition to <code>PARTITION</code> also causes data to be stored to disk. The region initialization uses the data stored on disk.
  * <li><code>REPLICATE</code> causes data that this region is interested in to be stored in local memory. A distributed region will be initialized with the data from other caches. On distributed region operations that would cause the contents to differ with other caches are not allowed. This policy is allowed on local scope region but it behaves the same as <code>NORMAL</code>.
  * <li><code>PERSISTENT_REPLICATE</code> in addition to <code>REPLICATE</code> also causes data to be stored to disk. The region initialization uses the data stored on disk. Note that the persistence applies to both local scope and distributed scope.
+ * <li><code>HDFS_PARTITION</code> in addition to <code>PARTITION</code> also causes data to be stored to HDFS. The region initialization may use the data stored on HDFS. 
  * </ol>
  *
  *
@@ -95,6 +96,18 @@ public class DataPolicy implements java.io.Serializable {
    */
   public static final DataPolicy PERSISTENT_PARTITION = new DataPolicy(6, "PERSISTENT_PARTITION");
   
+  /**
+   * In addition to <code>PARTITION</code> also causes data to be stored to
+   * HDFS. The region initialization may use the data stored on HDFS.
+   */
+  public static final DataPolicy HDFS_PARTITION = new DataPolicy(7, "HDFS_PARTITION");
+  
+  /**
+   * In addition to <code>HDFS_PARTITION</code> also causes data to be stored on local
+   * disk. The data can be evicted from the local disk and still be read
+   * from HDFS.
+   */
+  public static final DataPolicy HDFS_PERSISTENT_PARTITION = new DataPolicy(10, "HDFS_PERSISTENT_PARTITION");
    /**
    * The data policy used by default; it is {@link #NORMAL}.
    */
@@ -164,7 +177,7 @@ public class DataPolicy implements java.io.Serializable {
    * @since 6.5
    */
   public boolean withPersistence() {
-    return this == PERSISTENT_PARTITION || this == PERSISTENT_REPLICATE;
+    return this == PERSISTENT_PARTITION || this == PERSISTENT_REPLICATE || this == HDFS_PERSISTENT_PARTITION;
   }
 
   /** Return whether this policy does partitioning.
@@ -174,7 +187,7 @@ public class DataPolicy implements java.io.Serializable {
    * @since 6.5
    */
   public boolean withPartitioning() {
-    return this == PARTITION || this == PERSISTENT_PARTITION;
+    return this == PARTITION || this == PERSISTENT_PARTITION || this == HDFS_PARTITION || this==HDFS_PERSISTENT_PARTITION;
   }
 
   /** Return whether this policy does preloaded.
@@ -244,6 +257,15 @@ public class DataPolicy implements java.io.Serializable {
     return this == PARTITION;
   }
   
+  /** Return whether this policy does persistence on HDFS.
+   * @return true if this policy does persistence on HDFS.
+   */
+  public boolean withHDFS() {
+    return this == HDFS_PARTITION || this == HDFS_PERSISTENT_PARTITION;
+  }
+  
+  
+  
   /** Returns a string representation for this data policy.
      * @return the name of this data policy.
      */
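
Calling code can branch on the new policies through the predicates above. A brief sketch (the region variable is illustrative):

    DataPolicy policy = region.getAttributes().getDataPolicy();
    if (policy.withHDFS()) {
      // HDFS_PARTITION or HDFS_PERSISTENT_PARTITION: writes also flow to HDFS.
    }
    if (policy.withPersistence()) {
      // true for HDFS_PERSISTENT_PARTITION as well as the local-disk persistent policies.
    }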

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/EvictionCriteria.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/EvictionCriteria.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/EvictionCriteria.java
new file mode 100644
index 0000000..8df201c
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/EvictionCriteria.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache;
+
+import java.util.Iterator;
+import java.util.Map;
+
+/**
+ * Interface implemented by an EVICTION BY CRITERIA of
+ * {@link CustomEvictionAttributes}. It is invoked by the periodic evictor
+ * task, which obtains the keys to be evicted from the criteria and then
+ * destroys the matching entries from the region to which the criteria is attached.
+ * 
+ * @since gfxd 1.0
+ */
+public interface EvictionCriteria<K, V> {
+
+  /**
+   * Get the (key, routing object) of the entries to be evicted from region
+   * satisfying EVICTION BY CRITERIA at this point of time.
+   * <p>
+   * The Map.Entry object returned by the Iterator may be reused internally, so
+   * the caller must extract the key and routing object from the entry on each
+   * iteration.
+   */
+  Iterator<Map.Entry<K, Object>> getKeysToBeEvicted(long currentMillis,
+      Region<K, V> region);
+
+  /**
+   * Last-moment check of whether an entry should be evicted, applying the
+   * EVICTION BY CRITERIA again under the region entry lock in case the entry
+   * has changed since the check in {@link #getKeysToBeEvicted}.
+   */
+  boolean doEvict(EntryEvent<K, V> event);
+
+  /**
+   * Return true if this eviction criteria is equivalent to the other one. This
+   * is used to ensure that custom eviction is configured identically on all the
+   * nodes of a cluster hosting the region to which this eviction criteria has
+   * been attached.
+   */
+  boolean isEquivalent(EvictionCriteria<K, V> other);
+}
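
A minimal sketch of an implementation, evicting entries whose String value is empty; the class and its logic are illustrative, not part of this commit:

    import java.util.Collections;
    import java.util.Iterator;
    import java.util.Map;

    import com.gemstone.gemfire.cache.EntryEvent;
    import com.gemstone.gemfire.cache.EvictionCriteria;
    import com.gemstone.gemfire.cache.Region;

    public class EmptyValueEvictionCriteria implements EvictionCriteria<String, String> {

      @Override
      public Iterator<Map.Entry<String, Object>> getKeysToBeEvicted(long currentMillis,
          Region<String, String> region) {
        // Sketch only: a real implementation would scan the region (or an index)
        // and return the (key, routing object) pairs of entries matching the criteria.
        return Collections.<Map.Entry<String, Object>>emptyIterator();
      }

      @Override
      public boolean doEvict(EntryEvent<String, String> event) {
        // Re-checked under the region entry lock just before the destroy.
        String value = event.getNewValue();
        return value == null || value.isEmpty();
      }

      @Override
      public boolean isEquivalent(EvictionCriteria<String, String> other) {
        return other instanceof EmptyValueEvictionCriteria;
      }
    }

Per the setCustomEvictionAttributes javadoc above, passing such a criteria with both start and interval set to zero (for example attributesFactory.setCustomEvictionAttributes(criteria, 0, 0)) applies it to incoming updates rather than on a periodic schedule.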

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java
index 96a8ce6..a301318 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java
@@ -27,6 +27,8 @@ import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.cache.client.ClientCache;
 import com.gemstone.gemfire.cache.client.ClientCacheFactory;
 import com.gemstone.gemfire.cache.control.ResourceManager;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
 import com.gemstone.gemfire.cache.wan.GatewaySenderFactory;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.pdx.PdxSerializer;
@@ -258,4 +260,20 @@ public interface GemFireCache extends RegionService {
    * @since 6.6
    */
   public Properties getInitializerProps();
+	
+  /**
+   * Returns the HDFSStore by name or <code>null</code> if no HDFSStore is
+   * found.
+   * 
+   * @param name the name of the HDFSStore to find.
+   */
+  public HDFSStore findHDFSStore(String name);
+
+  /**
+   * Creates a {@link HDFSStoreFactory} for creating a {@link HDFSStore}.
+   *
+   * @return the HDFS store factory
+   */
+  public HDFSStoreFactory createHDFSStoreFactory();
+  
 }
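
These two methods are the entry points for defining and looking up HDFS stores on a cache. A hedged sketch; the commented-out configuration calls are assumptions about HDFSStoreFactory (its body appears elsewhere in this commit and is not shown in this hunk), while createHDFSStoreFactory and findHDFSStore are exactly the methods declared above:

    HDFSStoreFactory storeFactory = cache.createHDFSStoreFactory();
    // Configure and create the named store here; the setter and create(...) names
    // below are assumptions, so consult HDFSStoreFactory.java in this commit:
    //   storeFactory.setNameNodeURL("hdfs://namenode:8020");
    //   storeFactory.setHomeDir("geode/data");
    //   HDFSStore store = storeFactory.create("hdfsStore");
    HDFSStore existing = cache.findHDFSStore("hdfsStore");   // null if no such store is defined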

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/Operation.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/Operation.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/Operation.java
index a104751..7a63855 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/Operation.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/Operation.java
@@ -672,6 +672,19 @@ public final class Operation implements java.io.Serializable {
                     OP_DETAILS_REMOVEALL
                     );
     
+  /**
+   * An entry local destroy caused by an eviction.
+   * @see Region#localDestroy(Object)
+   */
+  public static final Operation CUSTOM_EVICT_DESTROY
+    = new Operation("EVICT_DESTROY",
+                    false, // isLocal
+                    false, // isRegion
+                    OP_TYPE_DESTROY,
+                    OP_DETAILS_EVICT
+                    );
+
+
   /** The name of this mirror type. */
   private final transient String name;
     

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionAttributes.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionAttributes.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionAttributes.java
index 94cc11a..dd5c0e0 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionAttributes.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionAttributes.java
@@ -20,6 +20,7 @@ import java.io.File;
 import java.util.Set;
 
 import com.gemstone.gemfire.cache.client.Pool;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
 import com.gemstone.gemfire.compression.Compressor;
 
 /** Defines attributes for configuring a region.
@@ -147,6 +148,11 @@ public interface RegionAttributes<K,V> {
    */
   public EvictionAttributes getEvictionAttributes();
 
+  /**
+   * Return the {@link CustomEvictionAttributes}, if any, set for the region.
+   */
+  public CustomEvictionAttributes getCustomEvictionAttributes();
+
   /** Returns the cache listener for the region.
    * @throws IllegalStateException if more than one cache listener exists on this attributes
    * @return the region's <code>CacheListener</code>
@@ -447,6 +453,23 @@ public interface RegionAttributes<K,V> {
   public boolean getConcurrencyChecksEnabled();
   
   /**
+   * Returns the name of the {@link HDFSStore} that this region belongs
+   * to, if any.
+   * @return the name of the {@link HDFSStore} of this region; 
+   * <code>null</code> is returned if this region has no
+   * {@link HDFSStore}.
+   * @since 9.0
+   */
+  public String getHDFSStoreName();
+  
+  /**
+   * Returns true if this region is configured to
+   * be write-only to HDFS. 
+   * @since 9.0
+   */
+  public boolean getHDFSWriteOnly();
+  
+  /**
    * Returns the compressor used by this region's entry values.
    * @since 8.0
    * @return null if the region does not have compression enabled.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionFactory.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionFactory.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionFactory.java
index b919cc0..041a753 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionFactory.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionFactory.java
@@ -401,6 +401,30 @@ public class RegionFactory<K,V>
   }
 
   /**
+   * Set custom {@link EvictionCriteria} for the region with start time and
+   * interval of evictor task to be run in milliseconds, or evict incoming rows
+   * in case both start and frequency are specified as zero.
+   * 
+   * @param criteria
+   *          an {@link EvictionCriteria} to be used for eviction for HDFS
+   *          persistent regions
+   * @param start
+   *          the start time at which periodic evictor task should be first
+   *          fired to apply the provided {@link EvictionCriteria}; if this is
+   *          zero then current time is used for the first invocation of evictor
+   * @param interval
+   *          the periodic frequency at which to run the evictor task after the
+   *          initial start; if both start and interval are zero then the
+   *          {@link EvictionCriteria} is applied to each incoming insert/update
+   *          to determine whether the entry is to be retained
+   */
+  public RegionFactory<K, V> setCustomEvictionAttributes(
+      EvictionCriteria<K, V> criteria, long start, long interval) {
+    this.attrsFactory.setCustomEvictionAttributes(criteria, start, interval);
+    return this;
+  }
+
+  /**
    * Sets the scope for the next <code>RegionAttributes</code> created.
    *
    * @param scopeType
@@ -885,6 +909,31 @@ public class RegionFactory<K,V>
     this.attrsFactory.addAsyncEventQueueId(asyncEventQueueId);
     return this;
   }
+  /**
+   * Sets the HDFSStore name attribute.
+   * This causes the region to belong to the HDFSStore.
+   * @param name the name of the HDFSStore
+   * @return a reference to this RegionFactory object
+   * 
+   * @see AttributesFactory#setHDFSStoreName
+   * @since 9.0
+   */
+  public RegionFactory<K,V> setHDFSStoreName(String name) {
+    this.attrsFactory.setHDFSStoreName(name);
+    return this;
+  }
+  
+  /**
+   * Sets the HDFS write-only attribute. If the region
+   * is configured to be write-only to HDFS, events that have
+   * been evicted from memory cannot be read back from HDFS.
+   * Events are written to HDFS in the order in which they occurred.
+   * @since 9.0
+   */
+  public RegionFactory<K,V> setHDFSWriteOnly(boolean writeOnly) {
+    this.attrsFactory.setHDFSWriteOnly(writeOnly);
+    return this;
+  }
 
   /**
    * Set the compressor to be used by this region for compressing
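
Taken together, the reinstated RegionFactory methods allow an HDFS-backed region to be declared fluently. A sketch that assumes a Cache named cache and an HDFSStore named "hdfsStore" already exist (names are illustrative), reusing the EmptyValueEvictionCriteria sketch shown earlier and running the evictor on a periodic schedule:

    Region<String, String> events = cache.<String, String>createRegionFactory()
        .setDataPolicy(DataPolicy.HDFS_PARTITION)
        .setPartitionAttributes(new PartitionAttributesFactory<String, String>().create())
        .setHDFSStoreName("hdfsStore")
        .setHDFSWriteOnly(false)
        .setCustomEvictionAttributes(new EmptyValueEvictionCriteria(),
            System.currentTimeMillis(), 60000L)   // first run now, then every minute
        .create("events");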

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionShortcut.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionShortcut.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionShortcut.java
index 6aae635..8cd6e95 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionShortcut.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/RegionShortcut.java
@@ -234,4 +234,54 @@ public enum RegionShortcut {
    * The actual RegionAttributes for a REPLICATE_PROXY region set the {@link DataPolicy} to {@link DataPolicy#EMPTY} and {@link Scope} to {@link Scope#DISTRIBUTED_ACK}.
    */
   REPLICATE_PROXY,  
+  
+  /**
+   * A PARTITION_HDFS has local state that is partitioned across each peer member 
+   * that created the region. 
+   * In addition its state is written to HDFS.
+   * Random access to the data in HDFS is also enabled.
+   * The actual RegionAttributes for a PARTITION_HDFS region set the {@link DataPolicy} to {@link DataPolicy#HDFS_PARTITION}.
+   * The HDFS event queue's property random-access is set to true. 
+   * The {@link EvictionAttributes} are set to {@link EvictionAlgorithm#LRU_HEAP}
+   * with {@link EvictionAction#OVERFLOW_TO_DISK}.
+   */
+  PARTITION_HDFS,  
+  
+  /**
+   * A PARTITION_REDUNDANT_HDFS has local state that is partitioned across each peer member 
+   * that created the region. 
+   * In addition its state is written to HDFS and recovered from HDFS when the region is 
+   * created. Random access to the data in HDFS is also enabled.
+   * In addition an extra copy of the data is kept in memory.
+   * The actual RegionAttributes for a PARTITION_REDUNDANT_HDFS region set the {@link DataPolicy} to {@link DataPolicy#HDFS_PARTITION} 
+   * and the redundant-copies to 1. The HDFS event queue's property random-access is set to true.
+   * The {@link EvictionAttributes} are set to {@link EvictionAlgorithm#LRU_HEAP}
+   * with {@link EvictionAction#OVERFLOW_TO_DISK}.
+   */
+  PARTITION_REDUNDANT_HDFS,  
+  
+  /**
+   * A PARTITION_WRITEONLY_HDFS_STORE has local state that is partitioned across each peer member 
+   * that created the region. 
+   * In addition its state is written to HDFS and recovered from HDFS when the region is 
+   * created. Random access to the data in HDFS is disabled.
+   * The actual RegionAttributes for a PARTITION_WRITEONLY_HDFS_STORE region set the {@link DataPolicy} to {@link DataPolicy#HDFS_PARTITION}. 
+   * The HDFS event queue's write-only property is set to true.
+   * The {@link EvictionAttributes} are set to {@link EvictionAlgorithm#LRU_HEAP}
+   * with {@link EvictionAction#OVERFLOW_TO_DISK}.
+   */
+  PARTITION_WRITEONLY_HDFS_STORE,  
+  
+  /**
+   * A PARTITION_REDUNDANT_WRITEONLY_HDFS_STORE has local state that is partitioned across each peer member 
+   * that created the region. 
+   * In addition its state is written to HDFS and recovered from HDFS when the region is 
+   * created. Random access to the data in HDFS is disabled.
+   * In addition an extra copy of the data is kept in memory.
+   * The actual RegionAttributes for a PARTITION_REDUNDANT_WRITEONLY_HDFS_STORE region set the {@link DataPolicy} to {@link DataPolicy#HDFS_PARTITION} 
+   * and the redundant-copies to 1. The HDFS event queue's write-only property is set to true.
+   * The {@link EvictionAttributes} are set to {@link EvictionAlgorithm#LRU_HEAP}
+   * with {@link EvictionAction#OVERFLOW_TO_DISK}.
+   */
+  PARTITION_REDUNDANT_WRITEONLY_HDFS_STORE
 }
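
The new shortcuts bundle the HDFS data policy, event queue mode, and LRU-heap eviction described above, so only the store needs to be named explicitly. A brief sketch (the store and region names are illustrative):

    Region<String, String> auditLog = cache
        .<String, String>createRegionFactory(RegionShortcut.PARTITION_WRITEONLY_HDFS_STORE)
        .setHDFSStoreName("hdfsStore")
        .create("auditLog");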

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/asyncqueue/internal/AsyncEventQueueFactoryImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/asyncqueue/internal/AsyncEventQueueFactoryImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/asyncqueue/internal/AsyncEventQueueFactoryImpl.java
index 312e880..d15222b 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/asyncqueue/internal/AsyncEventQueueFactoryImpl.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/asyncqueue/internal/AsyncEventQueueFactoryImpl.java
@@ -272,6 +272,7 @@ public class AsyncEventQueueFactoryImpl implements AsyncEventQueueFactory {
     this.attrs.maximumQueueMemory = asyncQueueCreation.getMaximumQueueMemory();
     this.attrs.isParallel = asyncQueueCreation.isParallel();
     this.attrs.isBucketSorted = ((AsyncEventQueueCreation)asyncQueueCreation).isBucketSorted();
+    this.attrs.isHDFSQueue = ((AsyncEventQueueCreation)asyncQueueCreation).isHDFSQueue();
     this.attrs.dispatcherThreads = asyncQueueCreation.getDispatcherThreads();
     this.attrs.policy = asyncQueueCreation.getOrderPolicy();
     this.attrs.eventFilters = asyncQueueCreation.getGatewayEventFilters();
@@ -288,6 +289,10 @@ public class AsyncEventQueueFactoryImpl implements AsyncEventQueueFactory {
     this.attrs.isBucketSorted = isbucketSorted;
     return this;
   }
+  public AsyncEventQueueFactory setIsHDFSQueue(boolean isHDFSQueue) {
+    this.attrs.isHDFSQueue = isHDFSQueue;
+    return this;
+  }
   public AsyncEventQueueFactory setIsMetaQueue(boolean isMetaQueue) {
     this.attrs.isMetaQueue = isMetaQueue;
     return this;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSIOException.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSIOException.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSIOException.java
new file mode 100644
index 0000000..d9b6179
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSIOException.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * 
+ */
+package com.gemstone.gemfire.cache.hdfs;
+
+import com.gemstone.gemfire.GemFireIOException;
+
+/**
+ * Thrown when an error occurs while attempting to use
+ * the HDFS file system. This error may indicate a failure of the HDFS
+ * system.
+ * 
+ * 
+ * @since 7.5
+ * 
+ */
+public class HDFSIOException extends GemFireIOException {
+
+  /**
+   * @param message the detail message describing the HDFS failure
+   * @param cause the underlying cause
+   */
+  public HDFSIOException(String message, Throwable cause) {
+    super(message, cause);
+  }
+
+  /**
+   * @param message the detail message describing the HDFS failure
+   */
+  public HDFSIOException(String message) {
+    super(message);
+  }
+
+}
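
To illustrate the intent of this exception type, here is a hedged sketch that wraps a low-level IOException from an HDFS call into an HDFSIOException. The helper class and the appendToHoplog method are hypothetical stand-ins; only the exception class comes from the file above.

import java.io.IOException;

import com.gemstone.gemfire.cache.hdfs.HDFSIOException;

class HdfsWriteHelper {
  // Hypothetical helper: appendToHoplog stands in for any HDFS I/O call.
  void writeBatch(byte[] key, byte[] value) {
    try {
      appendToHoplog(key, value);
    } catch (IOException e) {
      // Surface the HDFS failure to callers as a GemFireIOException subtype.
      throw new HDFSIOException("Failed to append batch to HDFS", e);
    }
  }

  private void appendToHoplog(byte[] key, byte[] value) throws IOException {
    // placeholder for the real HDFS interaction
  }
}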

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStore.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStore.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStore.java
new file mode 100644
index 0000000..45ba370
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStore.java
@@ -0,0 +1,341 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.hdfs;
+
+import com.gemstone.gemfire.cache.wan.GatewaySender;
+
+/**
+ * HDFS stores provide a means of persisting data on HDFS. There can be multiple
+ * instances of HDFS stores in a cluster. The regions connected using an HDFS
+ * store will share the same HDFS persistence attributes. A user will normally
+ * perform the following steps to enable HDFS persistence for a region:
+ * <ol>
+ * <li>[Optional] Creates a DiskStore for HDFS buffer reliability (HDFS buffers
+ * are persisted locally until data lands on HDFS)
+ * <li>Creates an HDFS store (connected to the DiskStore created earlier)
+ * <li>Creates a region connected to the HDFS store
+ * <li>Uses the region API to create and query data
+ * </ol>
+ * <p>
+ * Instances of this interface are created using {@link HDFSStoreFactory#create}
+ * 
+ */
+
+public interface HDFSStore {
+  public static final String DEFAULT_HOME_DIR = "gemfire";
+  public static final float DEFAULT_BLOCK_CACHE_SIZE = 10f;
+  public static final int DEFAULT_WRITE_ONLY_FILE_SIZE_LIMIT = 256;
+  public static final int DEFAULT_WRITE_ONLY_FILE_ROLLOVER_INTERVAL = 3600;
+
+  public static final int DEFAULT_BATCH_SIZE_MB = 32;
+  public static final int DEFAULT_BATCH_INTERVAL_MILLIS = 60000;
+  public static final boolean DEFAULT_WRITEONLY_HDFSSTORE = false;
+  public static final boolean DEFAULT_BUFFER_PERSISTANCE = GatewaySender.DEFAULT_PERSISTENCE_ENABLED;
+  public static final boolean DEFAULT_DISK_SYNCHRONOUS = GatewaySender.DEFAULT_DISK_SYNCHRONOUS;
+  public static final int DEFAULT_MAX_BUFFER_MEMORY = GatewaySender.DEFAULT_MAXIMUM_QUEUE_MEMORY;
+  public static final int DEFAULT_DISPATCHER_THREADS = GatewaySender.DEFAULT_HDFS_DISPATCHER_THREADS;
+
+  public static final boolean DEFAULT_MINOR_COMPACTION = true;
+  public static final int DEFAULT_MINOR_COMPACTION_THREADS = 10;
+  public static final boolean DEFAULT_MAJOR_COMPACTION = true;
+  public static final int DEFAULT_MAJOR_COMPACTION_THREADS = 2;
+  public static final int DEFAULT_INPUT_FILE_SIZE_MAX_MB = 512;
+  public static final int DEFAULT_INPUT_FILE_COUNT_MAX = 10;
+  public static final int DEFAULT_INPUT_FILE_COUNT_MIN = 4;
+
+  public static final int DEFAULT_MAJOR_COMPACTION_INTERVAL_MINS = 720;
+  public static final int DEFAULT_OLD_FILE_CLEANUP_INTERVAL_MINS = 30;
+
+  /**
+   * @return A unique identifier for the HDFSStore
+   */
+  public String getName();
+
+  /**
+   * HDFSStore persists data on a HDFS cluster identified by cluster's NameNode
+   * URL or NameNode Service URL. The NameNode URL can also be provided via
+   * hdfs-site.xml (see HDFSClientConfigFile). If the NameNode URL is missing,
+   * HDFSStore creation will fail. The HDFS client can also load HDFS configuration
+   * files in the classpath. The following precedence order is applied
+   * <ol>
+   * <li>URL explicitly configured in the HdfsStore
+   * <li>URL provided in client configuration file:
+   * {@link #getHDFSClientConfigFile()}
+   * <li>URL provided in default configuration files loaded by hdfs-client
+   * </ol>
+   * 
+   * HDFSStore will use the selected URL only. It will fail if the selected URL
+   * is not reachable.
+   * 
+   * @return NameNode URL explicitly configured by the user
+   */
+  public String getNameNodeURL();
+
+  /**
+   * HomeDir is the HDFS directory path in which HDFSStore stores files. The
+   * value must not contain the NameNode URL. The owner of this node's JVM
+   * process must have read and write access to this directory. The path could
+   * be absolute or relative. If a relative path for HomeDir is provided, then
+   * the HomeDir is created relative to /user/JVM_owner_name or, if specified,
+   * relative to directory specified by the hdfs-root-dir property. As a best
+   * practice, HDFS store directories should be created relative to a single
+   * HDFS root directory. Alternatively, an absolute path beginning with the
+   * "/" character can be provided to override the default root location.
+   * 
+   * @return path
+   */
+  public String getHomeDir();
+
+  /**
+   * The full path to the HDFS client configuration file, e.g. hdfs-site.xml
+   * or core-site.xml. This file must be accessible to any node where an
+   * instance of this HDFSStore will be created. If each node has a local copy
+   * of this configuration file, it is important for all the copies to be
+   * "identical". Alternatively, by default the HDFS client can also load HDFS
+   * configuration files that are present on the classpath.
+   * 
+   * @return path
+   */
+  public String getHDFSClientConfigFile();
+
+  /**
+   * The maximum amount of memory in megabytes to be used by HDFSStore.
+   * HDFSStore buffers data in memory to optimize HDFS IO operations. Once the
+   * configured memory is utilized, data may overflow to disk.
+   * 
+   * @return max memory in MB
+   */
+  public int getMaxMemory();
+
+  /**
+   * @return the percentage of the heap to use for the block cache in the range
+   *         0 ... 100
+   */
+  public float getBlockCacheSize();
+
+  /**
+   * HDFSStore buffer data is persisted on HDFS in batches. The BatchSize
+   * defines the maximum size (in megabytes) of each batch that is written to
+   * HDFS. This parameter, along with BatchInterval determines the rate at which
+   * data is persisted on HDFS. A higher value causes fewer and bigger batches
+   * to be persisted to HDFS and hence bigger files to be created on HDFS. However,
+   * bigger batches consume more memory.
+   * 
+   * @return batch size in MB
+   */
+  public int getBatchSize();
+
+  /**
+   * HDFSStore buffer data is persisted on HDFS in batches, and the
+   * BatchInterval defines the number of milliseconds that can elapse between
+   * writing batches to HDFS. This parameter, along with BatchSize determines
+   * the rate at which data is persisted on HDFS.
+   * 
+   * @return batch interval in milliseconds
+   */
+  public int getBatchInterval();
+
+  /**
+   * The maximum number of threads (per region) used to write batches to HDFS.
+   * If you have a large number of clients that add or update data in a region,
+   * then you may need to increase the number of dispatcher threads to avoid
+   * bottlenecks when writing data to HDFS.
+   * 
+   * @return The maximum number of threads
+   */
+  public int getDispatcherThreads();
+
+  /**
+   * Configures whether HDFSStore in-memory buffer data that has not yet been
+   * persisted on HDFS should be persisted to a local disk to prevent buffer data
+   * loss. Persisting buffer data may impact write performance. If performance
+   * is critical and buffer data loss is acceptable, disable persistence.
+   * 
+   * @return true if buffer is persisted locally
+   */
+  public boolean getBufferPersistent();
+
+  /**
+   * The named DiskStore to use for any local disk persistence needs of
+   * HDFSStore, e.g. the store's buffer persistence and buffer overflow. If you
+   * specify a value, the named DiskStore must exist. If you specify a null
+   * value or omit this option, the default DiskStore is used.
+   * 
+   * @return disk store name
+   */
+  public String getDiskStoreName();
+
+  /**
+   * HDFS buffers can be persisted on local disk. Each region update record is
+   * written to the disk synchronously if synchronous disk write is enabled.
+   * Enable this option if the data being persisted is critical and no record
+   * should be lost in case of a crash. This high reliability mode may increase
+   * write latency. If synchronous mode is disabled, data is persisted in
+   * batches which usually results in better performance.
+   * 
+   * @return true if enabled
+   */
+  public boolean getSynchronousDiskWrite();
+
+  /**
+   * For HDFS write-only regions, this defines the maximum size (in megabytes)
+   * that an HDFS log file can reach before HDFSStore closes the file and begins
+   * writing to a new file. This option is ignored for HDFS read/write regions.
+   * Keep in mind that the files are not available for MapReduce processing
+   * until the file is closed; you can also set WriteOnlyFileRolloverInterval to
+   * specify the maximum amount of time an HDFS log file remains open.
+   * 
+   * @return max file size in MB.
+   */
+  public int getWriteOnlyFileRolloverSize();
+
+  /**
+   * For HDFS write-only regions, this defines the number of seconds that can
+   * elapse before HDFSStore closes an HDFS file and begins writing to a new
+   * file. This configuration is ignored for HDFS read/write regions.
+   * 
+   * @return interval in seconds
+   */
+  public int getWriteOnlyFileRolloverInterval();
+
+  /**
+   * Minor compaction reorganizes data in files to optimize read performance and
+   * reduce the number of files created on HDFS. The minor compaction process can be
+   * I/O-intensive; tune the performance of minor compaction using
+   * MinorCompactionThreads. Minor compaction is not applicable to write-only
+   * regions.
+   * 
+   * @return true if auto minor compaction is enabled
+   */
+  public boolean getMinorCompaction();
+
+  /**
+   * The maximum number of threads that HDFSStore uses to perform minor
+   * compaction. You can increase the number of threads used for compaction as
+   * necessary in order to fully utilize the performance of your HDFS cluster.
+   * Minor compaction is not applicable to write-only regions.
+   * 
+   * @return maximum number of threads executing minor compaction
+   */
+  public int getMinorCompactionThreads();
+
+  /**
+   * Major compaction removes old values of a key and deleted records from the
+   * HDFS files, which can save space in HDFS and improve performance when
+   * reading from HDFS. As the major compaction process can be long-running and
+   * I/O-intensive, tune the performance of major compaction using
+   * MajorCompactionInterval and MajorCompactionThreads. Major compaction is not
+   * applicable to write-only regions.
+   * 
+   * @return true if auto major compaction is enabled
+   */
+  public boolean getMajorCompaction();
+
+  /**
+   * The number of minutes after which HDFSStore performs the next major
+   * compaction cycle. Major compaction is not applicable to write-only regions.
+   * 
+   * @return interval in minutes
+   */
+  public int getMajorCompactionInterval();
+
+  /**
+   * The maximum number of threads that HDFSStore uses to perform major
+   * compaction. You can increase the number of threads used for compaction as
+   * necessary in order to fully utilize the performance of your HDFS cluster.
+   * Major compaction is not applicable to write-only regions.
+   * 
+   * @return maximum number of threads executing major compaction
+   */
+  public int getMajorCompactionThreads();
+
+  /**
+   * HDFSStore may create new files as part of periodic maintenance activity. It
+   * deletes old files asynchronously. PurgeInterval defines the number of
+   * minutes for which old files will remain available to be consumed
+   * externally, e.g. read by MR jobs. After this interval, old files are
+   * deleted. This configuration is not applicable to write-only regions.
+   * 
+   * @return old file purge interval in minutes
+   */
+  public int getPurgeInterval();
+
+  /**
+   * Permanently deletes all HDFS files associated with this {@link HDFSStore}.
+   * This operation will fail if any region is still using this store for
+   * persistence.
+   * 
+   * @exception IllegalStateException
+   *              if any region using this hdfsStore still exists
+   */
+  public void destroy();
+
+  /**
+   * @return new instance of mutator object that can be used to alter properties
+   *         of this store
+   */
+  public HDFSStoreMutator createHdfsStoreMutator();
+
+  /**
+   * Identifies attributes configured in {@link HDFSStoreMutator} and applies
+   * the new attribute values to this instance of {@link HDFSStore} dynamically.
+   * Any property which is not set in {@link HDFSStoreMutator} remains
+   * unaltered. In most cases altering the attributes does not cause existing
+   * operations to terminate. The altered attributes are used in the next cycle
+   * of the operation they impact.
+   * 
+   * @return hdfsStore reference representing the old {@link HDFSStore}
+   */
+  public HDFSStore alter(HDFSStoreMutator mutator);
+
+  /**
+   * A file larger than this size, in megabytes, will not be compacted by the minor
+   * compactor. Increasing this value will result in compaction of bigger files.
+   * This will lower the number of files on HDFS at the cost of increased IO.
+   * This option is for advanced users and will need tuning in special cases
+   * only. This option is not applicable to write-only regions.
+   * 
+   * @return size threshold (in MB)
+   */
+  public int getInputFileSizeMax();
+
+  /**
+   * A minimum number of files must exist in a bucket directory on HDFS before
+   * minor compaction will start. Keeping a higher value for this
+   * option will reduce the frequency of minor compaction, which in turn may
+   * result in reduced IO overhead. However, it may result in increased pressure
+   * on the HDFS NameNode. This option is for advanced users and will need tuning in
+   * special cases only. This option is not applicable to write-only regions.
+   * 
+   * @return minimum number of files for minor compaction to get triggered
+   */
+  public int getInputFileCountMin();
+
+  /**
+   * The maximum number of files compacted by the minor compactor in a cycle.
+   * Keeping a higher value for this option will reduce the frequency of minor
+   * compaction, which in turn may result in reduced IO overhead. However, it may
+   * result in a large number of concurrent IO operations, which in turn may
+   * degrade performance. This option is for advanced users and will need
+   * tuning in special cases only. This option is not applicable to write-only
+   * regions.
+   * 
+   * @return maximum number of files minor compacted in one cycle
+   */
+  public int getInputFileCountMax();
+}
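
The HDFSStore javadoc above lists a four-step workflow (optional DiskStore, HDFS store, region, region API). The sketch below walks through the first two steps under stated assumptions: the factory is obtained via Cache#createHDFSStoreFactory as described in the HDFSStoreFactory javadoc later in this patch, and the NameNode URL, directory names and store name are placeholders.

import com.gemstone.gemfire.cache.Cache;
import com.gemstone.gemfire.cache.CacheFactory;
import com.gemstone.gemfire.cache.hdfs.HDFSStore;
import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;

public class HdfsStoreSetupSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();

    // Step 1 (optional): a DiskStore for HDFS buffer reliability.
    cache.createDiskStoreFactory().create("hdfsBufferDisk");

    // Step 2: the HDFS store itself (Cache#createHDFSStoreFactory per the
    // HDFSStoreFactory javadoc; URL and names are placeholders).
    HDFSStoreFactory factory = cache.createHDFSStoreFactory();
    HDFSStore store = factory
        .setNameNodeURL("hdfs://namenode:8020")
        .setHomeDir("gemfire-data")
        .setDiskStoreName("hdfsBufferDisk")
        .setBufferPersistent(true)
        .create("exampleHdfsStore");

    // Steps 3 and 4 (create a region that references "exampleHdfsStore" and
    // use the region API) are covered by the RegionShortcut sketch earlier.
    System.out.println("Created HDFS store: " + store.getName());
    cache.close();
  }
}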

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreFactory.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreFactory.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreFactory.java
new file mode 100644
index 0000000..0d80a67
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreFactory.java
@@ -0,0 +1,203 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.hdfs;
+
+import com.gemstone.gemfire.GemFireConfigException;
+import com.gemstone.gemfire.cache.Cache;
+
+/**
+ * Factory for creating instances of {@link HDFSStore}. To get an instance of
+ * this factory call Cache#createHDFSStoreFactory.
+ * <P>
+ * Usage
+ * <ol>
+ * <li> configure factory using <code>set</code> methods
+ * <li> call {@link #create} to produce a HDFSStore instance.
+ * </ol>
+ * 
+ */
+public interface HDFSStoreFactory {
+
+  /**
+   * @see HDFSStore#getName()
+   */
+  public HDFSStoreFactory setName(String name);
+
+  /**
+   * @see HDFSStore#getNameNodeURL()
+   */
+  public HDFSStoreFactory setNameNodeURL(String url);
+
+  /**
+   * @see HDFSStore#getHomeDir()
+   */
+  public HDFSStoreFactory setHomeDir(String dir);
+
+  /**
+   * @see HDFSStore#getHDFSClientConfigFile()
+   */
+  public HDFSStoreFactory setHDFSClientConfigFile(String filePath);
+
+  /**
+   * @see HDFSStore#getBlockCacheSize()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 or more than 100
+   */
+  public HDFSStoreFactory setBlockCacheSize(float value);
+
+  /**
+   * Default value {@link HDFSStore#DEFAULT_WRITE_ONLY_FILE_SIZE_LIMIT}
+   * @see HDFSStore#getWriteOnlyFileRolloverSize()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
+   */
+  public HDFSStoreFactory setWriteOnlyFileRolloverSize(int maxFileSize);
+
+  /**
+   * Default value {@link HDFSStore#DEFAULT_WRITE_ONLY_FILE_ROLLOVER_INTERVAL}
+   * @see HDFSStore#getWriteOnlyFileRolloverInterval()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
+   */
+  public HDFSStoreFactory setWriteOnlyFileRolloverInterval(int interval);
+
+  /**
+   * Default value {@link HDFSStore#DEFAULT_MINOR_COMPACTION}
+   * @see HDFSStore#getMinorCompaction()
+   */
+  public HDFSStoreFactory setMinorCompaction(boolean auto);
+
+  /**
+   * Default value {@link HDFSStore#DEFAULT_MINOR_COMPACTION_THREADS}
+   * @see HDFSStore#getMinorCompactionThreads()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
+   */
+  public HDFSStoreFactory setMinorCompactionThreads(int count);
+
+  /**
+   * Default value {@link HDFSStore#DEFAULT_MAJOR_COMPACTION}
+   * @see HDFSStore#getMajorCompaction()
+   */
+  public HDFSStoreFactory setMajorCompaction(boolean auto);
+
+  /**
+   * Default value {@link HDFSStore#DEFAULT_MAJOR_COMPACTION_INTERVAL_MINS}
+   * @see HDFSStore#getMajorCompactionInterval()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
+   */
+  public HDFSStoreFactory setMajorCompactionInterval(int interval);
+
+  /**
+   * Default value {@link HDFSStore#DEFAULT_MAJOR_COMPACTION_THREADS}
+   * @see HDFSStore#getMajorCompactionThreads()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
+   */
+  public HDFSStoreFactory setMajorCompactionThreads(int count);
+
+  /**
+   * Default value {@link HDFSStore#DEFAULT_INPUT_FILE_SIZE_MAX_MB}
+   * @see HDFSStore#getInputFileSizeMax()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
+   */
+  public HDFSStoreFactory setInputFileSizeMax(int size);
+
+  /**
+   * Default value {@link HDFSStore#DEFAULT_INPUT_FILE_COUNT_MIN}
+   * @see HDFSStore#getInputFileCountMin()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
+   */
+  public HDFSStoreFactory setInputFileCountMin(int count);
+
+  /**
+   * Default value {@link HDFSStore#DEFAULT_INPUT_FILE_COUNT_MAX}
+   * @see HDFSStore#getInputFileCountMax()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
+   */
+  public HDFSStoreFactory setInputFileCountMax(int count);
+
+  /**
+   * @see HDFSStore#getPurgeInterval()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
+   */
+  public HDFSStoreFactory setPurgeInterval(int interval);
+
+  /**
+   * @see HDFSStore#getDiskStoreName()
+   */
+  public HDFSStoreFactory setDiskStoreName(String name);
+
+  /**
+   * @see HDFSStore#getMaxMemory()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
+   */
+  public HDFSStoreFactory setMaxMemory(int memory);
+
+  /**
+   * @see HDFSStore#getBatchInterval()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
+   */
+  public HDFSStoreFactory setBatchInterval(int interval);
+
+  /**
+   * @see HDFSStore#getBatchSize()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
+   */
+  public HDFSStoreFactory setBatchSize(int size);
+
+  /**
+   * @see HDFSStore#getBufferPersistent()
+   */
+  public HDFSStoreFactory setBufferPersistent(boolean isPersistent);
+
+  /**
+   * @see HDFSStore#getSynchronousDiskWrite()
+   */
+  public HDFSStoreFactory setSynchronousDiskWrite(boolean isSynchronous);
+
+  /**
+   * @see HDFSStore#getDispatcherThreads()
+   * @exception IllegalArgumentException
+   *              if the {@code value} is less than 0 
+   */
+  public HDFSStoreFactory setDispatcherThreads(int dispatcherThreads);
+
+  /**
+   * Validates all attribute values and assigns defaults where applicable.
+   * Creates a new instance of {@link HDFSStore} based on the current attribute
+   * values configured in this factory.
+   * 
+   * @param name
+   *          the name of the HDFSStore
+   * @return the newly created HDFSStore.
+   * @throws GemFireConfigException
+   *           if the configuration is invalid
+   * @throws StoreExistsException
+   *           if a {@link HDFSStore} with the same name exists
+   */
+  public HDFSStore create(String name) throws GemFireConfigException, StoreExistsException;
+}
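
Because the factory follows a configure-then-create pattern, the following sketch shows how the batch and compaction knobs defined on HDFSStore might be tuned and how the exceptions declared by create could be handled. All setter names come from the interface above; the concrete values and the way the factory instance is obtained are assumptions.

import com.gemstone.gemfire.GemFireConfigException;
import com.gemstone.gemfire.cache.hdfs.HDFSStore;
import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
import com.gemstone.gemfire.cache.hdfs.StoreExistsException;

public class HdfsStoreTuningSketch {
  // The factory is assumed to be obtained via Cache#createHDFSStoreFactory.
  static HDFSStore createTunedStore(HDFSStoreFactory factory) {
    try {
      return factory
          .setNameNodeURL("hdfs://namenode:8020")
          .setBatchSize(64)                 // MB per flushed batch
          .setBatchInterval(30000)          // flush at least every 30 s
          .setDispatcherThreads(8)          // more writers for hot regions
          .setMinorCompaction(true)
          .setMinorCompactionThreads(10)
          .setMajorCompactionInterval(720)  // minutes between major compactions
          .setPurgeInterval(30)             // minutes before old files are deleted
          .create("tunedHdfsStore");
    } catch (StoreExistsException e) {
      throw new IllegalStateException("HDFS store already defined: " + e.getMessage(), e);
    } catch (GemFireConfigException e) {
      throw new IllegalStateException("Invalid HDFS store configuration", e);
    }
  }
}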

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreMutator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreMutator.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreMutator.java
new file mode 100644
index 0000000..d98c9cd
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/HDFSStoreMutator.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.hdfs;
+
+/**
+ * HDFSStoreMutator provides a means to dynamically alter {@link HDFSStore}'s
+ * behavior. Instances of this interface are created using
+ * {@link HDFSStore#createHdfsStoreMutator} and applied using
+ * {@link HDFSStore#alter}
+ * 
+ */
+public interface HDFSStoreMutator {
+  /**
+   * {@link HDFSStoreFactory#setWriteOnlyFileRolloverSize(int)}
+   */
+  public HDFSStoreMutator setWriteOnlyFileRolloverSize(int maxFileSize);
+
+  /**
+   * {@link HDFSStore#getWriteOnlyFileRolloverSize()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. -1 if not
+   *         set
+   */
+  public int getWriteOnlyFileRolloverSize();
+
+  /**
+   * {@link HDFSStoreFactory#setWriteOnlyFileRolloverInterval(int)}
+   */
+  public HDFSStoreMutator setWriteOnlyFileRolloverInterval(int interval);
+
+  /**
+   * {@link HDFSStore#getWriteOnlyFileRolloverInterval()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. -1 if not
+   *         set
+   */
+  public int getWriteOnlyFileRolloverInterval();
+
+  /**
+   * {@link HDFSStore#getMinorCompaction()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. null if not
+   *         set
+   */
+  public Boolean getMinorCompaction();
+
+  /**
+   * {@link HDFSStoreFactory#setMinorCompaction(boolean)}
+   */
+  public HDFSStoreMutator setMinorCompaction(boolean auto);
+
+  /**
+   * {@link HDFSStoreFactory#setMinorCompactionThreads(int)}
+   */
+  public HDFSStoreMutator setMinorCompactionThreads(int count);
+
+  /**
+   * {@link HDFSStore#getMinorCompactionThreads()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. -1 if not
+   *         set
+   */
+  public int getMinorCompactionThreads();
+
+  /**
+   * {@link HDFSStoreFactory#setMajorCompaction(boolean)}
+   */
+  public HDFSStoreMutator setMajorCompaction(boolean auto);
+
+  /**
+   * {@link HDFSStore#getMajorCompaction()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. null if not
+   *         set
+   */
+  public Boolean getMajorCompaction();
+
+  /**
+   * {@link HDFSStoreFactory#setMajorCompactionInterval(int)}
+   */
+  public HDFSStoreMutator setMajorCompactionInterval(int interval);
+
+  /**
+   * {@link HDFSStore#getMajorCompactionInterval()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. -1 if not
+   *         set
+   */
+  public int getMajorCompactionInterval();
+
+  /**
+   * {@link HDFSStoreFactory#setMajorCompactionThreads(int)}
+   */
+  public HDFSStoreMutator setMajorCompactionThreads(int count);
+
+  /**
+   * {@link HDFSStore#getMajorCompactionThreads()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. -1 if not
+   *         set
+   */
+  public int getMajorCompactionThreads();
+
+  /**
+   * {@link HDFSStoreFactory#setInputFileSizeMax(int)}
+   */
+  public HDFSStoreMutator setInputFileSizeMax(int size);
+
+  /**
+   * {@link HDFSStore#getInputFileSizeMax()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. -1 if not
+   *         set
+   */
+  public int getInputFileSizeMax();
+
+  /**
+   * {@link HDFSStoreFactory#setInputFileCountMin(int)}
+   */
+  public HDFSStoreMutator setInputFileCountMin(int count);
+
+  /**
+   * {@link HDFSStore#getInputFileCountMin()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. -1 if not
+   *         set
+   */
+  public int getInputFileCountMin();
+
+  /**
+   * {@link HDFSStoreFactory#setInputFileCountMax(int)}
+   */
+  public HDFSStoreMutator setInputFileCountMax(int count);
+
+  /**
+   * {@link HDFSStore#getInputFileCountMax()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. -1 if not
+   *         set
+   */
+  public int getInputFileCountMax();
+
+  /**
+   * {@link HDFSStoreFactory#setPurgeInterval(int)}
+   */
+  public HDFSStoreMutator setPurgeInterval(int interval);
+
+  /**
+   * {@link HDFSStore#getPurgeInterval()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. -1 if not
+   *         set
+   */
+  public int getPurgeInterval();
+
+  /**
+   * {@link HDFSStore#getBatchSize()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. -1 if not
+   *         set
+   */
+  public int getBatchSize();
+
+  /**
+   * {@link HDFSStoreFactory#setBatchSize(int)}
+   */
+  public HDFSStoreMutator setBatchSize(int size);
+
+  /**
+   * {@link HDFSStore#getBatchInterval()}
+   * 
+   * @return value to be used when mutator is executed on hdfsStore. -1 if not
+   *         set
+   */
+  public int getBatchInterval();
+
+  /**
+   * {@link HDFSStoreFactory#setBatchInterval(int)}
+   */
+  public HDFSStoreMutator setBatchInterval(int interval);
+}
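
As a small illustration of the mutator lifecycle, the sketch below alters compaction settings on a live store using only methods declared in HDFSStore and HDFSStoreMutator; the store reference and the chosen values are assumptions.

import com.gemstone.gemfire.cache.hdfs.HDFSStore;
import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;

public class HdfsStoreAlterSketch {
  // 'store' is assumed to be an already-created HDFSStore.
  static void relaxCompaction(HDFSStore store) {
    HDFSStoreMutator mutator = store.createHdfsStoreMutator();
    mutator.setMajorCompactionInterval(1440);  // run major compaction daily
    mutator.setMinorCompactionThreads(4);      // fewer compaction threads
    // Unset attributes remain unaltered; alter() returns the old settings.
    HDFSStore old = store.alter(mutator);
    System.out.println("Previous major compaction interval: "
        + old.getMajorCompactionInterval() + " minutes");
  }
}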

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/StoreExistsException.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/StoreExistsException.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/StoreExistsException.java
new file mode 100644
index 0000000..de21b23
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/StoreExistsException.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.hdfs;
+
+import com.gemstone.gemfire.cache.CacheException;
+
+/**
+ * Thrown when attempting to create a {@link HDFSStore} if one already exists.
+ * 
+ */
+public class StoreExistsException extends CacheException {
+  private static final long serialVersionUID = 1L;
+
+  public StoreExistsException(String storeName) {
+    super(storeName);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/FailureTracker.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/FailureTracker.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/FailureTracker.java
new file mode 100644
index 0000000..789d497
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/FailureTracker.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * 
+ */
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import org.apache.commons.lang.mutable.MutableInt;
+import org.apache.commons.lang.mutable.MutableLong;
+
+/**
+ * Class for tracking failures and backing off if necessary.
+ *
+ */
+public class FailureTracker  extends ThreadLocal<MutableInt> {
+  private final long minTime;
+  private final long maxTime;
+  private final float rate;
+  private final FailureCount waitTime = new FailureCount();
+  
+  
+  /**
+   * @param minTime the minimum wait time after a failure, in ms.
+   * @param maxTime the maximum wait time after a failure, in ms.
+   * @param rate the multiplicative rate at which the wait time grows with repeated failures
+   */
+  public FailureTracker(long minTime, long maxTime, float rate) {
+    this.minTime = minTime;
+    this.maxTime = maxTime;
+    this.rate = rate;
+  }
+  
+  /**
+   * Wait for the current wait time.
+   */
+  public void sleepIfRetry() throws InterruptedException {
+      Thread.sleep(waitTime());
+  }
+
+  /**
+   * @return the current wait time, approximately minTime * rate^(num_failures), capped at maxTime
+   */
+  public long waitTime() {
+    return waitTime.get().longValue();
+  }
+  
+  public void record(boolean success) {
+    if (success) {
+      success();
+    } else {
+      failure();
+    }
+  }
+
+  public void success() {
+    waitTime.get().setValue(0);
+  }
+
+  public void failure() {
+    long current = waitTime.get().longValue();
+    if (current == 0) {
+      current = minTime;
+    } else if (current < maxTime) {
+      current = (long) (current * rate);
+    }
+    waitTime.get().setValue(Math.min(current, maxTime));
+  }
+
+
+  private static class FailureCount extends ThreadLocal<MutableLong> {
+
+    @Override
+    protected MutableLong initialValue() {
+      return new MutableLong();
+    }
+  }
+
+
+  
+}
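
Since the class above implements an exponential backoff whose use is only implied by its javadoc, here is a hedged sketch of the intended retry pattern: record the outcome of each attempt, sleep the backed-off wait time before retrying, and let a success reset the wait. The flushToHdfs operation and the chosen backoff parameters are hypothetical.

import com.gemstone.gemfire.cache.hdfs.internal.FailureTracker;

public class FailureTrackerUsageSketch {
  // backoff starts at 100 ms, grows by 1.5x per failure, capped at 10 s
  private static final FailureTracker TRACKER = new FailureTracker(100, 10000, 1.5f);

  static void flushWithBackoff() throws InterruptedException {
    while (true) {
      TRACKER.sleepIfRetry();           // sleeps 0 ms until a failure is recorded
      boolean success = flushToHdfs();  // hypothetical HDFS operation
      TRACKER.record(success);          // resets or grows the wait time
      if (success) {
        return;
      }
    }
  }

  private static boolean flushToHdfs() {
    return true; // placeholder
  }
}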



[18/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogOrganizer.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogOrganizer.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogOrganizer.java
new file mode 100644
index 0000000..f7d746d
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogOrganizer.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.concurrent.Future;
+
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.QueuedPersistentEvent;
+import com.gemstone.gemfire.internal.cache.ForceReattemptException;
+
+/**
+ * Manages bucket-level operations on sorted oplog files, including creation, reading, serde, bloom
+ * buffering, and compaction. Abstracts the existence of multiple sorted oplog files.
+ */
+public interface HoplogOrganizer<T extends PersistedEventImpl> extends HoplogSetReader<byte[], T>,
+    HoplogListener, Closeable {
+
+  /**
+   * Iterates on the input buffer and persists it in a new sorted oplog. This invocation may block
+   * if there are too many outstanding write requests.
+   * 
+   * @param bufferIter
+   *          ordered iterator on a buffer of objects to be persisted
+   * @param count
+   *          number of K,V pairs expected to be part of flush, 0 if unknown
+   * @throws IOException
+   */
+  public void flush(Iterator<? extends QueuedPersistentEvent> bufferIter, int count) 
+      throws IOException, ForceReattemptException;
+  
+  
+  /**
+   * Clear the data in HDFS. This method assumes that the
+   * dispatcher thread has already been paused, so there should be
+   * no concurrent flushes to HDFS when this method is called.
+   * 
+   * @throws IOException
+   */
+  public void clear() throws IOException;
+
+  /**
+   * Returns the compactor associated with this set.
+   */
+  public Compactor getCompactor();
+  
+  /**
+   * Called to execute bucket maintenance activities, like purge expired files
+   * and create compaction tasks. Long-running activities must be executed
+   * asynchronously, not on this thread, to avoid impacting other buckets.
+   * @throws IOException 
+   */
+  public void performMaintenance() throws IOException;
+
+  /**
+   * Schedules a compaction task and returns immediately.
+   * 
+   * @param isMajor true for major compaction, false for minor compaction
+   * @return future for status of compaction request
+   */
+  public Future<CompactionStatus> forceCompaction(boolean isMajor);
+
+  /**
+   * Returns the timestamp of the last completed major compaction
+   * 
+   * @return the timestamp or 0 if a major compaction has not taken place yet
+   */
+  public long getLastMajorCompactionTimestamp();
+
+  public interface Compactor {
+    /**
+     * Requests a compaction operation be performed on this set of sorted oplogs.
+     *
+     * @param isMajor true for major compaction
+     * @param isForced true if the compaction should be carried out even if there
+     * is only one hoplog to compact
+     * 
+     * @return true if compaction was performed, false otherwise
+     * @throws IOException
+     */
+    boolean compact(boolean isMajor, boolean isForced) throws IOException;
+
+    /**
+     * Stop the current compaction operation in the middle and suspend
+     * compaction operations. The current compaction data
+     * will be thrown away, and no more compaction will be performed
+     * until resume is called.
+     */
+    void suspend();
+    
+    /**
+     * Resume compaction operations. 
+     */
+    void resume();
+
+    /**
+     * @return true if the compactor is not ready or busy
+     */
+    boolean isBusy(boolean isMajor);
+
+    /**
+     * @return the hdfsStore configuration used by this compactor
+     */
+    public HDFSStore getHdfsStore();
+  }
+}
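
To make the flush/compaction contract above concrete, the following sketch drives a HoplogOrganizer through a flush, a forced minor compaction and a maintenance pass. The organizer instance and the ordered event iterator are assumed to be supplied by the surrounding internal bucket machinery and are not constructed here.

import java.io.IOException;
import java.util.Iterator;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
import com.gemstone.gemfire.cache.hdfs.internal.QueuedPersistentEvent;
import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer;
import com.gemstone.gemfire.internal.cache.ForceReattemptException;

public class HoplogOrganizerUsageSketch {
  // Both arguments are assumed to come from the internal bucket machinery.
  static void flushAndCompact(HoplogOrganizer<? extends PersistedEventImpl> organizer,
      Iterator<? extends QueuedPersistentEvent> orderedEvents, int eventCount)
      throws IOException, ForceReattemptException, InterruptedException, ExecutionException {
    // Persist one ordered buffer of events as a new sorted oplog.
    organizer.flush(orderedEvents, eventCount);

    // Request a minor compaction and wait for its status.
    Future<?> compaction = organizer.forceCompaction(false /* isMajor */);
    compaction.get();

    // Periodic housekeeping: purge expired files, schedule compaction tasks.
    organizer.performMaintenance();
  }
}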

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogSetIterator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogSetIterator.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogSetIterator.java
new file mode 100644
index 0000000..16939db
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogSetIterator.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NoSuchElementException;
+
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HFileSortedOplog.HFileReader.HFileSortedIterator;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetReader.HoplogIterator;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.ByteComparator;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.TrackedReference;
+
+/**
+ * Provides a merged iterator over a set of {@link HFileSortedOplog} files.
+ */
+public class HoplogSetIterator implements HoplogIterator<ByteBuffer, ByteBuffer> {
+  private final List<HFileSortedIterator> iters;
+
+  // Number of entries remaining to be iterated by this scanner
+  private int entriesRemaining;
+
+  // current key and value, taken from the iterator holding the smallest next entry
+  private ByteBuffer currentKey;
+  private ByteBuffer currentValue;
+
+  public HoplogSetIterator(List<TrackedReference<Hoplog>> targets) throws IOException {
+    iters = new ArrayList<HFileSortedIterator>();
+    for (TrackedReference<Hoplog> oplog : targets) {
+      HFileSortedIterator iter = (HFileSortedIterator) oplog.get().getReader().scan();
+      if (!iter.hasNext()) {
+        // the oplog is empty, exclude from iterator
+        continue;
+      }
+
+      // initialize the iterator
+      iter.nextBB();
+      iters.add(iter);
+      entriesRemaining += oplog.get().getReader().getEntryCount();
+    }
+  }
+
+  public boolean hasNext() {
+    return entriesRemaining > 0;
+  }
+
+  @Override
+  public ByteBuffer next() throws IOException {
+    return nextBB();
+  }
+  public ByteBuffer nextBB() throws IOException {
+    if (!hasNext()) {
+      throw new NoSuchElementException();
+    }
+
+    seekToMinKeyIter();
+
+    return currentKey;
+  }
+
+  private void seekToMinKeyIter() throws IOException {
+    HFileSortedIterator currentIter = null;
+    ByteBuffer minKey = null;
+
+    // scan through all hoplog iterators to reach to the iterator with smallest
+    // key on the head and remove duplicate keys
+    for (Iterator<HFileSortedIterator> iterator = iters.iterator(); iterator.hasNext();) {
+      HFileSortedIterator iter = iterator.next();
+      
+      ByteBuffer tmpK = iter.getKeyBB();
+      ByteBuffer tmpV = iter.getValueBB();
+      if (minKey == null || ByteComparator.compareBytes(tmpK.array(), tmpK.arrayOffset(), tmpK.remaining(), minKey.array(), minKey.arrayOffset(), minKey.remaining()) < 0) {
+        minKey = tmpK;
+        currentKey = tmpK;
+        currentValue = tmpV;
+        currentIter = iter;
+      } else {
+        // remove possible duplicate key entries from iterator
+        if (seekHigherKeyInIter(minKey, iter) == null) {
+          // no more keys left in this iterator
+          iter.close();
+          iterator.remove();
+        }
+      }
+    }
+    
+    //seek next key in current iter
+    if (currentIter != null && seekHigherKeyInIter(minKey, currentIter) == null) {
+      // no more keys left in this iterator
+      currentIter.close();
+      iters.remove(currentIter);
+    }
+  }
+
+  private ByteBuffer seekHigherKeyInIter(ByteBuffer key, HFileSortedIterator iter) throws IOException {
+    ByteBuffer newK = iter.getKeyBB();
+
+    // remove all duplicates by incrementing iterator when a key is less than
+    // equal to current key
+    while (ByteComparator.compareBytes(newK.array(), newK.arrayOffset(), newK.remaining(), key.array(), key.arrayOffset(), key.remaining()) <= 0) {
+      entriesRemaining--;
+      if (iter.hasNext()) {
+        newK = iter.nextBB();
+      } else {
+        newK = null;
+        break;
+      }
+    }
+    return newK;
+  }
+
+  @Override
+  public ByteBuffer getKey() {
+    return getKeyBB();
+  }
+  public ByteBuffer getKeyBB() {
+    if (currentKey == null) {
+      throw new IllegalStateException();
+    }
+    return currentKey;
+  }
+
+  @Override
+  public ByteBuffer getValue() {
+    return getValueBB();
+  }
+  public ByteBuffer getValueBB() {
+    if (currentValue == null) {
+      throw new IllegalStateException();
+    }
+    return currentValue;
+  }
+
+  @Override
+  public void remove() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void close() {
+    for (HoplogIterator<byte[], byte[]> iter : iters) {
+      iter.close();
+    }
+  }
+
+  public int getRemainingEntryCount() {
+    return entriesRemaining;
+  }
+}
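
The class above is a k-way merge over per-file sorted iterators that also drops duplicate keys. A hedged usage sketch follows; the list of tracked hoplog references is assumed to come from the internal hoplog bookkeeping.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;

import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog;
import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetIterator;
import com.gemstone.gemfire.internal.cache.persistence.soplog.TrackedReference;

public class HoplogSetIterationSketch {
  // 'targets' is assumed to be provided by the internal hoplog bookkeeping.
  static int countMergedEntries(List<TrackedReference<Hoplog>> targets) throws IOException {
    HoplogSetIterator merged = new HoplogSetIterator(targets);
    int entries = 0;
    try {
      while (merged.hasNext()) {
        merged.next();                       // advances to the smallest remaining key
        ByteBuffer key = merged.getKey();    // duplicates across files are skipped
        ByteBuffer value = merged.getValue();
        entries++;
      }
    } finally {
      merged.close();
    }
    return entries;
  }
}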

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogSetReader.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogSetReader.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogSetReader.java
new file mode 100644
index 0000000..789a616
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogSetReader.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Iterator;
+
+/**
+ * Reads a sorted oplog file or a merged set of sorted oplogs.
+ */
+public interface HoplogSetReader<K, V> {
+  /**
+   * Returns the value associated with the given key.
+   */
+  V read(K key) throws IOException;
+
+  /**
+   * Iterates over the entire contents of the sorted file.
+   * 
+   * @return the sorted iterator
+   * @throws IOException
+   */
+  HoplogIterator<K, V> scan() throws IOException;
+
+  /**
+   * Scans the available keys and allows iteration over the interval [from, to) where the starting
+   * key is included and the ending key is excluded from the results.
+   * 
+   * @param from
+   *          the start key
+   * @param to
+   *          the end key
+   * @return the sorted iterator
+   * @throws IOException
+   */
+  HoplogIterator<K, V> scan(K from, K to) throws IOException;
+
+  /**
+   * Scans the keys and allows iteration between the given keys.
+   * 
+   * @param from
+   *          the start key
+   * @param fromInclusive
+   *          true if the start key is included in the scan
+   * @param to
+   *          the end key
+   * @param toInclusive
+   *          true if the end key is included in the scan
+   * @return the sorted iterator
+   * @throws IOException
+   */
+  HoplogIterator<K, V> scan(K from, boolean fromInclusive, K to, boolean toInclusive) throws IOException;
+  
+  
+  /**
+   * Scans the available keys and allows iteration over the byte range
+   * specified by the given offset and length.
+   * 
+   * 
+   * @param startOffset
+   *          the start offset
+   * @param length
+   *          bytes to read
+   * @return the sorted iterator
+   * @throws IOException
+   */
+  HoplogIterator<K, V> scan(long startOffset, long length) throws IOException;
+
+  /**
+   * Provides an approximate number of entries, computed using a cardinality estimator.
+   * 
+   * @return the number of entries
+   */
+  long sizeEstimate();
+
+  /**
+   * Returns true if the reader has been closed.
+   * @return true if closed
+   */
+  boolean isClosed();
+
+  /**
+   * Allows sorted iteration through a set of keys and values.
+   */
+  public interface HoplogIterator<K, V> {
+    K getKey();
+
+    V getValue();
+
+    /** moves to next element and returns the key object */
+    K next() throws IOException;
+    
+    boolean hasNext();
+    
+    void close();
+    
+    void remove();
+  }
+}
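
Since the scan variants above differ only in how the key interval is bounded (the two-argument form is [from, to), start inclusive and end exclusive), this short sketch shows a typical range scan. The reader instance is assumed to be obtained from some Hoplog implementation's getReader().

import java.io.IOException;

import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetReader;
import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetReader.HoplogIterator;

public class HoplogRangeScanSketch {
  // 'reader' is assumed to be obtained from some Hoplog's getReader().
  static void scanRange(HoplogSetReader<byte[], byte[]> reader,
      byte[] from, byte[] to) throws IOException {
    // Two-argument scan: 'from' is included, 'to' is excluded.
    HoplogIterator<byte[], byte[]> iter = reader.scan(from, to);
    try {
      while (iter.hasNext()) {
        byte[] key = iter.next();     // next() returns the key and advances
        byte[] value = iter.getValue();
        // process key/value ...
      }
    } finally {
      iter.close();
    }
  }
}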

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/SequenceFileHoplog.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/SequenceFileHoplog.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/SequenceFileHoplog.java
new file mode 100644
index 0000000..a2926ff
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/SequenceFileHoplog.java
@@ -0,0 +1,395 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+  
+import java.io.Closeable;
+import java.io.EOFException;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.EnumMap;
+
+import com.gemstone.gemfire.internal.hll.ICardinality;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Text;
+
+import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetReader.HoplogIterator;
+import com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile;
+import com.gemstone.gemfire.cache.hdfs.internal.org.apache.hadoop.io.SequenceFile.Reader;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
+import com.gemstone.gemfire.internal.Version;
+
+import org.apache.logging.log4j.Logger;
+
+/**
+ * Implements a sequence-file-based {@link Hoplog}.
+ */
+public class SequenceFileHoplog extends AbstractHoplog {
+  
+  public SequenceFileHoplog(FileSystem inputFS, Path filePath,
+      SortedOplogStatistics stats) throws IOException {
+    super(inputFS, filePath, stats);
+  }
+  @Override
+  public void close() throws IOException {
+    // Nothing to do 
+  }
+
+  @Override
+  public HoplogReader getReader() throws IOException {
+    return new SequenceFileReader();
+  }
+
+  /**
+   * Gets the writer for the sequence file.
+   *
+   * @param keys not used by the SequenceFileHoplog class
+   */
+  @Override
+  public HoplogWriter createWriter(int keys) throws IOException {
+    return new SequenceFileHoplogWriter();
+  }
+
+  @Override
+  public boolean isClosed() {
+    return false;
+  }
+  
+  @Override
+  public void close(boolean clearCache) throws IOException {
+    // Nothing to do 
+  }
+
+  /**
+   * Currently, hsync does not update the file size on the namenode. So, if the
+   * process previously died after calling hsync but before closing the file, the file is
+   * left with an inconsistent file size. This is a workaround: open the file stream in append
+   * mode and close it, which fixes the file size on the namenode.
+   * 
+   * @throws IOException
+   * @return true if the file size was fixed 
+   */
+  public boolean fixFileSize() throws IOException {
+    // Try to fix the file size.
+    // Loop so that the expected exceptions can be ignored up to 3 times.
+    if (logger.isDebugEnabled())
+      logger.debug("{}Fixing size of hoplog " + path, logPrefix);
+    Exception e = null;
+    boolean exceptionThrown = false;
+    for (int i =0; i < 3; i++) {
+      try {
+        FSDataOutputStream stream = fsProvider.getFS().append(path);
+        stream.close();
+        stream = null;
+      } catch (IOException ie) {
+        exceptionThrown = true;
+        e = ie;
+        if (logger.isDebugEnabled()) {
+          logger.debug("{}Retry run " + (i + 1) + ": Hoplog " + path + " is still a temporary " +
+              "hoplog because the node managing it wasn't shutdown properly last time. Failed to " +
+              "fix the hoplog because an exception was thrown " + e, logPrefix);
+        }
+      }
+      // As either a RecoveryInProgressException or an "already being created"
+      // exception was thrown, wait for some time before the next retry.
+      if (exceptionThrown) {
+        try {
+          Thread.sleep(5000);
+        } catch (InterruptedException e1) {
+        } 
+        exceptionThrown = false;
+      } else {
+        // no exception was thrown, break;
+        return true;
+      }
+    }
+    logger.info(logPrefix, LocalizedMessage.create(LocalizedStrings.DEBUG, "Hoplog " + path + " is still a temporary " +
+        "hoplog because the node managing it wasn't shut down properly last time. Failed to " +
+        "fix the hoplog because an exception was thrown " + e));
+    
+    return false;
+  }
+  
+  @Override
+  public String toString() {
+    return "SequenceFileHoplog[" + getFileName() + "]";
+  }
+  
+  private class SequenceFileHoplogWriter implements HoplogWriter {
+    
+    private SequenceFile.Writer writer = null;
+    
+    public SequenceFileHoplogWriter() throws IOException{
+      writer = AbstractHoplog.getSequenceFileWriter(path, conf, logger);
+    }
+   
+    @Override
+    public void close() throws IOException {
+      writer.close();
+      if (logger.isDebugEnabled())
+        logger.debug("{}Completed creating hoplog " + path, logPrefix);
+    }
+    
+    @Override
+    public void hsync() throws IOException {
+      writer.hsyncWithSizeUpdate();
+      if (logger.isDebugEnabled())
+        logger.debug("{}hsync'ed a batch of data to hoplog " + path, logPrefix);
+    }
+    
+    @Override
+    public void append(byte[] key, byte[] value) throws IOException {
+      writer.append(new BytesWritable(key), new BytesWritable(value));
+    }
+
+    @Override
+    public void append(ByteBuffer key, ByteBuffer value) throws IOException {
+      throw new UnsupportedOperationException("Not supported for Sequence files");
+    }
+
+    @Override
+    public void close(EnumMap<Meta, byte[]> metadata) throws IOException {
+      throw new UnsupportedOperationException("Not supported for Sequence files");
+    }
+    @Override
+    public long getCurrentSize() throws IOException {
+      return writer.getLength();
+    }
+    
+  }
+  /**
+   * Sequence file reader. Currently intended to be used only by MapReduce jobs
+   * and test functions.
+   */
+  public class SequenceFileReader implements HoplogReader, Closeable {
+    @Override
+    public byte[] read(byte[] key) throws IOException {
+      throw new UnsupportedOperationException("Not supported for Sequence files");
+    }
+
+    @Override
+    public HoplogIterator<byte[], byte[]> scan()
+        throws IOException {
+      return  new SequenceFileIterator(fsProvider.getFS(), path, 0, Long.MAX_VALUE, conf, logger);
+    }
+
+    @Override
+    public HoplogIterator<byte[], byte[]> scan(
+        byte[] from, byte[] to) throws IOException {
+      throw new UnsupportedOperationException("Not supported for Sequence files");
+    }
+    
+    @Override
+    public HoplogIterator<byte[], byte[]> scan(
+        long startOffset, long length) throws IOException {
+      return  new SequenceFileIterator(fsProvider.getFS(), path, startOffset, length, conf, logger);
+    }
+    
+    @Override
+    public HoplogIterator<byte[], byte[]> scan(
+        byte[] from, boolean fromInclusive, byte[] to, boolean toInclusive)
+        throws IOException {
+      throw new UnsupportedOperationException("Not supported for Sequence files");
+    }
+
+    @Override
+    public boolean isClosed() {
+      throw new UnsupportedOperationException("Not supported for Sequence files.");
+    }
+    
+    @Override
+    public void close() throws IOException {
+      throw new UnsupportedOperationException("Not supported for Sequence files. Close the iterator instead.");
+    }
+
+    @Override
+    public ByteBuffer get(byte[] key) throws IOException {
+      throw new UnsupportedOperationException("Not supported for Sequence files");
+    }
+
+    @Override
+    public BloomFilter getBloomFilter() throws IOException {
+      throw new UnsupportedOperationException("Not supported for Sequence files");
+    }
+
+    @Override
+    public long getEntryCount() {
+      throw new UnsupportedOperationException("Not supported for Sequence files");
+    }
+
+    @Override
+    public ICardinality getCardinalityEstimator() {
+      throw new UnsupportedOperationException("Not supported for Sequence files");
+    }
+
+    @Override
+    public long sizeEstimate() {
+      throw new UnsupportedOperationException("Not supported for Sequence files");
+    }
+
+
+  }
+  
+  /**
+   * Sequence file iterator. Currently intended to be used only by MapReduce jobs
+   * and test functions.
+   */
+  public static class SequenceFileIterator implements HoplogIterator<byte[], byte[]> {
+    
+    SequenceFile.Reader reader = null;
+    private BytesWritable prefetchedKey = null;
+    private BytesWritable prefetchedValue = null;
+    private byte[] currentKey;
+    private byte[] currentValue;
+    boolean hasNext = false;
+    Logger logger; 
+    Path path;
+    private long start;
+    private long end;
+    
+    public SequenceFileIterator(FileSystem fs, Path path, long startOffset, 
+        long length, Configuration conf, Logger logger) 
+        throws IOException {
+      Reader.Option optPath = SequenceFile.Reader.file(path);
+      
+      // Hadoop has a configuration parameter, io.serializations, that lists serialization
+      // classes used for obtaining serializers and deserializers. By default this list
+      // contains avro classes. When a sequence file is created, SerializationFactory.getSerializer(keyclass)
+      // is called, which instantiates every class listed in io.serializations via reflection.
+      // Since no avro class is available on the classpath, this throws an exception.
+      // Before creating the sequence file reader, override io.serializations and pass only
+      // the classes that are important to us.
+      String serializations[] = conf.getStrings("io.serializations",
+          new String[]{"org.apache.hadoop.io.serializer.WritableSerialization"});
+      conf.setStrings("io.serializations",
+          new String[]{"org.apache.hadoop.io.serializer.WritableSerialization"});
+      // create reader
+      boolean emptyFile = false;
+      try {
+        reader = new SequenceFile.Reader(conf, optPath);
+      } catch (EOFException e) {
+        // This is OK; the file is empty, so report that no records are available.
+        emptyFile = true;
+      }
+      // reset the configuration to its original value 
+      conf.setStrings("io.serializations", serializations);
+      this.logger = logger;
+      this.path = path;
+      
+      if (emptyFile) {
+        hasNext = false;
+      } else {
+        // The file should be read from the first sync marker after the start position and 
+        // until the first sync marker after the end position is seen. 
+        this.end = startOffset + length;
+        if (startOffset > reader.getPosition()) {
+          reader.sync(startOffset);                  // sync to start
+        }
+        this.start = reader.getPosition();
+        this.hasNext = this.start < this.end;
+        if (hasNext)
+          readNext();
+      } 
+    }
+
+    public Version getVersion() {
+      String version = reader.getMetadata().get(new Text(Meta.GEMFIRE_VERSION.name())).toString();
+      return Version.fromOrdinalOrCurrent(Short.parseShort(version)); 
+    }
+    @Override
+    public boolean hasNext() {
+      return hasNext;
+    }
+
+    @Override
+    public byte[] next() {
+      currentKey = prefetchedKey.getBytes();
+      currentValue = prefetchedValue.getBytes();
+      
+      readNext();
+
+      return currentKey;
+    }
+    
+    private void readNext() {
+      try {
+        long pos = reader.getPosition();
+        prefetchedKey = new BytesWritable();
+        prefetchedValue = new BytesWritable();
+        hasNext = reader.next(prefetchedKey, prefetchedValue);
+        // The file should be read from the first sync marker after the start position and 
+        // until the first sync marker after the end position is seen. 
+        if (pos >= end && reader.syncSeen()) {
+          hasNext = false;
+        }
+      } catch (EOFException e) {
+        // This is OK; the file has ended, so report that no more records are available.
+        hasNext = false;
+      } catch (IOException e) {
+        hasNext = false;
+        logger.error(LocalizedMessage.create(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE, path), e);
+        throw new HDFSIOException(
+            LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(path), e);
+      }
+    }
+    @Override
+    public void remove() {
+      throw new UnsupportedOperationException("Not supported for Sequence files");
+    }
+
+    @Override
+    public void close() {
+      IOUtils.closeStream(reader);
+    }
+
+    @Override
+    public byte[] getKey() {
+      return currentKey;
+    }
+
+    @Override
+    public byte[] getValue() {
+      return currentValue;
+    }
+    
+    /** Returns true iff the previous call to next passed a sync mark.*/
+    public boolean syncSeen() { return reader.syncSeen(); }
+
+    /** Return the current byte position in the input file. */
+    public synchronized long getPosition() throws IOException {
+      return reader.getPosition();
+    }
+  }
+}
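
For reference, the SequenceFileIterator defined above can be driven directly, for
example from a test. The sketch below is an illustration under assumptions: it
presumes the class lives in the com.gemstone.gemfire.cache.hdfs.internal.hoplog
package (the package declaration is outside this hunk) and that a readable
sequence-file hoplog already exists at the supplied path. Scanning from offset 0
to Long.MAX_VALUE mirrors what SequenceFileReader.scan() does.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.logging.log4j.LogManager;
import com.gemstone.gemfire.cache.hdfs.internal.hoplog.SequenceFileHoplog.SequenceFileIterator;

public class SequenceFileScanExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path(args[0]);  // path to an existing sequence-file hoplog

    // Read the whole file: start at offset 0 and let the iterator stop at EOF.
    SequenceFileIterator it = new SequenceFileIterator(
        fs, file, 0, Long.MAX_VALUE, conf, LogManager.getLogger(SequenceFileScanExample.class));
    try {
      while (it.hasNext()) {
        byte[] key = it.next();        // raw serialized key bytes
        byte[] value = it.getValue();  // raw serialized value bytes
        System.out.println(key.length + " byte key, " + value.length + " byte value");
      }
    } finally {
      it.close();                      // closes the underlying SequenceFile.Reader
    }
  }
}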

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/AbstractGFRecordReader.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/AbstractGFRecordReader.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/AbstractGFRecordReader.java
new file mode 100644
index 0000000..f5b63cc
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/AbstractGFRecordReader.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapred;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.RecordReader;
+import org.apache.hadoop.mapred.lib.CombineFileSplit;
+
+import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.SortedHoplogPersistedEvent;
+import com.gemstone.gemfire.cache.hdfs.internal.UnsortedHoplogPersistedEvent;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.GFKey;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.HDFSSplitIterator;
+
+public class AbstractGFRecordReader
+    extends
+    com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.AbstractGFRecordReader
+    implements RecordReader<GFKey, PersistedEventImpl> {
+
+  /**
+   * Initializes this record reader using the file split and job
+   * configuration.
+   *
+   * @param split the combined file split to read
+   * @param conf the job configuration
+   * @throws IOException
+   */
+  public void initialize(CombineFileSplit split, JobConf conf) throws IOException {
+    Path[] path = split.getPaths();
+    long[] start = split.getStartOffsets();
+    long[] len = split.getLengths();
+
+    FileSystem fs = split.getPath(0).getFileSystem(conf);
+    this.splitIterator = HDFSSplitIterator.newInstance(fs, path, start, len, 0l, 0l);
+  }
+
+  @Override
+  public boolean next(GFKey key, PersistedEventImpl value) throws IOException {
+    /*
+     * If there are more records in the hoplog, advance to the next record and
+     * populate the provided key and value objects.
+     */
+
+    if (!super.hasNext()) {
+      key.setKey(null);
+      // TODO make value null;
+      return false;
+    }
+
+    super.next();
+
+    key.setKey(super.getKey().getKey());
+    PersistedEventImpl usersValue = super.getValue();
+    value.copy(usersValue);
+    return true;
+  }
+
+  @Override
+  public GFKey createKey() {
+    return new GFKey();
+  }
+
+  @Override
+  public PersistedEventImpl createValue() {
+    if(this.isSequential) {
+      return new UnsortedHoplogPersistedEvent();
+    } else {
+      return new SortedHoplogPersistedEvent();
+    }
+  }
+
+  @Override
+  public long getPos() throws IOException {
+    // there is no efficient way to find the position of key in hoplog file.
+    return 0;
+  }
+
+  @Override
+  public void close() throws IOException {
+    super.close();
+  }
+
+  @Override
+  public float getProgress() throws IOException {
+    return super.getProgressRatio();
+  }
+}
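
A hedged sketch of how a mapred-era caller consumes this reader once initialize()
has run: reusable holders come from createKey()/createValue(), and next(key, value)
is driven until it returns false. The helper class and method names are hypothetical.

import java.io.IOException;
import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapred.AbstractGFRecordReader;
import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.GFKey;

class GFRecordReaderExample {
  // Counts the records exposed by an already-initialized reader.
  static long countRecords(AbstractGFRecordReader reader) throws IOException {
    GFKey key = reader.createKey();                  // reusable key holder
    PersistedEventImpl value = reader.createValue(); // sorted or unsorted event, per isSequential
    long count = 0;
    try {
      while (reader.next(key, value)) {              // false once the split is exhausted
        count++;
      }
    } finally {
      reader.close();
    }
    return count;
  }
}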

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/GFInputFormat.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/GFInputFormat.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/GFInputFormat.java
new file mode 100644
index 0000000..0e0e455
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/GFInputFormat.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapred;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.InputSplit;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobConfigurable;
+import org.apache.hadoop.mapred.RecordReader;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.mapred.lib.CombineFileSplit;
+
+import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.GFKey;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.HoplogUtil.HoplogOptimizedSplitter;
+
+public class GFInputFormat extends
+    com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.GFInputFormat
+    implements InputFormat<GFKey, PersistedEventImpl>, JobConfigurable {
+
+  @Override
+  public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
+    this.conf = job;
+
+    Collection<FileStatus> hoplogs = getHoplogs();
+    return createSplits(job, hoplogs);
+  }
+
+  /**
+   * Creates an input split for every block occupied by hoplogs of the input
+   * regions.
+   *
+   * @param job the job configuration
+   * @param hoplogs hoplog files to split
+   * @return array of input splits of type file input split
+   * @throws IOException
+   */
+  private InputSplit[] createSplits(JobConf job, Collection<FileStatus> hoplogs)
+      throws IOException {
+    if (hoplogs == null || hoplogs.isEmpty()) {
+      return new InputSplit[0];
+    }
+
+    HoplogOptimizedSplitter splitter = new HoplogOptimizedSplitter(hoplogs);
+    List<org.apache.hadoop.mapreduce.InputSplit> mr2Splits = splitter.getOptimizedSplits(conf);
+    InputSplit[] splits = new InputSplit[mr2Splits.size()];
+    int i = 0;
+    for (org.apache.hadoop.mapreduce.InputSplit inputSplit : mr2Splits) {
+      org.apache.hadoop.mapreduce.lib.input.CombineFileSplit mr2Split;
+      mr2Split = (org.apache.hadoop.mapreduce.lib.input.CombineFileSplit) inputSplit;
+
+      CombineFileSplit split = new CombineFileSplit(job, mr2Split.getPaths(),
+          mr2Split.getStartOffsets(), mr2Split.getLengths(),
+          mr2Split.getLocations());
+      splits[i] = split;
+      i++;
+    }
+
+    return splits;
+  }
+
+  @Override
+  public RecordReader<GFKey, PersistedEventImpl> getRecordReader(
+      InputSplit split, JobConf job, Reporter reporter) throws IOException {
+
+    CombineFileSplit cSplit = (CombineFileSplit) split;
+    AbstractGFRecordReader reader = new AbstractGFRecordReader();
+    reader.initialize(cSplit, job);
+    return reader;
+  }
+
+  @Override
+  public void configure(JobConf job) {
+    this.conf = job;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/GFOutputFormat.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/GFOutputFormat.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/GFOutputFormat.java
new file mode 100644
index 0000000..1494e9f
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapred/GFOutputFormat.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapred;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.OutputFormat;
+import org.apache.hadoop.mapred.RecordWriter;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.util.Progressable;
+
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.client.ClientCache;
+
+/**
+ * Output format for GemFire. The records provided to writers created by this
+ * output format are put into a live GemFire cluster.
+ */
+public class GFOutputFormat extends
+    com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.GFOutputFormat
+    implements OutputFormat<Object, Object> {
+
+  @Override
+  public RecordWriter<Object, Object> getRecordWriter(
+      FileSystem ignored, JobConf job, String name, Progressable progress)
+      throws IOException {
+    ClientCache cache = getClientCacheInstance(job);
+    return new GFRecordWriter(cache, job);
+  }
+  
+  @Override
+  public void checkOutputSpecs(FileSystem ignored, JobConf job)
+      throws IOException {
+    validateConfiguration(job);
+  }
+
+  public class GFRecordWriter implements RecordWriter<Object, Object> {
+    private ClientCache clientCache;
+    private Region<Object, Object> region;
+
+    public GFRecordWriter(ClientCache cache, Configuration conf) {
+      this.clientCache = cache;
+      region = getRegionInstance(conf, clientCache);
+    }
+    
+    @Override
+    public void write(Object key, Object value) throws IOException {
+      executePut(region, key, value);
+    }
+
+    @Override
+    public void close(Reporter reporter) throws IOException {
+      closeClientCache(clientCache);
+      // TODO update reporter
+    }
+  }
+}
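
The two old-API classes above are wired into a job through JobConf. The sketch
below assumes a running cluster with the named region and locator; the property
names are the constants declared on the mapreduce GFInputFormat/GFOutputFormat
base classes added later in this commit, and the mapper/reducer setup is elided.

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapred.GFInputFormat;
import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapred.GFOutputFormat;

public class OldApiJobExample {
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf(OldApiJobExample.class);
    job.setJobName("hoplog-scan");

    // Input: read the hoplogs persisted for a region (names are hypothetical).
    job.setInputFormat(GFInputFormat.class);
    job.set("mapreduce.input.gfinputformat.inputregion", "orders");
    job.set("mapreduce.input.gfinputformat.homedir", "gemfire");

    // Output: put results back into a live cluster through a locator.
    job.setOutputFormat(GFOutputFormat.class);
    job.set("mapreduce.output.gfoutputformat.outputregion", "orders-summary");
    job.set("mapreduce.output.gfoutputformat.locatorhost", "localhost");
    job.setInt("mapreduce.output.gfoutputformat.locatorport", 10334);

    // Mapper and reducer classes would be set here as usual.
    JobClient.runJob(job);
  }
}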

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/AbstractGFRecordReader.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/AbstractGFRecordReader.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/AbstractGFRecordReader.java
new file mode 100644
index 0000000..2c71b18
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/AbstractGFRecordReader.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
+
+import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.SortedHoplogPersistedEvent;
+import com.gemstone.gemfire.cache.hdfs.internal.UnsortedHoplogPersistedEvent;
+import com.gemstone.gemfire.internal.util.BlobHelper;
+
+public class AbstractGFRecordReader extends
+    RecordReader<GFKey, PersistedEventImpl> {
+
+  // constant overhead of each KV in hfile. This is used in computing the
+  // progress of record reader
+  protected long RECORD_OVERHEAD = 8;
+
+  // accounting for number of bytes already read from the hfile
+  private long bytesRead;
+  
+  protected boolean isSequential;
+  
+  protected HDFSSplitIterator splitIterator;
+
+  @Override
+  public void initialize(InputSplit split, TaskAttemptContext context)
+  throws IOException, InterruptedException {
+    CombineFileSplit cSplit = (CombineFileSplit) split;
+    Path[] path = cSplit.getPaths();
+    long[] start = cSplit.getStartOffsets();
+    long[] len = cSplit.getLengths();
+
+    Configuration conf = context.getConfiguration();
+    FileSystem fs = cSplit.getPath(0).getFileSystem(conf);
+    
+    this.splitIterator = HDFSSplitIterator.newInstance(fs, path, start, len, 0l, 0l);
+  }
+  
+  @Override
+  public boolean nextKeyValue() throws IOException, InterruptedException {
+    return next();
+  }
+
+  protected boolean next() throws IOException {
+    if (!hasNext()) {
+      return false;
+    }
+    
+    splitIterator.next();
+    bytesRead += (splitIterator.getKey().length + splitIterator.getValue().length);
+    bytesRead += RECORD_OVERHEAD;
+    return true;
+  }
+  
+  protected boolean hasNext() throws IOException {
+    return splitIterator.hasNext();
+  }
+
+  @Override
+  public GFKey getCurrentKey() throws IOException, InterruptedException {
+    return getKey();
+  }
+
+  protected GFKey getKey() throws IOException {
+    try {
+      GFKey key = new GFKey();
+      key.setKey(BlobHelper.deserializeBlob(splitIterator.getKey()));
+      return key;
+    } catch (ClassNotFoundException e) {
+      // TODO resolve logging
+      return null;
+    }
+  }
+
+  @Override
+  public PersistedEventImpl getCurrentValue() throws IOException,
+      InterruptedException {
+    return getValue();
+  }
+
+  protected PersistedEventImpl getValue() throws IOException {
+    try {
+      byte[] valueBytes = splitIterator.getValue();
+      if(isSequential) {
+        return UnsortedHoplogPersistedEvent.fromBytes(valueBytes);
+      } else {
+        return SortedHoplogPersistedEvent.fromBytes(valueBytes);
+      }
+    } catch (ClassNotFoundException e) {
+      // TODO resolve logging
+      return null;
+    }
+  }
+
+  @Override
+  public float getProgress() throws IOException, InterruptedException {
+    return getProgressRatio();
+  }
+
+  protected float getProgressRatio() throws IOException {
+    if (!splitIterator.hasNext()) {
+      return 1.0f;
+    } else if (bytesRead > splitIterator.getLength()) {
+      // The record reader is expected to read more bytes than the split length
+      // because it continues until the beginning of the next block. Once that
+      // extra reading has started, return a fixed value.
+      return 0.95f;
+    } else {
+      return Math.min(1.0f, bytesRead / (float) (splitIterator.getLength()));
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    splitIterator.close();
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFInputFormat.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFInputFormat.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFInputFormat.java
new file mode 100644
index 0000000..ff64ceb
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFInputFormat.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.InputFormat;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.HoplogUtil.HoplogOptimizedSplitter;
+
+public class GFInputFormat extends InputFormat<GFKey, PersistedEventImpl>
+    implements Configurable {
+  public static final String HOME_DIR = "mapreduce.input.gfinputformat.homedir";
+  public static final String INPUT_REGION = "mapreduce.input.gfinputformat.inputregion";
+  public static final String START_TIME = "mapreduce.input.gfinputformat.starttime";
+  public static final String END_TIME = "mapreduce.input.gfinputformat.endtime";
+  public static final String CHECKPOINT = "mapreduce.input.gfinputformat.checkpoint";
+  
+  protected Configuration conf;
+
+  @Override
+  public List<InputSplit> getSplits(JobContext job) throws IOException {
+    this.conf = job.getConfiguration();
+    
+    Collection<FileStatus> hoplogs = getHoplogs();
+    return createSplits(hoplogs);
+  }
+
+  /**
+   * Identifies filters provided in the job configuration and creates a list of
+   * sorted hoplogs. If there are no sorted hoplogs, checks if the region has
+   * sequential hoplogs
+   * 
+   * @return list of hoplogs
+   * @throws IOException
+   */
+  protected Collection<FileStatus> getHoplogs() throws IOException {
+    String regionName = conf.get(INPUT_REGION);
+    System.out.println("GFInputFormat: Region Name is " + regionName);
+    if (regionName == null || regionName.trim().isEmpty()) {
+      // incomplete job configuration, region name must be provided
+      return new ArrayList<FileStatus>();
+    }
+
+    String home = conf.get(HOME_DIR, HDFSStore.DEFAULT_HOME_DIR);
+    regionName = HdfsRegionManager.getRegionFolder(regionName);
+    Path regionPath = new Path(home + "/" + regionName);
+    FileSystem fs = regionPath.getFileSystem(conf);
+
+    long start = conf.getLong(START_TIME, 0l);
+    long end = conf.getLong(END_TIME, 0l);
+    boolean checkpoint = conf.getBoolean(CHECKPOINT, true);
+
+    // if the region contains flush hoplogs then the region is of type RW.
+    Collection<FileStatus> hoplogs;
+    hoplogs = HoplogUtil.filterHoplogs(fs, regionPath, start, end, checkpoint);
+    return hoplogs == null ? new ArrayList<FileStatus>() : hoplogs;
+  }
+  
+  /**
+   * Creates an input split for every block occupied by hoplogs of the input
+   * regions
+   * 
+   * @param hoplogs
+   * @return list of input splits of type file input split
+   * @throws IOException
+   */
+  private List<InputSplit> createSplits(Collection<FileStatus> hoplogs)
+      throws IOException {
+    List<InputSplit> splits = new ArrayList<InputSplit>();
+    if (hoplogs == null || hoplogs.isEmpty()) {
+      return splits;
+    }
+    
+    HoplogOptimizedSplitter splitter = new HoplogOptimizedSplitter(hoplogs);
+    return splitter.getOptimizedSplits(conf);
+  }
+
+  @Override
+  public RecordReader<GFKey, PersistedEventImpl> createRecordReader(
+      InputSplit split, TaskAttemptContext context) throws IOException,
+      InterruptedException {
+    return new AbstractGFRecordReader();
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+}
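
A minimal end-to-end sketch of a new-API job that uses this input format. The
region name, home directory, and output path are assumptions; the mapper only
counts persisted events, so it does not depend on any PersistedEventImpl accessors.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.GFInputFormat;
import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.GFKey;

public class CountHoplogRecords {

  // Emits one ("records", 1) pair per persisted event.
  public static class CountMapper
      extends Mapper<GFKey, PersistedEventImpl, Text, LongWritable> {
    private static final Text RECORDS = new Text("records");
    private static final LongWritable ONE = new LongWritable(1);

    @Override
    protected void map(GFKey key, PersistedEventImpl value, Context context)
        throws IOException, InterruptedException {
      context.write(RECORDS, ONE);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set(GFInputFormat.INPUT_REGION, "orders");  // hypothetical region name
    conf.set(GFInputFormat.HOME_DIR, "gemfire");     // hypothetical HDFS store home dir

    Job job = Job.getInstance(conf, "count-hoplog-records");
    job.setJarByClass(CountHoplogRecords.class);
    job.setInputFormatClass(GFInputFormat.class);
    job.setMapperClass(CountMapper.class);
    job.setNumReduceTasks(0);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(LongWritable.class);
    job.setOutputFormatClass(TextOutputFormat.class);
    FileOutputFormat.setOutputPath(job, new Path(args[0]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}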

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFKey.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFKey.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFKey.java
new file mode 100644
index 0000000..5bba2c7
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFKey.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.io.WritableComparator;
+
+import com.gemstone.gemfire.internal.util.BlobHelper;
+
+public class GFKey implements WritableComparable<GFKey> {
+  private Object key;
+
+  public Object getKey() {
+    return key;
+  }
+
+  public void setKey(Object key) {
+    this.key = key;
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    byte[] bytes = BlobHelper.serializeToBlob(key);
+    out.writeInt(bytes.length);
+    out.write(bytes, 0, bytes.length);
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    int len = in.readInt();
+    byte[] bytes = new byte[len];
+    in.readFully(bytes, 0, len);
+    try {
+      key = BlobHelper.deserializeBlob(bytes);
+    } catch (ClassNotFoundException e) {
+      // TODO Auto-generated catch block
+      e.printStackTrace();
+    }
+  }
+
+  @Override
+  public int compareTo(GFKey o) {
+    try {
+      byte[] b1 = BlobHelper.serializeToBlob(key);
+      byte[] b2 = BlobHelper.serializeToBlob(o.key);
+      return WritableComparator.compareBytes(b1, 0, b1.length, b2, 0, b2.length);
+    } catch (IOException e) {
+      // TODO Auto-generated catch block
+      e.printStackTrace();
+    }
+    
+    return 0;
+  }
+}
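
A small round-trip sketch of the Writable contract above. It assumes BlobHelper
can serialize a plain String outside a running cache, which is the common case.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.GFKey;

public class GFKeyRoundTrip {
  public static void main(String[] args) throws Exception {
    GFKey original = new GFKey();
    original.setKey("customer-42");                 // any serializable region key

    // Serialize through the Writable contract, as the MapReduce framework would.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bytes));

    GFKey copy = new GFKey();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

    System.out.println(copy.getKey());              // prints customer-42
    System.out.println(original.compareTo(copy));   // 0: identical serialized forms
  }
}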

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFOutputFormat.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFOutputFormat.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFOutputFormat.java
new file mode 100644
index 0000000..3be2ab0
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFOutputFormat.java
@@ -0,0 +1,198 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapred.InvalidJobConfException;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.OutputFormat;
+import org.apache.hadoop.mapreduce.RecordWriter;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionExistsException;
+import com.gemstone.gemfire.cache.client.ClientCache;
+import com.gemstone.gemfire.cache.client.ClientCacheFactory;
+import com.gemstone.gemfire.cache.client.ClientRegionFactory;
+import com.gemstone.gemfire.cache.client.ClientRegionShortcut;
+import com.gemstone.gemfire.cache.server.CacheServer;
+import com.gemstone.gemfire.management.internal.cli.converters.ConnectionEndpointConverter;
+
+/**
+ * Output format for GemFire. The records provided to writers created by this
+ * output format are put into a live GemFire cluster.
+ */
+public class GFOutputFormat extends OutputFormat<Object, Object> {
+  public static final String REGION = "mapreduce.output.gfoutputformat.outputregion";
+  public static final String LOCATOR_HOST = "mapreduce.output.gfoutputformat.locatorhost";
+  public static final String LOCATOR_PORT = "mapreduce.output.gfoutputformat.locatorport";
+  public static final String SERVER_HOST = "mapreduce.output.gfoutputformat.serverhost";
+  public static final String SERVER_PORT = "mapreduce.output.gfoutputformat.serverport";
+
+  @Override
+  public RecordWriter<Object, Object> getRecordWriter(TaskAttemptContext context)
+      throws IOException, InterruptedException {
+    Configuration conf = context.getConfiguration();
+    ClientCache cache = getClientCacheInstance(conf);
+    return new GFRecordWriter(cache, context.getConfiguration());
+  }
+
+  public ClientCache getClientCacheInstance(Configuration conf) {
+    // If a server host is provided, connect directly to that server. Otherwise
+    // connect through a locator, falling back to the default locator host and
+    // port when none is configured.
+    ClientCache cache;
+    String serverHost = conf.get(SERVER_HOST);
+    if (serverHost == null || serverHost.isEmpty()) {
+      cache = createGFWriterUsingLocator(conf);
+    } else {
+      cache = createGFWriterUsingServer(conf);
+    }
+    return cache;
+  }
+
+  /**
+   * Creates instance of {@link ClientCache} by connecting to GF cluster through
+   * locator
+   */
+  public ClientCache createGFWriterUsingLocator(Configuration conf) {
+    // if locator host is not provided assume localhost
+    String locator = conf.get(LOCATOR_HOST,
+        ConnectionEndpointConverter.DEFAULT_LOCATOR_HOST);
+    // if locator port is not provided assume default locator port 10334
+    int port = conf.getInt(LOCATOR_PORT,
+        ConnectionEndpointConverter.DEFAULT_LOCATOR_PORT);
+
+    // create gemfire client cache instance
+    ClientCacheFactory ccf = new ClientCacheFactory();
+    ccf.addPoolLocator(locator, port);
+    ClientCache cache = ccf.create();
+    return cache;
+  }
+
+  /**
+   * Creates instance of {@link ClientCache} by connecting to GF cluster through
+   * GF server
+   */
+  public ClientCache createGFWriterUsingServer(Configuration conf) {
+    String server = conf.get(SERVER_HOST);
+    // if server port is not provided assume default server port, 40404
+    int port = conf.getInt(SERVER_PORT, CacheServer.DEFAULT_PORT);
+
+    // create gemfire client cache instance
+    ClientCacheFactory ccf = new ClientCacheFactory();
+    ccf.addPoolServer(server, port);
+    ClientCache cache = ccf.create();
+    return cache;
+  }
+
+  public Region<Object, Object> getRegionInstance(Configuration conf,
+      ClientCache cache) {
+    Region<Object, Object> region;
+
+    // create gemfire region in proxy mode
+    String regionName = conf.get(REGION);
+    ClientRegionFactory<Object, Object> regionFactory = cache
+        .createClientRegionFactory(ClientRegionShortcut.PROXY);
+    try {
+      region = regionFactory.create(regionName);
+    } catch (RegionExistsException e) {
+      region = cache.getRegion(regionName);
+    }
+
+    return region;
+  }
+
+  /**
+   * Puts a K-V pair in region
+   * @param region
+   * @param key
+   * @param value
+   */
+  public void executePut(Region<Object, Object> region, Object key, Object value) {
+    region.put(key, value);
+  }
+
+  /**
+   * Closes client cache instance
+   * @param clientCache
+   */
+  public void closeClientCache(ClientCache clientCache) {
+    if (clientCache != null && !clientCache.isClosed()) {
+      clientCache.close();
+    }
+  }
+
+  /**
+   * Validates correctness and completeness of job's output configuration
+   * 
+   * @param conf
+   * @throws InvalidJobConfException
+   */
+  protected void validateConfiguration(Configuration conf)
+      throws InvalidJobConfException {
+    // User must configure the output region name.
+    String region = conf.get(REGION);
+    if (region == null || region.trim().isEmpty()) {
+      throw new InvalidJobConfException("Output Region name not provided.");
+    }
+
+    // TODO validate if a client connected to gemfire cluster can be created
+  }
+  
+  @Override
+  public void checkOutputSpecs(JobContext context) throws IOException,
+      InterruptedException {
+    Configuration conf = context.getConfiguration();
+    validateConfiguration(conf);
+  }
+
+  @Override
+  public OutputCommitter getOutputCommitter(TaskAttemptContext context)
+      throws IOException, InterruptedException {
+    return new FileOutputCommitter(FileOutputFormat.getOutputPath(context),
+        context);
+  }
+
+  public class GFRecordWriter extends RecordWriter<Object, Object> {
+    private ClientCache clientCache;
+    private Region<Object, Object> region;
+
+    public GFRecordWriter(ClientCache cache, Configuration conf) {
+      this.clientCache = cache;
+      region = getRegionInstance(conf, clientCache);
+    }
+
+    @Override
+    public void write(Object key, Object value) throws IOException,
+        InterruptedException {
+      executePut(region, key, value);
+    }
+
+    @Override
+    public void close(TaskAttemptContext context) throws IOException,
+        InterruptedException {
+      closeClientCache(clientCache);
+    }
+  }
+}
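
A configuration sketch for this output format. The region name, locator host, and
port are assumptions; REGION is the only mandatory key, since checkOutputSpecs()
rejects a job that does not set it.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.GFOutputFormat;

public class OutputWiringExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Target region for the puts performed by GFRecordWriter.
    conf.set(GFOutputFormat.REGION, "orders-summary");
    // Connect through a locator; when these keys are omitted the defaults from
    // ConnectionEndpointConverter are used instead.
    conf.set(GFOutputFormat.LOCATOR_HOST, "localhost");
    conf.setInt(GFOutputFormat.LOCATOR_PORT, 10334);

    Job job = Job.getInstance(conf, "write-to-gemfire");
    job.setOutputFormatClass(GFOutputFormat.class);
    // The mapper/reducer output types become the (key, value) pairs that are
    // put into the configured region; input format and job submission are elided.
  }
}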

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HDFSSplitIterator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HDFSSplitIterator.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HDFSSplitIterator.java
new file mode 100644
index 0000000..869ad0d
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HDFSSplitIterator.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
+
+import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
+import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplog;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetReader.HoplogIterator;
+import com.gemstone.gemfire.i18n.LogWriterI18n;
+
+import org.apache.logging.log4j.Logger;
+
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
+
+/**
+ * Iterates over the records in part of a hoplog. This iterator
+ * is passed from the map reduce job into the gemfirexd LanguageConnectionContext
+ * for gemfirexd to use as the iterator during the map phase.
+ *
+ */
+public abstract class HDFSSplitIterator {
+  // data object for holding path, offset and length, of all the blocks this
+  // iterator needs to iterate on
+  private CombineFileSplit split;
+
+  // the following members are pointers to current hoplog which is being
+  // iterated upon
+  private int currentHopIndex = 0;
+  private AbstractHoplog hoplog;
+  protected HoplogIterator<byte[], byte[]> iterator;
+  byte[] key;
+  byte[] value;
+  
+  private long bytesRead;
+  protected long RECORD_OVERHEAD = 8;
+
+  private long startTime = 0l;
+  private long endTime = 0l;
+
+  protected FileSystem fs;
+  private static final Logger logger = LogService.getLogger();
+  protected final String logPrefix = "<HDFSSplitIterator> ";
+
+  public HDFSSplitIterator(FileSystem fs, Path[] paths, long[] offsets, long[] lengths, long startTime, long endTime) throws IOException {
+    this.fs = fs;
+    this.split = new CombineFileSplit(paths, offsets, lengths, null);
+    while(currentHopIndex < split.getNumPaths() && !fs.exists(split.getPath(currentHopIndex))){
+      logger.warn(LocalizedMessage.create(LocalizedStrings.HOPLOG_CLEANED_UP_BY_JANITOR, split.getPath(currentHopIndex)));
+      currentHopIndex++;
+    }
+    if(currentHopIndex == split.getNumPaths()){
+      this.hoplog = null;
+      iterator = null;
+    } else {
+      this.hoplog = getHoplog(fs,split.getPath(currentHopIndex));
+      iterator = hoplog.getReader().scan(split.getOffset(currentHopIndex), split.getLength(currentHopIndex));
+    }
+    this.startTime = startTime;
+    this.endTime = endTime;
+  }
+
+  /**
+   * Get the appropriate iterator for the file type.
+   */
+  public static HDFSSplitIterator newInstance(FileSystem fs, Path[] path,
+      long[] start, long[] len, long startTime, long endTime)
+      throws IOException {
+    String fileName = path[0].getName();
+    if (fileName.endsWith(AbstractHoplogOrganizer.SEQ_HOPLOG_EXTENSION)) {
+      return new StreamSplitIterator(fs, path, start, len, startTime, endTime);
+    } else {
+      return new RWSplitIterator(fs, path, start, len, startTime, endTime);
+    }
+  }
+
+  public final boolean hasNext() throws IOException {
+    while (currentHopIndex < split.getNumPaths()) {
+      if (iterator != null) {
+        if(iterator.hasNext()) {
+          return true;
+        } else {
+          iterator.close();
+          iterator = null;
+          hoplog.close();
+          hoplog = null;
+        }
+      }
+      
+      if (iterator == null) {
+        // The iterator is null if this is the first read from this split or all
+        // the entries from the previous hoplog have been read. Create an iterator
+        // on the next hoplog.
+        currentHopIndex++;
+        while (currentHopIndex < split.getNumPaths() && !fs.exists(split.getPath(currentHopIndex))){
+          logger.warn(LocalizedMessage.create(LocalizedStrings.HOPLOG_CLEANED_UP_BY_JANITOR, split.getPath(currentHopIndex).toString()));
+          currentHopIndex++;
+        }
+        if (currentHopIndex >= split.getNumPaths()) {
+          return false;
+        }
+        hoplog = getHoplog(fs, split.getPath(currentHopIndex));
+        iterator = hoplog.getReader().scan(split.getOffset(currentHopIndex), split.getLength(currentHopIndex));
+      }
+    }
+    
+    return false;
+  } 
+
+  public final boolean next() throws IOException {
+    while (hasNext()) {
+      key = iterator.next();
+      value = iterator.getValue();
+      bytesRead += (key.length + value.length);
+      bytesRead += RECORD_OVERHEAD;
+      
+      // If any filter is set, check whether the event's timestamp matches the
+      // filter. The events returned by the iterator may not be time ordered, so
+      // it is important to check the filters every time.
+      if (startTime > 0 || endTime > 0) {
+        try {
+          PersistedEventImpl event = getDeserializedValue();
+          long timestamp = event.getTimstamp();
+          if (startTime > 0l && timestamp < startTime) {
+            continue;
+          }
+          
+          if (endTime > 0l && timestamp > endTime) {
+            continue;
+          }
+        } catch (ClassNotFoundException e) {
+          throw new HDFSIOException("Error reading from HDFS", e);
+        } 
+      }
+        
+      return true;
+    }
+    
+    return false;
+  }
+
+  public final long getBytesRead() {
+    return this.bytesRead;
+  }
+
+  public final byte[] getKey() {
+    return key;
+  }
+
+  public abstract PersistedEventImpl getDeserializedValue()
+      throws ClassNotFoundException, IOException;
+
+  protected abstract AbstractHoplog getHoplog(FileSystem fs, Path path)
+      throws IOException;
+
+  public final byte[] getValue() {
+    return value;
+  }
+
+  public final long getLength() {
+    return split.getLength();
+  }
+
+  public void close() throws IOException {
+    if (iterator != null) {
+      iterator.close();
+      iterator = null;
+    }
+    
+    if (hoplog != null) {
+      hoplog.close();
+      hoplog = null;
+    }
+  }
+}
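
A usage sketch for the factory above: newInstance() chooses a StreamSplitIterator
for sequence hoplogs and an RWSplitIterator otherwise, based on the file extension.
The single-file split below is an assumption; in a MapReduce job the paths, offsets,
and lengths come from a CombineFileSplit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.HDFSSplitIterator;

public class SplitScanExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());

    // One hoplog, read from the beginning to its full length.
    Path[] paths = { new Path(args[0]) };
    long[] offsets = { 0L };
    long[] lengths = { fs.getFileStatus(paths[0]).getLen() };

    // 0/0 disables the start/end timestamp filters.
    HDFSSplitIterator it = HDFSSplitIterator.newInstance(fs, paths, offsets, lengths, 0L, 0L);
    try {
      long records = 0;
      while (it.next()) {
        byte[] key = it.getKey();      // serialized key bytes
        byte[] value = it.getValue();  // serialized persisted-event bytes
        records++;
      }
      System.out.println(records + " records, " + it.getBytesRead() + " bytes read");
    } finally {
      it.close();
    }
  }
}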

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtil.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtil.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtil.java
new file mode 100644
index 0000000..c4c0d1c
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtil.java
@@ -0,0 +1,463 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
+import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
+
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer.HoplogComparator;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
+
+public class HoplogUtil {
+  /**
+   * @param regionPath
+   *          HDFS path of the region
+   * @param fs
+   *          file system associated with the region
+   * @param type
+   *          type of hoplog to be fetched; flush hoplog or sequence hoplog
+   * @return All hoplog file paths belonging to the region provided
+   * @throws IOException
+   */
+  public static Collection<FileStatus> getAllRegionHoplogs(Path regionPath,
+      FileSystem fs, String type) throws IOException {
+    return getRegionHoplogs(regionPath, fs, type, 0, 0);
+  }
+
+  /**
+   * @param regionPath
+   *          Region path
+   * @param fs
+   *          file system associated with the region
+   * @param type
+   *          type of hoplog to be fetched; flush hoplog or sequence hoplog
+   * @param start
+   *          Exclude files that do not contain records mutated after start time
+   * @param end
+   *          Exclude files that do not contain records mutated before end time
+   * @return All hoplog file paths belonging to the region provided
+   * @throws IOException
+   */
+  public static Collection<FileStatus> getRegionHoplogs(Path regionPath,
+      FileSystem fs, String type, long start, long end) throws IOException {
+    Collection<Collection<FileStatus>> allBuckets = getBucketHoplogs(
+        regionPath, fs, type, start, end);
+
+    ArrayList<FileStatus> hoplogs = new ArrayList<FileStatus>();
+    for (Collection<FileStatus> bucket : allBuckets) {
+      for (FileStatus file : bucket) {
+        hoplogs.add(file);
+      }
+    }
+    return hoplogs;
+  }
+
+  public static Collection<Collection<FileStatus>> getBucketHoplogs(Path regionPath,
+      FileSystem fs, String type, long start, long end) throws IOException {
+    Collection<Collection<FileStatus>> allBuckets = new ArrayList<Collection<FileStatus>>();
+
+    // hoplog file names follow this pattern
+    String HOPLOG_NAME_REGEX = AbstractHoplogOrganizer.HOPLOG_NAME_REGEX + type;
+    String EXPIRED_HOPLOG_NAME_REGEX = HOPLOG_NAME_REGEX + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION;
+    final Pattern pattern = Pattern.compile(HOPLOG_NAME_REGEX);
+    final Pattern expiredPattern = Pattern.compile(EXPIRED_HOPLOG_NAME_REGEX);
+    
+    Path cleanUpIntervalPath = new Path(regionPath.getParent(), HoplogConfig.CLEAN_UP_INTERVAL_FILE_NAME);
+    long intervalDurationMillis = readCleanUpIntervalMillis(fs, cleanUpIntervalPath);
+
+    // a region directory contains directories for individual buckets. A bucket
+    // has an integer name.
+    FileStatus[] bucketDirs = fs.listStatus(regionPath);
+    
+    for (FileStatus bucket : bucketDirs) {
+      if (!bucket.isDirectory()) {
+        continue;
+      }
+      try {
+        Integer.valueOf(bucket.getPath().getName());
+      } catch (NumberFormatException e) {
+        continue;
+      }
+
+      ArrayList<FileStatus> bucketHoplogs = new ArrayList<FileStatus>();
+
+      // identify all the flush hoplogs and seq hoplogs by visiting all the
+      // bucket directories
+      FileStatus[] bucketFiles = fs.listStatus(bucket.getPath());
+      
+      Map<String, Long> expiredHoplogs = getExpiredHoplogs(fs, bucketFiles, expiredPattern);
+      
+      FileStatus oldestHopAfterEndTS = null;
+      long oldestHopTS = Long.MAX_VALUE;
+      long currentTimeStamp = System.currentTimeMillis();
+      for (FileStatus file : bucketFiles) {
+        if (!file.isFile()) {
+          continue;
+        }
+
+        Matcher match = pattern.matcher(file.getPath().getName());
+        if (!match.matches()) {
+          continue;
+        }
+        
+        long timeStamp = AbstractHoplogOrganizer.getHoplogTimestamp(match);
+        if (start > 0 && timeStamp < start) {
+          // this hoplog contains no records mutated after the start timestamp
+          continue;
+        }
+
+        if (end > 0 && timeStamp > end) {
+          // this hoplog contains records mutated after the end timestamp.
+          // Ignore it unless it turns out to be the oldest such hoplog.
+          if (oldestHopTS > timeStamp) {
+            oldestHopTS = timeStamp;
+            oldestHopAfterEndTS = file;
+          }
+          continue;
+        }
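+        // If an expired-hoplog marker exists for this file and more than 80%
+        // of the configured clean-up interval has already elapsed since the
+        // marker was written, skip the file; it is presumably about to be
+        // cleaned up.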
+        long expiredTimeStamp = expiredTime(file, expiredHoplogs);
+        if (expiredTimeStamp > 0 && intervalDurationMillis > 0) {
+          if ((currentTimeStamp - expiredTimeStamp) > 0.8 * intervalDurationMillis) {
+            continue;
+          }
+        }
+        bucketHoplogs.add(file);
+      }
+
+      if (oldestHopAfterEndTS != null) {
+        long expiredTimeStamp = expiredTime(oldestHopAfterEndTS, expiredHoplogs);
+        if (expiredTimeStamp <= 0 || intervalDurationMillis <= 0 ||
+            (currentTimeStamp - expiredTimeStamp) <= 0.8 * intervalDurationMillis) {
+          bucketHoplogs.add(oldestHopAfterEndTS);
+        }
+      }
+
+      if (bucketHoplogs.size() > 0) {
+        allBuckets.add(bucketHoplogs);
+      }
+    }
+    
+    return allBuckets;
+  }
+  
+  private static Map<String, Long> getExpiredHoplogs(FileSystem fs, FileStatus[] bucketFiles, 
+      Pattern expiredPattern) throws IOException{
+    Map<String, Long> expiredHoplogs = new HashMap<String,Long>();
+    
+    for(FileStatus file : bucketFiles) {
+      if(!file.isFile()) {
+        continue;
+      }
+      String fileName = file.getPath().getName();
+      Matcher match = expiredPattern.matcher(fileName);
+      if (!match.matches()){
+        continue;
+      }
+      expiredHoplogs.put(fileName,file.getModificationTime());
+    }
+    return expiredHoplogs;
+  }
+  
+  private static long expiredTime(FileStatus file, Map<String, Long> expiredHoplogs){
+    String expiredMarkerName = file.getPath().getName() + 
+        AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION;
+    
+    long expiredTimeStamp = -1;
+    if (expiredHoplogs.containsKey(expiredMarkerName)) {
+      expiredTimeStamp = expiredHoplogs.get(expiredMarkerName);
+    }
+    return expiredTimeStamp;
+  }
+  
+  public static long readCleanUpIntervalMillis(FileSystem fs, Path cleanUpIntervalPath) throws IOException{
+    if (fs.exists(cleanUpIntervalPath)) {
+      FSDataInputStream input = new FSDataInputStream(fs.open(cleanUpIntervalPath));
+      long intervalDurationMillis = input.readLong();
+      input.close();
+      return intervalDurationMillis;
+    } else {
+      return -1L;
+    }
+  }
+  
+  public static void exposeCleanupIntervalMillis(FileSystem fs, Path path, long intervalDurationMillis){
+    FSDataInputStream input = null;
+    FSDataOutputStream output = null;
+    try {
+      if(fs.exists(path)){
+        input = new FSDataInputStream(fs.open(path));
+        if (intervalDurationMillis == input.readLong()) {
+          input.close();
+          return;
+        }
+        input.close();
+        fs.delete(path, true);
+      } 
+      output = fs.create(path);
+      output.writeLong(intervalDurationMillis);
+      output.close();
+    } catch (IOException e) {
+      return;
+    } finally {
+      try {
+        if (input != null){
+          input.close();
+        }
+        if (output != null) {
+          output.close();
+        }
+      } catch (IOException e2) {
+        // best-effort close; nothing further to do
+      }
+    }
+  }
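+
+  /*
+   * Illustrative round-trip sketch for the two helpers above ("fs" and
+   * "regionRoot" are hypothetical placeholders):
+   *
+   *   Path intervalPath = new Path(regionRoot, HoplogConfig.CLEAN_UP_INTERVAL_FILE_NAME);
+   *   HoplogUtil.exposeCleanupIntervalMillis(fs, intervalPath, 30L * 60 * 1000);
+   *   long millis = HoplogUtil.readCleanUpIntervalMillis(fs, intervalPath); // 30 minutes
+   */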
+
+  /**
+   * @param regionPath
+   *          Region path
+   * @param fs
+   *          file system associated with the region
+   * @return list of the latest checkpoint (major compacted) hoplog of each
+   *         bucket in the region
+   * @throws IOException
+   */
+  public static Collection<FileStatus> getCheckpointFiles(Path regionPath,
+      FileSystem fs) throws IOException {
+    ArrayList<FileStatus> latestSnapshots = new ArrayList<FileStatus>();
+
+    Collection<Collection<FileStatus>> allBuckets = getBucketHoplogs(
+        regionPath, fs, AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION, 0, 0);
+
+    // extract the latest major compacted hoplog from each bucket
+    for (Collection<FileStatus> bucket : allBuckets) {
+      FileStatus latestSnapshot = null;
+      for (FileStatus file : bucket) {
+        if (latestSnapshot == null) {
+          latestSnapshot = file;
+        } else {
+          String name1 = latestSnapshot.getPath().getName();
+          String name2 = file.getPath().getName();
+          
+          if (HoplogComparator.compareByName(name1, name2) > 0) {
+            latestSnapshot = file;
+          }
+        }
+      }
+      
+      if (latestSnapshot != null) {
+        latestSnapshots.add(latestSnapshot);
+      }
+    }
+
+    return latestSnapshots;
+  }
+  
+  /**
+   * Creates a mapping of hoplog to hdfs blocks on disk
+   * 
+   * @param config
+   *          Hadoop configuration used to resolve the file system
+   * @param files
+   *          list of hoplog file status objects
+   * @return map from each hoplog to its hdfs block locations
+   * @throws IOException
+   */
+  public static Map<FileStatus, BlockLocation[]> getBlocks(Configuration config,
+      Collection<FileStatus> files) throws IOException {
+    Map<FileStatus, BlockLocation[]> blocks = new HashMap<FileStatus, BlockLocation[]>();
+    if (files == null || files.isEmpty()) {
+      return blocks;
+    }
+
+    FileSystem fs = files.iterator().next().getPath().getFileSystem(config);
+
+    for (FileStatus hoplog : files) {
+      long length = hoplog.getLen();
+      BlockLocation[] fileBlocks = fs.getFileBlockLocations(hoplog, 0, length);
+      blocks.put(hoplog, fileBlocks);
+    }
+
+    return blocks;
+  }
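+
+  /*
+   * Illustrative usage sketch ("regionPath", "fs" and "conf" are hypothetical
+   * placeholders): pair each bucket's latest checkpoint hoplog with its HDFS
+   * block locations.
+   *
+   *   Collection<FileStatus> checkpoints = HoplogUtil.getCheckpointFiles(regionPath, fs);
+   *   Map<FileStatus, BlockLocation[]> blocks = HoplogUtil.getBlocks(conf, checkpoints);
+   */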
+  
+  /**
+   * Filters out hoplogs of a region that do not match time filters and creates
+   * a list of hoplogs that may be used by hadoop jobs.
+   * 
+   * @param fs
+   *          file system instance
+   * @param path
+   *          region path
+   * @param start
+   *          start time in milliseconds
+   * @param end
+   *          end time in milliseconds
+   * @param snapshot
+   *          if true latest snapshot hoplog will be included in the final
+   *          return list
+   * @return filtered collection of hoplogs
+   * @throws IOException
+   */
+  public static Collection<FileStatus> filterHoplogs(FileSystem fs, Path path,
+      long start, long end, boolean snapshot) throws IOException {
+    ArrayList<FileStatus> hoplogs = new ArrayList<FileStatus>();
+
+    // if the region contains flush hoplogs or major compacted files then the
+    // region is a read-write (RW) region.
+    // check if the intent is to operate on major compacted files only
+    if (snapshot) {
+      hoplogs.addAll(getCheckpointFiles(path, fs));
+    } else {
+      hoplogs.addAll(getRegionHoplogs(path, fs,
+          AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION, start, end));
+    }
+
+    if (hoplogs == null || hoplogs.isEmpty()) {
+      // there are no sorted hoplogs; check if sequence hoplogs are present.
+      // There is no checkpoint mode for write-only tables.
+      hoplogs.addAll(getRegionHoplogs(path, fs,
+          AbstractHoplogOrganizer.SEQ_HOPLOG_EXTENSION, start, end));
+    }
+
+    return hoplogs == null ? new ArrayList<FileStatus>() : hoplogs;
+  }
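+
+  /*
+   * Illustrative usage sketch ("regionPath" and "conf" are hypothetical
+   * placeholders). With snapshot=true only the latest major compacted
+   * (checkpoint) hoplogs are returned; with snapshot=false the time-filtered
+   * flush hoplogs are returned, falling back to sequence hoplogs for
+   * write-only regions.
+   *
+   *   FileSystem fs = regionPath.getFileSystem(conf);
+   *   Collection<FileStatus> forJob =
+   *       HoplogUtil.filterHoplogs(fs, regionPath, 0, 0, false);
+   */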
+  
+  private HoplogUtil() {
+    //static methods only.
+  }
+  
+  /**
+   * This class creates MR splits from hoplog files. It leverages
+   * CombineFileInputFormat to create node- and rack-locality-aware splits.
+   */
+  public static class HoplogOptimizedSplitter extends CombineFileInputFormat<Long, Long> {
+    private Collection<FileStatus> hoplogs;
+
+    public HoplogOptimizedSplitter(Collection<FileStatus> hoplogs) {
+      this.hoplogs = hoplogs;
+    }
+    
+    @Override
+    protected List<FileStatus> listStatus(JobContext job) throws IOException {
+      /**
+       * listStatus in super collects fileStatus for each file again. It also
+       * tries to recursively list files in subdirectories. None of this is
+       * applicable in this case. The splitter has already collected fileStatus
+       * for all files, so bypassing super's method improves performance by
+       * reducing NameNode chatter. This is especially helpful if the NameNode
+       * is not colocated.
+       */
+      return new ArrayList<FileStatus>(hoplogs);
+    }
+    
+    /**
+     * Creates a list of splits for the input list of hoplogs. Each split is
+     * roughly the size of an HDFS block. The blocks of a hoplog may be smaller
+     * than the HDFS block size, e.g. if the hoplog itself is very small. The
+     * method keeps adding a hoplog's blocks to a split as long as the split
+     * stays below the HDFS block size and the blocks are local to the split.
+     */
+    public List<InputSplit> getOptimizedSplits(Configuration conf) throws IOException {
+      
+      if (hoplogs == null || hoplogs.isEmpty()) {
+        return null;
+      }
+      Path[] paths = new Path[hoplogs.size()];
+      int i = 0;
+      for (FileStatus file : hoplogs) {
+        paths[i] = file.getPath();
+        i++;
+      }
+
+      FileStatus hoplog = hoplogs.iterator().next();
+      long blockSize = hoplog.getBlockSize();
+      setMaxSplitSize(blockSize);
+
+      Job job = Job.getInstance(conf);
+      setInputPaths(job, paths);
+      List<InputSplit> splits = super.getSplits(job);
+      
+      // in some cases a split may not get populated with host location
+      // information. If such a split is created, fill it in with the location
+      // information of the first file in the split.
+      ArrayList<CombineFileSplit> newSplits = new ArrayList<CombineFileSplit>();
+      for (Iterator<InputSplit> iter = splits.iterator(); iter.hasNext();) {
+        CombineFileSplit split = (CombineFileSplit) iter.next();
+        if (split.getLocations() != null && split.getLocations().length > 0) {
+          continue;
+        }
+        
+        paths = split.getPaths();
+        if (paths.length == 0) {
+          continue;
+        }
+        long[] starts = split.getStartOffsets();
+        long[] ends = split.getLengths();
+        
+        FileSystem fs = paths[0].getFileSystem(conf);
+        FileStatus file = fs.getFileStatus(paths[0]);
+        BlockLocation[] blks = fs.getFileBlockLocations(file, starts[0], ends[0]);
+        if (blks != null && blks.length > 0) {
+          // hosts found. Need to create a new split and replace the one missing
+          // hosts.
+          iter.remove();
+          String hosts[] = blks[0].getHosts();
+          split = new CombineFileSplit(paths, starts, ends, hosts);
+          newSplits.add(split);
+        }
+      }
+      splits.addAll(newSplits);
+      
+      return splits;
+    }
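+
+    /*
+     * Illustrative usage sketch ("fs", "regionPath" and "conf" are hypothetical
+     * placeholders): a caller can build locality-aware splits from a filtered
+     * set of hoplogs roughly as follows.
+     *
+     *   Collection<FileStatus> hoplogs =
+     *       HoplogUtil.filterHoplogs(fs, regionPath, 0, 0, false);
+     *   HoplogOptimizedSplitter splitter = new HoplogOptimizedSplitter(hoplogs);
+     *   List<InputSplit> splits = splitter.getOptimizedSplits(conf);
+     */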
+    
+    @Override
+    public List<InputSplit> getSplits(JobContext job) throws IOException {
+      // a call to this method is invalid. This class is only meant to create
+      // optimized splits independent of the API type
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public RecordReader<Long, Long> createRecordReader(InputSplit split,
+        TaskAttemptContext arg1) throws IOException {
+      // Record reader creation is managed by GFInputFormat. This method should
+      // not be called
+      throw new IllegalStateException();
+    }
+  }
+}



[08/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSBasicDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSBasicDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSBasicDUnitTest.java
new file mode 100644
index 0000000..11ee960
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSBasicDUnitTest.java
@@ -0,0 +1,1600 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import static com.gemstone.gemfire.test.dunit.Invoke.invokeInEveryVM;
+import static com.gemstone.gemfire.test.dunit.Wait.*;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.File;
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import com.gemstone.gemfire.Delta;
+import com.gemstone.gemfire.InvalidDeltaException;
+import com.gemstone.gemfire.cache.AttributesFactory;
+import com.gemstone.gemfire.cache.DataPolicy;
+import com.gemstone.gemfire.cache.EvictionAction;
+import com.gemstone.gemfire.cache.EvictionAttributes;
+import com.gemstone.gemfire.cache.PartitionAttributesFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector;
+import com.gemstone.gemfire.internal.cache.DistributedPutAllOperation;
+import com.gemstone.gemfire.internal.cache.EntryEventImpl;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.LocalRegion;
+import com.gemstone.gemfire.internal.cache.PartitionedRegion;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
+import com.gemstone.gemfire.internal.cache.wan.AbstractGatewaySender;
+import com.gemstone.gemfire.internal.cache.wan.parallel.ConcurrentParallelGatewaySenderQueue;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.test.dunit.AsyncInvocation;
+import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.SerializableCallable;
+import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.logging.log4j.Logger;
+import org.junit.Ignore;
+
+/**
+ * A class for testing the basic HDFS functionality
+ * 
+ * @author Hemant Bhanawat
+ */
+@SuppressWarnings({ "serial", "rawtypes", "deprecation", "unchecked", "unused" })
+public class RegionWithHDFSBasicDUnitTest extends RegionWithHDFSTestBase {
+
+  private static final Logger logger = LogService.getLogger();
+
+  private IgnoredException ee0;
+  private IgnoredException ee1;
+
+  public RegionWithHDFSBasicDUnitTest(String name) {
+    super(name);
+  }
+
+  @Override
+  public void preSetUp() throws Exception {
+    super.preSetUp();
+    ee0 = IgnoredException.addIgnoredException("com.gemstone.gemfire.cache.RegionDestroyedException");
+    ee1 = IgnoredException.addIgnoredException("com.gemstone.gemfire.cache.RegionDestroyedException");
+  }
+
+  @Override
+  public void preTearDownCacheTestCase() throws Exception {
+    ee0.remove();
+    ee1.remove();
+    super.preTearDownCacheTestCase();
+  }
+
+  @Override
+  protected SerializableCallable getCreateRegionCallable(
+      final int totalnumOfBuckets, final int batchSizeMB,
+      final int maximumEntries, final String folderPath,
+      final String uniqueName, final int batchInterval,
+      final boolean queuePersistent, final boolean writeonly,
+      final long timeForRollover, final long maxFileSize) {
+    SerializableCallable createRegion = new SerializableCallable("Create HDFS region") {
+      public Object call() throws Exception {
+        AttributesFactory af = new AttributesFactory();
+        af.setDataPolicy(DataPolicy.HDFS_PARTITION);
+        PartitionAttributesFactory paf = new PartitionAttributesFactory();
+        paf.setTotalNumBuckets(totalnumOfBuckets);
+        paf.setRedundantCopies(1);
+
+        af.setHDFSStoreName(uniqueName);
+        af.setPartitionAttributes(paf.create());
+
+        HDFSStoreFactory hsf = getCache().createHDFSStoreFactory();
+        // Going two levels up to avoid home directories getting created in the
+        // VM-specific directory. This avoids failures in those tests where
+        // datastores are restarted and bucket ownership changes between VMs.
+        homeDir = new File(tmpDir + "/../../" + folderPath).getCanonicalPath();
+        logger.info("Setting homeDir to {}", homeDir);
+        hsf.setHomeDir(homeDir);
+        hsf.setBatchSize(batchSizeMB);
+        hsf.setBufferPersistent(queuePersistent);
+        hsf.setMaxMemory(3);
+        hsf.setBatchInterval(batchInterval);
+        if (timeForRollover != -1) {
+          hsf.setWriteOnlyFileRolloverInterval((int) timeForRollover);
+          System.setProperty("gemfire.HDFSRegionDirector.FILE_ROLLOVER_TASK_INTERVAL_SECONDS", "1");
+        }
+        if (maxFileSize != -1) {
+          hsf.setWriteOnlyFileRolloverSize((int) maxFileSize);
+        }
+        hsf.create(uniqueName);
+
+        af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(maximumEntries, EvictionAction.LOCAL_DESTROY));
+
+        af.setHDFSWriteOnly(writeonly);
+        Region r = createRootRegion(uniqueName, af.create());
+        ((LocalRegion) r).setIsTest();
+
+        return 0;
+      }
+    };
+    return createRegion;
+  }
+
+  @Override
+  protected void doPuts(final String uniqueName, int start, int end) {
+    Region r = getRootRegion(uniqueName);
+    for (int i = start; i < end; i++) {
+      r.put("K" + i, "V" + i);
+    }
+  }
+
+  @Override
+  protected void doPutAll(final String uniqueName, Map map) {
+    Region r = getRootRegion(uniqueName);
+    r.putAll(map);
+  }
+
+  @Override
+  protected void doDestroys(final String uniqueName, int start, int end) {
+    Region r = getRootRegion(uniqueName);
+    for (int i = start; i < end; i++) {
+      r.destroy("K" + i);
+    }
+  }
+
+  @Override
+  protected void checkWithGet(String uniqueName, int start, int end, boolean expectValue) {
+    Region r = getRootRegion(uniqueName);
+    for (int i = start; i < end; i++) {
+      String expected = expectValue ? "V" + i : null;
+      assertEquals("Mismatch on key " + i, expected, r.get("K" + i));
+    }
+  }
+
+  @Override
+  protected void checkWithGetAll(String uniqueName, ArrayList arrayl) {
+    Region r = getRootRegion(uniqueName);
+    Map map = r.getAll(arrayl);
+    logger.info("Read entries {}", map.size());
+    for (Object e : map.keySet()) {
+      String v = e.toString().replaceFirst("K", "V");
+      assertTrue( "Reading entries failed for key " + e + " where value = " + map.get(e), v.equals(map.get(e)));
+    }
+  }
+
+  /**
+   * Tests that gets go to the primary even if the value resides on a secondary.
+   */
+  public void testValueFetchedFromLocal() {
+    disconnectFromDS();
+
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+    String homeDir = "./testValueFetchedFromLocal";
+
+    createServerRegion(vm0, 7, 1, 50, homeDir, "testValueFetchedFromLocal", 1000);
+    createServerRegion(vm1, 7, 1, 50, homeDir, "testValueFetchedFromLocal", 1000);
+
+    vm0.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        Region r = getRootRegion("testValueFetchedFromLocal");
+        for (int i = 0; i < 25; i++) {
+          r.put("K" + i, "V" + i);
+        }
+        return null;
+      }
+    });
+    vm1.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        Region r = getRootRegion("testValueFetchedFromLocal");
+        for (int i = 0; i < 25; i++) {
+          String s = null;
+          String k = "K" + i;
+          s = (String) r.get(k);
+          String v = "V" + i;
+          assertTrue( "The expected key " + v+ " didn't match the received value " + s, v.equals(s));
+        }
+        // With only two members and 1 redundant copy, all data is available
+        // locally; make sure that some get operations still result in a remote
+        // get operation (gets always go to the primary).
+        assertTrue("gets should always go to the primary", ((LocalRegion) r).getCountNotFoundInLocal() != 0);
+        return null;
+      }
+    });
+  
+    vm0.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        Region r = getRootRegion("testValueFetchedFromLocal");
+        assertTrue( "HDFS queue or HDFS should not have been accessed. They were accessed " + ((LocalRegion)r).getCountNotFoundInLocal()  + " times", 
+            ((LocalRegion)r).getCountNotFoundInLocal() == 0 );
+        return null;
+      }
+    });
+  }
+
+  public void testHDFSQueueSizeTest() {
+    disconnectFromDS();
+
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+    String homeDir = "./testHDFSQueueSize";
+
+    createServerRegion(vm0, 1, 10, 50, homeDir, "testHDFSQueueSize", 100000);
+    createServerRegion(vm1, 1, 10, 50, homeDir, "testHDFSQueueSize", 100000);
+
+    vm0.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        Region r = getRootRegion("testHDFSQueueSize");
+        byte[] b = new byte[1024];
+        byte[] k = new byte[1];
+        for (int i = 0; i < 1; i++) {
+          r.put(k, b);
+        }
+        ConcurrentParallelGatewaySenderQueue hdfsqueue = (ConcurrentParallelGatewaySenderQueue)((AbstractGatewaySender)((PartitionedRegion)r).getHDFSEventQueue().getSender()).getQueue();
+        HDFSBucketRegionQueue hdfsBQ = (HDFSBucketRegionQueue)((PartitionedRegion)hdfsqueue.getRegion()).getDataStore().getLocalBucketById(0);
+        if (hdfsBQ.getBucketAdvisor().isPrimary()) {
+          assertTrue("size should not as expected on primary " + hdfsBQ.queueSizeInBytes.get(), hdfsBQ.queueSizeInBytes.get() > 1024 && hdfsBQ.queueSizeInBytes.get() < 1150);
+        } else {
+          assertTrue("size should be 0 on secondary", hdfsBQ.queueSizeInBytes.get()==0);
+        }
+        return null;
+
+      }
+    });
+    vm1.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        Region r = getRootRegion("testHDFSQueueSize");
+        ConcurrentParallelGatewaySenderQueue hdfsqueue = (ConcurrentParallelGatewaySenderQueue)((AbstractGatewaySender)((PartitionedRegion)r).getHDFSEventQueue().getSender()).getQueue();
+        HDFSBucketRegionQueue hdfsBQ = (HDFSBucketRegionQueue)((PartitionedRegion)hdfsqueue.getRegion()).getDataStore().getLocalBucketById(0);
+        if (hdfsBQ.getBucketAdvisor().isPrimary()) {
+          assertTrue("size should not as expected on primary " + hdfsBQ.queueSizeInBytes.get(), hdfsBQ.queueSizeInBytes.get() > 1024 && hdfsBQ.queueSizeInBytes.get() < 1150);
+        } else {
+          assertTrue("size should be 0 on secondary", hdfsBQ.queueSizeInBytes.get()==0);
+        }
+        return null;
+
+      }
+    });
+  }
+
+  /**
+   * Does puts on a write-only HDFS store
+   */
+  public void testBasicPutsForWriteOnlyHDFSStore() {
+    disconnectFromDS();
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+    String homeDir = "./testPutsForWriteOnlyHDFSStore";
+
+    createServerRegion(vm0, 7, 1, 20, homeDir, "testPutsForWriteOnlyHDFSStore",
+        100, true, false);
+    createServerRegion(vm1, 7, 1, 20, homeDir, "testPutsForWriteOnlyHDFSStore",
+        100, true, false);
+
+    // Do some puts
+    vm0.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        Region r = getRootRegion("testPutsForWriteOnlyHDFSStore");
+        for (int i = 0; i < 200; i++) {
+          r.put("K" + i, "V" + i);
+        }
+        return null;
+      }
+    });
+
+    vm1.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        Region r = getRootRegion("testPutsForWriteOnlyHDFSStore");
+
+        for (int i = 200; i < 400; i++) {
+          r.put("K" + i, "V" + i);
+        }
+
+        return null;
+      }
+    });
+
+  }
+
+  /**
+   * Puts and updates Delta values and verifies that the updated values are read back correctly
+   */
+  public void testDelta() {
+    disconnectFromDS();
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+    String homeDir = "./testDelta";
+
+    // Expected from com.gemstone.gemfire.internal.cache.ServerPingMessage.send()
+    IgnoredException ee1 = IgnoredException.addIgnoredException("java.lang.InterruptedException");
+    IgnoredException ee2 = IgnoredException.addIgnoredException("java.lang.InterruptedException");
+    
+    createServerRegion(vm0, 7, 1, 20, homeDir, "testDelta", 100);
+    createServerRegion(vm1, 7, 1, 20, homeDir, "testDelta", 100);
+
+    // Do some puts
+    vm0.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        Region r = getRootRegion("testDelta");
+        for (int i = 0; i < 100; i++) {
+          r.put("K" + i, new CustomerDelta("V" + i, "address"));
+        }
+        for (int i = 0; i < 50; i++) {
+          CustomerDelta cd = new CustomerDelta("V" + i, "address");
+          cd.setAddress("updated address");
+          r.put("K" + i, cd);
+        }
+        return null;
+      }
+    });
+
+    vm1.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        Region r = getRootRegion("testDelta");
+
+        for (int i = 100; i < 200; i++) {
+          r.put("K" + i, new CustomerDelta("V" + i, "address"));
+        }
+        for (int i = 100; i < 150; i++) {
+          CustomerDelta cd = new CustomerDelta("V" + i, "address");
+          cd.setAddress("updated address");
+          r.put("K" + i, cd);
+        }
+
+        return null;
+      }
+    });
+    vm1.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        Region r = getRootRegion("testDelta");
+        for (int i = 0; i < 50; i++) {
+          CustomerDelta custDela =  new CustomerDelta ("V" + i, "updated address" );
+          String k = "K" + i;
+          CustomerDelta s = (CustomerDelta) r.get(k);
+
+          assertTrue( "The expected value " + custDela + " didn't match the received value " + s, custDela.equals(s));
+        }
+        for (int i = 50; i < 100; i++) {
+          CustomerDelta custDela = new CustomerDelta("V" + i, "address");
+          String k = "K" + i;
+          CustomerDelta s = (CustomerDelta) r.get(k);
+
+          assertTrue( "The expected value " + custDela + " didn't match the received value " + s, custDela.equals(s));
+        }
+        for (int i = 100; i < 150; i++) {
+          CustomerDelta custDela =  new CustomerDelta ("V" + i, "updated address" );
+          String k = "K" + i;
+          CustomerDelta s = (CustomerDelta) r.get(k);
+
+          assertTrue( "The expected value " + custDela + " didn't match the received value " + s, custDela.equals(s));
+        }
+        for (int i = 150; i < 200; i++) {
+          CustomerDelta custDela =  new CustomerDelta ("V" + i, "address" );
+          String k = "K" + i;
+          CustomerDelta s = (CustomerDelta) r.get(k);
+
+          assertTrue( "The expected value " + custDela + " didn't match the received value " + s, custDela.equals(s));
+        }
+        return null;
+      }
+    });
+    ee1.remove();
+    ee2.remove();
+
+  }
+
+  /**
+   * Puts byte arrays and fetches them back to ensure that serialization of byte
+   * arrays works correctly.
+   */
+  public void testByteArrays() {
+    disconnectFromDS();
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+    String homeDir = "./testByteArrays";
+
+    createServerRegion(vm0, 7, 1, 20, homeDir, "testByteArrays", 100);
+    createServerRegion(vm1, 7, 1, 20, homeDir, "testByteArrays", 100);
+
+    // Do some puts
+    vm0.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        Region r = getRootRegion("testByteArrays");
+        byte[] b1 = { 0x11, 0x44, 0x77 };
+        byte[] b2 = { 0x22, 0x55 };
+        byte[] b3 = { 0x33 };
+        for (int i = 0; i < 100; i++) {
+          int x = i % 3;
+          if (x == 0) {
+            r.put("K" + i, b1);
+          } else if (x == 1) {
+            r.put("K" + i, b2);
+          } else {
+            r.put("K" + i, b3);
+          }
+        }
+        return null;
+      }
+    });
+
+    vm1.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        Region r = getRootRegion("testByteArrays");
+
+        byte[] b1 = { 0x11, 0x44, 0x77 };
+        byte[] b2 = { 0x22, 0x55 };
+        byte[] b3 = { 0x33 };
+        for (int i = 100; i < 200; i++) {
+          int x = i % 3;
+          if (x == 0) {
+            r.put("K" + i, b1);
+          } else if (x == 1) {
+            r.put("K" + i, b2);
+          } else {
+            r.put("K" + i, b3);
+          }
+        }
+        return null;
+      }
+    });
+    vm1.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        Region r = getRootRegion("testByteArrays");
+        byte[] b1 = { 0x11, 0x44, 0x77 };
+        byte[] b2 = { 0x22, 0x55 };
+        byte[] b3 = { 0x33 };
+        for (int i = 0; i < 200; i++) {
+          int x = i % 3;
+          String k = "K" + i;
+          byte[] s = (byte[]) r.get(k);
+          if (x == 0) {
+            assertTrue( "The expected value didn't match the received value of byte array" , Arrays.equals(b1, s));
+          } else if (x == 1) {
+            assertTrue( "The expected value didn't match the received value of byte array" , Arrays.equals(b2, s));
+          } else {
+            assertTrue( "The expected value didn't match the received value of byte array" , Arrays.equals(b3, s));
+          }
+
+        }
+        return null;
+      }
+    });
+  }
+
+  private static class CustomerDelta implements Serializable, Delta {
+    private String name;
+    private String address;
+    private boolean nameChanged;
+    private boolean addressChanged;
+
+    public CustomerDelta(CustomerDelta o) {
+      this.address = o.address;
+      this.name = o.name;
+    }
+
+    public CustomerDelta(String name, String address) {
+      this.name = name;
+      this.address = address;
+    }
+
+    public void fromDelta(DataInput in) throws IOException,
+        InvalidDeltaException {
+      boolean nameC = in.readBoolean();
+      if (nameC) {
+        this.name = in.readUTF();
+      }
+      boolean addressC = in.readBoolean();
+      if (addressC) {
+        this.address = in.readUTF();
+      }
+    }
+
+    public boolean hasDelta() {
+      return nameChanged || addressChanged;
+    }
+
+    public void toDelta(DataOutput out) throws IOException {
+      out.writeBoolean(nameChanged);
+      if (this.nameChanged) {
+        out.writeUTF(name);
+      }
+      out.writeBoolean(addressChanged);
+      if (this.addressChanged) {
+        out.writeUTF(address);
+      }
+    }
+
+    public void setName(String name) {
+      this.nameChanged = true;
+      this.name = name;
+    }
+
+    public String getName() {
+      return name;
+    }
+
+    public void setAddress(String address) {
+      this.addressChanged = true;
+      this.address = address;
+    }
+
+    public String getAddress() {
+      return address;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (!(obj instanceof CustomerDelta)) {
+        return false;
+      }
+      CustomerDelta other = (CustomerDelta) obj;
+      return this.name.equals(other.name) && this.address.equals(other.address);
+    }
+
+    @Override
+    public int hashCode() {
+      return this.address.hashCode() + this.name.hashCode();
+    }
+
+    @Override
+    public String toString() {
+      return "name=" + this.name + "address=" + address;
+    }
+  }
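+
+  /*
+   * Sketch of how the Delta contract above is exercised in testDelta: a second
+   * put of a CustomerDelta whose hasDelta() is true ships only the changed
+   * fields via toDelta(), and receiving members apply them via fromDelta().
+   *
+   *   CustomerDelta cd = new CustomerDelta("V1", "address");
+   *   r.put("K1", cd);              // full value
+   *   cd.setAddress("updated address");
+   *   r.put("K1", cd);              // delta carrying only the address
+   */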
+
+  public void testClearRegionDataInQueue() throws Throwable {
+    doTestClearRegion(100000, false);
+
+  }
+
+  public void testClearRegionDataInHDFS() throws Throwable {
+    doTestClearRegion(1, true);
+  }
+
+  public void doTestClearRegion(int batchInterval, boolean waitForWriteToHDFS) throws Throwable {
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+
+    final int numEntries = 400;
+
+    String name = getName();
+    final String folderPath = "./" + name;
+    // Create some regions. Note that we want a large batch interval
+    // so that we will have some entries sitting in the queue when
+    // we do a clear.
+    final String uniqueName = name;
+    createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, batchInterval,
+        false, true);
+    createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, batchInterval,
+        false, true);
+
+    doPuts(vm0, uniqueName, numEntries);
+
+    // Make sure some files have been written to hdfs.
+    if (waitForWriteToHDFS) {
+      verifyDataInHDFS(vm0, uniqueName, true, true, waitForWriteToHDFS, numEntries);
+    }
+
+    // Do a clear
+    simulateClear(uniqueName, vm0, vm1);
+
+    validateEmpty(vm0, numEntries, uniqueName);
+    validateEmpty(vm1, numEntries, uniqueName);
+
+    // Double check that there is no data in hdfs now
+    verifyDataInHDFS(vm0, uniqueName, false, false, waitForWriteToHDFS, numEntries);
+    verifyDataInHDFS(vm1, uniqueName, false, false, waitForWriteToHDFS, numEntries);
+
+    closeCache(vm0);
+    closeCache(vm1);
+
+    AsyncInvocation async0 = createServerRegionAsync(vm0, 7, 31, 200, folderPath,
+        uniqueName, 100000, false, true);
+    AsyncInvocation async1 = createServerRegionAsync(vm1, 7, 31, 200, folderPath, 
+        uniqueName, 100000, false, true);
+    async0.getResult();
+    async1.getResult();
+
+    validateEmpty(vm0, numEntries, uniqueName);
+    validateEmpty(vm1, numEntries, uniqueName);
+  }
+
+  private void simulateClear(final String name, VM... vms) throws Throwable {
+    simulateClearForTests(true);
+    try {
+
+      // Gemfire PRs don't support clear
+      // gemfirexd does a clear by taking gemfirexd ddl locks
+      // and then clearing each primary bucket on the primary.
+      // Simulate that by clearing all primaries on each vm.
+      // See GemFireContainer.clear
+
+      SerializableCallable clear = new SerializableCallable("clear") {
+        public Object call() throws Exception {
+          PartitionedRegion r = (PartitionedRegion) getRootRegion(name);
+
+          r.clearLocalPrimaries();
+
+          return null;
+        }
+      };
+
+      // Invoke the clears concurrently
+      AsyncInvocation[] async = new AsyncInvocation[vms.length];
+      for (int i = 0; i < vms.length; i++) {
+        async[i] = vms[i].invokeAsync(clear);
+      }
+
+      // Get the clear results.
+      for (int i = 0; i < async.length; i++) {
+        async[i].getResult();
+      }
+
+    } finally {
+      simulateClearForTests(false);
+    }
+  }
+
+  protected void simulateClearForTests(final boolean isGfxd) throws Exception {
+    SerializableRunnable setGfxd = new SerializableRunnable() {
+      @Override
+      public void run() {
+        if (isGfxd) {
+          LocalRegion.simulateClearForTests(true);
+        } else {
+          LocalRegion.simulateClearForTests(false);
+        }
+      }
+    };
+    setGfxd.run();
+    invokeInEveryVM(setGfxd);
+  }
+
+  /**
+   * Test that we can locally destroy the region on a member without causing
+   * problems with the data in HDFS. This was disabled due to ticket 47793.
+   * 
+   * @throws InterruptedException
+   */
+  public void testLocalDestroy() throws InterruptedException {
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+    int numEntries = 200;
+
+    final String folderPath = "./testLocalDestroy";
+    final String uniqueName = "testLocalDestroy";
+
+    createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 1, false, true);
+    createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, 1, false, true);
+
+    doPuts(vm0, uniqueName, numEntries);
+
+    // Make sure some files have been written to hdfs and wait for
+    // the queue to drain.
+    verifyDataInHDFS(vm0, uniqueName, true, true, true, numEntries);
+
+    validate(vm0, uniqueName, numEntries);
+
+    SerializableCallable localDestroy = new SerializableCallable("local destroy") {
+      public Object call() throws Exception {
+        Region r = getRootRegion(uniqueName);
+        r.localDestroyRegion();
+        return null;
+      }
+    };
+
+    vm0.invoke(localDestroy);
+
+    verifyNoQOrPR(vm0);
+
+    validate(vm1, uniqueName, numEntries);
+
+    vm1.invoke(localDestroy);
+
+    verifyNoQOrPR(vm1);
+
+    closeCache(vm0);
+    closeCache(vm1);
+
+    // Restart vm0 and see if the data is still available from HDFS
+    createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 1, false, true);
+
+    validate(vm0, uniqueName, numEntries);
+  }
+
+  /**
+   * Test that doing a destroyRegion removes all data from HDFS.
+   * 
+   * @throws InterruptedException
+   */
+  public void testGlobalDestroyWithHDFSData() throws InterruptedException {
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+
+    final String folderPath = "./testGlobalDestroyWithHDFSData";
+    final String uniqueName = "testGlobalDestroyWithHDFSData";
+    int numEntries = 200;
+
+    createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 1, false, true);
+    createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, 1, false, true);
+
+    doPuts(vm0, uniqueName, numEntries);
+
+    // Make sure some files have been written to hdfs.
+    verifyDataInHDFS(vm0, uniqueName, true, true, false, numEntries);
+
+    SerializableCallable globalDestroy = new SerializableCallable("destroy") {
+      public Object call() throws Exception {
+        Region r = getRootRegion(uniqueName);
+        r.destroyRegion();
+        return null;
+      }
+    };
+
+    vm0.invoke(globalDestroy);
+
+    // make sure data is not in HDFS
+    verifyNoQOrPR(vm0);
+    verifyNoQOrPR(vm1);
+    verifyNoHDFSData(vm0, uniqueName);
+    verifyNoHDFSData(vm1, uniqueName);
+
+    closeCache(vm0);
+    closeCache(vm1);
+
+    // Restart vm0 and make sure it's still empty
+    createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 1, false, true);
+    createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, 1, false, true);
+
+    // make sure it's empty
+    validateEmpty(vm0, numEntries, uniqueName);
+    validateEmpty(vm1, numEntries, uniqueName);
+
+  }
+
+  /**
+   * Test that doing a destroyRegion removes all data from HDFS.
+   */
+  public void _testGlobalDestroyWithQueueData() {
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+
+    final String folderPath = "./testGlobalDestroyWithQueueData";
+    final String uniqueName = "testGlobalDestroyWithQueueData";
+    int numEntries = 200;
+
+    // set a large queue timeout so that data is still in the queue
+    createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 10000, false,
+        true);
+    createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, 10000, false,
+        true);
+
+    doPuts(vm0, uniqueName, numEntries);
+
+    SerializableCallable globalDestroy = new SerializableCallable("destroy") {
+      public Object call() throws Exception {
+        Region r = getRootRegion(uniqueName);
+        r.destroyRegion();
+        return null;
+      }
+    };
+
+    vm0.invoke(globalDestroy);
+
+    // make sure data is not in HDFS
+    verifyNoQOrPR(vm0);
+    verifyNoQOrPR(vm1);
+    verifyNoHDFSData(vm0, uniqueName);
+    verifyNoHDFSData(vm1, uniqueName);
+
+    closeCache(vm0);
+    closeCache(vm1);
+
+    // Restart vm0 and make sure it's still empty
+    createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 1, false, true);
+    createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, 1, false, true);
+
+    // make sure it's empty
+    validateEmpty(vm0, numEntries, uniqueName);
+    validateEmpty(vm1, numEntries, uniqueName);
+
+  }
+
+  /**
+   * Make sure all async event queues and PRs a destroyed in a member
+   */
+  public void verifyNoQOrPR(VM vm) {
+    vm.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
+        assertEquals(Collections.EMPTY_SET, cache.getAsyncEventQueues());
+        assertEquals(Collections.EMPTY_SET, cache.getPartitionedRegions());
+      }
+    });
+
+  }
+
+  /**
+   * Make sure all of the data for a region in HDFS is destroyed
+   */
+  public void verifyNoHDFSData(final VM vm, final String uniqueName) {
+    vm.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws IOException {
+        HDFSStoreImpl hdfsStore = (HDFSStoreImpl) ((GemFireCacheImpl)getCache()).findHDFSStore(uniqueName);
+        FileSystem fs = hdfsStore.getFileSystem();
+        Path path = new Path(hdfsStore.getHomeDir(), uniqueName);
+        if (fs.exists(path)) {
+          dumpFiles(vm, uniqueName);
+          fail("Found files in " + path);
+        }
+        return null;
+      }
+    });
+  }
+
+  protected AsyncInvocation doAsyncPuts(VM vm, final String regionName,
+      final int start, final int end, final String suffix) throws Exception {
+    return doAsyncPuts(vm, regionName, start, end, suffix, "");
+  }
+
+  protected AsyncInvocation doAsyncPuts(VM vm, final String regionName,
+      final int start, final int end, final String suffix, final String value)
+      throws Exception {
+    return vm.invokeAsync(new SerializableCallable("doAsyncPuts") {
+      public Object call() throws Exception {
+        Region r = getRootRegion(regionName);
+        String v = "V";
+        if (!value.equals("")) {
+          v = value;
+        }
+        logger.info("Putting entries ");
+        for (int i = start; i < end; i++) {
+          r.put("K" + i, v + i + suffix);
+        }
+        return null;
+      }
+
+    });
+  }
+
+  public void _testGlobalDestroyFromAccessor() {
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+    VM vm2 = host.getVM(2);
+
+    final String folderPath = "./testGlobalDestroyFromAccessor";
+    final String uniqueName = "testGlobalDestroyFromAccessor";
+    int numEntries = 200;
+
+    createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 1, false, true);
+    createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, 1, false, true);
+    createServerAccessor(vm2, 7, 40, uniqueName);
+
+    doPuts(vm0, uniqueName, numEntries);
+
+    // Make sure some files have been written to hdfs.
+    verifyDataInHDFS(vm0, uniqueName, true, true, false, numEntries);
+
+    SerializableCallable globalDestroy = new SerializableCallable("destroy") {
+      public Object call() throws Exception {
+        Region r = getRootRegion(uniqueName);
+        r.destroyRegion();
+        return null;
+      }
+    };
+
+    // Destroy the region from an accessor
+    vm2.invoke(globalDestroy);
+
+    // make sure data is not in HDFS
+    verifyNoQOrPR(vm0);
+    verifyNoQOrPR(vm1);
+    verifyNoHDFSData(vm0, uniqueName);
+    verifyNoHDFSData(vm1, uniqueName);
+
+    closeCache(vm0);
+    closeCache(vm1);
+    closeCache(vm2);
+
+    // Restart vm0 and make sure it's still empty
+    createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 1, false, true);
+    createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, 1, false, true);
+
+    // make sure it's empty
+    validateEmpty(vm0, numEntries, uniqueName);
+    validateEmpty(vm1, numEntries, uniqueName);
+  }
+
+  /**
+   * Create a server with a max file size of 2 MB. Insert 4 entries of 1 MB each.
+   * There should be 2 files with 2 entries each.
+   * 
+   * @throws Throwable
+   */
+  public void testWOFileSizeParam() throws Throwable {
+    disconnectFromDS();
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+
+    String homeDir = "./testWOFileSizeParam";
+    final String uniqueName = getName();
+    String value = "V";
+    for (int i = 0; i < 20; i++) {
+      value += value;
+    }
+
+    createServerRegion(vm0, 1, 1,  500, homeDir, uniqueName, 5, true, false, 2000, 2);
+    createServerRegion(vm1, 1, 1,  500, homeDir, uniqueName, 5, true, false, 2000, 2);
+
+    AsyncInvocation a1 = doAsyncPuts(vm0, uniqueName, 1, 3, "vm0", value);
+    AsyncInvocation a2 = doAsyncPuts(vm1, uniqueName, 2, 4, "vm1", value);
+
+    a1.join();
+    a2.join();
+
+    Thread.sleep(4000);
+
+    cacheClose(vm0, false);
+    cacheClose(vm1, false);
+
+    // Start the VMs in parallel for the persistent version subclass
+    AsyncInvocation async1 = createServerRegionAsync(vm0, 1, 1,  500, homeDir, uniqueName, 5, true, false, 2000, 2);
+    AsyncInvocation async2 = createServerRegionAsync(vm1, 1, 1,  500, homeDir, uniqueName, 5, true, false, 2000, 2);
+    async1.getResult();
+    async2.getResult();
+
+    // There should be two files in bucket 0.
+    verifyTwoHDFSFilesWithTwoEntries(vm0, uniqueName, value);
+
+    cacheClose(vm0, false);
+    cacheClose(vm1, false);
+
+    disconnectFromDS();
+
+  }
+
+  /**
+   * Create a server with a file rollover time of 5 seconds. Insert a few
+   * entries and then sleep for 7 seconds; a file should be created. Do it
+   * again. At the end, two files containing the inserted entries should exist.
+   * 
+   * @throws Throwable
+   */
+  public void testWOTimeForRollOverParam() throws Throwable {
+    disconnectFromDS();
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+
+    String homeDir = "./testWOTimeForRollOverParam";
+    final String uniqueName = getName();
+
+    createServerRegion(vm0, 1, 1, 500, homeDir, uniqueName, 5, true, false, 5, 1);
+    createServerRegion(vm1, 1, 1, 500, homeDir, uniqueName, 5, true, false, 5, 1);
+
+    AsyncInvocation a1 = doAsyncPuts(vm0, uniqueName, 1, 8, "vm0");
+    AsyncInvocation a2 = doAsyncPuts(vm1, uniqueName, 4, 10, "vm1");
+
+    a1.join();
+    a2.join();
+
+    Thread.sleep(7000);
+
+    a1 = doAsyncPuts(vm0, uniqueName, 10, 18, "vm0");
+    a2 = doAsyncPuts(vm1, uniqueName, 14, 20, "vm1");
+
+    a1.join();
+    a2.join();
+
+    Thread.sleep(7000);
+
+    cacheClose(vm0, false);
+    cacheClose(vm1, false);
+
+    AsyncInvocation async1 = createServerRegionAsync(vm0, 1, 1, 500, homeDir, uniqueName, 5, true, false, 5, 1);
+    AsyncInvocation async2 = createServerRegionAsync(vm1, 1, 1, 500, homeDir, uniqueName, 5, true, false, 5, 1);
+    async1.getResult();
+    async2.getResult();
+
+    // There should be two files in bucket 0.
+    // The older file should hold the first round of puts, the newer file the second round.
+    verifyTwoHDFSFiles(vm0, uniqueName);
+
+    cacheClose(vm0, false);
+    cacheClose(vm1, false);
+
+    disconnectFromDS();
+
+  }
+
+  private void createServerAccessor(VM vm, final int totalnumOfBuckets,
+      final int maximumEntries, final String uniqueName) {
+    SerializableCallable createRegion = new SerializableCallable() {
+      public Object call() throws Exception {
+        AttributesFactory af = new AttributesFactory();
+        af.setDataPolicy(DataPolicy.HDFS_PARTITION);
+        PartitionAttributesFactory paf = new PartitionAttributesFactory();
+        paf.setTotalNumBuckets(totalnumOfBuckets);
+        paf.setRedundantCopies(1);
+        // make this member an accessor.
+        paf.setLocalMaxMemory(0);
+        af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(maximumEntries, EvictionAction.LOCAL_DESTROY));
+        af.setPartitionAttributes(paf.create());
+
+        Region r = createRootRegion(uniqueName, af.create());
+        assertTrue(!((PartitionedRegion) r).isDataStore());
+
+        return null;
+      }
+    };
+
+    vm.invoke(createRegion);
+  }
+
+  @Override
+  protected void verifyHDFSData(VM vm, String uniqueName) throws Exception {
+
+    HashMap<String, HashMap<String, String>> filesToEntriesMap = createFilesAndEntriesMap(vm, uniqueName, uniqueName);
+    HashMap<String, String> entriesMap = new HashMap<String, String>();
+    for (HashMap<String, String> v : filesToEntriesMap.values()) {
+      entriesMap.putAll(v);
+    }
+    verifyInEntriesMap(entriesMap, 1, 50, "vm0");
+    verifyInEntriesMap(entriesMap, 40, 100, "vm1");
+    verifyInEntriesMap(entriesMap, 40, 100, "vm2");
+    verifyInEntriesMap(entriesMap, 90, 150, "vm3");
+
+  }
+
+  protected void verifyTwoHDFSFiles(VM vm, String uniqueName) throws Exception {
+
+    HashMap<String, HashMap<String, String>> filesToEntriesMap = createFilesAndEntriesMap(vm, uniqueName, uniqueName);
+
+    assertTrue("there should be exactly two files, but there are "
+        + filesToEntriesMap.size(), filesToEntriesMap.size() == 2);
+    long timestamp = Long.MAX_VALUE;
+    String olderFile = null;
+    for (Map.Entry<String, HashMap<String, String>> e : filesToEntriesMap
+        .entrySet()) {
+      String fileName = e.getKey().substring(
+          0,
+          e.getKey().length()
+              - AbstractHoplogOrganizer.SEQ_HOPLOG_EXTENSION.length());
+      long newTimeStamp = Long.parseLong(fileName.substring(
+          fileName.indexOf("-") + 1, fileName.lastIndexOf("-")));
+      if (newTimeStamp < timestamp) {
+        olderFile = e.getKey();
+        timestamp = newTimeStamp;
+      }
+    }
+    verifyInEntriesMap(filesToEntriesMap.get(olderFile), 1, 8, "vm0");
+    verifyInEntriesMap(filesToEntriesMap.get(olderFile), 4, 10, "vm1");
+    filesToEntriesMap.remove(olderFile);
+    verifyInEntriesMap(filesToEntriesMap.values().iterator().next(), 10, 18, "vm0");
+    verifyInEntriesMap(filesToEntriesMap.values().iterator().next(), 14, 20, "vm1");
+  }
+
+  protected void verifyTwoHDFSFilesWithTwoEntries(VM vm, String uniqueName,
+      String value) throws Exception {
+
+    HashMap<String, HashMap<String, String>> filesToEntriesMap = createFilesAndEntriesMap(vm, uniqueName, uniqueName);
+    
+    assertTrue( "there should be exactly two files, but there are " + filesToEntriesMap.size(), filesToEntriesMap.size() == 2);
+    HashMap<String, String> entriesMap =  new HashMap<String, String>();
+    for (HashMap<String, String>  v : filesToEntriesMap.values()) {
+      entriesMap.putAll(v);
+    }
+    assertTrue( "Expected key K1 received  " + entriesMap.get(value+ "1vm0"), entriesMap.get(value+ "1vm0").equals("K1"));
+    assertTrue( "Expected key K2 received  " + entriesMap.get(value+ "2vm0"), entriesMap.get(value+ "2vm0").equals("K2"));
+    assertTrue( "Expected key K2 received  " + entriesMap.get(value+ "2vm1"), entriesMap.get(value+ "2vm1").equals("K2"));
+    assertTrue( "Expected key K3 received  " + entriesMap.get(value+ "3vm1"), entriesMap.get(value+ "3vm1").equals("K3"));
+ }
+
+  /**
+   * verify that a PR accessor can be started
+   */
+  public void testPRAccessor() {
+    Host host = Host.getHost(0);
+    VM accessor = host.getVM(0);
+    VM datastore1 = host.getVM(1);
+    VM datastore2 = host.getVM(2);
+    VM accessor2 = host.getVM(3);
+    final String regionName = getName();
+    final String storeName = "store_" + regionName;
+
+    SerializableCallable createRegion = new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        HDFSStoreFactory storefactory = getCache().createHDFSStoreFactory();
+        homeDir = new File("../" + regionName).getCanonicalPath();
+        storefactory.setHomeDir(homeDir);
+        storefactory.create(storeName);
+        AttributesFactory<Integer, String> af = new AttributesFactory<Integer, String>();
+        af.setDataPolicy(DataPolicy.HDFS_PARTITION);
+        af.setHDFSStoreName(storeName);
+        Region r = getCache().createRegionFactory(af.create()).create(regionName);
+        r.put("key1", "value1");
+        return null;
+      }
+    };
+
+    SerializableCallable createAccessorRegion = new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        HDFSStoreFactory storefactory = getCache().createHDFSStoreFactory();
+        homeDir = new File("../" + regionName).getCanonicalPath();
+        storefactory.setHomeDir(homeDir);
+        storefactory.create(storeName);
+        // DataPolicy PARTITION with localMaxMemory 0 cannot be created
+        AttributesFactory<Integer, String> af = new AttributesFactory<Integer, String>();
+        af.setDataPolicy(DataPolicy.PARTITION);
+        PartitionAttributesFactory<Integer, String> paf = new PartitionAttributesFactory<Integer, String>();
+        paf.setLocalMaxMemory(0);
+        af.setPartitionAttributes(paf.create());
+        // DataPolicy PARTITION with localMaxMemory 0 can be created if hdfsStoreName is set
+        af.setHDFSStoreName(storeName);
+        // No need to check with different storeNames (can never be done in GemFireXD)
+        Region r = getCache().createRegionFactory(af.create()).create(regionName);
+        r.localDestroyRegion();
+        // DataPolicy HDFS_PARTITION with localMaxMemory 0 can be created
+        af = new AttributesFactory<Integer, String>();
+        af.setDataPolicy(DataPolicy.HDFS_PARTITION);
+        af.setPartitionAttributes(paf.create());
+        getCache().createRegionFactory(af.create()).create(regionName);
+        return null;
+      }
+    };
+
+    datastore1.invoke(createRegion);
+    accessor.invoke(createAccessorRegion);
+    datastore2.invoke(createRegion);
+    accessor2.invoke(createAccessorRegion);
+  }
+
+  /**
+   * verify that PUT dml does not read from hdfs
+   */
+  public void testPUTDMLSupport() {
+    doPUTDMLWork(false);
+  }
+
+  public void testPUTDMLBulkSupport() {
+    doPUTDMLWork(true);
+  }
+
+  private void doPUTDMLWork(final boolean isPutAll) {
+    Host host = Host.getHost(0);
+    VM vm1 = host.getVM(0);
+    VM vm2 = host.getVM(1);
+    final String regionName = getName();
+
+    createServerRegion(vm1, 7, 1, 50, "./" + regionName, regionName, 1000);
+    createServerRegion(vm2, 7, 1, 50, "./" + regionName, regionName, 1000);
+
+    vm1.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        Region r = getCache().getRegion(regionName);
+        LocalRegion lr = (LocalRegion) r;
+        SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
+        long readsFromHDFS = stats.getRead().getCount();
+        assertEquals(0, readsFromHDFS);
+        if (isPutAll) {
+          Map m = new HashMap();
+          // map with only one entry
+          m.put("key0", "value0");
+          DistributedPutAllOperation ev = lr.newPutAllOperation(m, null);
+          lr.basicPutAll(m, ev, null);
+          m.clear();
+          // map with multiple entries
+          for (int i = 1; i < 100; i++) {
+            m.put("key" + i, "value" + i);
+          }
+          ev = lr.newPutAllOperation(m, null);
+          lr.basicPutAll(m, ev, null);
+        } else {
+          for (int i = 0; i < 100; i++) {
+            r.put("key" + i, "value" + i);
+          }
+        }
+        return null;
+      }
+    });
+
+    SerializableCallable getHDFSReadCount = new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
+        return stats.getRead().getCount();
+      }
+    };
+
+    long vm1Count = (Long) vm1.invoke(getHDFSReadCount);
+    long vm2Count = (Long) vm2.invoke(getHDFSReadCount);
+    assertEquals(100, vm1Count + vm2Count);
+
+    pause(10 * 1000);
+
+    vm1.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        // do puts using the new api
+        LocalRegion lr = (LocalRegion) getCache().getRegion(regionName);
+        if (isPutAll) {
+          Map m = new HashMap();
+          // map with only one entry
+          m.put("key0", "value0");
+          DistributedPutAllOperation ev = lr.newPutAllForPUTDmlOperation(m, null);
+          lr.basicPutAll(m, ev, null);
+          m.clear();
+          // map with multiple entries
+          for (int i = 1; i < 200; i++) {
+            m.put("key" + i, "value" + i);
+          }
+          ev = lr.newPutAllForPUTDmlOperation(m, null);
+          lr.basicPutAll(m, ev, null);
+        } else {
+          for (int i = 0; i < 200; i++) {
+            EntryEventImpl ev = lr.newPutEntryEvent("key" + i, "value" + i, null);
+            lr.validatedPut(ev, System.currentTimeMillis());
+          }
+        }
+        return null;
+      }
+    });
+
+    // verify the stat for hdfs reads has not incremented
+    vm1Count = (Long) vm1.invoke(getHDFSReadCount);
+    vm2Count = (Long) vm2.invoke(getHDFSReadCount);
+    assertEquals(100, vm1Count + vm2Count);
+
+    vm1.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        Region r = getCache().getRegion(regionName);
+        for (int i = 0; i < 200; i++) {
+          assertEquals("value" + i, r.get("key" + i));
+        }
+        return null;
+      }
+    });
+  }
+
+  /**
+   * Verify that a get on operational (in-memory) data does not read from HDFS.
+   */
+  public void testGetOperationalData() {
+    Host host = Host.getHost(0);
+    VM vm1 = host.getVM(0);
+    VM vm2 = host.getVM(1);
+    final String regionName = getName();
+
+    createServerRegion(vm1, 7, 1, 50, "./"+regionName, regionName, 1000);
+    createServerRegion(vm2, 7, 1, 50, "./"+regionName, regionName, 1000);
+
+    vm1.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        Region r = getCache().getRegion(regionName);
+        SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
+        long readsFromHDFS = stats.getRead().getCount();
+        assertEquals(0, readsFromHDFS);
+        for (int i = 0; i < 100; i++) {
+          logger.info("SWAP:DOING PUT:key{}", i);
+          r.put("key" + i, "value" + i);
+        }
+        return null;
+      }
+    });
+
+    SerializableCallable getHDFSReadCount = new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
+        return stats.getRead().getCount();
+      }
+    };
+
+    long vm1Count = (Long) vm1.invoke(getHDFSReadCount);
+    long vm2Count = (Long) vm2.invoke(getHDFSReadCount);
+    assertEquals(100, vm1Count + vm2Count);
+
+    pause(10 * 1000);
+
+    // verify that get increments the read stat
+    vm1.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        Region r = getCache().getRegion(regionName);
+        for (int i = 0; i < 200; i++) {
+          if (i < 100) {
+            logger.info("SWAP:DOING GET:key", i);
+            assertEquals("value" + i, r.get("key" + i));
+          } else {
+            assertNull(r.get("key" + i));
+          }
+        }
+        return null;
+      }
+    });
+
+    vm1Count = (Long) vm1.invoke(getHDFSReadCount);
+    vm2Count = (Long) vm2.invoke(getHDFSReadCount);
+    // initial 100 + 150 for get (since 50 are in memory)
+    assertEquals(250, vm1Count + vm2Count);
+
+    // do gets with readFromHDFS set to false
+    vm1.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        Region r = getCache().getRegion(regionName);
+        LocalRegion lr = (LocalRegion) r;
+        int numEntries = 0;
+        for (int i = 0; i < 200; i++) {
+          logger.info("SWAP:DOING GET NO READ:key", i);
+          Object val = lr.get("key"+i, null, true, false, false, null,  null, false, false/*allowReadFromHDFS*/);
+          if (val != null) {
+            numEntries++;
+          }
+        }
+        assertEquals(50, numEntries); // entries in memory
+        return null;
+      }
+    });
+
+    vm1Count = (Long) vm1.invoke(getHDFSReadCount);
+    vm2Count = (Long) vm2.invoke(getHDFSReadCount);
+    // get should not have incremented
+    assertEquals(250, vm1Count + vm2Count);
+
+    /** MergeGemXDHDFSToGFE: this API has not been merged because it is not called by any code. */
+    /*
+    // do gets using DataView
+    SerializableCallable getUsingDataView = new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        Region r = getCache().getRegion(regionName);
+        LocalRegion lr = (LocalRegion) r;
+        PartitionedRegion pr = (PartitionedRegion) lr;
+        long numEntries = 0;
+        for (int i=0; i<200; i++) {
+          InternalDataView idv = lr.getDataView();
+          logger.debug("SWAP:DATAVIEW");
+          Object val = idv.getLocally("key"+i, null, PartitionedRegionHelper.getHashKey(pr, "key"+i), lr, true, true, null, null, false, false);
+          if (val != null) {
+            numEntries++;
+          }
+        }
+        return numEntries;
+      }
+    };
+
+    vm1Count = (Long) vm1.invoke(getUsingDataView);
+    vm2Count = (Long) vm2.invoke(getUsingDataView);
+    assertEquals(50 * 2, vm1Count + vm2Count);// both VMs will find 50 entries*/
+
+    vm1Count = (Long) vm1.invoke(getHDFSReadCount);
+    vm2Count = (Long) vm2.invoke(getHDFSReadCount);
+    // get should not have incremented
+    assertEquals(250, vm1Count + vm2Count);
+
+  }
+
+  public void testSizeEstimate() {
+    Host host = Host.getHost(0);
+    VM vm1 = host.getVM(0);
+    VM vm2 = host.getVM(1);
+    VM vm3 = host.getVM(2);
+    final String regionName = getName();
+
+    createServerRegion(vm1, 7, 1, 50, "./"+regionName, regionName, 1000);
+    createServerRegion(vm2, 7, 1, 50, "./"+regionName, regionName, 1000);
+    createServerRegion(vm3, 7, 1, 50, "./"+regionName, regionName, 1000);
+
+    final int size = 226;
+
+    vm1.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        Region r = getCache().getRegion(regionName);
+        // LocalRegion lr = (LocalRegion) r;
+        for (int i = 0; i < size; i++) {
+          r.put("key" + i, "value" + i);
+        }
+        // before flush
+        // assertEquals(size, lr.sizeEstimate());
+        return null;
+      }
+    });
+
+    pause(10 * 1000);
+
+    vm2.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        Region r = getCache().getRegion(regionName);
+        LocalRegion lr = (LocalRegion) r;
+        logger.debug("SWAP:callingsizeEstimate");
+        long estimate = lr.sizeEstimate();
+        double err = Math.abs(estimate - size) / (double) size;
+        System.out.println("SWAP:estimate:" + estimate);
+        assertTrue(err < 0.2);
+        return null;
+      }
+    });
+  }
+
+  public void testForceAsyncMajorCompaction() throws Exception {
+    doForceCompactionTest(true, false);
+  }
+
+  public void testForceSyncMajorCompaction() throws Exception {
+    // more changes
+    doForceCompactionTest(true, true);
+  }
+
+  private void doForceCompactionTest(final boolean isMajor, final boolean isSynchronous) throws Exception {
+    Host host = Host.getHost(0);
+    VM vm1 = host.getVM(0);
+    VM vm2 = host.getVM(1);
+    VM vm3 = host.getVM(2);
+    final String regionName = getName();
+
+    createServerRegion(vm1, 7, 1, 50, "./" + regionName, regionName, 1000);
+    createServerRegion(vm2, 7, 1, 50, "./" + regionName, regionName, 1000);
+    createServerRegion(vm3, 7, 1, 50, "./" + regionName, regionName, 1000);
+
+    SerializableCallable noCompaction = new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
+        if (isMajor) {
+          assertEquals(0, stats.getMajorCompaction().getCount());
+        } else {
+          assertEquals(0, stats.getMinorCompaction().getCount());
+        }
+        return null;
+      }
+    };
+
+    vm1.invoke(noCompaction);
+    vm2.invoke(noCompaction);
+    vm3.invoke(noCompaction);
+
+    vm1.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        Region r = getCache().getRegion(regionName);
+        for (int i = 0; i < 500; i++) {
+          r.put("key" + i, "value" + i);
+          if (i % 100 == 0) {
+            // wait for flush
+            pause(3000);
+          }
+        }
+        pause(3000);
+        PartitionedRegion pr = (PartitionedRegion) r;
+        long lastCompactionTS = pr.lastMajorHDFSCompaction();
+        assertEquals(0, lastCompactionTS);
+        long beforeCompact = System.currentTimeMillis();
+        pr.forceHDFSCompaction(true, isSynchronous ? 0 : 1);
+        if (isSynchronous) {
+          final SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
+          assertTrue(stats.getMajorCompaction().getCount() > 0);
+          assertTrue(pr.lastMajorHDFSCompaction() >= beforeCompact);
+        }
+        return null;
+      }
+    });
+
+    if (!isSynchronous) {
+      SerializableCallable verifyCompactionStat = new SerializableCallable() {
+        @Override
+        public Object call() throws Exception {
+          final SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
+          waitForCriterion(new WaitCriterion() {
+            @Override
+            public boolean done() {
+              return stats.getMajorCompaction().getCount() > 0;
+            }
+
+            @Override
+            public String description() {
+              return "Major compaction stat not > 0";
+            }
+          }, 30 * 1000, 1000, true);
+          return null;
+        }
+      };
+
+      vm1.invoke(verifyCompactionStat);
+      vm2.invoke(verifyCompactionStat);
+      vm3.invoke(verifyCompactionStat);
+    } else {
+      SerializableCallable verifyCompactionStat = new SerializableCallable() {
+        @Override
+        public Object call() throws Exception {
+          final SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
+          assertTrue(stats.getMajorCompaction().getCount() > 0);
+          return null;
+        }
+      };
+      vm2.invoke(verifyCompactionStat);
+      vm3.invoke(verifyCompactionStat);
+    }
+  }
+
+  public void testFlushQueue() throws Exception {
+    doFlushQueue(false);
+  }
+
+  public void testFlushQueueWO() throws Exception {
+    doFlushQueue(true);
+  }
+
+  private void doFlushQueue(boolean wo) throws Exception {
+    Host host = Host.getHost(0);
+    VM vm1 = host.getVM(0);
+    VM vm2 = host.getVM(1);
+    VM vm3 = host.getVM(2);
+    final String regionName = getName();
+
+    createServerRegion(vm1, 7, 1, 50, "./"+regionName, regionName, 300000, wo, false);
+    createServerRegion(vm2, 7, 1, 50, "./"+regionName, regionName, 300000, wo, false);
+    createServerRegion(vm3, 7, 1, 50, "./"+regionName, regionName, 300000, wo, false);
+
+    vm1.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(regionName);
+        for (int i = 0; i < 500; i++) {
+          pr.put("key" + i, "value" + i);
+        }
+
+        pr.flushHDFSQueue(0);
+        return null;
+      }
+    });
+
+    SerializableCallable verify = new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(regionName);
+        assertEquals(0, pr.getHDFSEventQueueStats().getEventQueueSize());
+        return null;
+      }
+    };
+
+    vm1.invoke(verify);
+    vm2.invoke(verify);
+    vm3.invoke(verify);
+  }
+}
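
For reference, a minimal sketch of the flush-and-verify pattern the doFlushQueue tests above
exercise, outside the DUnit harness. The helper class name and region path are illustrative and
not part of this patch; it assumes a running cache with an HDFS partitioned region already created.

    import com.gemstone.gemfire.cache.GemFireCache;
    import com.gemstone.gemfire.internal.cache.PartitionedRegion;

    public final class HdfsFlushSketch {
      /** Drains the region's HDFS event queue and checks that nothing is left queued. */
      static void flushAndVerify(GemFireCache cache, String regionPath) {
        PartitionedRegion pr = (PartitionedRegion) cache.getRegion(regionPath);
        // 0 means wait without a timeout, as in doFlushQueue() above.
        pr.flushHDFSQueue(0);
        // After a successful flush the HDFS event queue statistics report no pending events.
        if (pr.getHDFSEventQueueStats().getEventQueueSize() != 0) {
          throw new IllegalStateException("HDFS event queue still has pending events");
        }
      }
    }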

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSOffHeapBasicDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSOffHeapBasicDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSOffHeapBasicDUnitTest.java
new file mode 100644
index 0000000..f04307f
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSOffHeapBasicDUnitTest.java
@@ -0,0 +1,115 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import static com.gemstone.gemfire.test.dunit.Invoke.invokeInEveryVM;
+
+import java.io.File;
+import java.util.Properties;
+
+import com.gemstone.gemfire.cache.AttributesFactory;
+import com.gemstone.gemfire.cache.DataPolicy;
+import com.gemstone.gemfire.cache.EvictionAction;
+import com.gemstone.gemfire.cache.EvictionAttributes;
+import com.gemstone.gemfire.cache.PartitionAttributesFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
+import com.gemstone.gemfire.internal.cache.LocalRegion;
+import com.gemstone.gemfire.internal.cache.OffHeapTestUtil;
+import com.gemstone.gemfire.test.dunit.SerializableCallable;
+import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+
+@SuppressWarnings({ "serial", "rawtypes", "deprecation" })
+public class RegionWithHDFSOffHeapBasicDUnitTest extends
+    RegionWithHDFSBasicDUnitTest {
+  static {
+    System.setProperty("gemfire.trackOffHeapRefCounts", "true");
+  }
+  
+  public RegionWithHDFSOffHeapBasicDUnitTest(String name) {
+    super(name);
+  }
+  
+  @Override
+  public void preTearDownCacheTestCase() throws Exception {
+    SerializableRunnable checkOrphans = new SerializableRunnable() {
+
+      @Override
+      public void run() {
+        if(hasCache()) {
+          OffHeapTestUtil.checkOrphans();
+        }
+      }
+    };
+    try {
+      checkOrphans.run();
+      invokeInEveryVM(checkOrphans);
+    } finally {
+      // proceed with tearDown2 anyway.
+      super.preTearDownCacheTestCase();
+    }
+  }
+
+  public void testDelta() {
+    // do nothing; deltas aren't supported with off-heap.
+  }
+  
+  @Override
+  protected SerializableCallable getCreateRegionCallable(final int totalnumOfBuckets,
+                                                         final int batchSizeMB, final int maximumEntries, final String folderPath,
+                                                         final String uniqueName, final int batchInterval, final boolean queuePersistent,
+                                                         final boolean writeonly, final long timeForRollover, final long maxFileSize) {
+    SerializableCallable createRegion = new SerializableCallable() {
+      public Object call() throws Exception {
+        AttributesFactory af = new AttributesFactory();
+        af.setDataPolicy(DataPolicy.HDFS_PARTITION);
+        PartitionAttributesFactory paf = new PartitionAttributesFactory();
+        paf.setTotalNumBuckets(totalnumOfBuckets);
+        paf.setRedundantCopies(1);
+        
+        af.setHDFSStoreName(uniqueName);
+        af.setPartitionAttributes(paf.create());
+        HDFSStoreFactory hsf = getCache().createHDFSStoreFactory();
+        // Going two levels up to avoid home directories getting created in the
+        // VM-specific directory. This avoids failures in those tests where
+        // datastores are restarted and bucket ownership changes between VMs.
+        homeDir = new File(tmpDir + "/../../" + folderPath).getCanonicalPath();
+        hsf.setHomeDir(homeDir);
+        hsf.setBatchSize(batchSizeMB);
+        hsf.setBufferPersistent(queuePersistent);
+        hsf.setMaxMemory(3);
+        hsf.setBatchInterval(batchInterval);
+        if (timeForRollover != -1) {
+          hsf.setWriteOnlyFileRolloverInterval((int)timeForRollover);
+          System.setProperty("gemfire.HDFSRegionDirector.FILE_ROLLOVER_TASK_INTERVAL_SECONDS", "1");
+        }
+        if (maxFileSize != -1) {
+          hsf.setWriteOnlyFileRolloverSize((int) maxFileSize);
+        }
+        hsf.create(uniqueName);
+        
+        af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(maximumEntries, EvictionAction.LOCAL_DESTROY));
+        
+        af.setHDFSWriteOnly(writeonly);
+        af.setOffHeap(true);
+        Region r = createRootRegion(uniqueName, af.create());
+        ((LocalRegion)r).setIsTest();
+        
+        return 0;
+      }
+    };
+    return createRegion;
+  }
+
+  @Override
+  public Properties getDistributedSystemProperties() {
+    Properties props = super.getDistributedSystemProperties();
+    props.setProperty("off-heap-memory-size", "50m");
+    return props;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSPersistenceBasicDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSPersistenceBasicDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSPersistenceBasicDUnitTest.java
new file mode 100644
index 0000000..685080d
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSPersistenceBasicDUnitTest.java
@@ -0,0 +1,77 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.io.File;
+
+import com.gemstone.gemfire.cache.AttributesFactory;
+import com.gemstone.gemfire.cache.DataPolicy;
+import com.gemstone.gemfire.cache.EvictionAction;
+import com.gemstone.gemfire.cache.EvictionAttributes;
+import com.gemstone.gemfire.cache.PartitionAttributesFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
+import com.gemstone.gemfire.internal.cache.LocalRegion;
+import com.gemstone.gemfire.test.dunit.SerializableCallable;
+
+
+@SuppressWarnings({ "serial", "rawtypes", "deprecation" })
+public class RegionWithHDFSPersistenceBasicDUnitTest extends
+    RegionWithHDFSBasicDUnitTest {
+
+  public RegionWithHDFSPersistenceBasicDUnitTest(String name) {
+    super(name);
+  }
+
+  @Override
+  protected SerializableCallable getCreateRegionCallable(final int totalnumOfBuckets,
+                                                         final int batchSizeMB, final int maximumEntries, final String folderPath,
+                                                         final String uniqueName, final int batchInterval, final boolean queuePersistent,
+                                                         final boolean writeonly, final long timeForRollover, final long maxFileSize) {
+    SerializableCallable createRegion = new SerializableCallable() {
+      public Object call() throws Exception {
+        AttributesFactory af = new AttributesFactory();
+        af.setDataPolicy(DataPolicy.HDFS_PERSISTENT_PARTITION);
+        PartitionAttributesFactory paf = new PartitionAttributesFactory();
+        paf.setTotalNumBuckets(totalnumOfBuckets);
+        paf.setRedundantCopies(1);
+        
+        af.setHDFSStoreName(uniqueName);
+        
+        af.setPartitionAttributes(paf.create());
+        HDFSStoreFactory hsf = getCache().createHDFSStoreFactory();
+        // Going two levels up to avoid home directories getting created in the
+        // VM-specific directory. This avoids failures in those tests where
+        // datastores are restarted and bucket ownership changes between VMs.
+        homeDir = new File(tmpDir + "/../../" + folderPath).getCanonicalPath();
+        hsf.setHomeDir(homeDir);
+        hsf.setBatchSize(batchSizeMB);
+        hsf.setBufferPersistent(queuePersistent);
+        hsf.setMaxMemory(3);
+        hsf.setBatchInterval(batchInterval);
+        if (timeForRollover != -1) {
+          hsf.setWriteOnlyFileRolloverInterval((int)timeForRollover);
+          System.setProperty("gemfire.HDFSRegionDirector.FILE_ROLLOVER_TASK_INTERVAL_SECONDS", "1");
+        }
+        if (maxFileSize != -1) {
+          hsf.setWriteOnlyFileRolloverSize((int) maxFileSize);
+        }
+        hsf.create(uniqueName);
+        
+        af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(maximumEntries, EvictionAction.LOCAL_DESTROY));
+        
+        af.setHDFSWriteOnly(writeonly);
+        Region r = createRootRegion(uniqueName, af.create());
+        ((LocalRegion)r).setIsTest();
+        
+        return 0;
+      }
+    };
+    return createRegion;
+  }
+}
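
Outside the test harness, the store and region wiring done in getCreateRegionCallable() above
boils down to the following sketch. The store name, home directory and class name are
illustrative; whether the Cache interface itself exposes createHDFSStoreFactory() (as getCache()
does in these tests) is assumed.

    import com.gemstone.gemfire.cache.AttributesFactory;
    import com.gemstone.gemfire.cache.Cache;
    import com.gemstone.gemfire.cache.DataPolicy;
    import com.gemstone.gemfire.cache.PartitionAttributesFactory;
    import com.gemstone.gemfire.cache.Region;
    import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;

    public final class HdfsRegionConfigSketch {
      @SuppressWarnings({ "rawtypes", "unchecked", "deprecation" })
      static Region createHdfsRegion(Cache cache) {
        // Configure the HDFS store first; the region refers to it by name.
        HDFSStoreFactory hsf = cache.createHDFSStoreFactory();
        hsf.setHomeDir("/gemfire/exampleStore"); // illustrative HDFS home directory
        hsf.setBatchSize(1);                     // batch size in MB, as in the tests above
        hsf.setBatchInterval(1000);              // batch interval, as in the tests above
        hsf.setBufferPersistent(false);
        hsf.setMaxMemory(3);
        hsf.create("exampleStore");

        PartitionAttributesFactory paf = new PartitionAttributesFactory();
        paf.setTotalNumBuckets(7);
        paf.setRedundantCopies(1);

        AttributesFactory af = new AttributesFactory();
        af.setDataPolicy(DataPolicy.HDFS_PERSISTENT_PARTITION);
        af.setHDFSStoreName("exampleStore");
        af.setPartitionAttributes(paf.create());
        return cache.createRegionFactory(af.create()).create("exampleRegion");
      }
    }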


[21/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSFlushQueueFunction.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSFlushQueueFunction.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSFlushQueueFunction.java
new file mode 100644
index 0000000..cdf7452
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSFlushQueueFunction.java
@@ -0,0 +1,287 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.logging.log4j.Logger;
+
+import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueImpl;
+import com.gemstone.gemfire.cache.execute.Function;
+import com.gemstone.gemfire.cache.execute.FunctionContext;
+import com.gemstone.gemfire.cache.execute.FunctionException;
+import com.gemstone.gemfire.cache.execute.FunctionService;
+import com.gemstone.gemfire.cache.execute.RegionFunctionContext;
+import com.gemstone.gemfire.cache.hdfs.internal.FlushObserver.AsyncFlushResult;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
+import com.gemstone.gemfire.distributed.internal.ReplyProcessor21;
+import com.gemstone.gemfire.i18n.LogWriterI18n;
+import com.gemstone.gemfire.internal.InternalEntity;
+import com.gemstone.gemfire.internal.cache.ForceReattemptException;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.PartitionedRegion;
+import com.gemstone.gemfire.internal.cache.execute.AbstractExecution;
+import com.gemstone.gemfire.internal.cache.execute.LocalResultCollector;
+import com.gemstone.gemfire.internal.cache.wan.AbstractGatewaySender;
+import com.gemstone.gemfire.internal.cache.wan.AbstractGatewaySenderEventProcessor;
+import com.gemstone.gemfire.internal.cache.wan.parallel.ConcurrentParallelGatewaySenderQueue;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
+
+public class HDFSFlushQueueFunction implements Function, InternalEntity {
+  private static final int MAX_RETRIES = Integer.getInteger("gemfireXD.maxFlushQueueRetries", 3);
+  private static final boolean VERBOSE = Boolean.getBoolean("hdfsFlushQueueFunction.VERBOSE");
+  private static final Logger logger = LogService.getLogger();
+  private static final String ID = HDFSFlushQueueFunction.class.getName();
+  
+  public static void flushQueue(PartitionedRegion pr, int maxWaitTime) {
+    
+    Set<Integer> buckets = new HashSet<Integer>(pr.getRegionAdvisor().getBucketSet());
+
+    maxWaitTime *= 1000;
+    long start = System.currentTimeMillis();
+    
+    int retries = 0;
+    long remaining = 0;
+    while (retries++ < MAX_RETRIES && (remaining = waitTime(start, maxWaitTime)) > 0) {
+      if (logger.isDebugEnabled() || VERBOSE) {
+        logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Flushing buckets " + buckets 
+            + ", attempt = " + retries 
+            + ", remaining = " + remaining));
+      }
+      
+      HDFSFlushQueueArgs args = new HDFSFlushQueueArgs(buckets, remaining);
+      
+      HDFSFlushQueueResultCollector rc = new HDFSFlushQueueResultCollector(buckets);
+      AbstractExecution exec = (AbstractExecution) FunctionService
+          .onRegion(pr)
+          .withArgs(args)
+          .withCollector(rc);
+      exec.setWaitOnExceptionFlag(true);
+      
+      try {
+        exec.execute(ID);
+        if (rc.getResult()) {
+          if (logger.isDebugEnabled() || VERBOSE) {
+            logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Flushed all buckets successfully")); 
+          }
+          return;
+        }
+      } catch (FunctionException e) {
+        if (logger.isDebugEnabled() || VERBOSE) {
+          logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Encountered error flushing queue"), e); 
+        }
+      }
+      
+      buckets.removeAll(rc.getSuccessfulBuckets());
+      for (int bucketId : buckets) {
+        remaining = waitTime(start, maxWaitTime);
+        if (logger.isDebugEnabled() || VERBOSE) {
+          logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Waiting for bucket " + bucketId)); 
+        }
+        pr.getNodeForBucketWrite(bucketId, new PartitionedRegion.RetryTimeKeeper((int) remaining));
+      }
+    }
+    
+    pr.checkReadiness();
+    throw new FunctionException("Unable to flush the following buckets: " + buckets);
+  }
+  
+  private static long waitTime(long start, long max) {
+    if (max == 0) {
+      return Integer.MAX_VALUE;
+    }
+    return start + max - System.currentTimeMillis();
+  }
+  
+  @Override
+  public void execute(FunctionContext context) {
+    RegionFunctionContext rfc = (RegionFunctionContext) context;
+    PartitionedRegion pr = (PartitionedRegion) rfc.getDataSet();
+    
+    HDFSFlushQueueArgs args = (HDFSFlushQueueArgs) rfc.getArguments();
+    Set<Integer> buckets = new HashSet<Integer>(args.getBuckets());
+    buckets.retainAll(pr.getDataStore().getAllLocalPrimaryBucketIds());
+
+    Map<Integer, AsyncFlushResult> flushes = new HashMap<Integer, AsyncFlushResult>();
+    for (int bucketId : buckets) {
+      try {
+        HDFSBucketRegionQueue brq = getQueue(pr, bucketId);
+        if (brq != null) {
+          if (logger.isDebugEnabled() || VERBOSE) {
+            logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Flushing bucket " + bucketId)); 
+          }
+          flushes.put(bucketId, brq.flush());
+        }
+      } catch (ForceReattemptException e) {
+        if (logger.isDebugEnabled() || VERBOSE) {
+          logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Encountered error flushing bucket " + bucketId), e); 
+        }
+      }
+    }
+    
+    try {
+      long start = System.currentTimeMillis();
+      for (Map.Entry<Integer, AsyncFlushResult> flush : flushes.entrySet()) {
+        long remaining = waitTime(start, args.getMaxWaitTime());
+        if (logger.isDebugEnabled() || VERBOSE) {
+          logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Waiting for bucket " + flush.getKey() 
+              + " to complete flushing, remaining = " + remaining)); 
+        }
+        
+        if (flush.getValue().waitForFlush(remaining, TimeUnit.MILLISECONDS)) {
+          if (logger.isDebugEnabled() || VERBOSE) {
+            logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Bucket " + flush.getKey() + " flushed successfully")); 
+          }
+          rfc.getResultSender().sendResult(new FlushStatus(flush.getKey()));
+        }
+      }
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+    }
+    
+    if (logger.isDebugEnabled() || VERBOSE) {
+      logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "Sending final flush result")); 
+    }
+    rfc.getResultSender().lastResult(FlushStatus.last());
+  }
+
+  private HDFSBucketRegionQueue getQueue(PartitionedRegion pr, int bucketId) 
+      throws ForceReattemptException {
+    AsyncEventQueueImpl aeq = pr.getHDFSEventQueue();
+    AbstractGatewaySender gw = (AbstractGatewaySender) aeq.getSender();
+    AbstractGatewaySenderEventProcessor ep = gw.getEventProcessor();
+    if (ep == null) {
+      return null;
+    }
+    
+    ConcurrentParallelGatewaySenderQueue queue = (ConcurrentParallelGatewaySenderQueue) ep.getQueue();
+    return queue.getBucketRegionQueue(pr, bucketId);
+  }
+  
+  @Override
+  public String getId() {
+    return ID;
+  }
+
+  @Override
+  public boolean hasResult() {
+    return true;
+  }
+
+  @Override
+  public boolean optimizeForWrite() {
+    return true;
+  }
+
+  @Override
+  public boolean isHA() {
+    return false;
+  }
+  
+  public static class HDFSFlushQueueResultCollector implements LocalResultCollector<Object, Boolean> {
+    private final CountDownLatch complete;
+    private final Set<Integer> expectedBuckets;
+    private final Set<Integer> successfulBuckets;
+
+    private volatile ReplyProcessor21 processor;
+    
+    public HDFSFlushQueueResultCollector(Set<Integer> expectedBuckets) {
+      this.expectedBuckets = expectedBuckets;
+      
+      complete = new CountDownLatch(1);
+      successfulBuckets = new HashSet<Integer>();
+    }
+    
+    public Set<Integer> getSuccessfulBuckets() {
+      synchronized (successfulBuckets) {
+        return new HashSet<Integer>(successfulBuckets);
+      }
+    }
+    
+    @Override
+    public Boolean getResult() throws FunctionException {
+      try {
+        complete.await();
+        synchronized (successfulBuckets) {
+          LogWriterI18n logger = InternalDistributedSystem.getLoggerI18n();
+          if (logger.fineEnabled() || VERBOSE) {
+            logger.info(LocalizedStrings.DEBUG, "Expected buckets: " + expectedBuckets);
+            logger.info(LocalizedStrings.DEBUG, "Successful buckets: " + successfulBuckets);
+          }
+          return expectedBuckets.equals(successfulBuckets);
+        }
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+        GemFireCacheImpl.getExisting().getCancelCriterion().checkCancelInProgress(e);
+        throw new FunctionException(e);
+      }
+    }
+
+    @Override
+    public Boolean getResult(long timeout, TimeUnit unit)
+        throws FunctionException, InterruptedException {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public synchronized void addResult(DistributedMember memberID, Object result) {
+      if (result instanceof FlushStatus) {
+        FlushStatus status = (FlushStatus) result;
+        if (!status.isLast()) {
+          synchronized (successfulBuckets) {
+            successfulBuckets.add(status.getBucketId());
+          }        
+        }
+      }
+    }
+
+    @Override
+    public void endResults() {    	
+      complete.countDown();
+    }
+
+    @Override
+    public void clearResults() {
+    }
+
+    @Override
+    public void setProcessor(ReplyProcessor21 processor) {
+      this.processor = processor;
+    }
+
+    @Override
+    public ReplyProcessor21 getProcessor() {
+      return processor;
+    }
+
+    @Override
+    public void setException(Throwable exception) {
+      // no-op; flush failures show up as buckets missing from successfulBuckets
+    }
+
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionArgs.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionArgs.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionArgs.java
new file mode 100644
index 0000000..ec0f9ff
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionArgs.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+import com.gemstone.gemfire.DataSerializer;
+import com.gemstone.gemfire.internal.VersionedDataSerializable;
+import com.gemstone.gemfire.internal.Version;
+
+/**
+ * Arguments passed to the HDFSForceCompactionFunction
+ * 
+ */
+@SuppressWarnings("serial")
+public class HDFSForceCompactionArgs implements VersionedDataSerializable {
+
+  private static Version[] serializationVersions = new Version[]{ Version.GFE_81 };
+
+  private HashSet<Integer> buckets;
+
+  private boolean isMajor;
+
+  private int maxWaitTime;
+
+  public HDFSForceCompactionArgs() {
+  }
+
+  public HDFSForceCompactionArgs(Set<Integer> buckets, boolean isMajor, Integer maxWaitTime) {
+    this.buckets = new HashSet<Integer>(buckets);
+    this.isMajor = isMajor;
+    this.maxWaitTime = maxWaitTime;
+  }
+
+  @Override
+  public void toData(DataOutput out) throws IOException {
+    DataSerializer.writeHashSet(buckets, out);
+    out.writeBoolean(isMajor);
+    out.writeInt(maxWaitTime);
+  }
+
+  @Override
+  public void fromData(DataInput in) throws IOException,
+      ClassNotFoundException {
+    this.buckets = DataSerializer.readHashSet(in);
+    this.isMajor = in.readBoolean();
+    this.maxWaitTime = in.readInt();
+  }
+
+  @Override
+  public Version[] getSerializationVersions() {
+    return serializationVersions;
+  }
+
+  public Set<Integer> getBuckets() {
+    return (Set<Integer>) buckets;
+  }
+
+  public void setBuckets(Set<Integer> buckets) {
+    this.buckets = new HashSet<Integer>(buckets);
+  }
+
+  public boolean isMajor() {
+    return isMajor;
+  }
+
+  public void setMajor(boolean isMajor) {
+    this.isMajor = isMajor;
+  }
+
+  public boolean isSynchronous() {
+    return maxWaitTime == 0;
+  }
+
+  public int getMaxWaitTime() {
+    return this.maxWaitTime;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append(getClass().getCanonicalName()).append("@")
+    .append(System.identityHashCode(this))
+    .append(" buckets:").append(buckets)
+    .append(" isMajor:").append(isMajor)
+    .append(" maxWaitTime:").append(maxWaitTime);
+    return sb.toString();
+  }
+}
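
A quick round-trip sketch of the serialization contract above; note that maxWaitTime == 0 marks
a synchronous request. The class name and bucket values are illustrative, and it is assumed the
DataSerializer helpers used by toData()/fromData() can write to a plain java.io stream.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.util.Arrays;
    import java.util.HashSet;

    import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSForceCompactionArgs;

    public final class ForceCompactionArgsRoundTrip {
      public static void main(String[] args) throws Exception {
        // maxWaitTime == 0 => isSynchronous() returns true.
        HDFSForceCompactionArgs original = new HDFSForceCompactionArgs(
            new HashSet<Integer>(Arrays.asList(0, 3, 7)), true, 0);

        // Write with toData(), read back with fromData().
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.toData(new DataOutputStream(bytes));
        HDFSForceCompactionArgs copy = new HDFSForceCompactionArgs();
        copy.fromData(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

        // Same buckets, isMajor and maxWaitTime as the original.
        System.out.println(copy + " synchronous=" + copy.isSynchronous());
      }
    }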

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionFunction.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionFunction.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionFunction.java
new file mode 100644
index 0000000..d26ac1b
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionFunction.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.logging.log4j.Logger;
+
+import com.gemstone.gemfire.cache.execute.Function;
+import com.gemstone.gemfire.cache.execute.FunctionContext;
+import com.gemstone.gemfire.cache.execute.RegionFunctionContext;
+import com.gemstone.gemfire.internal.InternalEntity;
+import com.gemstone.gemfire.internal.cache.PartitionedRegion;
+import com.gemstone.gemfire.internal.logging.LogService;
+
+/**
+ * Function responsible for forcing a compaction on all members
+ * of the system
+ *
+ */
+@SuppressWarnings("serial")
+public class HDFSForceCompactionFunction implements Function, InternalEntity {
+
+  public static final int FORCE_COMPACTION_MAX_RETRIES = Integer.getInteger("gemfireXD.maxCompactionRetries", 3);
+
+  public static final int BUCKET_ID_FOR_LAST_RESULT = -1;
+
+  public static final String ID = "HDFSForceCompactionFunction";
+
+  private static final Logger logger = LogService.getLogger();
+  
+  @Override
+  public void execute(FunctionContext context) {
+    if (context.isPossibleDuplicate()) {
+      // do not re-execute the function, another function
+      // targeting the failed buckets will be invoked
+      context.getResultSender().lastResult(new CompactionStatus(BUCKET_ID_FOR_LAST_RESULT, false));
+      return;
+    }
+    RegionFunctionContext rfc = (RegionFunctionContext) context;
+    PartitionedRegion pr = (PartitionedRegion) rfc.getDataSet();
+    HDFSForceCompactionArgs args = (HDFSForceCompactionArgs) rfc.getArguments();
+    Set<Integer> buckets = new HashSet<Integer>(args.getBuckets()); // copying avoids race when the function coordinator
+                                                                    // also runs the function locally
+    buckets.retainAll(pr.getDataStore().getAllLocalPrimaryBucketIds());
+
+    List<Future<CompactionStatus>> futures =  pr.forceLocalHDFSCompaction(buckets, args.isMajor(), 0);
+    int waitFor = args.getMaxWaitTime();
+    for (Future<CompactionStatus> future : futures) {
+      long start = System.currentTimeMillis();
+      CompactionStatus status = null;
+      try {
+        // TODO use a CompletionService instead
+        if (!args.isSynchronous() && waitFor <= 0) {
+          break;
+        }
+        status = args.isSynchronous() ? future.get() : future.get(waitFor, TimeUnit.MILLISECONDS);
+        buckets.remove(status.getBucketId());
+        if (logger.isDebugEnabled()) {
+          logger.debug("HDFS: ForceCompaction sending result:"+status);
+        }
+        context.getResultSender().sendResult(status);
+        long elapsedTime = System.currentTimeMillis() - start;
+        waitFor -= elapsedTime;
+      } catch (InterruptedException e) {
+        // send a list of failed buckets after waiting for all buckets
+      } catch (ExecutionException e) {
+        // send a list of failed buckets after waiting for all buckets
+      } catch (TimeoutException e) {
+        // do not wait for other buckets to complete
+        break;
+      }
+    }
+    // for asynchronous invocation, the status is true for buckets that we did not wait for
+    boolean status = !args.isSynchronous();
+    for (Integer bucketId : buckets) {
+      if (logger.isDebugEnabled()) {
+        logger.debug("HDFS: ForceCompaction sending result for bucket:"+bucketId);
+      }
+      context.getResultSender().sendResult(new CompactionStatus(bucketId, status));
+    }
+    if (logger.isDebugEnabled()) {
+      logger.debug("HDFS: ForceCompaction sending last result");
+    }
+    context.getResultSender().lastResult(new CompactionStatus(BUCKET_ID_FOR_LAST_RESULT, true));
+  }
+
+  @Override
+  public String getId() {
+    return ID;
+  }
+
+  @Override
+  public boolean hasResult() {
+    return true;
+  }
+
+  @Override
+  public boolean optimizeForWrite() {
+    // run compaction on primary members
+    return true;
+  }
+
+  @Override
+  public boolean isHA() {
+    // so that we can target re-execution on failed buckets
+    return true;
+  }
+}
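
The coordinator side of this function is not part of this hunk; the PartitionedRegion
forceHDFSCompaction() call used in the DUnit tests presumably drives it much like
HDFSFlushQueueFunction.flushQueue() above. A rough sketch of that wiring, using the result
collector added in the next file; the retry handling is simplified and the exact production
logic may differ.

    package com.gemstone.gemfire.cache.hdfs.internal.hoplog;

    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    import com.gemstone.gemfire.cache.execute.FunctionService;
    import com.gemstone.gemfire.internal.cache.PartitionedRegion;
    import com.gemstone.gemfire.internal.cache.execute.AbstractExecution;

    public final class ForceCompactionSketch {
      /** Requests a major compaction of every bucket and returns the per-bucket statuses. */
      static List<CompactionStatus> forceMajorCompaction(PartitionedRegion pr, int maxWaitTime) {
        Set<Integer> buckets = new HashSet<Integer>(pr.getRegionAdvisor().getBucketSet());
        HDFSForceCompactionArgs args = new HDFSForceCompactionArgs(buckets, true, maxWaitTime);
        HDFSForceCompactionResultCollector rc = new HDFSForceCompactionResultCollector();

        AbstractExecution exec = (AbstractExecution) FunctionService
            .onRegion(pr)
            .withArgs(args)
            .withCollector(rc);
        exec.setWaitOnExceptionFlag(true);
        exec.execute(HDFSForceCompactionFunction.ID);

        List<CompactionStatus> statuses = rc.getResult();
        if (rc.shouldRetry()) {
          // The production path would re-execute against only the failed buckets,
          // which is why isHA() returns true for this function; omitted here.
        }
        return statuses;
      }
    }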

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionResultCollector.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionResultCollector.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionResultCollector.java
new file mode 100644
index 0000000..ee5e4aa
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionResultCollector.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import com.gemstone.gemfire.cache.execute.FunctionException;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.distributed.internal.ReplyProcessor21;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.execute.LocalResultCollector;
+
+/**
+ * Collects {@link CompactionStatus} replies from HDFSForceCompactionFunction and
+ * tracks which buckets, if any, need to be retried.
+ */
+public class HDFSForceCompactionResultCollector implements LocalResultCollector<Object, List<CompactionStatus>> {
+
+  /** List of received replies. */
+  private List<CompactionStatus> reply = new ArrayList<CompactionStatus>();
+
+  /** Latch that blocks the caller of getResult() until results are complete. */
+  private CountDownLatch waitForResults = new CountDownLatch(1);
+
+  /** Set when clearResults() is called to indicate a failure. */
+  private volatile boolean shouldRetry;
+
+  private ReplyProcessor21 processor;
+
+  @Override
+  public List<CompactionStatus> getResult() throws FunctionException {
+    try {
+      waitForResults.await();
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      GemFireCacheImpl.getExisting().getCancelCriterion().checkCancelInProgress(e);
+      throw new FunctionException(e);
+    }
+    return reply;
+  }
+
+  @Override
+  public List<CompactionStatus> getResult(long timeout, TimeUnit unit)
+      throws FunctionException, InterruptedException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void addResult(DistributedMember memberID,
+      Object resultOfSingleExecution) {
+    if (resultOfSingleExecution instanceof CompactionStatus) {
+      CompactionStatus status = (CompactionStatus) resultOfSingleExecution;
+      if (status.getBucketId() != HDFSForceCompactionFunction.BUCKET_ID_FOR_LAST_RESULT) {
+        reply.add(status);
+      }
+    }
+  }
+
+  @Override
+  public void endResults() {
+    waitForResults.countDown();
+  }
+
+  @Override
+  public void clearResults() {
+    this.shouldRetry = true;
+    waitForResults.countDown();
+  }
+
+  /**
+   * @return true if retry should be attempted
+   */
+  public boolean shouldRetry() {
+    return this.shouldRetry || !getFailedBucketIds().isEmpty();
+  }
+
+  private Set<Integer> getFailedBucketIds() {
+    Set<Integer> result = new HashSet<Integer>();
+    for (CompactionStatus status : reply) {
+      if (!status.isStatus()) {
+        result.add(status.getBucketId());
+      }
+    }
+    return result;
+  }
+
+  public Set<Integer> getSuccessfulBucketIds() {
+    Set<Integer> result = new HashSet<Integer>();
+    for (CompactionStatus status : reply) {
+      if (status.isStatus()) {
+        result.add(status.getBucketId());
+      }
+    }
+    return result;
+  }
+
+  @Override
+  public void setProcessor(ReplyProcessor21 processor) {
+    this.processor = processor;
+  }
+
+  @Override
+  public ReplyProcessor21 getProcessor() {
+    return this.processor;
+  }
+
+  @Override
+  public void setException(Throwable exception) {
+    // no-op; failures are detected via shouldRetry() and the failed bucket ids
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSLastCompactionTimeFunction.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSLastCompactionTimeFunction.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSLastCompactionTimeFunction.java
new file mode 100644
index 0000000..789fe4d
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSLastCompactionTimeFunction.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import com.gemstone.gemfire.cache.execute.FunctionAdapter;
+import com.gemstone.gemfire.cache.execute.FunctionContext;
+import com.gemstone.gemfire.cache.execute.RegionFunctionContext;
+import com.gemstone.gemfire.internal.InternalEntity;
+import com.gemstone.gemfire.internal.cache.PartitionedRegion;
+
+/**
+ * Function that returns the oldest major-compaction timestamp among all
+ * buckets on the members.
+ *
+ */
+@SuppressWarnings("serial")
+public class HDFSLastCompactionTimeFunction extends FunctionAdapter implements InternalEntity {
+
+  public static final String ID = "HDFSLastCompactionTimeFunction";
+
+  @Override
+  public void execute(FunctionContext context) {
+    RegionFunctionContext rfc = (RegionFunctionContext) context;
+    PartitionedRegion pr = (PartitionedRegion) rfc.getDataSet();
+    rfc.getResultSender().lastResult(pr.lastLocalMajorHDFSCompaction());
+  }
+
+  @Override
+  public String getId() {
+    return ID;
+  }
+
+  @Override
+  public boolean isHA() {
+    return true;
+  }
+
+  @Override
+  public boolean optimizeForWrite() {
+    return true;
+  }
+}
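
The per-member results of this function are presumably reduced to a single value by the
PartitionedRegion.lastMajorHDFSCompaction() call seen in the DUnit tests. A rough sketch of that
reduction using the default result collector; the min-based aggregation and the zero fallback are
assumptions, and the class name is illustrative.

    package com.gemstone.gemfire.cache.hdfs.internal.hoplog;

    import java.util.List;

    import com.gemstone.gemfire.cache.execute.FunctionService;
    import com.gemstone.gemfire.cache.execute.ResultCollector;
    import com.gemstone.gemfire.internal.cache.PartitionedRegion;

    public final class LastCompactionTimeSketch {
      /** Oldest last-major-compaction timestamp reported by any member, or 0 if none reported. */
      static long oldestMajorCompactionTime(PartitionedRegion pr) {
        ResultCollector<?, ?> rc = FunctionService.onRegion(pr)
            .execute(HDFSLastCompactionTimeFunction.ID);
        long oldest = Long.MAX_VALUE;
        for (Object timestamp : (List<?>) rc.getResult()) {
          oldest = Math.min(oldest, (Long) timestamp);
        }
        return oldest == Long.MAX_VALUE ? 0L : oldest;
      }
    }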

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSRegionDirector.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSRegionDirector.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSRegionDirector.java
new file mode 100644
index 0000000..6d70dce
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSRegionDirector.java
@@ -0,0 +1,480 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+import com.gemstone.gemfire.StatisticsFactory;
+import com.gemstone.gemfire.cache.GemFireCache;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
+import com.gemstone.gemfire.i18n.LogWriterI18n;
+import com.gemstone.gemfire.internal.SystemTimer;
+import com.gemstone.gemfire.internal.cache.LocalRegion;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
+
+import org.apache.logging.log4j.Logger;
+
+/**
+ * Cache of hoplog organizers associated with the buckets of a region. The director creates an
+ * organizer instance on the first get request; it does not read HDFS in advance. Creation of an
+ * organizer depends on file system initialization, which takes place outside this class. This
+ * class also provides utility methods to monitor usage and manage bucket sets.
+ * 
+ */
+public class HDFSRegionDirector {
+  /*
+   * Maps each region name to its listener and store objects. This map must be populated before file
+   * organizers of a bucket can be created
+   */
+  private final ConcurrentHashMap<String, HdfsRegionManager> regionManagerMap;
+  
+  /**
+   * Regions of this GemFire cache are managed by this director. TODO: this
+   * should be final and be provided at the time of creation of this instance, or
+   * through a cache directory.
+   */
+  private GemFireCache cache;
+  
+  // singleton instance
+  private static HDFSRegionDirector instance;
+  
+  final ScheduledExecutorService janitor;
+  private JanitorTask janitorTask;
+  
+  private static final Logger logger = LogService.getLogger();
+  protected final static String logPrefix = "<" + "RegionDirector" + "> ";
+  
+  
+  private HDFSRegionDirector() {
+    regionManagerMap = new ConcurrentHashMap<String, HDFSRegionDirector.HdfsRegionManager>();
+    janitor = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
+      @Override
+      public Thread newThread(Runnable r) {
+        Thread thread = new Thread(r, "HDFSRegionJanitor");
+        thread.setDaemon(true);
+        return thread;
+      }
+    });
+    
+    long interval = Long.getLong(HoplogConfig.JANITOR_INTERVAL_SECS,
+        HoplogConfig.JANITOR_INTERVAL_SECS_DEFAULT);
+    
+    janitorTask = new JanitorTask();
+    janitor.scheduleWithFixedDelay(janitorTask, interval, interval,
+        TimeUnit.SECONDS);
+  }
+  
+  public synchronized static HDFSRegionDirector getInstance() {
+    if (instance == null) {
+      instance = new HDFSRegionDirector();
+    }
+    return instance;
+  }
+  
+  public HDFSRegionDirector setCache(GemFireCache cache) {
+    this.cache = cache;
+    return this;
+  }
+
+  public GemFireCache getCache() {
+    return this.cache;
+  }
+  /**
+   * Caches the listener, store object, and list of organizers associated with
+   * a region. Subsequently, these objects are used each time an organizer is created.
+   */
+  public synchronized HdfsRegionManager manageRegion(LocalRegion region, String storeName,
+      HoplogListener listener) {
+    
+    HdfsRegionManager manager = regionManagerMap.get(region.getFullPath());
+    if (manager != null) {
+      // This is an attempt to re-register a region, presumably to modify the
+      // listener or HDFS store impl associated with it, so clear the existing
+      // registration first.
+
+      clear(region.getFullPath());
+    }
+    
+    HDFSStoreImpl store = HDFSStoreDirector.getInstance().getHDFSStore(storeName);
+    manager = new HdfsRegionManager(region, store, listener, getStatsFactory(), this);
+    regionManagerMap.put(region.getFullPath(), manager);
+    
+    if (logger.isDebugEnabled()) {
+      logger.debug("{}Now managing region " + region.getFullPath(), logPrefix);
+    }
+    
+    return manager;
+  }
+  
+  /**
+   * Find the regions that are part of a particular HDFS store.
+   */
+  public Collection<String> getRegionsInStore(HDFSStore store) {
+    TreeSet<String> regions = new TreeSet<String>();
+    for(Map.Entry<String, HdfsRegionManager> entry : regionManagerMap.entrySet()) {
+      if(entry.getValue().getStore().equals(store)) {
+        regions.add(entry.getKey());
+      }
+    }
+    return regions;
+  }
+  
+  public int getBucketCount(String regionPath) {
+    HdfsRegionManager manager = regionManagerMap.get(regionPath);
+    if (manager == null) {
+      throw new IllegalStateException("Region not initialized");
+    }
+
+    return manager.bucketOrganizerMap.size();
+  }
+  
+  public void closeWritersForRegion(String regionPath, int minSizeForFileRollover) throws IOException {
+    regionManagerMap.get(regionPath).closeWriters(minSizeForFileRollover);
+  }
+  /**
+   * Removes and closes all {@link HoplogOrganizer} instances of this region. This call is
+   * expected when a PR disowns a region.
+   */
+  public synchronized void clear(String regionPath) {
+    HdfsRegionManager manager = regionManagerMap.remove(regionPath);
+    if (manager != null) {
+      if (logger.isDebugEnabled()) {
+        logger.debug("{}Closing hoplog region manager for " + regionPath, logPrefix);
+      }
+      manager.close();
+    }
+  }
+
+  /**
+   * Closes all region managers, organizers and hoplogs. This method should be
+   * called before closing the cache to gracefully release all resources
+   */
+  public static synchronized void reset() {
+    if (instance == null) {
+      // nothing to reset
+      return;
+    }
+    
+    instance.janitor.shutdownNow();
+    
+    for (String region : instance.regionManagerMap.keySet()) {
+      instance.clear(region);
+    }
+    instance.cache = null;
+    instance = null;
+  }
+  
+  /**
+   * Terminates the current janitor task and schedules a new one. The rate of the new
+   * task is based on the value of the system property at that time.
+   */
+  public static synchronized void resetJanitor() {
+    instance.janitorTask.terminate();
+    instance.janitorTask = instance.new JanitorTask();
+    long interval = Long.getLong(HoplogConfig.JANITOR_INTERVAL_SECS,
+        HoplogConfig.JANITOR_INTERVAL_SECS_DEFAULT);
+    instance.janitor.scheduleWithFixedDelay(instance.janitorTask, 0, interval,
+        TimeUnit.SECONDS);
+  }
+  
+  /**
+   * @param regionPath name of region for which stats object is desired
+   * @return {@link SortedOplogStatistics} instance associated with hdfs region
+   *         name. Null if region is not managed by director
+   */
+  public synchronized SortedOplogStatistics getHdfsRegionStats(String regionPath) {
+    HdfsRegionManager manager = regionManagerMap.get(regionPath);
+    return manager == null ? null : manager.getHdfsStats();
+  }
+  
+  private StatisticsFactory getStatsFactory() {
+    return cache.getDistributedSystem();
+  }
+
+  /**
+   * A helper class to manage region and its organizers
+   */
+  public static class HdfsRegionManager {
+    // name and store configuration of the region whose buckets are managed by this director.
+    private LocalRegion region;
+    private HDFSStoreImpl store;
+    private HoplogListener listener;
+    private volatile boolean closed = false;
+    private final int FILE_ROLLOVER_TASK_INTERVAL = Integer.parseInt
+        (System.getProperty("gemfire.HDFSRegionDirector.FILE_ROLLOVER_TASK_INTERVAL_SECONDS", "60"));
+    
+    private SystemTimer hoplogCloseTimer = null;
+    
+    // instance of hdfs statistics object for this hdfs based region. This
+    // object will collect usage and performance related statistics.
+    private final SortedOplogStatistics hdfsStats;
+
+    /*
+     * An instance of organizer is created for each bucket of the region residing on this
+     * node. This member maps a bucket id to its corresponding organizer instance. A lock is used to
+     * manage concurrent writes to the map.
+     */
+    private ConcurrentMap<Integer, HoplogOrganizer> bucketOrganizerMap;
+    
+    private HDFSRegionDirector hdfsRegionDirector;
+
+    /**
+     * @param listener
+     *          listener of change events like file creation and deletion
+     * @param hdfsRegionDirector 
+     */
+    HdfsRegionManager(LocalRegion region, HDFSStoreImpl store,
+        HoplogListener listener, StatisticsFactory statsFactory, HDFSRegionDirector hdfsRegionDirector) {
+      bucketOrganizerMap = new ConcurrentHashMap<Integer, HoplogOrganizer>();
+      this.region = region;
+      this.listener = listener;
+      this.store = store;
+      this.hdfsStats = new SortedOplogStatistics(statsFactory, "HDFSRegionStatistics", region.getFullPath());
+      this.hdfsRegionDirector = hdfsRegionDirector;
+    }
+
+    public void closeWriters(int minSizeForFileRollover) throws IOException {
+      final long startTime = System.currentTimeMillis();
+      long elapsedTime = 0;
+        
+      Collection<HoplogOrganizer> organizers = bucketOrganizerMap.values();
+      
+      for (HoplogOrganizer organizer : organizers) {
+      
+        try {
+          this.getRegion().checkReadiness();
+        } catch (Exception e) {
+          break;
+        }
+        
+        ((HDFSUnsortedHoplogOrganizer)organizer).synchronizedCloseWriter(true, 0, 
+            minSizeForFileRollover);
+      }
+      
+    }
+
+    public synchronized <T extends PersistedEventImpl> HoplogOrganizer<T> create(int bucketId) throws IOException {
+      assert !bucketOrganizerMap.containsKey(bucketId);
+
+      HoplogOrganizer<?> organizer = region.getHDFSWriteOnly() 
+          ? new HDFSUnsortedHoplogOrganizer(this, bucketId) 
+          : new HdfsSortedOplogOrganizer(this, bucketId);
+
+      bucketOrganizerMap.put(bucketId, organizer);
+      // initialize a timer that periodically closes the hoplog writer if the 
+      // time for rollover has passed. It also has the responsibility to fix the files.  
+      if (this.region.getHDFSWriteOnly() && 
+          hoplogCloseTimer == null) {
+        hoplogCloseTimer = new SystemTimer(hdfsRegionDirector.
+            getCache().getDistributedSystem(), true);
+        
+        // schedule the task to fix the files that were not closed properly 
+        // last time. 
+        hoplogCloseTimer.scheduleAtFixedRate(new CloseTmpHoplogsTimerTask(this), 
+            1000, FILE_ROLLOVER_TASK_INTERVAL * 1000);
+        
+        if (logger.isDebugEnabled()) {
+          logger.debug("{}Schedulng hoplog rollover timer with interval "+ FILE_ROLLOVER_TASK_INTERVAL + 
+              " for hoplog organizer for " + region.getFullPath()
+              + ":" + bucketId + " " + organizer, logPrefix);
+        }
+      }
+      
+      if (logger.isDebugEnabled()) {
+        logger.debug("{}Constructed hoplog organizer for " + region.getFullPath()
+            + ":" + bucketId + " " + organizer, logPrefix);
+      }
+      return (HoplogOrganizer<T>) organizer;
+    }
+    
+    public synchronized <T extends PersistedEventImpl> void addOrganizer(
+        int bucketId, HoplogOrganizer<T> organizer) {
+      if (bucketOrganizerMap.containsKey(bucketId)) {
+        throw new IllegalArgumentException();
+      }
+      if (logger.isDebugEnabled()) {
+        logger.debug("{}added pre constructed organizer " + region.getFullPath()
+            + ":" + bucketId + " " + organizer, logPrefix);
+      }
+      bucketOrganizerMap.put(bucketId, organizer);
+    }
+
+    public void close() {
+      closed = true;
+      
+      if (this.region.getHDFSWriteOnly() && 
+          hoplogCloseTimer != null) {
+        hoplogCloseTimer.cancel();
+        hoplogCloseTimer = null;
+      }
+      for (int bucket : bucketOrganizerMap.keySet()) {
+        close(bucket);
+      }
+    }
+    
+    public boolean isClosed() {
+      return closed;
+    }
+
+    public synchronized void close(int bucketId) {
+      try {
+        HoplogOrganizer organizer = bucketOrganizerMap.remove(bucketId);
+        if (organizer != null) {
+          if (logger.isDebugEnabled()) {
+            logger.debug("{}Closing hoplog organizer for " + region.getFullPath() + ":" + 
+                bucketId + " " + organizer, logPrefix);
+          }
+          organizer.close();
+        }
+      } catch (IOException e) {
+        if (logger.isDebugEnabled()) {
+          logger.debug(logPrefix + "Error closing hoplog organizer for " + region.getFullPath() + ":" + bucketId, e);
+        }
+      }
+      //TODO abort compaction and flush requests for this region
+    }
+    
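+    // Illustrative example (the path below is hypothetical): a region path such as
+    // "/outer/my_region" maps to the folder name "outer_my__region": underscores
+    // are doubled first, the leading slash is dropped, then remaining slashes
+    // become underscores.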
+    public static String getRegionFolder(String regionPath) {
+      String folder = regionPath;
+      //Change any underscore into a double underscore
+      folder = folder.replace("_", "__");
+      //get rid of the leading slash
+      folder = folder.replaceFirst("^/", "");
+      //replace slashes with underscores
+      folder = folder.replace('/', '_');
+      return folder;
+    }
+
+    public String getRegionFolder() {
+      return getRegionFolder(region.getFullPath());
+    }
+
+    public HoplogListener getListener() {
+      return listener;
+    }
+
+    public HDFSStoreImpl getStore() {
+      return store;
+    }
+
+    public LocalRegion getRegion() {
+      return region;
+    }
+    
+    public SortedOplogStatistics getHdfsStats() {
+      return hdfsStats;
+    }
+    
+    public Collection<HoplogOrganizer> getBucketOrganizers(){
+      return this.bucketOrganizerMap.values();
+    }
+
+    /**
+     * get the HoplogOrganizers only for the given set of buckets
+     */
+    public Collection<HoplogOrganizer> getBucketOrganizers(Set<Integer> buckets){
+      Set<HoplogOrganizer> result = new HashSet<HoplogOrganizer>();
+      for (Integer bucketId : buckets) {
+        result.add(this.bucketOrganizerMap.get(bucketId));
+      }
+      return result;
+    }
+
+    /**
+     * Delete all files from HDFS for this region. This method
+     * should be called after all members have destroyed their
+     * region in gemfire, so there should be no threads accessing
+     * these files.
+     * @throws IOException 
+     */
+    public void destroyData() throws IOException {
+      //Make sure everything is shut down and closed.
+      close();
+      if (store == null) {
+        return;
+      }
+      Path regionPath = new Path(store.getHomeDir(), getRegionFolder());
+      
+      //Delete all files in HDFS.
+      FileSystem fs = getStore().getFileSystem();
+      if(!fs.delete(regionPath, true)) {
+        if(fs.exists(regionPath)) {
+          throw new IOException("Unable to delete " + regionPath);
+        }
+      }
+    }
+
+    public void performMaintenance() throws IOException {
+      Collection<HoplogOrganizer> buckets = getBucketOrganizers();
+      for (HoplogOrganizer bucket : buckets) {
+        bucket.performMaintenance();
+      }
+    }
+  }
+  
+  private class JanitorTask implements Runnable {
+    boolean terminated = false;
+    @Override
+    public void run() {
+      if (terminated) {
+        return;
+      }
+      fineLog("Executing HDFS Region janitor task", null);
+      
+      Collection<HdfsRegionManager> regions = regionManagerMap.values();
+      for (HdfsRegionManager region : regions) {
+        fineLog("Maintaining region:" + region.getRegionFolder(), null);
+        try {
+          region.performMaintenance();
+        } catch (Throwable e) {
+          logger.info(LocalizedMessage.create(LocalizedStrings.HOPLOG_IO_ERROR , region.getRegionFolder()));
+          logger.info(LocalizedMessage.create(LocalizedStrings.ONE_ARG, e.getMessage()));
+          fineLog(null, e);
+        }
+      }
+    }
+
+    public void terminate() {
+      terminated = true;
+    }
+  }
+  
+  protected static void fineLog(String message, Throwable e) {
+    if(logger.isDebugEnabled()) {
+      logger.debug(message, e);
+    }
+  }
+}
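
For context, a minimal usage sketch of the director above. It is illustrative only: the method name, the cache/region/listener arguments and the store name "myHdfsStore" are assumptions; the HDFSRegionDirector and HdfsRegionManager calls are the ones defined in this file.

  // Wire a region into the director and create an organizer for one bucket.
  void registerWithDirector(GemFireCache cache, LocalRegion region,
      HoplogListener listener, int bucketId) throws IOException {
    HDFSRegionDirector director = HDFSRegionDirector.getInstance().setCache(cache);

    // Associates the region with its HDFS store and hoplog listener. The store
    // named "myHdfsStore" is assumed to be registered with HDFSStoreDirector.
    HDFSRegionDirector.HdfsRegionManager manager =
        director.manageRegion(region, "myHdfsStore", listener);

    // One organizer is created per locally hosted bucket.
    HoplogOrganizer<?> organizer = manager.create(bucketId);

    // At cache close, reset() shuts down the janitor and closes all managers,
    // organizers and hoplogs.
    HDFSRegionDirector.reset();
  }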

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSStoreDirector.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSStoreDirector.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSStoreDirector.java
new file mode 100644
index 0000000..880ef3e
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSStoreDirector.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.concurrent.ConcurrentHashMap;
+
+
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
+
+/**
+ * HDFSStoreDirector manages all instances of HDFSStoreImpl.
+ *
+ */
+public final class HDFSStoreDirector {
+  private final ConcurrentHashMap<String, HDFSStoreImpl> storeMap = new ConcurrentHashMap<String, HDFSStoreImpl>();
+
+  // singleton instance
+  private static volatile HDFSStoreDirector instance;
+  
+  private HDFSStoreDirector() {
+
+  }
+  
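+  // Note: double-checked locking below is safe because the instance field is
+  // declared volatile.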
+  public static final HDFSStoreDirector getInstance() {
+    if (instance == null) {
+      synchronized (HDFSStoreDirector.class)  {
+        if (instance == null)
+          instance = new HDFSStoreDirector();
+      }
+    }
+    return instance;
+  }
+
+  // Called when the region is created.
+  public final void addHDFSStore(HDFSStoreImpl hdfsStore){
+    this.storeMap.put(hdfsStore.getName(), hdfsStore); 
+  }
+  
+  public final HDFSStoreImpl getHDFSStore(String hdfsStoreName) {
+    return this.storeMap.get(hdfsStoreName);
+  }
+  
+  public final void removeHDFSStore(String hdfsStoreName) {
+    this.storeMap.remove(hdfsStoreName);
+  } 
+  
+  public void closeHDFSStores() {
+    Iterator<HDFSStoreImpl> it = this.storeMap.values().iterator();
+    while (it.hasNext()) {
+      HDFSStoreImpl hsi = it.next();
+      hsi.close();
+    }
+    this.storeMap.clear();
+  }
+
+   public ArrayList<HDFSStoreImpl> getAllHDFSStores() {
+    ArrayList<HDFSStoreImpl> hdfsStores = new ArrayList<HDFSStoreImpl>();
+    hdfsStores.addAll(this.storeMap.values());
+    return hdfsStores;
+  }
+}
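
A brief illustrative sketch of the registry above; the surrounding method and the hdfsStore argument are assumptions, while the director calls are the ones defined in this file.

  // Register a store at creation time, look it up by name, and close all
  // stores when the cache shuts down.
  void storeLifecycle(HDFSStoreImpl hdfsStore) {
    HDFSStoreDirector director = HDFSStoreDirector.getInstance();
    director.addHDFSStore(hdfsStore);
    HDFSStoreImpl found = director.getHDFSStore(hdfsStore.getName());
    director.closeHDFSStores();
  }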

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizer.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizer.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizer.java
new file mode 100644
index 0000000..cbb35cb
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizer.java
@@ -0,0 +1,447 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.concurrent.Callable;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+
+import com.gemstone.gemfire.cache.CacheClosedException;
+import com.gemstone.gemfire.cache.hdfs.internal.QueuedPersistentEvent;
+import com.gemstone.gemfire.cache.hdfs.internal.UnsortedHoplogPersistedEvent;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog.HoplogWriter;
+import com.gemstone.gemfire.internal.HeapDataOutputStream;
+import com.gemstone.gemfire.internal.cache.ForceReattemptException;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+/**
+ * Manages unsorted Hoplog files for a bucket (Streaming Ingest option). An instance per bucket
+ * will exist in each PR.
+ *
+ */
+public class HDFSUnsortedHoplogOrganizer extends AbstractHoplogOrganizer<UnsortedHoplogPersistedEvent> {
+  public static final String HOPLOG_REGEX = HOPLOG_NAME_REGEX + "("
+      + SEQ_HOPLOG_EXTENSION + "|" + TEMP_HOPLOG_EXTENSION + ")";
+  public static final Pattern HOPLOG_PATTERN = Pattern.compile(HOPLOG_REGEX);
+  protected static String TMP_FILE_NAME_REGEX = HOPLOG_NAME_REGEX + SEQ_HOPLOG_EXTENSION + TEMP_HOPLOG_EXTENSION + "$";
+  protected static final Pattern patternForTmpHoplog = Pattern.compile(TMP_FILE_NAME_REGEX);
+  
+   volatile private HoplogWriter writer;
+   volatile private Hoplog currentHoplog;
+   
+   volatile private long lastFlushTime = System.currentTimeMillis();
+   
+   volatile private boolean abortFlush = false;
+   private FileSystem fileSystem;
+   
+   public HDFSUnsortedHoplogOrganizer(HdfsRegionManager region, int bucketId) throws IOException{
+    super(region, bucketId);
+    writer = null;
+    sequence = new AtomicInteger(0);
+
+    fileSystem = store.getFileSystem();
+    if (! fileSystem.exists(bucketPath)) {
+      return;
+    }
+    
+    FileStatus validHoplogs[] = FSUtils.listStatus(fileSystem, bucketPath, new PathFilter() {
+      @Override
+      public boolean accept(Path file) {
+        // All valid hoplog files must match the regex
+        Matcher matcher = HOPLOG_PATTERN.matcher(file.getName());
+        return matcher.matches();
+      }
+    });
+
+    if (validHoplogs != null && validHoplogs.length > 0) {
+      for (FileStatus file : validHoplogs) {
+        // account for the disk used by this file
+        incrementDiskUsage(file.getLen());
+      }
+    }
+
+  }
+  
+    @Override
+    public void close() throws IOException {
+      super.close();
+      if (logger.isDebugEnabled())
+        logger.debug("{}Closing the hoplog organizer and the open files", logPrefix);
+      // abort the flush so that we can immediately call the close current writer. 
+      abortFlush = true;
+      synchronizedCloseWriter(true, 0, 0);
+    }
+    
+    
+    /**
+     * Flushes the data to HDFS. 
+     * Synchronization ensures that the writer is not closed when flush is happening.
+     * To abort the flush, abortFlush needs to be set.  
+     * @throws ForceReattemptException 
+     */
+     @Override
+    public synchronized void flush(Iterator<? extends QueuedPersistentEvent> bufferIter, final int count)
+        throws IOException, ForceReattemptException {
+      assert bufferIter != null;
+      
+      if (abortFlush)
+        throw new CacheClosedException("Either the region has been cleared " +
+            "or closed. Aborting the ongoing flush operation.");
+      if (logger.isDebugEnabled())
+        logger.debug("{}Initializing flush operation", logPrefix);
+      
+      // variables for updating stats
+      long start = stats.getFlush().begin();
+      int byteCount = 0;
+      if (writer == null) {
+        // Hoplogs of sequence files are always created with a 0 sequence number
+        currentHoplog = getTmpSortedOplog(0, SEQ_HOPLOG_EXTENSION);
+        try {
+          writer = this.store.getSingletonWriter().runSerially(new Callable<Hoplog.HoplogWriter>() {
+            @Override
+            public HoplogWriter call() throws Exception {
+              return currentHoplog.createWriter(count);
+            }
+          });
+        } catch (Exception e) {
+          if (e instanceof IOException) {
+            throw (IOException)e;
+          }
+          throw new IOException(e);
+        }
+      }
+      long timeSinceLastFlush = (System.currentTimeMillis() - lastFlushTime)/1000 ;
+      
+      try {
+        /**MergeGemXDHDFSToGFE changed the following statement as the code of HeapDataOutputStream is not merged */
+        //HeapDataOutputStream out = new HeapDataOutputStream();
+        while (bufferIter.hasNext()) {
+          HeapDataOutputStream out = new HeapDataOutputStream(1024, null);
+          if (abortFlush) {
+            stats.getFlush().end(byteCount, start);
+            throw new CacheClosedException("Either the region has been cleared " +
+            		"or closed. Aborting the ongoing flush operation.");
+          }
+          QueuedPersistentEvent item = bufferIter.next();
+          item.toHoplogEventBytes(out);
+          byte[] valueBytes = out.toByteArray();
+          writer.append(item.getRawKey(), valueBytes);
+          // add key length and value length to stats byte counter
+          byteCount += (item.getRawKey().length + valueBytes.length);
+          /**MergeGemXDHDFSToGFE how to clear for reuse. Leaving it for Darrel to merge this change*/
+          //out.clearForReuse();
+        }
+        // ping secondaries before making the file a legitimate file to ensure 
+        // that in case of split brain, no other vm has taken over as primary. #50110.
+        if (!abortFlush)
+          pingSecondaries();
+        // append completed. If the file is to be rolled over, 
+        // close writer and rename the file to a legitimate name.
+        // Else, sync the already written data with HDFS nodes. 
+        int maxFileSize = this.store.getWriteOnlyFileRolloverSize() * 1024 * 1024;  
+        int fileRolloverInterval = this.store.getWriteOnlyFileRolloverInterval(); 
+        if (writer.getCurrentSize() >= maxFileSize || 
+            timeSinceLastFlush >= fileRolloverInterval) {
+          closeCurrentWriter();
+        }
+        else {
+          // if flush is not aborted, hsync the batch. It ensures that 
+          // the batch has reached HDFS and we can discard it. 
+          if (!abortFlush)
+            writer.hsync();
+        }
+      } catch (IOException e) {
+        stats.getFlush().error(start);
+        // as there is an exception, it can probably be a file-specific problem.
+        // close the current file to avoid any file specific issues next time  
+        closeCurrentWriter();
+        // throw the exception so that async queue will dispatch the same batch again 
+        throw e;
+      } 
+      
+      stats.getFlush().end(byteCount, start);
+    }
+    
+    /**
+     * Synchronization ensures that the writer is not closed when flush is happening. 
+     */
+    synchronized void synchronizedCloseWriter(boolean forceClose, 
+        long timeSinceLastFlush, int minsizeforrollover) throws IOException { 
+      long writerSize = 0;
+      if (writer != null){
+        writerSize = writer.getCurrentSize();
+      }
+      
+      if (writerSize < (minsizeforrollover * 1024L))
+        return;
+      
+      int maxFileSize = this.store.getWriteOnlyFileRolloverSize() * 1024 * 1024;  
+      int fileRolloverInterval = this.store.getWriteOnlyFileRolloverInterval(); 
+      if (writerSize >= maxFileSize || 
+          timeSinceLastFlush >= fileRolloverInterval || forceClose) {
+        closeCurrentWriter();
+      }
+    }
+
+    
+    /**
+     * Closes the current writer so that next time a new hoplog can 
+     * be created. Also, fixes any tmp hoplogs. 
+     * 
+     * @throws IOException
+     */
+    void closeCurrentWriter() throws IOException {
+      
+      if (writer != null) {
+        // If this organizer is closing, it is ok to ignore exceptions here
+        // because CloseTmpHoplogsTimerTask
+        // on another member may have already renamed the hoplog
+        // fixes bug 49141
+        boolean isClosing = abortFlush;
+        try {
+          incrementDiskUsage(writer.getCurrentSize());
+        } catch (IOException e) {
+          if (!isClosing) {
+            throw e;
+          }
+        }
+        if (logger.isDebugEnabled())
+          logger.debug("{}Closing hoplog " + currentHoplog.getFileName(), logPrefix);
+        try{
+          writer.close();
+          makeLegitimate(currentHoplog);
+        } catch (IOException e) {
+          if (!isClosing) {
+            logger.warn(LocalizedStrings.HOPLOG_FLUSH_OPERATION_FAILED, e);
+            throw e;
+          }
+        } finally {
+          writer = null;
+          lastFlushTime = System.currentTimeMillis();
+        }
+      }
+      else
+        lastFlushTime = System.currentTimeMillis();
+    }
+
+    @Override
+    public void clear() throws IOException {
+      boolean prevAbortFlushFlag = abortFlush;
+      // abort the flush so that we can immediately call the close current writer. 
+      abortFlush = true;
+      
+      // Close if there is any existing writer. 
+      try {
+        synchronizedCloseWriter(true, 0, 0);
+      } catch (IOException e) {
+        logger.warn(LocalizedStrings.HOPLOG_CLOSE_FAILED, e);
+      }
+      
+      // reenable the aborted flush
+      abortFlush = prevAbortFlushFlag;
+      
+      // Mark the hoplogs for deletion
+      markHoplogsForDeletion();
+      
+    }
+  
+    @Override
+    public void performMaintenance() {
+      // TODO remove the timer for tmp file conversion. Use this instead
+    }
+
+    @Override
+    public Future<CompactionStatus> forceCompaction(boolean isMajor) {
+      return null;
+    }
+
+    @Override
+    protected Hoplog getHoplog(Path hoplogPath) throws IOException {
+      Hoplog so = new SequenceFileHoplog(fileSystem, hoplogPath, stats);
+      return so;
+    }
+  
+  /**
+   * Fixes the size of hoplogs that were not closed properly last time. 
+   * Such hoplogs are *.tmphop files. Identify them, open them and close
+   * them; this fixes the size. After doing this, rename them to *.hop.
+   * 
+   * @throws IOException
+   * @throws ForceReattemptException 
+   */
+  void identifyAndFixTmpHoplogs(FileSystem fs) throws IOException, ForceReattemptException {
+    if (logger.isDebugEnabled())
+      logger.debug("{}Fixing temporary hoplogs", logPrefix);
+    
+    // A different filesystem is passed to this function for the following reason: 
+    // For HDFS, if a file wasn't closed properly last time, 
+    // while calling FileSystem.append for this file, FSNamesystem.startFileInternal->
+    // FSNamesystem.recoverLeaseInternal function gets called. 
+    // This function throws AlreadyBeingCreatedException if there is an open handle, to any other file, 
+    // created using the same FileSystem object. This is a bug and is being tracked at: 
+    // https://issues.apache.org/jira/browse/HDFS-3848?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
+    // 
+    // The fix for this bug is not yet part of Pivotal HD. So to overcome the bug, 
+    // we create a new file system for the timer task so that it does not encounter the bug. 
+    
+    FileStatus tmpHoplogs[] = FSUtils.listStatus(fs, fs.makeQualified(bucketPath), new PathFilter() {
+      @Override
+      public boolean accept(Path file) {
+        // All valid hoplog files must match the regex
+        Matcher matcher = patternForTmpHoplog.matcher(file.getName());
+        return matcher.matches();
+      }
+    });
+    
+    if (tmpHoplogs == null || tmpHoplogs.length == 0) {
+      if (logger.isDebugEnabled())
+        logger.debug("{}No files to fix", logPrefix);
+      return;
+    }
+    // ping secondaries so that in case of split brain, no other vm has taken over
+    // as primary. #50110. 
+    pingSecondaries();
+    if (logger.isDebugEnabled())
+      logger.debug("{}Files to fix " + tmpHoplogs.length, logPrefix);
+
+    String currentHoplogName = null;
+    // get the current hoplog name. We need to ignore current hoplog while fixing. 
+    if (currentHoplog != null) {
+      currentHoplogName = currentHoplog.getFileName();
+    }
+    
+    for (int i = 0; i < tmpHoplogs.length; i++) {
+      // Skip directories
+      if (tmpHoplogs[i].isDirectory()) {
+        continue;
+      }
+
+      final Path p = tmpHoplogs[i].getPath();
+      
+      if (tmpHoplogs[i].getPath().getName().equals(currentHoplogName)){
+        if (logger.isDebugEnabled())
+          logger.debug("Skipping current file: " + tmpHoplogs[i].getPath().getName(), logPrefix);
+        continue;
+      } 
+      
+      SequenceFileHoplog hoplog = new SequenceFileHoplog(fs, p, stats);
+      try {
+        makeLegitimate(hoplog);
+        logger.info (LocalizedMessage.create(LocalizedStrings.DEBUG, "Hoplog " + p + " was a temporary " +
+            "hoplog because the node managing it wasn't shutdown properly last time. Fixed the hoplog name."));
+      } catch (IOException e) {
+        logger.info (LocalizedMessage.create(LocalizedStrings.DEBUG, "Hoplog " + p + " is still a temporary " +
+            "hoplog because the node managing it wasn't shutdown properly last time. Failed to " +
+            "change the hoplog name because an exception was thrown while fixing it. " + e));
+      }
+    }
+  }
+  
+  private FileStatus[] getExpiredHoplogs() throws IOException {
+    FileStatus files[] = FSUtils.listStatus(fileSystem, bucketPath, new PathFilter() {
+      @Override
+      public boolean accept(Path file) {
+        // All expired hoplogs end with the expired extension
+        String fileName = file.getName();
+        if (! fileName.endsWith(EXPIRED_HOPLOG_EXTENSION)) {
+          return false;
+        }
+        return true;
+      }
+    });
+    return files;
+  }
+  /**
+   * Locks the sorted oplogs collection, removes oplogs and renames them for later deletion.
+   * @throws IOException 
+   */
+  private void markHoplogsForDeletion() throws IOException {
+    
+    ArrayList<IOException> errors = new ArrayList<IOException>();
+    FileStatus validHoplogs[] = FSUtils.listStatus(fileSystem, bucketPath, new PathFilter() {
+      @Override
+      public boolean accept(Path file) {
+        // All valid hoplog files must match the regex
+        Matcher matcher = HOPLOG_PATTERN.matcher(file.getName());
+        return matcher.matches();
+      }
+    });
+    
+    FileStatus[] expired = getExpiredHoplogs();
+    validHoplogs = filterValidHoplogs(validHoplogs, expired);
+
+    if (validHoplogs == null || validHoplogs.length == 0) {
+      return;
+    }
+    for (FileStatus fileStatus : validHoplogs) {
+      try {
+        addExpiryMarkerForAFile(getHoplog(fileStatus.getPath()));
+      } catch (IOException e) {
+        // even if there is an IO error continue removing other hoplogs and
+        // notify at the end
+        errors.add(e);
+      }
+    }
+    
+    if (!errors.isEmpty()) {
+      for (IOException e : errors) {
+        logger.warn(LocalizedStrings.HOPLOG_HOPLOG_REMOVE_FAILED, e);
+      }
+    }
+  }
+  
+  @Override
+  public Compactor getCompactor() {
+    throw new UnsupportedOperationException("Not supported for " + this.getClass().getSimpleName());
+  }
+  
+  @Override
+  public HoplogIterator<byte[], UnsortedHoplogPersistedEvent> scan(
+      long startOffset, long length) throws IOException {
+    throw new UnsupportedOperationException("Not supported for " + this.getClass().getSimpleName());
+  }
+
+  public long getLastFlushTime() {
+    return this.lastFlushTime;
+  }
+
+  public long getfileRolloverInterval(){
+    int fileRolloverInterval = this.store.getWriteOnlyFileRolloverInterval();
+    return fileRolloverInterval;
+  }
+
+  @Override
+  public long getLastMajorCompactionTimestamp() {
+    throw new UnsupportedOperationException();
+  }
+
+}
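
The rollover rule applied by flush() and synchronizedCloseWriter() above can be read as the following small helper. This is a sketch for clarity under assumed names (shouldRollover and its parameters); the thresholds mirror the store settings used in the code (write-only file rollover size in MB, rollover interval in seconds).

  // A writer is rolled over once it is large enough or old enough.
  static boolean shouldRollover(long writerSizeBytes, long secondsSinceLastFlush,
      int rolloverSizeMB, int rolloverIntervalSecs) {
    long maxFileSizeBytes = (long) rolloverSizeMB * 1024 * 1024;
    return writerSizeBytes >= maxFileSizeBytes
        || secondsSinceLastFlush >= rolloverIntervalSecs;
  }

When the writer is rolled over, closeCurrentWriter() accounts for the disk usage, closes the writer and renames the *.tmphop file to a legitimate *.hop file via makeLegitimate().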


[02/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunctionJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunctionJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunctionJUnitTest.java
new file mode 100644
index 0000000..f3c66b0
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunctionJUnitTest.java
@@ -0,0 +1,364 @@
+/*=========================================================================
+ * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+
+package com.gemstone.gemfire.management.internal.cli.functions;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.logging.log4j.Logger;
+import org.jmock.Expectations;
+import org.jmock.Mockery;
+import org.jmock.lib.legacy.ClassImposteriser;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.execute.FunctionContext;
+import com.gemstone.gemfire.cache.execute.ResultSender;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.management.internal.cli.util.HDFSStoreNotFoundException;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+/**
+ * The DescribeHDFSStoreFunctionJUnitTest test suite class tests the contract
+ * and functionality of the DescribeHDFSStoreFunction class. </p>
+ * 
+ * @author Namrata Thanvi
+ * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl
+ * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder
+ * @see com.gemstone.gemfire.management.internal.cli.functions.DescribeHDFSStoreFunction
+ * @see org.jmock.Expectations
+ * @see org.jmock.Mockery
+ * @see org.junit.Assert
+ * @see org.junit.Test
+ */
+@SuppressWarnings( { "unused" })
+@Category({IntegrationTest.class, HoplogTest.class})
+public class DescribeHDFSStoreFunctionJUnitTest {
+
+  private static final Logger logger = LogService.getLogger();
+
+  private Mockery mockContext;
+
+  @Before
+  public void setup() {
+    mockContext = new Mockery() {
+      {
+        setImposteriser(ClassImposteriser.INSTANCE);
+      }
+    };
+  }
+
+  @After
+  public void tearDown() {
+    mockContext.assertIsSatisfied();
+    mockContext = null;
+  }
+
+  @Test
+  public void testExecute() throws Throwable {
+    final String hdfsStoreName = "mockHdfsStore";
+    final String memberId = "mockMemberId";
+    final String memberName = "mockMemberName";
+
+    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+
+    final HDFSStoreImpl mockHdfsStore = createMockHDFSStore(hdfsStoreName, "hdfs://localhost:9000", "testDir", 1024, 20, .25f,
+        null, 20, 20, null, false, 0, 1024, false, false, true, 20, 20, 10, 100);
+
+    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
+    final LogService mockLogService = mockContext.mock(LogService.class, "LogService");
+
+    final TestResultSender testResultSender = new TestResultSender();
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockCache).findHDFSStore(hdfsStoreName);
+        will(returnValue(mockHdfsStore));
+        oneOf(mockMember).getName();
+        will(returnValue(memberName));
+        oneOf(mockFunctionContext).getArguments();
+        will(returnValue(hdfsStoreName));
+        oneOf(mockFunctionContext).getResultSender();
+        will(returnValue(testResultSender));
+      }
+    });
+
+    final DescribeHDFSStoreFunction function = createDescribeHDFSStoreFunction(mockCache, mockMember);
+
+    function.execute(mockFunctionContext);
+
+    final List<?> results = testResultSender.getResults();
+
+    assertNotNull(results);
+    assertEquals(1, results.size());
+
+    final HDFSStoreConfigHolder hdfsStoreDetails = (HDFSStoreConfigHolder)results.get(0);
+
+    assertNotNull(hdfsStoreDetails);
+    assertEquals(hdfsStoreName, hdfsStoreDetails.getName());
+    assertEquals("hdfs://localhost:9000", hdfsStoreDetails.getNameNodeURL());
+    assertEquals("testDir", hdfsStoreDetails.getHomeDir());
+    assertEquals(1024, hdfsStoreDetails.getWriteOnlyFileRolloverSize());
+    assertEquals(20, hdfsStoreDetails.getWriteOnlyFileRolloverInterval());
+    assertFalse(hdfsStoreDetails.getMinorCompaction());
+    assertEquals("0.25", Float.toString(hdfsStoreDetails.getBlockCacheSize()));
+    assertNull(hdfsStoreDetails.getHDFSClientConfigFile());
+    assertTrue(hdfsStoreDetails.getMajorCompaction());
+    assertEquals(20, hdfsStoreDetails.getMajorCompactionInterval());
+    assertEquals(20, hdfsStoreDetails.getMajorCompactionThreads());
+    assertEquals(10, hdfsStoreDetails.getMinorCompactionThreads());
+    assertEquals(100, hdfsStoreDetails.getPurgeInterval());
+
+    assertEquals(20, hdfsStoreDetails.getBatchSize());
+    assertEquals(20, hdfsStoreDetails.getBatchInterval());
+    assertNull(hdfsStoreDetails.getDiskStoreName());
+    assertFalse(hdfsStoreDetails.getSynchronousDiskWrite());
+    assertEquals(0, hdfsStoreDetails.getDispatcherThreads());
+    assertEquals(1024, hdfsStoreDetails.getMaxMemory());
+    assertFalse(hdfsStoreDetails.getBufferPersistent());
+  }
+
+  
+  @Test
+  public void testExecuteOnMemberHavingANonGemFireCache() throws Throwable {
+    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
+
+    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final TestResultSender testResultSender = new TestResultSender();
+
+    mockContext.checking(new Expectations() {{
+      exactly(0).of(mockFunctionContext).getResultSender();
+      will(returnValue(testResultSender));
+      
+    }});
+
+    final DescribeHDFSStoreFunction function = createDescribeHDFSStoreFunction(mockCache , mockMember);
+
+    function.execute(mockFunctionContext);
+
+    final List<?> results = testResultSender.getResults();
+
+    assertNotNull(results);
+    assertTrue(results.isEmpty());
+  }
+
+  
+  @Test(expected = HDFSStoreNotFoundException.class)
+  public void testExecuteThrowingResourceNotFoundException() throws Throwable{    
+    final String hdfsStoreName = "testHdfsStore";
+    final String memberId = "mockMemberId";
+    final String memberName = "mockMemberName";
+
+    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
+
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+
+    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
+
+    final TestResultSender testResultSender = new TestResultSender();
+
+    mockContext.checking(new Expectations() {{
+      oneOf(mockCache).findHDFSStore(hdfsStoreName);
+      will(returnValue(null));
+      oneOf(mockMember).getName();
+      will(returnValue(memberName));
+      oneOf(mockFunctionContext).getArguments();
+      will(returnValue(hdfsStoreName));
+      oneOf(mockFunctionContext).getResultSender();
+      will(returnValue(testResultSender));
+    }});
+
+    final DescribeHDFSStoreFunction function = createDescribeHDFSStoreFunction(mockCache,mockMember);
+
+    function.execute(mockFunctionContext);
+
+    try {
+      testResultSender.getResults();
+    }
+    catch (HDFSStoreNotFoundException e) {
+      assertEquals(String.format("A hdfs store with name (%1$s) was not found on member (%2$s).",
+        hdfsStoreName, memberName), e.getMessage());
+      throw e;
+    }
+  }
+  
+  
+  @Test(expected = RuntimeException.class)
+  public void testExecuteThrowingRuntimeException() throws Throwable {
+    final String hdfsStoreName = "testHdfsStore";
+    final String memberId = "mockMemberId";
+    final String memberName = "mockMemberName";
+
+    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
+
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+
+    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
+
+    final TestResultSender testResultSender = new TestResultSender();
+
+    mockContext.checking(new Expectations() {{
+      oneOf(mockCache).findHDFSStore(hdfsStoreName);
+      will(throwException(new RuntimeException("ExpectedStrings")));
+      oneOf(mockMember).getName();
+      will(returnValue(memberName));
+      oneOf(mockFunctionContext).getArguments();
+      will(returnValue(hdfsStoreName));
+      oneOf(mockFunctionContext).getResultSender();
+      will(returnValue(testResultSender));
+    }});
+
+    final DescribeHDFSStoreFunction function = createDescribeHDFSStoreFunction(mockCache, mockMember);
+
+    function.execute(mockFunctionContext);
+
+    try {
+      testResultSender.getResults();
+    }
+    catch (RuntimeException e) {
+      assertEquals("ExpectedStrings", e.getMessage());
+      throw e;
+    }
+  }
+  
+  
+  protected HDFSStoreImpl createMockHDFSStore(final String storeName, final String namenode, final String homeDir,
+      final int maxFileSize, final int fileRolloverInterval, final float blockCachesize, final String clientConfigFile,
+      final int batchSize, final int batchInterval, final String diskStoreName, final boolean syncDiskwrite,
+      final int dispatcherThreads, final int maxMemory, final boolean bufferPersistent, final boolean minorCompact,
+      final boolean majorCompact, final int majorCompactionInterval, final int majorCompactionThreads,
+      final int minorCompactionThreads, final int purgeInterval) {
+
+    final HDFSStoreImpl mockHdfsStore = mockContext.mock(HDFSStoreImpl.class, storeName);
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockHdfsStore).getMajorCompaction();
+        will(returnValue(majorCompact));
+        oneOf(mockHdfsStore).getMajorCompactionInterval();
+        will(returnValue(majorCompactionInterval));
+        oneOf(mockHdfsStore).getMajorCompactionThreads();
+        will(returnValue(majorCompactionThreads));
+        oneOf(mockHdfsStore).getMinorCompactionThreads();
+        will(returnValue(minorCompactionThreads));
+        oneOf(mockHdfsStore).getPurgeInterval();
+        will(returnValue(purgeInterval));
+        oneOf(mockHdfsStore).getInputFileCountMax();
+        will(returnValue(10));
+        oneOf(mockHdfsStore).getInputFileSizeMax();
+        will(returnValue(1024));
+        oneOf(mockHdfsStore).getInputFileCountMin();
+        will(returnValue(2));
+        oneOf(mockHdfsStore).getBatchSize();
+        will(returnValue(batchSize));
+        oneOf(mockHdfsStore).getBatchInterval();
+        will(returnValue(batchInterval));
+        oneOf(mockHdfsStore).getDiskStoreName();
+        will(returnValue(diskStoreName));
+        oneOf(mockHdfsStore).getSynchronousDiskWrite();
+        will(returnValue(syncDiskwrite));
+        oneOf(mockHdfsStore).getBufferPersistent();
+        will(returnValue(bufferPersistent));
+        oneOf(mockHdfsStore).getDispatcherThreads();
+        will(returnValue(dispatcherThreads));
+        oneOf(mockHdfsStore).getMaxMemory();
+        will(returnValue(maxMemory));
+        oneOf(mockHdfsStore).getName();
+        will(returnValue(storeName));
+        oneOf(mockHdfsStore).getNameNodeURL();
+        will(returnValue(namenode));
+        oneOf(mockHdfsStore).getHomeDir();
+        will(returnValue(homeDir));
+        oneOf(mockHdfsStore).getWriteOnlyFileRolloverSize();
+        will(returnValue(maxFileSize));
+        oneOf(mockHdfsStore).getWriteOnlyFileRolloverInterval();
+        will(returnValue(fileRolloverInterval));
+        oneOf(mockHdfsStore).getMinorCompaction();
+        will(returnValue(minorCompact));
+        oneOf(mockHdfsStore).getBlockCacheSize();
+        will(returnValue(blockCachesize));
+        allowing(mockHdfsStore).getHDFSClientConfigFile();
+        will(returnValue(clientConfigFile));
+      }
+    });
+    return mockHdfsStore;
+  }
+
+  protected TestDescribeHDFSStoreFunction createDescribeHDFSStoreFunction(final Cache cache, DistributedMember member) {
+    return new TestDescribeHDFSStoreFunction(cache, member);
+  }
+
+  protected static class TestDescribeHDFSStoreFunction extends DescribeHDFSStoreFunction {
+    private static final long serialVersionUID = 1L;
+
+    private final Cache cache;
+
+    private final DistributedMember member;
+
+    public TestDescribeHDFSStoreFunction(final Cache cache, DistributedMember member) {
+      this.cache = cache;
+      this.member = member;
+    }
+
+    @Override
+    protected Cache getCache() {
+      return this.cache;
+    }
+
+    @Override
+    protected DistributedMember getDistributedMemberId(Cache cache) {
+      return member;
+    }
+  }
+
+  protected static class TestResultSender implements ResultSender {
+
+    private final List<Object> results = new LinkedList<Object>();
+
+    private Throwable t;
+
+    protected List<Object> getResults() throws Throwable {
+      if (t != null) {
+        throw t;
+      }
+      return Collections.unmodifiableList(results);
+    }
+
+    public void lastResult(final Object lastResult) {
+      results.add(lastResult);
+    }
+
+    public void sendResult(final Object oneResult) {
+      results.add(oneResult);
+    }
+
+    public void sendException(final Throwable t) {
+      this.t = t;
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunctionJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunctionJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunctionJUnitTest.java
new file mode 100644
index 0000000..08e18ec
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunctionJUnitTest.java
@@ -0,0 +1,305 @@
+/*=========================================================================
+ * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+
+package com.gemstone.gemfire.management.internal.cli.functions;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.logging.log4j.Logger;
+import org.jmock.Expectations;
+import org.jmock.Mockery;
+import org.jmock.lib.legacy.ClassImposteriser;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheClosedException;
+import com.gemstone.gemfire.cache.execute.FunctionContext;
+import com.gemstone.gemfire.cache.execute.ResultSender;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+/**
+ * The DestroyHDFSStoreFunctionJUnitTest test suite class tests the contract and
+ * functionality of the DestroyHDFSStoreFunction class. </p>
+ * 
+ * @author Namrata Thanvi
+ * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl
+ * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder
+ * @see com.gemstone.gemfire.management.internal.cli.functions.DestroyHDFSStoreFunction
+ * @see org.jmock.Expectations
+ * @see org.jmock.Mockery
+ * @see org.junit.Assert
+ * @see org.junit.Test
+ */
+@SuppressWarnings( { "unused" })
+@Category({IntegrationTest.class, HoplogTest.class})
+public class DestroyHDFSStoreFunctionJUnitTest {
+
+  private static final Logger logger = LogService.getLogger();
+
+  private Mockery mockContext;
+
+  @Before
+  public void setup() {
+    mockContext = new Mockery() {
+      {
+        setImposteriser(ClassImposteriser.INSTANCE);
+      }
+    };
+  }
+
+  @After
+  public void tearDown() {
+    mockContext.assertIsSatisfied();
+    mockContext = null;
+  }
+
+  @Test
+  public void testExecute() throws Throwable {
+    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
+    final HDFSStoreImpl mockHdfsStore = mockContext.mock(HDFSStoreImpl.class, "HDFSStoreImpl");
+    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
+
+    final String hdfsStoreName = "mockHdfsStore";
+    final String memberId = "mockMemberId";
+    final String memberName = "mockMemberName";
+    final TestResultSender testResultSender = new TestResultSender();
+    final DestroyHDFSStoreFunction function = createDestroyHDFSStoreFunction(mockCache, mockMember, xmlEntity);
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockCache).findHDFSStore(hdfsStoreName);
+        will(returnValue(mockHdfsStore));
+        one(mockHdfsStore).destroy();
+        oneOf(mockMember).getId();
+        will(returnValue(memberId));
+        exactly(2).of(mockMember).getName();
+        will(returnValue(memberName));
+        oneOf(mockFunctionContext).getArguments();
+        will(returnValue(hdfsStoreName));
+        oneOf(mockFunctionContext).getResultSender();
+        will(returnValue(testResultSender));
+      }
+    });
+
+    function.execute(mockFunctionContext);
+
+    final List<?> results = testResultSender.getResults();
+
+    assertNotNull(results);
+    assertEquals(1, results.size());
+
+    final CliFunctionResult result = (CliFunctionResult)results.get(0);
+    assertEquals(memberName, result.getMemberIdOrName());
+    assertEquals("Success", result.getMessage());
+
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testExecuteOnMemberHavingNoHDFSStore() throws Throwable {
+    final String hdfsStoreName = "mockHdfsStore";
+    final String memberId = "mockMemberId";
+    final String memberName = "mockMemberName";
+
+    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
+    final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
+
+    final TestResultSender testResultSender = new TestResultSender();
+    final DestroyHDFSStoreFunction function = createDestroyHDFSStoreFunction(mockCache, mockMember, xmlEntity);
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockCache).findHDFSStore(hdfsStoreName);
+        will(returnValue(null));
+        oneOf(mockMember).getId();
+        will(returnValue(memberId));
+        exactly(2).of(mockMember).getName();
+        will(returnValue(memberName));
+        oneOf(mockFunctionContext).getArguments();
+        will(returnValue(hdfsStoreName));
+        oneOf(mockFunctionContext).getResultSender();
+        will(returnValue(testResultSender));
+      }
+    });
+
+    function.execute(mockFunctionContext);
+
+    final List<?> results = testResultSender.getResults();
+
+    assertNotNull(results);
+    assertEquals(1, results.size());
+
+    final CliFunctionResult result = (CliFunctionResult)results.get(0);
+    assertEquals(memberName, result.getMemberIdOrName());
+    assertEquals("Hdfs store not found on this member", result.getMessage());
+  }
+
+  @Test
+  public void testExecuteOnMemberWithNoCache() throws Throwable {
+    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "MockFunctionContext");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
+    final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
+
+    final String hdfsStoreName = "mockHdfsStore";
+
+    final TestResultSender testResultSender = new TestResultSender();
+    final DestroyHDFSStoreFunction function = new TestDestroyHDFSStoreFunction(mockCache, mockMember, xmlEntity) {
+      private static final long serialVersionUID = 1L;
+
+      @Override
+      protected Cache getCache() {
+        throw new CacheClosedException("Expected");
+      }
+    };
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockFunctionContext).getArguments();
+        will(returnValue(hdfsStoreName));
+        oneOf(mockFunctionContext).getResultSender();
+        will(returnValue(testResultSender));
+      }
+    });
+
+    function.execute(mockFunctionContext);
+    final List<?> results = testResultSender.getResults();
+
+    assertNotNull(results);
+    assertEquals(1, results.size());
+
+    final CliFunctionResult result = (CliFunctionResult)results.get(0);
+    assertEquals("", result.getMemberIdOrName());
+    assertNull(result.getMessage());
+  }
+
+  @Test
+  public void testExecuteHandleRuntimeException() throws Throwable {
+    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
+    final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
+
+    final String hdfsStoreName = "mockHdfsStore";
+    final String memberId = "mockMemberId";
+    final String memberName = "mockMemberName";
+
+    final TestResultSender testResultSender = new TestResultSender();
+    final DestroyHDFSStoreFunction function = createDestroyHDFSStoreFunction(mockCache, mockMember, xmlEntity);
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockMember).getId();
+        will(returnValue(memberId));
+        exactly(2).of(mockMember).getName();
+        will(returnValue(memberName));
+        oneOf(mockFunctionContext).getArguments();
+        will(returnValue(hdfsStoreName));
+        oneOf(mockCache).findHDFSStore(hdfsStoreName);
+        will(throwException(new RuntimeException("expected")));
+        oneOf(mockFunctionContext).getResultSender();
+        will(returnValue(testResultSender));
+      }
+    });
+
+    function.execute(mockFunctionContext);
+    final List<?> results = testResultSender.getResults();
+
+    assertNotNull(results);
+    assertEquals(1, results.size());
+
+    final CliFunctionResult result = (CliFunctionResult)results.get(0);
+    assertEquals(memberName, result.getMemberIdOrName());
+    assertEquals("expected", result.getThrowable().getMessage());
+
+  }
+
+  protected TestDestroyHDFSStoreFunction createDestroyHDFSStoreFunction(final Cache cache, DistributedMember member,
+      XmlEntity xml) {
+    return new TestDestroyHDFSStoreFunction(cache, member, xml);
+  }
+
+  protected static class TestDestroyHDFSStoreFunction extends DestroyHDFSStoreFunction {
+    private static final long serialVersionUID = 1L;
+
+    private final Cache cache;
+
+    private final DistributedMember member;
+
+    private final XmlEntity xml;
+
+    public TestDestroyHDFSStoreFunction(final Cache cache, DistributedMember member, XmlEntity xml) {
+      this.cache = cache;
+      this.member = member;
+      this.xml = xml;
+    }
+
+    @Override
+    protected Cache getCache() {
+      return this.cache;
+    }
+
+    @Override
+    protected DistributedMember getDistributedMember(Cache cache) {
+      return member;
+    }
+
+    @Override
+    protected XmlEntity getXMLEntity(String storeName) {
+      return xml;
+    }
+
+  }
+
+  protected static class TestResultSender implements ResultSender {
+
+    private final List<Object> results = new LinkedList<Object>();
+
+    private Throwable t;
+
+    protected List<Object> getResults() throws Throwable {
+      if (t != null) {
+        throw t;
+      }
+      return Collections.unmodifiableList(results);
+    }
+
+    public void lastResult(final Object lastResult) {
+      results.add(lastResult);
+    }
+
+    public void sendResult(final Object oneResult) {
+      results.add(oneResult);
+    }
+
+    public void sendException(final Throwable t) {
+      this.t = t;
+    }
+  }
+
+}
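
[Editor's note on the testing pattern above: the jMock tests in this commit all follow the same shape: declare the expected interactions in an Expectations block, execute the function, then let assertIsSatisfied() in tearDown verify that every expectation fired. A minimal, self-contained sketch of that pattern follows; the Greeter interface and all names in it are hypothetical and not part of the Geode code.]

    import static org.junit.Assert.assertEquals;

    import org.jmock.Expectations;
    import org.jmock.Mockery;
    import org.junit.After;
    import org.junit.Test;

    public class JMockPatternSketchTest {

      // Hypothetical collaborator, used only to illustrate the expectation style.
      public interface Greeter {
        String greet(String name);
      }

      private final Mockery mockContext = new Mockery();

      @After
      public void tearDown() {
        // Fails the test if any declared expectation was never invoked.
        mockContext.assertIsSatisfied();
      }

      @Test
      public void greetsByName() {
        final Greeter mockGreeter = mockContext.mock(Greeter.class, "Greeter");

        mockContext.checking(new Expectations() {
          {
            // oneOf(...) expects exactly one call; will(...) supplies the stubbed return value.
            oneOf(mockGreeter).greet("world");
            will(returnValue("hello world"));
          }
        });

        assertEquals("hello world", mockGreeter.greet("world"));
      }
    }
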

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunctionJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunctionJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunctionJUnitTest.java
new file mode 100644
index 0000000..11bc430
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunctionJUnitTest.java
@@ -0,0 +1,319 @@
+/*=========================================================================
+ * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+
+package com.gemstone.gemfire.management.internal.cli.functions;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+
+import org.jmock.Expectations;
+import org.jmock.Mockery;
+import org.jmock.lib.legacy.ClassImposteriser;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheClosedException;
+import com.gemstone.gemfire.cache.execute.FunctionContext;
+import com.gemstone.gemfire.cache.execute.ResultSender;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.management.internal.cli.functions.ListHDFSStoresFunction.HdfsStoreDetails;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+/**
+ * The ListHDFSStoresFunctionJUnitTest test suite class tests the contract and functionality of the
+ * ListHDFSStoresFunction.
+ * <p>
+ * @author Namrata Thanvi
+ * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl
+ * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder
+ * @see com.gemstone.gemfire.management.internal.cli.functions.ListHDFSStoresFunction
+ * @see org.jmock.Expectations
+ * @see org.jmock.Mockery
+ * @see org.junit.Assert
+ * @see org.junit.Test
+ */
+
+@Category({IntegrationTest.class, HoplogTest.class})
+public class ListHDFSStoresFunctionJUnitTest {
+  private Mockery mockContext;
+
+  @Before
+  public void setup() {
+    mockContext = new Mockery() {
+      {
+        setImposteriser(ClassImposteriser.INSTANCE);
+      }
+    };
+  }
+
+  @After
+  public void tearDown() {
+    mockContext.assertIsSatisfied();
+    mockContext = null;
+  }
+
+  
+  @Test
+  public void testExecute() throws Throwable {
+    final String memberId = "mockMemberId";
+    final String memberName = "mockMemberName";
+    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
+    final TestResultSender testResultSender = new TestResultSender();
+
+    final HDFSStoreImpl mockHdfsStoreOne = mockContext.mock(HDFSStoreImpl.class, "HDFSStoreOne");
+    final HDFSStoreImpl mockHdfsStoreTwo = mockContext.mock(HDFSStoreImpl.class, "HDFSStoreTwo");
+    final HDFSStoreImpl mockHdfsStoreThree = mockContext.mock(HDFSStoreImpl.class, "HDFSStoreThree");
+
+    final List<HDFSStoreImpl> mockHdfsStores = new ArrayList<HDFSStoreImpl>();
+
+    mockHdfsStores.add(mockHdfsStoreOne);
+    mockHdfsStores.add(mockHdfsStoreTwo);
+    mockHdfsStores.add(mockHdfsStoreThree);
+
+    final List<String> storeNames = new ArrayList<String>();
+    storeNames.add("hdfsStoreOne");
+    storeNames.add("hdfsStoreTwo");
+    storeNames.add("hdfsStoreThree");
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockCache).getHDFSStores();
+        will(returnValue(mockHdfsStores));
+        exactly(3).of(mockMember).getId();
+        will(returnValue(memberId));
+        exactly(3).of(mockMember).getName();
+        will(returnValue(memberName));
+        oneOf(mockHdfsStoreOne).getName();
+        will(returnValue(storeNames.get(0)));       
+        oneOf(mockHdfsStoreTwo).getName();
+        will(returnValue(storeNames.get(1)));        
+        oneOf(mockHdfsStoreThree).getName();
+        will(returnValue(storeNames.get(2)));        
+        oneOf(mockFunctionContext).getResultSender();
+        will(returnValue(testResultSender));
+      }
+    });
+
+    final ListHDFSStoresFunction function = createListHDFSStoresFunction(mockCache, mockMember);
+
+    function.execute(mockFunctionContext);
+
+    final List<?> results = testResultSender.getResults();
+
+    assertNotNull(results);
+    assertEquals(1, results.size());
+
+    final Set<HdfsStoreDetails> listHdfsStoreFunctionResults = (Set<HdfsStoreDetails>) results.get(0);
+
+    assertNotNull(listHdfsStoreFunctionResults);
+    assertEquals(3, listHdfsStoreFunctionResults.size());
+
+    Collections.sort(storeNames);
+
+    for (HdfsStoreDetails listHdfsStoreFunctionResult : listHdfsStoreFunctionResults) {
+      assertTrue(storeNames.contains(listHdfsStoreFunctionResult.getStoreName()));
+      assertTrue(storeNames.remove(listHdfsStoreFunctionResult.getStoreName()));
+      assertEquals(memberId, listHdfsStoreFunctionResult.getMemberId());
+      assertEquals(memberName, listHdfsStoreFunctionResult.getMemberName());
+    }
+  }
+  
+  
+  @Test(expected = CacheClosedException.class)
+  public void testExecuteOnMemberWithNoCache() throws Throwable {
+    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "MockFunctionContext");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final TestListHDFSStoresFunction testListHdfsStoresFunction =
+        new TestListHDFSStoresFunction(mockContext.mock(Cache.class, "MockCache"), mockMember) {
+          @Override
+          protected Cache getCache() {
+            throw new CacheClosedException("Expected");
+          }
+        };
+
+    final TestResultSender testResultSender = new TestResultSender();
+
+    mockContext.checking(new Expectations() {{
+      oneOf(mockFunctionContext).getResultSender();
+      will(returnValue(testResultSender));
+    }});
+
+    testListHdfsStoresFunction.execute(mockFunctionContext);
+
+    try {
+      testResultSender.getResults();
+    }
+    catch (CacheClosedException expected) {
+      assertEquals("Expected", expected.getMessage());
+      throw expected;
+    }
+  }  
+  
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testExecuteOnMemberHavingNoHDFSStores() throws Throwable {
+    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
+
+    final TestResultSender testResultSender = new TestResultSender();
+
+    mockContext.checking(new Expectations() {{
+      oneOf(mockCache).getHDFSStores();
+      will(returnValue(Collections.emptyList()));
+      oneOf(mockFunctionContext).getResultSender();
+      will(returnValue(testResultSender));
+    }});
+
+    final ListHDFSStoresFunction function = createListHDFSStoresFunction(mockCache, mockMember);
+
+    function.execute(mockFunctionContext);
+
+    final List<?> results = testResultSender.getResults();
+
+    assertNotNull(results);
+    assertEquals(1, results.size());
+
+    final Set<HdfsStoreDetails> hdfsStoreDetails = (Set<HdfsStoreDetails>) results.get(0);
+
+    assertNotNull(hdfsStoreDetails);
+    assertTrue(hdfsStoreDetails.isEmpty());
+  }
+  
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testExecuteOnMemberWithANonGemFireCache() throws Throwable {
+    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
+
+    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
+
+    final TestResultSender testResultSender = new TestResultSender();
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockFunctionContext).getResultSender();
+        will(returnValue(testResultSender));
+      }
+    });
+
+    final ListHDFSStoresFunction function = createListHDFSStoresFunction(mockCache, null);
+
+    function.execute(mockFunctionContext);
+
+    final List<?> results = testResultSender.getResults();
+
+    assertNotNull(results);
+    assertEquals(1, results.size());
+
+    final Set<HdfsStoreDetails> hdfsStoreDetails = (Set<HdfsStoreDetails>)results.get(0);
+
+    assertNotNull(hdfsStoreDetails);
+    assertTrue(hdfsStoreDetails.isEmpty());
+  }
+  
+  
+  @Test(expected = RuntimeException.class)
+  public void testExecuteThrowsRuntimeException() throws Throwable {
+    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
+
+    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
+
+    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
+
+    final TestResultSender testResultSender = new TestResultSender();
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockCache).getHDFSStores();
+        will(throwException(new RuntimeException("expected")));
+        oneOf(mockFunctionContext).getResultSender();
+        will(returnValue(testResultSender));
+      }
+    });
+
+    final ListHDFSStoresFunction function = createListHDFSStoresFunction(mockCache, mockMember);
+
+    function.execute(mockFunctionContext);
+
+    try {
+      testResultSender.getResults();
+    } catch (Throwable throwable) {
+      assertTrue(throwable instanceof RuntimeException);
+      assertEquals("expected", throwable.getMessage());
+      throw throwable;
+    }
+  }
+  
+  protected ListHDFSStoresFunction createListHDFSStoresFunction(final Cache cache, DistributedMember member) {
+    return new TestListHDFSStoresFunction(cache, member);
+  }
+    
+  protected static class TestListHDFSStoresFunction extends ListHDFSStoresFunction {
+    private static final long serialVersionUID = 1L;
+
+    private final Cache cache;
+
+    private final DistributedMember member;
+
+    @Override
+    protected DistributedMember getDistributedMemberId(Cache cache) {
+      return member;
+    }
+
+    public TestListHDFSStoresFunction(final Cache cache, DistributedMember member) {
+      assert cache != null: "The Cache cannot be null!";
+      this.cache = cache;
+      this.member = member;
+    }
+
+    @Override
+    protected Cache getCache() {
+      return cache;
+    }
+  }
+
+  protected static class TestResultSender implements ResultSender {
+
+    private final List<Object> results = new LinkedList<Object>();
+
+    private Throwable t;
+
+    protected List<Object> getResults() throws Throwable {
+      if (t != null) {
+        throw t;
+      }
+      return Collections.unmodifiableList(results);
+    }
+
+    public void lastResult(final Object lastResult) {
+      results.add(lastResult);
+    }
+
+    public void sendResult(final Object oneResult) {
+      results.add(oneResult);
+    }
+
+    public void sendException(final Throwable t) {
+      this.t = t;
+    }
+  }
+}
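
[Editor's note: the Mockery in the test above is configured with ClassImposteriser.INSTANCE so that concrete classes such as GemFireCacheImpl can be mocked, not just interfaces. A minimal sketch of that configuration follows; the Repository class and all names in it are hypothetical.]

    import org.jmock.Expectations;
    import org.jmock.Mockery;
    import org.jmock.lib.legacy.ClassImposteriser;

    public class ClassImposteriserSketch {

      // Hypothetical concrete class; without an imposteriser only interfaces could be mocked.
      public static class Repository {
        public String load(String id) {
          return "real-" + id;
        }
      }

      public static void main(String[] args) {
        Mockery mockContext = new Mockery() {
          {
            // Enables CGLIB-based mocking of concrete (non-final) classes.
            setImposteriser(ClassImposteriser.INSTANCE);
          }
        };

        final Repository mockRepository = mockContext.mock(Repository.class, "Repository");

        mockContext.checking(new Expectations() {
          {
            oneOf(mockRepository).load("42");
            will(returnValue("stubbed-42"));
          }
        });

        // Prints "stubbed-42": the class mock returns the stubbed value, not the real one.
        System.out.println(mockRepository.load("42"));
        mockContext.assertIsSatisfied();
      }
    }
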

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/management/internal/configuration/domain/CacheElementJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/configuration/domain/CacheElementJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/configuration/domain/CacheElementJUnitTest.java
index 57d1c7e..dca5d0b 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/management/internal/configuration/domain/CacheElementJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/management/internal/configuration/domain/CacheElementJUnitTest.java
@@ -120,6 +120,7 @@ public class CacheElementJUnitTest {
     assertEntry("cache-server", order++, entries.next());
     assertEntry("pool", order++, entries.next());
     assertEntry("disk-store", order++, entries.next());
+    assertEntry("hdfs-store", order++, entries.next());
     assertEntry("pdx", order++, entries.next());
     assertEntry("region-attributes", order++, entries.next());
     assertEntry("jndi-bindings", order++, entries.next());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/test/dunit/internal/JUnit4DistributedTestCase.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/test/dunit/internal/JUnit4DistributedTestCase.java b/geode-core/src/test/java/com/gemstone/gemfire/test/dunit/internal/JUnit4DistributedTestCase.java
index 8678095..c06f4db 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/test/dunit/internal/JUnit4DistributedTestCase.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/test/dunit/internal/JUnit4DistributedTestCase.java
@@ -35,6 +35,7 @@ import org.junit.Rule;
 import com.gemstone.gemfire.admin.internal.AdminDistributedSystemImpl;
 import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
 import com.gemstone.gemfire.cache.query.QueryTestUtils;
 import com.gemstone.gemfire.cache.query.internal.QueryObserverHolder;
 import com.gemstone.gemfire.cache30.ClientServerTestCase;
@@ -414,6 +415,7 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
     assertNotNull("defaultDiskStoreName must not be null", defaultDiskStoreName);
     setTestMethodName(methodName);
     GemFireCacheImpl.setDefaultDiskStoreName(defaultDiskStoreName);
+    System.setProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP, "true");
     setUpCreationStackGenerator();
   }
 
@@ -566,6 +568,7 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
 
     // clear system properties -- keep alphabetized
     System.clearProperty("gemfire.log-level");
+    System.clearProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP);
     System.clearProperty("jgroups.resolve_dns");
 
     if (InternalDistributedSystem.systemAttemptingReconnect != null) {
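
[Editor's note: the hunk above enables local-HDFS support for the duration of each dunit test by setting HoplogConfig.ALLOW_LOCAL_HDFS_PROP in setUp and clearing it in tearDown. A minimal sketch of that set-and-clear pattern follows, using a hypothetical property name rather than the real constant.]

    import static org.junit.Assert.assertTrue;

    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class SystemPropertyGuardSketchTest {

      // Hypothetical property name; the real tests use HoplogConfig.ALLOW_LOCAL_HDFS_PROP.
      private static final String ALLOW_LOCAL_HDFS = "example.ALLOW_LOCAL_HDFS";

      @Before
      public void setUp() {
        // Enable the feature for the duration of this test only.
        System.setProperty(ALLOW_LOCAL_HDFS, "true");
      }

      @After
      public void tearDown() {
        // Always clear it so the flag cannot leak into other tests in the same JVM.
        System.clearProperty(ALLOW_LOCAL_HDFS);
      }

      @Test
      public void flagIsVisibleDuringTheTest() {
        assertTrue(Boolean.getBoolean(ALLOW_LOCAL_HDFS));
      }
    }
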

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/test/junit/categories/HoplogTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/test/junit/categories/HoplogTest.java b/geode-core/src/test/java/com/gemstone/gemfire/test/junit/categories/HoplogTest.java
new file mode 100644
index 0000000..08987a5
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/test/junit/categories/HoplogTest.java
@@ -0,0 +1,7 @@
+package com.gemstone.gemfire.test.junit.categories;
+/**
+ * JUnit Test Category for tests that exercise the HDFS hoplog persistence code paths.
+ * Complex dependencies and interactions are stubbed or mocked where needed.
+ */
+public interface HoplogTest {
+}
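
[Editor's note: the new HoplogTest category is applied alongside existing categories, as in the HDFS function tests earlier in this commit, so suites can include or exclude hoplog tests as a group. A minimal sketch of annotating a test class with it; the class name below is hypothetical.]

    import org.junit.Test;
    import org.junit.experimental.categories.Category;

    import com.gemstone.gemfire.test.junit.categories.HoplogTest;
    import com.gemstone.gemfire.test.junit.categories.IntegrationTest;

    // Tagging a test class with both categories, mirroring the HDFS function tests in this commit.
    @Category({IntegrationTest.class, HoplogTest.class})
    public class ExampleHoplogCategorySketchTest {

      @Test
      public void categoriesDoNotChangeTestBehavior() {
        // Categories only affect which suites pick this test up; the body runs normally.
      }
    }
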

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedDataSerializables.txt
----------------------------------------------------------------------
diff --git a/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedDataSerializables.txt b/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedDataSerializables.txt
index fe21fbf..0dda2e6 100644
--- a/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedDataSerializables.txt
+++ b/geode-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedDataSerializables.txt
@@ -78,6 +78,46 @@ com/gemstone/gemfire/cache/client/internal/locator/ServerLocationRequest,2
 fromData,9,2a2bb80003b50002b1
 toData,9,2ab400022bb80004b1
 
+com/gemstone/gemfire/cache/hdfs/internal/HDFSGatewayEventImpl,2
+fromData,17,2a2bb7001b2a2bb8001cc0001db50009b1
+toData,14,2a2bb700172ab400092bb80018b1
+
+com/gemstone/gemfire/cache/hdfs/internal/PersistedEventImpl,2
+fromData,104,2a2bb900110100b80012b500042a2bb900110100b500072a04b7000a99000e2a2bb80013b50005a700402a05b7000a9900302bb800134d2cc7000b2a01b50005a7001cb8001499000e2a2cb80015b50005a7000b2a2cb80016b50005a7000b2a2bb80017b50005b1
+toData,107,2b2ab40004b40008b9000902002b2ab40007b9000902002a04b7000a9900142ab40005c0000bc0000b2bb8000ca7003d2a05b7000a99002d2ab40005c1000d9900182ab40005c0000d4d2cb9000e01002bb8000fa700162ab400052bb8000fa7000b2ab400052bb80010b1
+
+com/gemstone/gemfire/cache/hdfs/internal/SortedHDFSQueuePersistedEvent,2
+fromData,14,2a2bb7000d2a2bb8000eb5000ab1
+toData,14,2a2bb7000b2ab4000a2bb8000cb1
+
+com/gemstone/gemfire/cache/hdfs/internal/SortedHoplogPersistedEvent,2
+fromData,37,2a2bb700092ab6000a9900112a2bb8000bc0000cb50002a7000d2a2bb9000d0100b50003b1
+toData,34,2a2bb700062ab40002c700102b2ab40003b900070300a7000b2ab400022bb80008b1
+
+com/gemstone/gemfire/cache/hdfs/internal/UnsortedHDFSQueuePersistedEvent,2
+fromData,14,2a2bb7000c2a2bb8000db50008b1
+toData,14,2a2bb7000a2ab400082bb8000bb1
+
+com/gemstone/gemfire/cache/hdfs/internal/UnsortedHoplogPersistedEvent,2
+fromData,17,2a2bb700072a2bb80008b60009b50003b1
+toData,17,2a2bb700042ab40003b800052bb80006b1
+
+com/gemstone/gemfire/cache/hdfs/internal/hoplog/CompactionStatus,2
+fromData,21,2a2bb900060100b500022a2bb900070100b50003b1
+toData,21,2b2ab40002b9000402002b2ab40003b900050200b1
+
+com/gemstone/gemfire/cache/hdfs/internal/hoplog/FlushStatus,2
+fromData,11,2a2bb900060100b50004b1
+toData,11,2b2ab40004b900050200b1
+
+com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSFlushQueueArgs,2
+fromData,19,2a2bb80008b500042a2bb900090100b50005b1
+toData,19,2ab400042bb800062b2ab40005b900070300b1
+
+com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSForceCompactionArgs,2
+fromData,29,2a2bb8000bb500042a2bb9000c0100b500052a2bb9000d0100b50007b1
+toData,29,2ab400042bb800082b2ab40005b9000902002b2ab40007b9000a0200b1
+
 com/gemstone/gemfire/cache/query/internal/CqEntry,2
 fromData,17,2a2bb80009b500022a2bb80009b50003b1
 toData,17,2ab400022bb8000b2ab400032bb8000bb1
@@ -325,8 +365,8 @@ fromData,22,2a2bb900130100b500022a2bb80014c00015b50003b1
 toData,19,2b2ab40002b9001102002ab400032bb80012b1
 
 com/gemstone/gemfire/distributed/internal/membership/gms/messages/InstallViewMessage,2
-fromData,49,2a2bb700112a2bb900120100b500072ab800132bb90012010032b500052a2bb80014c00015b500022a2bb80014b50006b1
-toData,45,2a2bb7000d2b2ab40007b9000e02002b2ab40005b6000fb9000e02002ab400022bb800102ab400062bb80010b1
+fromData,49,2a2bb700122a2bb900130100b500072ab800142bb90013010032b500042a2bb80015c00016b500022a2bb80015b50005b1
+toData,45,2a2bb7000e2b2ab40007b9000f02002b2ab40004b60010b9000f02002ab400022bb800112ab400052bb80011b1
 
 com/gemstone/gemfire/distributed/internal/membership/gms/messages/JoinRequestMessage,2
 fromData,38,2a2bb80019c0001ab500042a2bb80019b500052a2bb8001bb500022a2bb9001c0100b6001db1
@@ -707,8 +747,8 @@ fromData,27,2a2bb80013b500042a2bb80013b500062a2bb900140100b50008b1
 toData,27,2ab400042bb800112ab400062bb800112b2ab40008b900120200b1
 
 com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes,2
-fromData,404,2a2bb80080b500082a2bb80080b5000a2a2bb80081b5000d2a2bb80080b5007a2a2bb80082c00083b5000f2a2bb80082c00083b500112a2bb80082c00084b500132a2bb80082c00084b500152a2bb80082c00084b500172a2bb80080b500192a2bb80082c00084b5001b2a2bb80080b5001d2a2bb80082c00085b5001f2a2bb80082c00086b500212a2bb900870100b500232a2bb900870100b500252a2bb900880100b500272a2bb900890100b5002b2a2bb900880100b5002d2a2bb900870100b5002f2a2bb900870100b500312a2bb900870100b500332a2bb900870100b500352a2bb900870100b500372a2bb80082c0008ab5003b2a2bb80082c0008bc0008bb5003d2a2bb80082c0008cc0008cb5003f2a2bb900870100b5007f2a2bb80082c0008db500412a2bb80082c0008eb500432a2bb80082c0008fb500452a2bb80082c00002b500042a2bb900870100b500472a2bb80080b500392a2bb900870100b5004b2a2bb80081b5004e2a2bb900870100b500052a2bb900870100b500292a2bb80080b500522a2bb900870100b50054b1
-toData,353,2ab400082bb800782ab4000a2bb800782ab4000d2bb800792ab4007a2bb800782ab4000f2bb8007b2ab400112bb8007b2ab400132bb8007b2ab400152bb8007b2ab400172bb8007b2ab400192bb800782ab4001b2bb8007b2ab4001d2bb800782ab4001f2bb8007b2ab400212bb8007b2b2ab40023b9007c02002b2ab40025b9007c02002b2ab40027b9007d02002b2ab4002bb9007e02002b2ab4002db9007d02002b2ab4002fb9007c02002b2ab40031b9007c02002b2ab40033b9007c02002b2ab40035b9007c02002b2ab40037b9007c02002ab4003b2bb8007b2ab4003d2bb8007b2ab4003f2bb8007b2b2ab4007fb9007c02002ab400412bb8007b2ab400432bb8007b2ab400452bb8007b2ab400042bb8007b2b2ab40047b9007c02002ab400392bb800782b2ab4004bb9007c02002ab4004e2bb800792b2ab40005b9007c02002b2ab40029b9007c02002ab400522bb800782b2ab40054b9007c0200b1
+fromData,412,2a2bb80084b500082a2bb80084b5000a2a2bb80085b5000d2a2bb80084b5007e2a2bb80086c00087b5000f2a2bb80086c00087b500112a2bb80086c00088b500132a2bb80086c00088b500152a2bb80086c00088b500172a2bb80084b500192a2bb80086c00088b5001b2a2bb80084b5001d2a2bb80086c00089b5001f2a2bb80086c0008ab500212a2bb9008b0100b500232a2bb9008b0100b500252a2bb9008c0100b500272a2bb9008d0100b5002b2a2bb9008c0100b5002d2a2bb9008b0100b5002f2a2bb9008b0100b500312a2bb9008b0100b500332a2bb9008b0100b500352a2bb9008b0100b500372a2bb80086c0008eb5003b2a2bb80086c0008fc0008fb5003d2a2bb80086c00090c00090b5003f2a2bb9008b0100b500832a2bb80086c00091b500412a2bb80086c00092b500432a2bb80086c00093b500452a2bb80086c00002b500042a2bb9008b0100b500472a2bb80084b500392a2bb9008b0100b5004b2a2bb80085b5004e2a2bb9008b0100b500052a2bb9008b0100b500292a2bb80084b500562a2bb9008b0100b500582a2bb80084b50052b1
+toData,361,2ab400082bb8007c2ab4000a2bb8007c2ab4000d2bb8007d2ab4007e2bb8007c2ab4000f2bb8007f2ab400112bb8007f2ab400132bb8007f2ab400152bb8007f2ab400172bb8007f2ab400192bb8007c2ab4001b2bb8007f2ab4001d2bb8007c2ab4001f2bb8007f2ab400212bb8007f2b2ab40023b9008002002b2ab40025b9008002002b2ab40027b9008102002b2ab4002bb9008202002b2ab4002db9008102002b2ab4002fb9008002002b2ab40031b9008002002b2ab40033b9008002002b2ab40035b9008002002b2ab40037b9008002002ab4003b2bb8007f2ab4003d2bb8007f2ab4003f2bb8007f2b2ab40083b9008002002ab400412bb8007f2ab400432bb8007f2ab400452bb8007f2ab400042bb8007f2b2ab40047b9008002002ab400392bb8007c2b2ab4004bb9008002002ab4004e2bb8007d2b2ab40005b9008002002b2ab40029b9008002002ab400562bb8007c2b2ab40058b9008002002ab400522bb8007cb1
 
 com/gemstone/gemfire/internal/admin/remote/RemoteRegionSnapshot,2
 fromData,59,2a2bb80029b500032a2bb8002ac00009b5000c2a2bb8002ac00005b500072a2bb9002b0100b500102a2bb9002b0100b500122a2bb8002ab5001cb1
@@ -843,8 +883,8 @@ fromData,1,b1
 toData,1,b1
 
 com/gemstone/gemfire/internal/cache/AbstractRegion,2
-toData,6,2a2bb80176b1
-fromData,8,bb017759b70178bf
+toData,6,2a2bb80184b1
+fromData,8,bb018559b70186bf
 
 com/gemstone/gemfire/internal/cache/AbstractUpdateOperation$AbstractUpdateMessage,2
 fromData,16,2a2bb700192a2bb9001a0100b5000db1
@@ -947,8 +987,8 @@ fromData,14,2a2bb7001a2a2bb8001bb50004b1
 toData,14,2a2bb700182ab400042bb80019b1
 
 com/gemstone/gemfire/internal/cache/DistributedCacheOperation$CacheOperationMessage,2
-fromData,318,2bb9009501003d2bb9009501003e2a1cb500962a1c2bb600972a2bb80098b500232a2bb900990100b8009ab500092a1c1100807e99000704a7000403b500042a1c10087e99000704a7000403b500581c1102007e99000b2a2bb8009bb500882a1c1104007e99000704a7000403b500072a1c10407e99000704a7000403b5001d2ab4001d9900382bb900990100360415049a000b2a03b5001ea7001b150404a0000b2a04b5001ea7000dbb009c59129db7009ebf2a2bb8009fb5001f1c1101007e99000704a700040336042a1c1108007e99000704a7000403b500a015049900162abb00a159b700a2b5000e2ab4000e2bb800a31c1110007e99001c1c1120007e99000704a700040336052a15052bb800a4b5000a1d1104007e9900232a04b5000f2ac100a59900172ac000a51d1101007e99000704a7000403b600a6b1
-toData,202,033d033e2a1cb600a73d2a1db600a83e2b1cb900a902002b1db900a902002ab4000d9e000d2b2ab4000db900aa02002ab400232bb800ab2b2ab40009b400acb900ad02002ab40088c6000b2ab400882bb800ae2ab4001d9900542b2ab4001e99000704a7000403b900ad02002ab4001eb800af36042ab4001e9a001f2ab4001fc10020990015013a052ab4001fc00020c000203a06a7000c2ab4001f3a05013a061504190519062bb800b02ab4000ec6000b2ab4000e2bb800b12ab4000ac6000b2ab4000a2bb800b1b1
+fromData,338,2bb9009501003d2bb9009501003e2a1cb500962a1c2bb600972a2bb80098b500232a2bb900990100b8009ab500092a1c1100807e99000704a7000403b500042a1c10087e99000704a7000403b500581c1102007e99000b2a2bb8009bb500882a1c1104007e99000704a7000403b500072a1c10407e99000704a7000403b5001d2ab4001d9900382bb900990100360415049a000b2a03b5001ea7001b150404a0000b2a04b5001ea7000dbb009c59129db7009ebf2a2bb8009fb5001f1c1101007e99000704a700040336042a1c1108007e99000704a7000403b500a015049900162abb00a159b700a2b5000e2ab4000e2bb800a31c1110007e99001c1c1120007e99000704a700040336052a15052bb800a4b5000a1d1104007e9900372a04b5000f2ac100a599002b2ac000a51d1102007e99000704a7000403b600a62ac000a51d1101007e99000704a7000403b600a7b1
+toData,202,033d033e2a1cb600a83d2a1db600a93e2b1cb900aa02002b1db900aa02002ab4000d9e000d2b2ab4000db900ab02002ab400232bb800ac2b2ab40009b400adb900ae02002ab40088c6000b2ab400882bb800af2ab4001d9900542b2ab4001e99000704a7000403b900ae02002ab4001eb800b036042ab4001e9a001f2ab4001fc10020990015013a052ab4001fc00020c000203a06a7000c2ab4001f3a05013a061504190519062bb800b12ab4000ec6000b2ab4000e2bb800b22ab4000ac6000b2ab4000a2bb800b2b1
 
 com/gemstone/gemfire/internal/cache/DistributedClearOperation$ClearRegionMessage,2
 fromData,53,2a2bb700212ab800222bb90023010032b500022a2bb80024c00025b500062a2bb80024c00026b500172a2bb80024c00027b50011b1
@@ -966,8 +1006,8 @@ com/gemstone/gemfire/internal/cache/DistributedPutAllOperation$PutAllEntryData,1
 toData,236,2ab4000a4e2ab4000c3a042d2bb8003d1904c1003e9a00081904c700192b03b9003f02001904c0003ec0003e2bb80040a700341904c1004199001f1904c000413a052b04b9003f02001905b9004201002bb80040a700102b04b9003f020019042bb800432b2ab40012b40044b9003f02002ab4000636052ab40026c6000a150507809136052ab40017c6001d15051008809136052ab40017c1004599000b150510208091360515051080809136052b1505b9003f02002ab40026c6000b2ab400262bb8003d2ab40017c6000b2ab400172bb800462ab6002899000b2ab400142bb800462ab400082bb80047b1
 
 com/gemstone/gemfire/internal/cache/DistributedPutAllOperation$PutAllMessage,2
-fromData,197,2a2bb7003e2a2bb8003fc00040b500072a2bb8004188b500172a2ab40017bd0042b500082ab400179e00722bb800434dbb004459b700454e03360415042ab40017a200202ab400081504bb0042592b2ab4000715042c2db7004653840401a7ffdd2bb9004701003604150499002f2bb800483a0503360615062ab40017a2001d2ab4000815063219051506b60049c0004ab50030840601a7ffe02ab4004b1140007e99000e2a2bb8003fc0004cb5000d2a2ab4004b1180007e99000704a7000403b5001cb1
-toData,181,2a2bb7004d2ab400072bb8004e2ab40017852bb8004f2ab400179e008bbb0050592ab40017b700514d033e2ab400080332b40052c10026360403360515052ab40017a200531d9a00122ab40008150532b40030c60005043e2ab40008150532b400303a062c1906b60053572ab4000815053201b500302ab400081505322b1504b600542ab400081505321906b50030840501a7ffaa2b1db9005502001d9900082c2bb800562ab4000dc6000b2ab4000d2bb8004eb1
+fromData,197,2a2bb700402a2bb80041c00042b500082a2bb8004388b500182a2ab40018bd0044b500092ab400189e00722bb800454dbb004659b700474e03360415042ab40018a200202ab400091504bb0044592b2ab4000815042c2db7004853840401a7ffdd2bb9004901003604150499002f2bb8004a3a0503360615062ab40018a2001d2ab4000915063219051506b6004bc0004cb50032840601a7ffe02ab4004d1140007e99000e2a2bb80041c0004eb5000e2a2ab4004d1180007e99000704a7000403b5001db1
+toData,181,2a2bb7004f2ab400082bb800502ab40018852bb800512ab400189e008bbb0052592ab40018b700534d033e2ab400090332b40054c10028360403360515052ab40018a200531d9a00122ab40009150532b40032c60005043e2ab40009150532b400323a062c1906b60055572ab4000915053201b500322ab400091505322b1504b600562ab400091505321906b50032840501a7ffaa2b1db9005702001d9900082c2bb800582ab4000ec6000b2ab4000e2bb80050b1
 
 com/gemstone/gemfire/internal/cache/DistributedRegionFunctionStreamingMessage,2
 fromData,171,2a2bb700622bb9006301003d1c047e9900142a2bb900640100b500092ab40009b800651c077e99000d2a2bb900640100b500061c057e99000e2a2bb80066c00067b500072bb800664e2dc100689900252a03b5000e2a2dc00068b80069b500082ab40008c7001b2a2dc00068b5004da700102a2dc0006ab500082a04b5000e2a2bb80066c0006bb5000a2a2bb8006cb5000c2a2bb8006db5000b2a1c10407e99000704a7000403b5000db1
@@ -989,8 +1029,8 @@ fromData,17,2a2bb80005b500022a2bb80005b50003b1
 toData,17,2ab400022bb800042ab400032bb80004b1
 
 com/gemstone/gemfire/internal/cache/EntryEventImpl,2
-fromData,214,2a2bb8001bc0001cb5001d2bb8001b4d2bb8001b4e2abb001e592c2d01b7001fb500202a2bb900210100b80022b500232a2bb900240100b500092ab400202bb8001bb600252a2bb8001bc00026b5000a2bb9002701009900112a2bb8001bc00028b50008a700322bb9002701009900212a2bb80029b5002a2a2ab4002ab500062a2ab4002ab8002bb50005a7000b2a2bb8001bb500052bb9002701009900192a2bb80029b5002c2a2ab4002cb8002bb50007a7000b2a2bb8001bb500072a2bb8002db5002e2a2bb8002fb5000b2a2bb80030b50014b1
-toData,312,2ab4001d2bb801602ab6008c2bb801602ab40020b6018d2bb801602b2ab40023b4018eb9018f02002b2ab4000911c03f7eb9019002002ab6004b2bb801602ab4000a2bb801602ab40008c6000704a70004033d2b1cb9019102001c99000e2ab400082bb80160a700682ab600414e2dc100823604150499000e2dc00082b900b8010036042b1504b901910200150499003b2ab4002ac6000e2ab4002a2bb80192a7002e2ab40006c6000e2ab400062bb80192a7001c2dc000823a051905b900c601002bb80193a700082d2bb801602ab700434d2cc100823e1d99000d2cc00082b900b801003e2b1db9019102001d9900292ab4002cc6000e2ab4002c2bb80192a7001c2cc000823a041904b900c601002bb80193a700082c2bb801602ab4002ec001942bb801952ab600582bb801602ab400142bb80196b1
+fromData,214,2a2bb8001dc0001eb5001f2bb8001d4d2bb8001d4e2abb0020592c2d01b70021b500222a2bb900230100b80024b500252a2bb900260100b500082ab400222bb8001db600272a2bb8001dc00028b500092bb9002901009900112a2bb8001dc0002ab50007a700322bb9002901009900212a2bb8002bb5002c2a2ab4002cb500052a2ab4002cb8002db50004a7000b2a2bb8001db500042bb9002901009900192a2bb8002bb5002e2a2ab4002eb8002db50006a7000b2a2bb8001db500062a2bb8002fb500302a2bb80031b5000a2a2bb80032b50016b1
+toData,312,2ab400202bb801632ab6008f2bb801632ab40023b6018c2bb801632b2ab40026b4018db9018e02002b2ab4000911c03f7eb9018f02002ab6004e2bb801632ab4000a2bb801632ab40008c6000704a70004033d2b1cb9019002001c99000e2ab400082bb80163a700682ab600444e2dc100853604150499000e2dc00085b900bb010036042b1504b901900200150499003b2ab4002dc6000e2ab4002d2bb80191a7002e2ab40006c6000e2ab400062bb80191a7001c2dc000853a051905b900c901002bb80192a700082d2bb801632ab700464d2cc100853e1d99000d2cc00085b900bb01003e2b1db9019002001d9900292ab4002fc6000e2ab4002f2bb80191a7001c2cc000853a041904b900c901002bb80192a700082c2bb801632ab40031c001932bb801942ab6005b2bb801632ab400172bb80195b1
 
 com/gemstone/gemfire/internal/cache/EntrySnapshot,2
 fromData,50,2a03b500052bb9004101003d1c9900112abb000759b70042b50004a7000e2abb000359b70043b500042ab400042bb60044b1
@@ -1079,8 +1119,8 @@ fromData,107,2a2bb9001b0100b500032bb9001b01003d2a2bb8001cb500122ab40003b80014990
 toData,125,2b2ab40003b9000f02002ab4000dc6000704a70004033d1c2ab4000dc1001199000705a700040380913d2b1cb9000f02002ab400122bb800132ab40003b800149a00232ab600159a000e2ab400022bb80016a700112ab40002c00017c000172bb800182b2ab40004b9001903002ab4000dc6000b2ab4000d2bb8001ab1
 
 com/gemstone/gemfire/internal/cache/InitialImageOperation$FilterInfoMessage,2
-fromData,230,2a2bb7008c2a2bb8008db500202ab4000403322bb8008db5003d2ab4000403322bb8008db500402ab4000403322bb8008db500422ab4000403322bb8008db500442ab4000403322bb8008db500462ab4000403322bb8008db500482ab4000403322bb8008db5004a2ab4000403322bb8008db5004c2ab4000404322bb8008db5003d2ab4000404322bb8008db500402ab4000404322bb8008db500422ab4000404322bb8008db500442ab4000404322bb8008db500462ab4000404322bb8008db500482ab4000404322bb8008db5004a2ab4000404322bb8008db5004c2a2bb8008db50033b1
-toData,284,2a2bb7008a2ab40020c000312bb8008b2ab400040332b4003dc000312bb8008b2ab400040332b40040c000312bb8008b2ab400040332b40042c000312bb8008b2ab400040332b40044c000312bb8008b2ab400040332b40046c000312bb8008b2ab400040332b40048c000312bb8008b2ab400040332b4004ac000312bb8008b2ab400040332b4004cc000312bb8008b2ab400040432b4003dc000312bb8008b2ab400040432b40040c000312bb8008b2ab400040432b40042c000312bb8008b2ab400040432b40044c000312bb8008b2ab400040432b40046c000312bb8008b2ab400040432b40048c000312bb8008b2ab400040432b4004ac000312bb8008b2ab400040432b4004cc000312bb8008b2ab40033c000312bb8008bb1
+fromData,230,2a2bb7008b2a2bb8008cb500202ab4000403322bb8008cb5003d2ab4000403322bb8008cb500402ab4000403322bb8008cb500422ab4000403322bb8008cb500442ab4000403322bb8008cb500462ab4000403322bb8008cb500482ab4000403322bb8008cb5004a2ab4000403322bb8008cb5004c2ab4000404322bb8008cb5003d2ab4000404322bb8008cb500402ab4000404322bb8008cb500422ab4000404322bb8008cb500442ab4000404322bb8008cb500462ab4000404322bb8008cb500482ab4000404322bb8008cb5004a2ab4000404322bb8008cb5004c2a2bb8008cb50033b1
+toData,284,2a2bb700892ab40020c000312bb8008a2ab400040332b4003dc000312bb8008a2ab400040332b40040c000312bb8008a2ab400040332b40042c000312bb8008a2ab400040332b40044c000312bb8008a2ab400040332b40046c000312bb8008a2ab400040332b40048c000312bb8008a2ab400040332b4004ac000312bb8008a2ab400040332b4004cc000312bb8008a2ab400040432b4003dc000312bb8008a2ab400040432b40040c000312bb8008a2ab400040432b40042c000312bb8008a2ab400040432b40044c000312bb8008a2ab400040432b40046c000312bb8008a2ab400040432b40048c000312bb8008a2ab400040432b4004ac000312bb8008a2ab400040432b4004cc000312bb8008a2ab40033c000312bb8008ab1
 
 com/gemstone/gemfire/internal/cache/InitialImageOperation$ImageReplyMessage,2
 fromData,224,2a2bb7001d2bb8001e4d014e2cc600102cb6001f9e00092c03b600204e2dc1002199000e2a2dc00022b50010a700082a2cb500102a2bb900230100b500112a2bb900230100b500122a2bb900230100b500132a2bb900240100b500142a2bb900230100b500152a2bb80025b500042a2bb900240100b500032a2bb900240100b500162ab4001699000f2abb0026592bb70027b500022bb900280100360415049b00102abb0029591504b7002ab5000103360515051504a200292bb8002bc0002c3a062bb8002d37072ab4000119061607b8002eb9002f030057840501a7ffd6b1
@@ -1194,8 +1234,8 @@ fromData,16,2a2bb700152a2bb900160100b50003b1
 toData,16,2a2bb700172b2ab40003b900180200b1
 
 com/gemstone/gemfire/internal/cache/RemoteDestroyMessage,2
-fromData,131,2a2bb7008b2a2bb8008cb7008d2a2bb8008cb5000c2a2bb9008e0100b8008fb5000e2ab400901102007e99000e2a2bb8008cc00091b500102ab400901104007e99000e2a2bb8008cc00034b500662a2bb8008cc00092b500122ab400059900122bb9008e0100572a2bb80093b700222a2bb8008cb500082a2bb8008cc00094b50017b1
-toData,135,2a2bb700952ab6006c2bb800962ab4000c2bb800962b2ab4000eb40097b9009802002ab40010c6000b2ab400102bb800962ab40066c6000b2ab400662bb800962ab400122bb800962ab4000599002a2b2ab4000699000704a7000403b9009802002ab40006b800993d1c2ab7009a2ab600702bb8009b2ab400082bb800962ab400172bb80096b1
+fromData,131,2a2bb7008c2a2bb8008db7008e2a2bb8008db5000c2a2bb9008f0100b80090b5000e2ab400911102007e99000e2a2bb8008dc00092b500102ab400911104007e99000e2a2bb8008dc00035b500672a2bb8008dc00093b500122ab400059900122bb9008f0100572a2bb80094b700232a2bb8008db500082a2bb8008dc00095b50017b1
+toData,135,2a2bb700962ab6006d2bb800972ab4000c2bb800972b2ab4000eb40098b9009902002ab40010c6000b2ab400102bb800972ab40067c6000b2ab400672bb800972ab400122bb800972ab4000599002a2b2ab4000699000704a7000403b9009902002ab40006b8009a3d1c2ab7009b2ab600712bb8009c2ab400082bb800972ab400172bb80097b1
 
 com/gemstone/gemfire/internal/cache/RemoteDestroyMessage$DestroyReplyMessage,2
 fromData,52,2a2bb700232bb9002401003d1c047e99000704a70004033e1c057e99000704a700040336041d99000d2a15042bb80025b50009b1
@@ -1624,8 +1664,8 @@ fromData,36,2a2bb700252a2bb900260100b500022a2bb900260100b500032a2bb900260100b500
 toData,36,2a2bb700272b2ab40002b9002802002b2ab40003b9002802002b2ab40004b900280200b1
 
 com/gemstone/gemfire/internal/cache/partitioned/GetMessage,2
-fromData,43,2a2bb700542a2bb80055b500052a2bb80055b500062a2bb80055c00056b500072a2bb900570100b50008b1
-toData,40,2a2bb700582ab400052bb800592ab400062bb800592ab400072bb800592b2ab40008b9005a0200b1
+fromData,43,2a2bb700552a2bb80056b500052a2bb80056b500062a2bb80056c00057b500072a2bb900580100b50008b1
+toData,40,2a2bb700592ab400052bb8005a2ab400062bb8005a2ab400072bb8005a2b2ab40008b9005b0200b1
 
 com/gemstone/gemfire/internal/cache/partitioned/GetMessage$GetReplyMessage,2
 fromData,77,2a2bb7002a2bb9002b01003d1c10087e99000704a7000403593e9900091c10f77e913d2a1cb500072a2bb8002cb5002d1c049f000b2a2bb8002eb5002f1d99000e2a2bb80030c00031b5000ab1
@@ -1712,8 +1752,8 @@ fromData,49,2a2bb700392a2bb8003ab500062a2bb9003b0100b8003cb500082a2bb8003ac0003d
 toData,43,2a2bb7003f2ab700122bb800402b2ab40008b40041b9004202002ab4000a2bb800402ab4000c2bb80040b1
 
 com/gemstone/gemfire/internal/cache/partitioned/PartitionMessage,2
-fromData,58,2a2bb700742a2bb900750100b500052a2ab400052bb600762a2bb900770100b5000e2bb80078b20079b6007a9b000d2a2bb9007b0100b5000ab1
-toData,104,2a2bb7007f033d2a1cb600803d2b1cb9008102002ab4001099000d2b2ab40010b9008202002ab40008029f000d2b2ab40008b9008202002ab40006c6000b2ab400062bb800832b2ab4000eb9008202002bb80084b20079b6007a9b000d2b2ab4000ab900850200b1
+fromData,58,2a2bb700732a2bb900740100b500052a2ab400052bb600752a2bb900760100b5000e2bb80077b20078b600799b000d2a2bb9007a0100b5000ab1
+toData,104,2a2bb7007e033d2a1cb6007f3d2b1cb9008002002ab4001099000d2b2ab40010b9008102002ab40008029f000d2b2ab40008b9008102002ab40006c6000b2ab400062bb800822b2ab4000eb9008102002bb80083b20078b600799b000d2b2ab4000ab900840200b1
 
 com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionFunctionStreamingMessage,2
 fromData,17,2a2bb7003c2a2bb8003dc0003eb50003b1
@@ -1728,16 +1768,16 @@ fromData,16,2a2bb700092a2bb9000a0100b50007b1
 toData,16,2a2bb7000b2b2ab40007b9000c0200b1
 
 com/gemstone/gemfire/internal/cache/partitioned/PutAllPRMessage,2
-fromData,183,2a2bb7003f2a2bb8004088b80009b5000a2ab400411110007e99000e2a2bb80042c00043b5003d2a2bb80042b500102a2bb8004488b500052a2ab40005bd000bb5000c2ab400059e006f2bb800454dbb004659b700474e03360415042ab40005a2001d2ab4000c1504bb000b592b0115042c2db7004853840401a7ffe02bb9004901003604150499002f2bb8004a3a0503360615062ab40005a2001d2ab4000c15063219051506b6004bc0004cb5004d840601a7ffe0b1
-toData,210,2a2bb7004e2ab4000ac7000d14004f2bb80051a7000f2ab4000ab60052852bb800512ab4003dc6000b2ab4003d2bb800532ab400102bb800532ab40005852bb800542ab400059e008bbb0055592ab40005b700564d033e2ab4000c0332b60022c10057360403360515052ab40005a200531d9a00122ab4000c150532b4004dc60005043e2ab4000c150532b4004d3a062c1906b60058572ab4000c15053201b5004d2ab4000c1505322b1504b600592ab4000c1505321906b5004d840501a7ffaa2b1db9005a02001d9900082c2bb8005bb1
+fromData,183,2a2bb700402a2bb8004188b80009b5000a2ab400421110007e99000e2a2bb80043c00044b5003e2a2bb80043b500102a2bb8004588b500052a2ab40005bd000bb5000c2ab400059e006f2bb800464dbb004759b700484e03360415042ab40005a2001d2ab4000c1504bb000b592b0115042c2db7004953840401a7ffe02bb9004a01003604150499002f2bb8004b3a0503360615062ab40005a2001d2ab4000c15063219051506b6004cc0004db5004e840601a7ffe0b1
+toData,210,2a2bb7004f2ab4000ac7000d1400502bb80052a7000f2ab4000ab60053852bb800522ab4003ec6000b2ab4003e2bb800542ab400102bb800542ab40005852bb800552ab400059e008bbb0056592ab40005b700574d033e2ab4000c0332b60023c10058360403360515052ab40005a200531d9a00122ab4000c150532b4004ec60005043e2ab4000c150532b4004e3a062c1906b60059572ab4000c15053201b5004e2ab4000c1505322b1504b6005a2ab4000c1505321906b5004e840501a7ffaa2b1db9005b02001d9900082c2bb8005cb1
 
 com/gemstone/gemfire/internal/cache/partitioned/PutAllPRMessage$PutAllReplyMessage,2
 fromData,27,2a2bb7001b2a2bb9001c0100b500032a2bb8001dc0001eb50002b1
 toData,24,2a2bb7001f2b2ab40003b9002002002ab400022bb80021b1
 
 com/gemstone/gemfire/internal/cache/partitioned/PutMessage,2
-fromData,260,2a2bb7005d2bb9005e01003d2a2bb8005fb600602a2bb8005fb500172a2bb900610100b500182a2bb900620100b80063b500191cb200647e99000b2a2bb80065b5001a1cb200667e99000e2a2bb8005fc00067b5001c2abb006859b70069b5001d2ab4001d2bb8006a2ab4006b1120007e99000b2a2bb8005fb500222ab4006c9900162abb006d59b7006eb500282ab400282bb8006a2a1cb2006f7e91b500072ab4000999000e2a2bb80070b5000da7002e2ab4000704a0000e2a2bb8005fb70071a7000b2a2bb80070b700721cb200737e99000b2a2bb80070b5000d2ab4006b1140007e99000e2a2bb8005fc00074b500252ab4006b1180007e9900082a04b50075b1
-toData,358,014d2ab40021b60076b9007701003e2ab4000cb60078c600161d9900122ab4000b99000b2a04b50009a700082a03b50009a7000d4ebb007a592db7007bbf2a2bb7007c2ab400073e2ab4001ac600091db20064803e2ab400079900282ab40011c7000a2ab6007dc6001a2ab4007e9900132ab4000cb60078c600091db20073803e2ab4001cc600091db20066803e2b1db9007f02002ab600802bb800812ab600822bb800812b2ab40018b9008303002b2ab40019b40084b9007f02002ab4001ac6000b2ab4001a2bb800812ab4001cc6000b2ab4001c2bb800812ab4001d2bb800852ab40022c6000b2ab400222bb800812ab4006c99000b2ab400282bb800852ab4000999002f2ab40086b800874da7000f3a04bb008959128ab7008bbf2ab4000cb600782bb8008c2cb6008db6008ea700262ab400072ab400112ab6007d2bb8008f1db200737e99000e2ab4000cb600782bb8008c2ab40025c6000b2ab400252bb80081b1
+fromData,273,2a2bb7005e2bb9005f01003d2a2bb80060b600612a2bb80060b500182a2bb900620100b500192a2bb900630100b80064b5001a1cb200657e99000b2a2bb80066b5001b1cb200677e99000e2a2bb80060c00068b5001d1cb200697e9900082a04b5006a2abb006b59b7006cb5001e2ab4001e2bb8006d2ab4006e1120007e99000b2a2bb80060b500232ab4006f9900162abb007059b70071b500292ab400292bb8006d2a1cb200727e91b500072ab4000999000e2a2bb80073b5000da7002e2ab4000704a0000e2a2bb80060b70074a7000b2a2bb80073b700751cb200767e99000b2a2bb80073b5000d2ab4006e1140007e99000e2a2bb80060c00077b500262ab4006e1180007e9900082a04b50078b1
+toData,374,014d2ab40022b60079b9007a01003e2ab4000cb6007bc600161d9900122ab4000b99000b2a04b50009a700082a03b50009a7000d4ebb007d592db7007ebf2a2bb7007f2ab400073e2ab4001bc600091db20065803e2ab400079900282ab40011c7000a2ab60080c6001a2ab400819900132ab4000cb6007bc600091db20076803e2ab4001dc600091db20067803e2ab4000cb600829900091db20069803e2b1db9008302002ab600842bb800852ab600862bb800852b2ab40019b9008703002b2ab4001ab40088b9008302002ab4001bc6000b2ab4001b2bb800852ab4001dc6000b2ab4001d2bb800852ab4001e2bb800892ab40023c6000b2ab400232bb800852ab4006f99000b2ab400292bb800892ab4000999002f2ab4008ab8008b4da7000f3a04bb008d59128eb7008fbf2ab4000cb6007b2bb800902cb60091b60092a700262ab400072ab400112ab600802bb800931db200767e99000e2ab4000cb6007b2bb800902ab40026c6000b2ab400262bb80085b1
 
 com/gemstone/gemfire/internal/cache/partitioned/PutMessage$PutReplyMessage,2
 fromData,48,2a2bb700252a2bb900260100b500032a2bb900270100b80028b500022a2bb80029b500062a2bb80029c0002ab50007b1
@@ -1989,8 +2029,8 @@ fromData,67,2a2bb700192a2bb8001ab60018b500032abb000759b7000bb500062bb9001b01003d
 toData,91,2a2bb700122ab40003b800132bb800142ab40006c6003f2b2ab40006b900150100b9001602002ab40006b9001701004d2cb9000d010099001a2cb9000e0100c0000f4e2b2db60018b900160200a7ffe3a7000a2b03b900160200b1
 
 com/gemstone/gemfire/internal/cache/wan/GatewaySenderEventImpl,2
-fromData,183,2bb9007301003d1c10119f00032a04b5002a2a2bb900740100b500272a2bb900740100b500281c1011a200232bc1007599001c2bb80076b20077a60012bb0078592bc00075b20079b7007a4c2a2bb8007bc0007cb500292a2bb8007db500102a2bb9007e0100b5002d2a2bb6007f2a2bb80080b5002f2a2bb8007bc0001fb500202a2bb900810100b500132a2bb900820100b500172a2bb900740100b500092a2bb900820100b80004b500052a2bb900820100b5001ab1
-toData,133,2ab600262b1011b9006802002b2ab40027b9006902002b2ab40028b9006902002ab400292bb8006a2ab400102bb8006b2b2ab4002db9006c02002a2bb6006d2ab6002e2bb8006e2ab400202bb8006a2b2ab40013b9006f02002b2ab40017b9007003002b2ab40009b9006902002b2ab40005b60071b9007003002b2ab60072b900700300b1
+fromData,183,2bb9006d01003d1c10119f00032a04b500282a2bb9006e0100b500252a2bb9006e0100b500261c1011a200232bc1006f99001c2bb80070b20071a60012bb0072592bc0006fb20073b700744c2a2bb80075c00076b500272a2bb80077b5000e2a2bb900780100b5002b2a2bb600792a2bb8007ab5002d2a2bb80075c0001db5001e2a2bb9007b0100b500112a2bb9007c0100b500152a2bb9006e0100b500072a2bb9007c0100b80004b500052a2bb9007c0100b50018b1
+toData,133,2ab600242b1011b9006202002b2ab40025b9006302002b2ab40026b9006302002ab400272bb800642ab4000e2bb800652b2ab4002bb9006602002a2bb600672ab6002c2bb800682ab4001e2bb800642b2ab40011b9006902002b2ab40015b9006a03002b2ab40007b9006302002b2ab40005b6006bb9006a03002b2ab6006cb9006a0300b1
 
 com/gemstone/gemfire/internal/cache/wan/parallel/ParallelQueueBatchRemovalMessage,2
 fromData,17,2a2bb7003a2a2bb8003bc0003cb50004b1
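
[Editor's note: each entry in sanctionedDataSerializables.txt is a class name followed by a method count, then one line per serialization method in the form methodName,bytecodeLength,bytecodeHex; the code-analysis tests compare current bytecode against these recorded values to catch unreviewed serialization changes. A small, hypothetical sketch of parsing one method line follows; it is not the actual Geode analyzer.]

    // Parses one "method,length,hex" line copied from the diff above (CqEntry.fromData).
    public class SanctionedMethodLineSketch {

      public static void main(String[] args) {
        String line = "fromData,17,2a2bb80009b500022a2bb80009b50003b1";

        String[] parts = line.split(",", 3);
        String methodName = parts[0];
        int byteCodeLength = Integer.parseInt(parts[1]);
        String byteCodeHex = parts[2];

        // Two hex characters encode one byte, so the hex string should be 2 * length characters.
        boolean consistent = byteCodeHex.length() == byteCodeLength * 2;

        System.out.println(methodName + " has " + byteCodeLength
            + " bytes of bytecode (hex consistent: " + consistent + ")");
      }
    }
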


[20/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HFileSortedOplog.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HFileSortedOplog.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HFileSortedOplog.java
new file mode 100644
index 0000000..5ba20d2
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HFileSortedOplog.java
@@ -0,0 +1,853 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.io.ByteArrayInputStream;
+import java.io.Closeable;
+import java.io.DataInput;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.EnumMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NoSuchElementException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import com.gemstone.gemfire.internal.hll.HyperLogLog;
+import com.gemstone.gemfire.internal.hll.ICardinality;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.util.ShutdownHookManager;
+
+import com.gemstone.gemfire.cache.CacheClosedException;
+import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.DelegatingSerializedComparator;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.HFileStoreStatistics;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics.ScanOperation;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedReader.SerializedComparator;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.internal.Version;
+import com.gemstone.gemfire.internal.util.Hex;
+import com.gemstone.gemfire.internal.util.SingletonValue;
+import com.gemstone.gemfire.internal.util.SingletonValue.SingletonBuilder;
+
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
+import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
+import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
+import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader;
+import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
+import org.apache.hadoop.hbase.util.BloomFilterFactory;
+import org.apache.hadoop.hbase.util.BloomFilterWriter;
+
+/**
+ * Implements an HFile-based {@link Hoplog}.
+ */
+public final class HFileSortedOplog extends AbstractHoplog {
+
+//  private static final boolean CACHE_DATA_BLOCKS_ON_READ = !Boolean.getBoolean("gemfire.HFileSortedOplog.DISABLE_CACHE_ON_READ");
+  private final CacheConfig cacheConf;
+  private ICardinality entryCountEstimate;
+  
+  // a cached reader for the file
+  private final SingletonValue<HFileReader> reader;
+
+  public HFileSortedOplog(HDFSStoreImpl store, Path hfilePath,
+      BlockCache blockCache, SortedOplogStatistics stats,
+      HFileStoreStatistics storeStats) throws IOException {
+    super(store, hfilePath, stats);
+    cacheConf = getCacheConfInstance(blockCache, stats, storeStats);
+    reader = getReaderContainer();
+  }
+
+  /**
+   * NOTE: this method should only be used for a loner (standalone) member.
+   */
+  public static HFileSortedOplog getHoplogForLoner(FileSystem inputFS,
+      Path hfilePath) throws IOException {
+    return new HFileSortedOplog(inputFS, hfilePath, null, null, null);
+  }
+
+  private HFileSortedOplog(FileSystem inputFS, Path hfilePath,
+      BlockCache blockCache, SortedOplogStatistics stats,
+      HFileStoreStatistics storeStats) throws IOException {
+    super(inputFS, hfilePath, stats);
+    cacheConf = getCacheConfInstance(blockCache, stats, storeStats);
+    reader = getReaderContainer();
+  }
+
+  protected CacheConfig getCacheConfInstance(BlockCache blockCache,
+      SortedOplogStatistics stats, HFileStoreStatistics storeStats) {
+    CacheConfig tmpConfig = null;
+//    if (stats == null) {
+      tmpConfig = new CacheConfig(conf);
+//    } else {
+//      tmpConfig = new CacheConfig(conf, CACHE_DATA_BLOCKS_ON_READ, blockCache,
+//          HFileSortedOplogFactory.convertStatistics(stats, storeStats));
+//    }
+    tmpConfig.shouldCacheBlockOnRead(BlockCategory.ALL_CATEGORIES);
+    return tmpConfig;
+  }  
+
+  private SingletonValue<HFileReader> getReaderContainer() {
+    return new SingletonValue<HFileReader>(new SingletonBuilder<HFileReader>() {
+      @Override
+      public HFileReader create() throws IOException {
+        if (logger.isDebugEnabled())
+          logger.debug("{}Creating hoplog reader", logPrefix);
+        return new HFileReader();
+      }
+
+      @Override
+      public void postCreate() {
+        if (readerListener != null) {
+          readerListener.readerCreated();
+        }
+      }
+      
+      @Override
+      public void createInProgress() {
+      }
+    });
+  }
+  
+  @Override
+  public HoplogReader getReader() throws IOException {
+    return reader.get();
+  }
+  
+  @Override
+  public ICardinality getEntryCountEstimate() throws IOException {
+    ICardinality result = entryCountEstimate;
+    if (result == null) {
+      HoplogReader rdr = getReader(); // keep this out of the critical section
+      synchronized(this) {
+        result = entryCountEstimate;
+        if (result == null) {
+          entryCountEstimate = result = rdr.getCardinalityEstimator();
+        }
+      }
+    }
+    return result;
+  }
+  
+  @Override
+  public HoplogWriter createWriter(int keys) throws IOException {
+    return new HFileSortedOplogWriter(keys);
+  }
+
+  @Override
+  public boolean isClosed() {
+    HFileReader rdr = reader.getCachedValue();
+    return rdr == null || rdr.isClosed();
+  }
+  
+  @Override
+  public void close() throws IOException {
+    close(true);
+  }
+
+  @Override
+  public void close(boolean clearCache) throws IOException {
+    compareAndClose(null, clearCache);
+  }
+  
+  private void compareAndClose(HFileReader hfileReader, boolean clearCache) throws IOException {
+    HFileReader rdr ;
+    if (hfileReader == null) {
+      rdr = reader.clear(true);
+    } else {
+      boolean result = reader.clear(hfileReader, true);
+      if (! result) {
+        if (logger.isDebugEnabled())
+          logger.debug("{}skipping close, provided hfileReader mismatched", logPrefix);
+        return;
+      } 
+      rdr = hfileReader;
+    }
+    
+    if (rdr != null) {
+      try {
+        rdr.close(clearCache);
+      } finally {
+        if (readerListener != null) {
+          readerListener.readerClosed();
+        }
+      }
+    }
+  }
+  
+  @Override
+  public String toString() {
+    return "HFileSortedOplog[" + getFileName() + "]";
+  }
+
+  private class HFileSortedOplogWriter implements HoplogWriter {
+    private final Writer writer;
+    private final BloomFilterWriter bfw;
+    private final AtomicBoolean closed = new AtomicBoolean(false);
+
+    public HFileSortedOplogWriter(int keys) throws IOException {
+      try {
+        int hfileBlockSize = Integer.getInteger(
+            HoplogConfig.HFILE_BLOCK_SIZE_CONF, (1 << 16));
+
+        Algorithm compress = Algorithm.valueOf(System.getProperty(HoplogConfig.COMPRESSION,
+            HoplogConfig.COMPRESSION_DEFAULT));
+
+//        ByteComparator bc = new ByteComparator();
+        writer = HFile.getWriterFactory(conf, cacheConf)
+            .withPath(fsProvider.getFS(), path)
+            .withBlockSize(hfileBlockSize)
+//            .withComparator(bc)
+            .withCompression(compress)
+            .create();
+//        bfw = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, BloomType.ROW, keys,
+//            writer, bc);
+        bfw = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, BloomType.ROW, keys,
+            writer);
+
+        if (logger.isDebugEnabled())
+          logger.debug("{}Created hoplog writer with compression " + compress, logPrefix);
+      } catch (IOException e) {
+        if (logger.isDebugEnabled())
+          logger.debug("{}IO Error while creating writer", logPrefix);
+        throw e;
+      }
+    }
+
+    @Override
+    public void append(byte[] key, byte[] value) throws IOException {
+      writer.append(key, value);
+      bfw.add(key, 0, key.length);
+    }
+
+    @Override
+    public void append(ByteBuffer key, ByteBuffer value) throws IOException {
+      byte[] keyBytes = byteBufferToArray(key);
+      byte[] valueBytes = byteBufferToArray(value);
+      writer.append(keyBytes, valueBytes);
+      bfw.add(keyBytes, 0, keyBytes.length);
+    }
+
+    @Override
+    public void close() throws IOException {
+      close(null);
+    }
+
+    @Override
+    public void close(EnumMap<Meta, byte[]> metadata) throws IOException {
+      if (closed.get()) {
+        if (logger.isDebugEnabled())
+          logger.debug("{}Writer already closed", logPrefix);
+        return;
+      }
+      
+      bfw.compactBloom();
+      writer.addGeneralBloomFilter(bfw);
+
+      // append system metadata
+      writer.appendFileInfo(Meta.GEMFIRE_MAGIC.toBytes(), Hoplog.MAGIC);
+      writer.appendFileInfo(Meta.SORTED_OPLOG_VERSION.toBytes(), HoplogVersion.V1.toBytes());
+      writer.appendFileInfo(Meta.GEMFIRE_VERSION.toBytes(), Version.CURRENT.toBytes());
+      
+      // append comparator info
+//      if (writer.getComparator() instanceof DelegatingSerializedComparator) {
+//        ByteArrayOutputStream bos = new ByteArrayOutputStream();
+//        DataOutput out = new DataOutputStream(bos);
+//        
+//        writeComparatorInfo(out, ((DelegatingSerializedComparator) writer.getComparator()).getComparators());
+//        writer.appendFileInfo(Meta.COMPARATORS.toBytes(), bos.toByteArray());
+//      }
+      
+      // append user metadata
+      HyperLogLog cachedEntryCountEstimate = null;
+      if (metadata != null) {
+        for (Entry<Meta, byte[]> entry : metadata.entrySet()) {
+          writer.appendFileInfo(entry.getKey().toBytes(), entry.getValue());
+          if (Meta.LOCAL_CARDINALITY_ESTIMATE_V2.equals(entry.getKey())) {
+             cachedEntryCountEstimate = HyperLogLog.Builder.build(entry.getValue()); 
+          }
+        }
+      }
+      
+      writer.close();
+      if (logger.isDebugEnabled())
+        logger.debug("{}Completed closing writer", logPrefix);
+      closed.set(true);
+      // cache estimate value to avoid reads later
+      entryCountEstimate = cachedEntryCountEstimate;
+    }
+
+    @Override
+    public void hsync() throws IOException {
+      throw new UnsupportedOperationException("hsync is not supported for HFiles"); 
+    }
+
+    @Override
+    public long getCurrentSize() throws IOException {
+      throw new UnsupportedOperationException("getCurrentSize is not supported for HFiles"); 
+    }
+    
+//    private void writeComparatorInfo(DataOutput out, SerializedComparator[] comparators) throws IOException {
+//      out.writeInt(comparators.length);
+//      for (SerializedComparator sc : comparators) {
+//        out.writeUTF(sc.getClass().getName());
+//        if (sc instanceof DelegatingSerializedComparator) {
+//          writeComparatorInfo(out, ((DelegatingSerializedComparator) sc).getComparators());
+//        }
+//      }
+//    }
+  }
+  
+  private void handleReadIOError(HFileReader hfileReader, IOException e, boolean skipFailIfSafe) {
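+    // Classifies a read failure: during filesystem shutdown it is rethrown as
+    // CacheClosedException, RemoteExceptions are unwrapped, and if the FS client
+    // has been replaced the stale reader is closed so the caller can retry (only
+    // when skipFailIfSafe is true); otherwise the error is rethrown as HDFSIOException.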
+    if (logger.isDebugEnabled())
+      logger.debug("Read IO error", e);
+    boolean safeError = ShutdownHookManager.get().isShutdownInProgress();
+    if (safeError) {
+      // IOException because of closed file system. This happens when member is
+      // shutting down
+      if (logger.isDebugEnabled())
+        logger.debug("IO error caused by filesystem shutdown", e);
+      throw new CacheClosedException("IO error caused by filesystem shutdown", e);
+    } 
+    
+    // expose the error wrapped inside remote exception. Remote exceptions are
+    // handled by file system client. So let the caller handle this error
+    if (e instanceof RemoteException) {
+      e = ((RemoteException) e).unwrapRemoteException();
+      throw new HDFSIOException(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(path), e);
+    } 
+    
+    FileSystem currentFs = fsProvider.checkFileSystem();
+    if (hfileReader != null && hfileReader.previousFS != currentFs) {
+      if (logger.isDebugEnabled()) {
+        logger.debug("{}Detected new FS client, closing old reader", logPrefix);
+        if (currentFs != null) {
+          logger.debug("CurrentFs:" + currentFs.getUri() + "-"
+              + currentFs.hashCode(), logPrefix);
+        }
+        if (hfileReader.previousFS != null) {
+          logger.debug("OldFs:" + hfileReader.previousFS.getUri() + "-"
+              + hfileReader.previousFS.hashCode() + ", closing old reader", logPrefix);
+        }
+      }
+      try {
+        HFileSortedOplog.this.compareAndClose(hfileReader, false);
+      } catch (Exception ex) {
+        if (logger.isDebugEnabled())
+          logger.debug("Failed to close reader", ex);
+      }
+      if (skipFailIfSafe) {
+        if (logger.isDebugEnabled())
+          logger.debug("Not faling after io error since FS client changed");
+        return;
+      }
+    }
+
+    // it is not a safe error. let the caller handle it
+    throw new HDFSIOException(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(path), e);
+  }
+
+  class HFileReader implements HoplogReader, Closeable {
+    private final Reader reader;
+    private volatile BloomFilter hoplogBloom;
+    private final AtomicBoolean closed;
+    private final Map<byte[], byte[]> fileInfo;
+    private final HyperLogLog estimator;
+    private final FileSystem previousFS;
+    
+    public HFileReader() throws IOException {
+      try {
+        FileSystem fs = fsProvider.getFS();
+        reader = HFile.createReader(fs, path, cacheConf);
+        fileInfo = reader.loadFileInfo();
+        closed = new AtomicBoolean(false);
+
+        validate();
+        if (reader.getComparator() instanceof DelegatingSerializedComparator) {
+          loadComparators((DelegatingSerializedComparator) reader.getComparator());
+        }
+
+        // read the old HLL if it exists so that a CardinalityMergeException will trigger a Major Compaction
+        byte[] hll = fileInfo.get(Meta.LOCAL_CARDINALITY_ESTIMATE.toBytes());
+        if (hll != null) {
+          entryCountEstimate = estimator = HyperLogLog.Builder.build(hll);
+        } else if ((hll = fileInfo.get(Meta.LOCAL_CARDINALITY_ESTIMATE_V2.toBytes())) != null) {
+          entryCountEstimate = estimator = HyperLogLog.Builder.build(hll);
+        } else {
+          estimator = new HyperLogLog(HdfsSortedOplogOrganizer.HLL_CONSTANT);
+        }
+        
+        previousFS = fs;
+      } catch (IOException e) {
+        if (logger.isDebugEnabled())
+          logger.debug("IO Error while creating reader", e);
+        throw e;
+      }
+    }
+
+    @Override
+    public byte[] read(byte[] key) throws IOException {
+      IOException err = null;
+      HFileReader delegateReader = this;
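+      // at most two attempts: one with the current reader and, if error handling
+      // swapped in a new FS client, one more with the freshly opened reader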
+      for (int retry = 1; retry >= 0; retry --) {
+        try {
+          return delegateReader.readDelegate(key);
+        } catch (IOException e) {
+          err = e;
+          handleReadIOError(delegateReader, e, retry > 0);
+          // Current reader may have got closed in error handling. Get the new
+          // one for retry attempt
+          try {
+            delegateReader = (HFileReader) HFileSortedOplog.this.getReader(); 
+          } catch (IOException ex) {
+            handleReadIOError(null, e, false);
+          }
+        }
+      }
+
+      if (logger.isDebugEnabled())
+        logger.debug("Throwing err from read delegate ", err);
+      throw err;
+    }
+
+    private byte[] readDelegate(byte[] key) throws IOException {
+      try {
+        if (!getBloomFilter().mightContain(key)) {
+          // bloom filter check failed, the key is not present in this hoplog
+          return null;
+        }
+      } catch (IllegalArgumentException e) {
+        if (IOException.class.isAssignableFrom(e.getCause().getClass())) {
+          throw (IOException) e.getCause();
+        } else {
+          throw e;
+        }
+      }
+      
+      byte[] valueBytes = null;
+      ByteBuffer bb = get(key);
+      if (bb != null) {
+        valueBytes = new byte[bb.remaining()];
+        bb.get(valueBytes);
+      } else {
+        stats.getBloom().falsePositive();
+      }
+      return valueBytes;
+    }
+
+    @Override
+    public ByteBuffer get(byte[] key) throws IOException {
+      assert key != null;
+      HFileScanner seek = reader.getScanner(false, true);
+      if (seek.seekTo(key) == 0) {
+        return seek.getValue();
+      }
+      return null;
+    }
+
+    @Override
+    public HoplogIterator<byte[], byte[]> scan(byte[] from, boolean fromInclusive, byte[] to,
+        boolean toInclusive) throws IOException {
+      IOException err = null;
+      HFileReader delegateReader = this;
+      for (int retry = 1; retry >= 0; retry --) {
+        try {
+          return delegateReader.scanDelegate(from, fromInclusive, to, toInclusive);
+        } catch (IOException e) {
+          err = e;
+          handleReadIOError(delegateReader, e, retry > 0);
+          // Current reader may have got closed in error handling. Get the new
+          // one for retry attempt
+          try {
+            delegateReader = (HFileReader) HFileSortedOplog.this.getReader(); 
+          } catch (IOException ex) {
+            handleReadIOError(null, e, false);
+          }
+        }
+      }
+      if (logger.isDebugEnabled())
+        logger.debug("Throwing err from scan delegate ", err);
+      throw err;
+    }
+
+    private HoplogIterator<byte[], byte[]> scanDelegate(byte[] from, boolean fromInclusive, byte[] to,
+        boolean toInclusive) throws IOException {
+      return new HFileSortedIterator(reader.getScanner(true, false), from,
+          fromInclusive, to, toInclusive);
+    }
+    
+    @Override
+    public HoplogIterator<byte[], byte[]> scan(long offset, long length)
+        throws IOException {
+      /**
+       * Identifies the first and last key to be scanned based on offset and
+       * length. It loads the hfile block index and identifies the first hfile
+       * block starting after offset; the key of that block is the from-key for
+       * the scanner. Similarly it locates the first block starting beyond the
+       * offset + length range and uses that block's key as the to-key for the
+       * scanner.
+       */
+
+      // load block indexes in memory
+      BlockIndexReader bir = reader.getDataBlockIndexReader();
+      int blockCount = bir.getRootBlockCount();
+      
+      byte[] fromKey = null, toKey = null;
+
+      // find from key
+      int i = 0;
+      for (; i < blockCount; i++) {
+        if (bir.getRootBlockOffset(i) < offset) {
+          // hfile block has offset less than this reader's split offset. check
+          // the next block
+          continue;
+        }
+
+        // found the first hfile block starting after offset
+        fromKey = bir.getRootBlockKey(i);
+        break;
+      }
+
+      if (fromKey == null) {
+        // seems no block starts after the offset. return no-op scanner
+        return new HFileSortedIterator(null, null, false, null, false);
+      }
+      
+      // find to key
+      for (; i < blockCount; i++) {
+        if (bir.getRootBlockOffset(i) < (offset + length)) {
+          // this hfile block lies within the offset+length range. check the
+          // next block for a higher offset
+          continue;
+        }
+
+        // found the first block starting beyond the offset+length range.
+        toKey = bir.getRootBlockKey(i);
+        break;
+      }
+
+      // from key is included in scan and to key is excluded
+      HFileScanner scanner = reader.getScanner(true, false);
+      return new HFileSortedIterator(scanner, fromKey, true, toKey, false);
+    }
+    
+    @Override
+    public HoplogIterator<byte[], byte[]> scan() throws IOException {
+      return scan(null, null);
+    }
+
+    public HoplogIterator<byte[], byte[]> scan(byte[] from, byte[] to)
+        throws IOException {
+      return scan(from, true, to, false);
+    }
+
+    @Override
+    public BloomFilter getBloomFilter() throws IOException {
+      BloomFilter result = hoplogBloom;
+      if (result == null) {
+        synchronized (this) {
+          result = hoplogBloom;
+          if (result == null) {
+            hoplogBloom = result = new BloomFilterImpl();
+          }
+        }
+      }
+      return result;
+    }
+
+    @Override
+    public boolean isClosed() {
+      return closed.get();
+    }
+    
+    @Override
+    public void close() throws IOException {
+      close(true);
+    }
+    
+    public void close(boolean clearCache) throws IOException {
+      if (closed.compareAndSet(false, true)) {
+        if (logger.isDebugEnabled())
+          logger.debug("{}Closing reader", logPrefix);
+        reader.close(clearCache);
+      }
+    }
+
+    @Override
+    public long getEntryCount() {
+      return reader.getEntries();
+    }
+
+    public ICardinality getCardinalityEstimator() {
+      return estimator;
+    }
+
+    @Override
+    public long sizeEstimate() {
+      return getCardinalityEstimator().cardinality();
+    }
+
+    private void validate() throws IOException {
+      // check magic
+      byte[] magic = fileInfo.get(Meta.GEMFIRE_MAGIC.toBytes());
+      if (!Arrays.equals(magic, MAGIC)) {
+        throw new IOException(LocalizedStrings.Soplog_INVALID_MAGIC.toLocalizedString(Hex.toHex(magic)));
+      }
+      
+      // check version compatibility
+      byte[] ver = fileInfo.get(Meta.SORTED_OPLOG_VERSION.toBytes());
+      if (logger.isDebugEnabled()) {
+        logger.debug("{}Hoplog version is " + Hex.toHex(ver), logPrefix);
+      }
+      
+      if (!Arrays.equals(ver, HoplogVersion.V1.toBytes())) {
+        throw new IOException(LocalizedStrings.Soplog_UNRECOGNIZED_VERSION.toLocalizedString(Hex.toHex(ver)));
+      }
+    }
+    
+    private void loadComparators(DelegatingSerializedComparator comparator) throws IOException {
+      byte[] raw = fileInfo.get(Meta.COMPARATORS.toBytes());
+      assert raw != null;
+
+      DataInput in = new DataInputStream(new ByteArrayInputStream(raw));
+      comparator.setComparators(readComparators(in));
+    }
+    
+    private SerializedComparator[] readComparators(DataInput in) throws IOException {
+      try {
+        SerializedComparator[] comps = new SerializedComparator[in.readInt()];
+        assert comps.length > 0;
+        
+        for (int i = 0; i < comps.length; i++) {
+          comps[i] = (SerializedComparator) Class.forName(in.readUTF()).newInstance();
+          if (comps[i] instanceof DelegatingSerializedComparator) {
+            ((DelegatingSerializedComparator) comps[i]).setComparators(readComparators(in));
+          }
+        }
+        return comps;
+        
+      } catch (Exception e) {
+        throw new IOException(e);
+      }
+    }
+    
+    class BloomFilterImpl implements BloomFilter {
+      private final org.apache.hadoop.hbase.util.BloomFilter hfileBloom;
+
+      public BloomFilterImpl() throws IOException {
+        DataInput bin = reader.getGeneralBloomFilterMetadata();
+        // instantiate bloom filter if meta present in hfile
+        if (bin != null) {
+          hfileBloom = BloomFilterFactory.createFromMeta(bin, reader);
+          if (reader.getComparator() instanceof DelegatingSerializedComparator) {
+            loadComparators((DelegatingSerializedComparator) hfileBloom.getComparator());
+          }
+        } else {
+          hfileBloom = null;
+        }
+      }
+
+      @Override
+      public boolean mightContain(byte[] key) {
+        assert key != null;
+        return mightContain(key, 0, key.length);
+      }
+
+      @Override
+      public boolean mightContain(byte[] key, int keyOffset, int keyLength) {
+        assert key != null;
+        long start = stats.getBloom().begin();
+        boolean found = hfileBloom == null ? true : hfileBloom.contains(key, keyOffset, keyLength, null);
+        stats.getBloom().end(start);
+        return found;
+      }
+
+      @Override
+      public long getBloomSize() {
+        return hfileBloom == null ? 0 : hfileBloom.getByteSize();
+      }
+    }
+
+    // TODO change the KV types to ByteBuffer instead of byte[]
+    public final class HFileSortedIterator implements HoplogIterator<byte[], byte[]> {
+      private final HFileScanner scan;
+      
+      private final byte[] from;
+      private final boolean fromInclusive;
+      
+      private final byte[] to;
+      private final boolean toInclusive;
+      
+      private ByteBuffer prefetchedKey;
+      private ByteBuffer prefetchedValue;
+      private ByteBuffer currentKey;
+      private ByteBuffer currentValue;
+      
+      // variable linked to scan stats
+      ScanOperation scanStat;
+      private long scanStart;
+      
+      public HFileSortedIterator(HFileScanner scan, byte[] from, boolean fromInclusive, byte[] to, 
+          boolean toInclusive) throws IOException {
+        this.scan = scan;
+        this.from = from;
+        this.fromInclusive = fromInclusive;
+        this.to = to;
+        this.toInclusive = toInclusive;
+
+        scanStat = (stats == null) ? new SortedOplogStatistics("", "").new ScanOperation(
+            0, 0, 0, 0, 0, 0, 0) : stats.getScan();
+        scanStart = scanStat.begin();
+
+        if (scan == null) {
+          return;
+        }
+
+        assert from == null || to == null
+            || scan.getReader().getComparator().compare(from, to) <= 0;
+
+        initIterator();
+      }
+      
+      /*
+       * prefetches the first key and value from the file so that hasNext() works
+       */
+      private void initIterator() throws IOException {
+        long startNext = scanStat.beginIteration();
+        boolean scanSuccessful = true;
+        if (from == null) {
+          scanSuccessful = scan.seekTo();
+        } else {
+          int compare = scan.seekTo(from);
+          if (compare == 0 && !fromInclusive || compare > 0) {
+            // as from in exclusive and first key is same as from, skip the first key
+            scanSuccessful = scan.next();
+          }
+        }
+        
+        populateKV(startNext, scanSuccessful);
+      }
+      
+      @Override
+      public boolean hasNext() {
+        return prefetchedKey != null;
+      }
+
+      @Override
+      public byte[] next() throws IOException {
+        return byteBufferToArray(nextBB());
+      }
+
+      public ByteBuffer nextBB() throws IOException {
+        long startNext = scanStat.beginIteration();
+        if (prefetchedKey == null) {
+          throw new NoSuchElementException();
+        }
+
+        currentKey = prefetchedKey;
+        currentValue = prefetchedValue;
+
+        prefetchedKey = null;
+        prefetchedValue = null;
+
+        if (scan.next()) {
+          populateKV(startNext, true);
+        }
+        
+        return currentKey;
+      }
+
+      
+      private void populateKV(long nextStartTime, boolean scanSuccessful) {
+        if (!scanSuccessful) {
+          //end of file reached. collect stats and return
+          scanStat.endIteration(0, nextStartTime);
+          return;
+        }
+        
+        prefetchedKey = scan.getKey();
+        prefetchedValue = scan.getValue();
+        
+        if (to != null) {
+          // TODO Optimization? Perform int comparison instead of byte[]. Identify
+          // offset of key greater than two.
+          int compare = -1;
+          compare = scan.getReader().getComparator().compare
+              (prefetchedKey.array(), prefetchedKey.arrayOffset(), prefetchedKey.remaining(), to, 0, to.length);
+          if (compare > 0 || (compare == 0 && !toInclusive)) {
+            prefetchedKey = null;
+            prefetchedValue = null;
+            return;
+          }
+        }
+        
+        // account for bytes read and time spent
+        int byteCount = prefetchedKey.remaining() + prefetchedValue.remaining();
+        scanStat.endIteration(byteCount, nextStartTime);
+      }
+      
+
+      @Override
+      public byte[] getKey() {
+        return byteBufferToArray(getKeyBB());
+      }
+      public ByteBuffer getKeyBB() {
+        return currentKey;
+      }
+
+      @Override
+      public byte[] getValue() {
+        return byteBufferToArray(getValueBB());
+      }
+      public ByteBuffer getValueBB() {
+        return currentValue;
+      }
+
+      @Override
+      public void remove() {
+        throw new UnsupportedOperationException("Cannot delete a key-value from a hfile sorted oplog");
+      }
+      
+      @Override
+      public void close() {
+        scanStat.end(scanStart);
+      }
+    }
+  }
+  
+  public static byte[] byteBufferToArray(ByteBuffer bb) {
+    if (bb == null) {
+      return null;
+    }
+    
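+    // copy through duplicate() so the caller's buffer position and limit stay untouched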
+    byte[] tmp = new byte[bb.remaining()];
+    bb.duplicate().get(tmp);
+    return tmp;
+  }
+}


[19/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer.java
new file mode 100644
index 0000000..e8abb38
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizer.java
@@ -0,0 +1,2004 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.EnumMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ConcurrentSkipListSet;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import com.gemstone.gemfire.internal.hll.CardinalityMergeException;
+import com.gemstone.gemfire.internal.hll.HyperLogLog;
+import com.gemstone.gemfire.internal.hll.ICardinality;
+import com.gemstone.gemfire.internal.hll.MurmurHash;
+import org.apache.commons.lang.NotImplementedException;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.util.ShutdownHookManager;
+
+import com.gemstone.gemfire.InternalGemFireException;
+import com.gemstone.gemfire.cache.CacheClosedException;
+import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.internal.QueuedPersistentEvent;
+import com.gemstone.gemfire.cache.hdfs.internal.SortedHoplogPersistedEvent;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSCompactionManager.CompactionRequest;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog.HoplogReader;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog.HoplogReaderActivityListener;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog.HoplogWriter;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog.Meta;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.HoplogUtil;
+import com.gemstone.gemfire.internal.HeapDataOutputStream;
+import com.gemstone.gemfire.internal.cache.ForceReattemptException;
+import com.gemstone.gemfire.internal.cache.PrimaryBucketException;
+import com.gemstone.gemfire.internal.cache.execute.BucketMovedException;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics.IOOperation;
+import com.gemstone.gemfire.internal.cache.persistence.soplog.TrackedReference;
+import com.gemstone.gemfire.internal.concurrent.ConcurrentHashSet;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+/**
+ * Manages sorted oplog files for a bucket. One instance exists per bucket of
+ * each partitioned region.
+ */
+public class HdfsSortedOplogOrganizer extends AbstractHoplogOrganizer<SortedHoplogPersistedEvent> {
+  public static final int AVG_NUM_KEYS_PER_INDEX_BLOCK = 200;
+  
+  // all valid sorted hoplogs will follow the following name pattern
+  public static final String SORTED_HOPLOG_REGEX = HOPLOG_NAME_REGEX + "("
+      + FLUSH_HOPLOG_EXTENSION + "|" + MINOR_HOPLOG_EXTENSION + "|"
+      + MAJOR_HOPLOG_EXTENSION + ")";
+  public static final Pattern SORTED_HOPLOG_PATTERN = Pattern.compile(SORTED_HOPLOG_REGEX);
+  
+  //Amount of time before deleting old temporary files
+  final long TMP_FILE_EXPIRATION_TIME_MS = Long.getLong(HoplogConfig.TMP_FILE_EXPIRATION, HoplogConfig.TMP_FILE_EXPIRATION_DEFAULT);
+  
+  static float RATIO = HoplogConfig.COMPACTION_FILE_RATIO_DEFAULT;
+
+  // Compacter for this bucket
+  private Compactor compacter;
+    
+  private final HoplogReadersController hoplogReadersController;
+  private AtomicLong previousCleanupTimestamp = new AtomicLong(Long.MIN_VALUE);
+
+  /**
+   * The default HLL constant; gives an accuracy of about 3.25%.
+   * Public only for testing the upgrade from 1.3 to 1.4.
+   */
+  public static double HLL_CONSTANT = 0.03;
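+  // Assuming the bundled stream-lib style HyperLogLog, an rsd of 0.03 maps to
+  // roughly 2^10 = 1024 registers; the classic HLL error bound of about
+  // 1.04/sqrt(m) then yields the ~3.25% accuracy quoted above.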
+  /**
+   * This estimator keeps track of this bucket's entry count. This value is
+   * affected by flush and compaction cycles
+   */
+  private volatile ICardinality bucketSize = new HyperLogLog(HLL_CONSTANT);
+  //A set of tmp files that existed when this bucket organizer was originally
+  //created. These may still be open by the old primary, or they may be
+  //abandoned files.
+  private LinkedList<FileStatus> oldTmpFiles;
+
+  private ConcurrentMap<Hoplog, Boolean> tmpFiles = new ConcurrentHashMap<Hoplog, Boolean>();
+
+  protected volatile boolean organizerClosed = false;
+
+  /**
+   * For the 1.4 release we are changing the HLL_CONSTANT which will make the
+   * old persisted HLLs incompatible with the new HLLs. To fix this we will
+   * force a major compaction when the system starts up so that we will only
+   * have new HLLs in the system (see bug 51403)
+   */
+  private boolean startCompactionOnStartup = false;
+
+  /**
+   * @param region
+   *          Region manager instance. Instances of hdfs listener instance,
+   *          stats collector, file system, etc are shared by all buckets of a
+   *          region and provided by region manager instance
+   * @param bucketId bucket id to be managed by this organizer
+   * @throws IOException
+   */
+  public HdfsSortedOplogOrganizer(HdfsRegionManager region, int bucketId) throws IOException{
+    super(region, bucketId);
+    
+    String val = System.getProperty(HoplogConfig.COMPACTION_FILE_RATIO);
+    try {
+      RATIO = Float.parseFloat(val);
+    } catch (Exception e) {
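+      // property not set or not parsable; keep the default compaction file ratio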
+    }
+
+    hoplogReadersController = new HoplogReadersController();
+    
+    // initialize with all the files in the directory
+    List<Hoplog> hoplogs = identifyAndLoadSortedOplogs(true);
+    if (logger.isDebugEnabled()) {
+      logger.debug("{}Initializing bucket with existing hoplogs, count = " + hoplogs.size(), logPrefix);
+    }
+    for (Hoplog hoplog : hoplogs) {
+      addSortedOplog(hoplog, false, true);
+    }
+
+    // initialize sequence to the current maximum
+    sequence = new AtomicInteger(findMaxSequenceNumber(hoplogs));
+    
+    initOldTmpFiles();
+    
+    FileSystem fs = store.getFileSystem();
+    Path cleanUpIntervalPath = new Path(store.getHomeDir(), HoplogConfig.CLEAN_UP_INTERVAL_FILE_NAME); 
+    if (!fs.exists(cleanUpIntervalPath)) {
+      long intervalDurationMillis = store.getPurgeInterval() * 60 * 1000;
+      HoplogUtil.exposeCleanupIntervalMillis(fs, cleanUpIntervalPath, intervalDurationMillis);
+    }
+
+    if (startCompactionOnStartup) {
+      forceCompactionOnVersionUpgrade();
+      if (logger.isInfoEnabled()) {
+        logger.info(LocalizedStrings.HOPLOG_MAJOR_COMPACTION_SCHEDULED_FOR_BETTER_ESTIMATE);
+      }
+    }
+  }
+
+  /**
+   * Iterates over the input events and persists them in a new sorted oplog. This operation is
+   * synchronous and blocks the thread.
+   */
+  @Override
+  public void flush(Iterator<? extends QueuedPersistentEvent> iterator, final int count)
+      throws IOException, ForceReattemptException {
+    assert iterator != null;
+
+    if (logger.isDebugEnabled())
+      logger.debug("{}Initializing flush operation", logPrefix);
+    
+    final Hoplog so = getTmpSortedOplog(null, FLUSH_HOPLOG_EXTENSION);
+    HoplogWriter writer = null;
+    ICardinality localHLL = new HyperLogLog(HLL_CONSTANT);
+    
+    // variables for updating stats
+    long start = stats.getFlush().begin();
+    int byteCount = 0;
+    
+    try {
+      /**MergeGemXDHDFSToGFE changed the following statement as the code of HeapDataOutputStream is not merged */
+      //HeapDataOutputStream out = new HeapDataOutputStream();
+      
+      try {
+        writer = this.store.getSingletonWriter().runSerially(new Callable<Hoplog.HoplogWriter>() {
+          @Override
+          public HoplogWriter call() throws Exception {
+            return so.createWriter(count);
+          }
+        });
+      } catch (Exception e) {
+        if (e instanceof IOException) {
+          throw (IOException)e;
+        }
+        throw new IOException(e);
+      }
+
+      while (iterator.hasNext() && !this.organizerClosed) {
+        HeapDataOutputStream out = new HeapDataOutputStream(1024, null);
+        
+        QueuedPersistentEvent item = iterator.next();
+        item.toHoplogEventBytes(out);
+        byte[] valueBytes = out.toByteArray();
+        writer.append(item.getRawKey(), valueBytes);
+        
+        // add key length and value length to stats byte counter
+        byteCount += (item.getRawKey().length + valueBytes.length);
+
+        // increment size only if entry is not deleted
+        if (!isDeletedEntry(valueBytes, 0)) {
+          int hash = MurmurHash.hash(item.getRawKey());
+          localHLL.offerHashed(hash);
+        }
+        /**MergeGemXDHDFSToGFE how to clear for reuse. Leaving it for Darrel to merge this change*/
+        //out.clearForReuse();
+      }
+      if (organizerClosed)
+        throw new BucketMovedException("The current bucket is moved BucketID: "+  
+            this.bucketId + " Region name: " +  this.regionManager.getRegion().getName());
+      
+      // append completed. provide cardinality and close writer
+      writer.close(buildMetaData(localHLL));
+      writer = null;
+    } catch (IOException e) {
+      stats.getFlush().error(start);
+      try {
+        e = handleWriteHdfsIOError(writer, so, e);
+      } finally {
+        //Set the writer to null because handleWriteHDFSIOError has
+        //already closed the writer.
+        writer = null;
+      }
+      throw e;
+    } catch (BucketMovedException e) {
+      stats.getFlush().error(start);
+      deleteTmpFile(writer, so);
+      writer = null;
+      throw e;
+    } finally {
+      if (writer != null) {
+        writer.close();
+      }
+    }
+
+    try{
+      
+      // ping secondaries before making the file a legitimate file to ensure
+      // that in case of split brain, no other VM has taken over as primary. #50110.
+      pingSecondaries();
+      
+      // rename file and check if renaming was successful
+      synchronized (changePrimarylockObject) {
+        if (!organizerClosed)
+          makeLegitimate(so);
+        else 
+          throw new BucketMovedException("The current bucket is moved BucketID: "+  
+              this.bucketId + " Region name: " +  this.regionManager.getRegion().getName());
+      }
+      try {
+        so.getSize();
+      } catch (IllegalStateException e) {
+        throw new IOException("Failed to rename hoplog file:" + so.getFileName());
+      }
+      
+      //Disabling this assertion due to bug 49740
+      // check to make sure the sequence number is correct
+//      if (ENABLE_INTEGRITY_CHECKS) {
+//        Assert.assertTrue(getSequenceNumber(so) == findMaxSequenceNumber(identifyAndLoadSortedOplogs(false)), 
+//            "Invalid sequence number detected for " + so.getFileName());
+//      }
+      
+      // record the file for future maintenance and reads
+      addSortedOplog(so, false, true);
+      stats.getFlush().end(byteCount, start);
+      incrementDiskUsage(so.getSize());
+    } catch (BucketMovedException e) {
+      stats.getFlush().error(start);
+      deleteTmpFile(writer, so);
+      writer = null;
+      throw e;
+    } catch (IOException e) {
+      stats.getFlush().error(start);
+      logger.warn(LocalizedStrings.HOPLOG_FLUSH_OPERATION_FAILED, e);
+      throw e;
+    }
+
+    submitCompactionRequests();
+  }
+
+
+  /**
+   * store cardinality information in metadata
+   * @param localHLL the hll estimate for this hoplog only
+   */
+  private EnumMap<Meta, byte[]> buildMetaData(ICardinality localHLL) throws IOException {
+    EnumMap<Meta, byte[]> map = new EnumMap<Hoplog.Meta, byte[]>(Meta.class);
+    map.put(Meta.LOCAL_CARDINALITY_ESTIMATE_V2, localHLL.getBytes());
+    return map;
+  }
+
+  private void submitCompactionRequests() throws IOException {
+    CompactionRequest req;
+    
+    // determine if a major compaction is needed and create a compaction request
+    // with compaction manager
+    if (store.getMajorCompaction()) {
+      if (isMajorCompactionNeeded()) {
+        req = new CompactionRequest(regionFolder, bucketId, getCompactor(), true);
+        HDFSCompactionManager.getInstance(store).submitRequest(req);
+      }
+    }
+    
+    // submit a minor compaction task. It will be ignored if there is no work to
+    // be done.
+    if (store.getMinorCompaction()) {
+      req = new CompactionRequest(regionFolder, bucketId, getCompactor(), false);
+      HDFSCompactionManager.getInstance(store).submitRequest(req);
+    }
+  }
+
+  /**
+   * @return true if the oldest hoplog was created at least one major compaction interval ago
+   */
+  private boolean isMajorCompactionNeeded() throws IOException {
+    // major compaction interval in milliseconds
+    
+    long majorCInterval = ((long)store.getMajorCompactionInterval()) * 60 * 1000;
+
+    Hoplog oplog = hoplogReadersController.getOldestHoplog();
+    if (oplog == null) {
+      return false;
+    }
+    
+    long oldestFileTime = oplog.getModificationTimeStamp();
+    long now = System.currentTimeMillis();
+    if (logger.isDebugEnabled()) {
+      logger.debug("{}Checking oldest hop " + oplog.getFileName()
+          + " for majorCompactionInterval=" + majorCInterval
+          + " + now=" + now, logPrefix);
+    }
+    if (oldestFileTime > 0l && oldestFileTime < (now - majorCInterval)) {
+      return true;
+    }
+    return false;
+  }
+
+  @Override
+  public SortedHoplogPersistedEvent read(byte[] key) throws IOException {
+    long startTime = stats.getRead().begin();
+    String user = logger.isDebugEnabled() ? "Read" : null;
+    
+    // collect snapshot of hoplogs
+    List<TrackedReference<Hoplog>> hoplogs = null;
+    hoplogs = hoplogReadersController.getTrackedSortedOplogList(user);
+    try {
+      // search for the key in order starting with the youngest oplog
+      for (TrackedReference<Hoplog> hoplog : hoplogs) {
+        HoplogReader reader = hoplog.get().getReader();
+        byte[] val = reader.read(key);
+        if (val != null) {
+          // value found in a younger hoplog. stop iteration
+          SortedHoplogPersistedEvent eventObj = deserializeValue(val);
+          stats.getRead().end(val.length, startTime);
+          return eventObj;
+        }
+      }
+    } catch (IllegalArgumentException e) {
+      if (IOException.class.isAssignableFrom(e.getCause().getClass())) {
+        throw handleIOError((IOException) e.getCause());
+      } else {
+        throw e;
+      }
+    } catch (IOException e) {
+      throw handleIOError(e);
+    } catch (HDFSIOException e) {
+      throw handleIOError(e);
+    } finally {
+      hoplogReadersController.releaseHoplogs(hoplogs, user);
+    }
+    
+    stats.getRead().end(0, startTime);
+    return null;
+  }
+
+  protected IOException handleIOError(IOException e) {
+    // expose the error wrapped inside remote exception
+    if (e instanceof RemoteException) {
+      return ((RemoteException) e).unwrapRemoteException();
+    } 
+    
+    checkForSafeError(e);
+    
+    // it is not a safe error. let the caller handle it
+    return e;
+  }
+  
+  protected HDFSIOException handleIOError(HDFSIOException e) {
+    checkForSafeError(e);
+    return e;
+  }
+
+  protected void checkForSafeError(Exception e) {
+    boolean safeError = ShutdownHookManager.get().isShutdownInProgress();
+    if (safeError) {
+      // IOException because of closed file system. This happens when member is
+      // shutting down
+      if (logger.isDebugEnabled())
+        logger.debug("IO error caused by filesystem shutdown", e);
+      throw new CacheClosedException("IO error caused by filesystem shutdown", e);
+    } 
+
+    if(isClosed()) {
+      //If the hoplog organizer is closed, throw an exception to indicate the 
+      //caller should retry on the new primary.
+      throw new PrimaryBucketException(e);
+    }
+  }
+  
+  protected IOException handleWriteHdfsIOError(HoplogWriter writer, Hoplog so, IOException e)
+      throws IOException {
+    if (logger.isDebugEnabled()) {
+      logger.debug("{}Handle write error:" + so, logPrefix);
+    }
+    
+    closeWriter(writer);
+    // add to the janitor queue
+    tmpFiles.put(so, Boolean.TRUE);
+
+    return handleIOError(e);
+  }
+
+  private void deleteTmpFile(HoplogWriter writer, Hoplog so) {
+    closeWriter(writer);
+    
+    // delete the temporary hoplog
+    try {
+      if (so != null) {
+        so.delete();
+      }
+    } catch (IOException e1) {
+      logger.info(e1);
+    }
+  }
+
+  private void closeWriter(HoplogWriter writer) {
+    if (writer != null) {
+      // close writer before deleting it
+      try {
+        writer.close();
+      } catch (Throwable e1) {
+        // error to close hoplog will happen if no connections to datanode are
+        // available. Try to delete the file on namenode
+        if(!isClosed()) {
+          logger.info(e1);
+        }
+      }
+    }
+  }
+
+  /**
+   * Closes the hoplog and suppresses IO errors during reader close. Suppressing IO
+   * errors when the organizer is closing or a hoplog becomes inactive lets the
+   * system continue freeing other resources. It could potentially lead to socket
+   * leaks though.
+   */
+  private void closeReaderAndSuppressError(Hoplog hoplog, boolean clearCache) {
+    try {
+      hoplog.close();
+    } catch (IOException e) {
+      // expose the error wrapped inside remote exception
+      if (e instanceof RemoteException) {
+        e = ((RemoteException) e).unwrapRemoteException();
+      } 
+      logger.info(e);
+    }
+  }
+
+  @Override
+  public BucketIterator scan() throws IOException {
+    String user = logger.isDebugEnabled() ? "Scan" : null;
+    List<TrackedReference<Hoplog>> hoplogs = null;
+    BucketIterator iter = null;
+    try {
+      hoplogs = hoplogReadersController.getTrackedSortedOplogList(user);
+      iter = new BucketIterator(hoplogs);
+      return iter;
+    } finally {
+      // Normally the hoplogs will be released when the iterator is closed. The
+      // hoplogs must be released only if creating the iterator has failed.
+      if (iter == null) {
+        hoplogReadersController.releaseHoplogs(hoplogs, user);
+      }
+    }
+  }
+
+  @Override
+  public BucketIterator scan(byte[] from, byte[] to) throws IOException {
+    throw new NotImplementedException();
+  }
+
+  @Override
+  public BucketIterator scan(byte[] from, boolean fromInclusive, byte[] to, boolean toInclusive) throws IOException {
+    throw new NotImplementedException();
+  }
+
+  @Override
+  public HoplogIterator<byte[], SortedHoplogPersistedEvent> scan(
+      long startOffset, long length) throws IOException {
+    throw new UnsupportedOperationException("Not supported for " + this.getClass().getSimpleName());
+  }
+
+  @Override
+  public void close() throws IOException {
+    super.close();
+    
+    synchronized (changePrimarylockObject) {
+      organizerClosed = true;
+    }
+    //Suspend compaction
+    getCompactor().suspend();
+    
+    //Close the readers controller.
+    hoplogReadersController.close();
+    
+    previousCleanupTimestamp.set(Long.MIN_VALUE);
+    
+  }
+
+  /**
+   * This method is invoked on the secondary node, which needs to update its data
+   * structures.
+   */
+  @Override
+  public void hoplogCreated(String region, int bucketId, Hoplog... oplogs)
+      throws IOException {
+    for (Hoplog oplog : oplogs) {
+      addSortedOplog(oplog, false, true);
+    }
+  }
+
+  @Override
+  public long sizeEstimate() {
+    return this.bucketSize.cardinality();
+  }
+
+  private void addSortedOplog(Hoplog so, boolean notify, boolean addsToBucketSize)
+  throws IOException {
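+    // Register the hoplog with the readers controller first; if requested, track it
+    // briefly so it cannot be closed while its cardinality estimate is merged into
+    // the bucket-level HLL.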
+    if (!hoplogReadersController.addSortedOplog(so)) {
+      so.close();
+      throw new InternalGemFireException("Failed to add " + so);
+    }
+
+    String user = logger.isDebugEnabled() ? "Add" : null;
+    if (addsToBucketSize) {
+      TrackedReference<Hoplog> ref = null;
+      try {
+        ref = hoplogReadersController.trackHoplog(so, user);
+        synchronized (bucketSize) {
+          ICardinality localHLL = ref.get().getEntryCountEstimate();
+          if (localHLL != null) {
+            bucketSize = mergeHLL(bucketSize, localHLL);
+          }
+        }
+      } finally {
+        if (ref != null) {
+          hoplogReadersController.releaseHoplog(ref, user);
+        }
+      }
+    }
+
+    if (notify && listener != null) {
+      listener.hoplogCreated(regionFolder, bucketId, so);
+    }
+  }
+
+  private void reEstimateBucketSize() throws IOException {
+    ICardinality global = null;
+    String user = logger.isDebugEnabled() ? "HLL" : null;
+    List<TrackedReference<Hoplog>> hoplogs = null;
+    try {
+      hoplogs = hoplogReadersController.getTrackedSortedOplogList(user);
+      global = new HyperLogLog(HLL_CONSTANT);
+      for (TrackedReference<Hoplog> hop : hoplogs) {
+        global = mergeHLL(global, hop.get().getEntryCountEstimate());
+      }
+    } finally {
+      hoplogReadersController.releaseHoplogs(hoplogs, user);
+    }
+    bucketSize = global;
+  }
+
+  protected ICardinality mergeHLL(ICardinality global, ICardinality local)
+  /*throws IOException*/ {
+    try {
+      return global.merge(local);
+    } catch (CardinalityMergeException e) {
+      // uncomment this after the 1.4 release
+      //throw new InternalGemFireException(e.getLocalizedMessage(), e);
+      startCompactionOnStartup = true;
+      return global;
+    }
+  }
+
+  private void removeSortedOplog(TrackedReference<Hoplog> so, boolean notify) throws IOException {
+    hoplogReadersController.removeSortedOplog(so);
+    
+    // release lock before notifying listeners
+    if (notify && listener != null) {
+      listener.hoplogDeleted(regionFolder, bucketId, so.get());
+    }
+  }
+  
+  private void notifyCompactionListeners(boolean isMajor) {
+    listener.compactionCompleted(regionFolder, bucketId, isMajor);
+  }
+  
+  /**
+   * This method is invoked on the secondary node, which needs to update its data
+   * structures.
+   * @throws IOException 
+   */
+  @Override
+  public void hoplogDeleted(String region, int bucketId, Hoplog... oplogs) throws IOException {
+    throw new NotImplementedException();
+  }
+
+  @Override
+  public synchronized Compactor getCompactor() {
+    if (compacter == null) {
+      compacter = new HoplogCompactor();
+    }
+    return compacter;
+  }
+
+  @Override
+  protected Hoplog getHoplog(Path hoplogPath) throws IOException {
+    Hoplog so = new HFileSortedOplog(store, hoplogPath, store.getBlockCache(), stats, store.getStats());
+    return so;
+  }
+
+  /**
+   * Locks the sorted oplog collection, removes the oplogs and renames them for later deletion.
+   * @throws IOException 
+   */
+  void markSortedOplogForDeletion(List<TrackedReference<Hoplog>> targets, boolean notify) throws IOException {
+    for (int i = targets.size(); i > 0; i--) {
+      TrackedReference<Hoplog> so = targets.get(i - 1);
+      removeSortedOplog(so, true);
+      if (!store.getFileSystem().exists(new Path(bucketPath, so.get().getFileName()))) {
+        // the hoplog does not even exist on file system. Skip remaining steps
+        continue;
+      }
+      addExpiryMarkerForAFile(so.get());
+    }
+  }
+  
+  /**
+   * Deletes expired hoplogs and expiry markers from the file system. Calculates
+   * a target timestamp based on the cleanup interval, then collects the list of
+   * target hoplogs. It also updates the disk usage state.
+   * 
+   * @return number of files deleted
+   */
+   synchronized int initiateCleanup() throws IOException {
+    int conf = store.getPurgeInterval();
+    // minutes to milliseconds
+    long intervalDurationMillis = conf * 60 * 1000;
+    // Any expired hoplog with timestamp less than targetTS is a delete
+    // candidate.
+    long targetTS = System.currentTimeMillis() - intervalDurationMillis;
+    if (logger.isDebugEnabled()) {
+      logger.debug("Target timestamp for expired hoplog deletion " + targetTS, logPrefix);
+    }
+    // avoid too frequent cleanup invocations. Exit cleanup invocation if the
+    // previous cleanup was executed within 10% range of cleanup interval
+    if (previousCleanupTimestamp.get() > targetTS
+        && (previousCleanupTimestamp.get() - targetTS) < (intervalDurationMillis / 10)) {
+      if (logger.isDebugEnabled()) {
+        logger.debug("Skip cleanup, previous " + previousCleanupTimestamp.get(), logPrefix);
+      }
+      return 0;
+    }
+
+    List<FileStatus> targets = getOptimizationTargets(targetTS);
+    return deleteExpiredFiles(targets);
+  }
+
+  protected int deleteExpiredFiles(List<FileStatus> targets) throws IOException {
+    if (targets == null) {
+      return 0;
+    }
+
+    for (FileStatus file : targets) {
+      if (logger.isDebugEnabled()) {
+        logger.debug("{}Deleting file: " + file.getPath(), logPrefix);
+      }
+      store.getFileSystem().delete(file.getPath(), false);
+      
+      if (isClosed()) {
+        if (logger.isDebugEnabled())
+          logger.debug("{}Expiry file cleanup interupted by bucket close", logPrefix);
+        return 0;
+      }
+      incrementDiskUsage(-1 * file.getLen());
+    }
+
+    previousCleanupTimestamp.set(System.currentTimeMillis());
+    return targets.size();
+  }
+
+  /**
+   * @param ts
+   *          target timestamp
+   * @return list of hoplogs whose expiry markers were created before the target
+   *         timestamp, along with the expiry markers themselves.
+   * @throws IOException
+   */
+  protected List<FileStatus> getOptimizationTargets(long ts) throws IOException {
+    if (logger.isDebugEnabled()) {
+      logger.debug("{}Identifying optimization targets " + ts, logPrefix);
+    }
+
+    List<FileStatus> deleteTargets = new ArrayList<FileStatus>();
+    FileStatus[] markers = getExpiryMarkers();
+    if (markers != null) {
+      for (FileStatus marker : markers) {
+        String name = truncateExpiryExtension(marker.getPath().getName());
+        long timestamp = marker.getModificationTime();
+
+        // expired minor compacted files are not being used anywhere. These can
+        // be removed immediately. All the other expired files should be removed
+        // when the files have aged
+        boolean isTarget = false;
+        
+        if (name.endsWith(MINOR_HOPLOG_EXTENSION)) {
+          isTarget = true;
+        } else if (timestamp < ts && name.endsWith(FLUSH_HOPLOG_EXTENSION)) {
+          isTarget = true;
+        } else if (timestamp < ts && name.endsWith(MAJOR_HOPLOG_EXTENSION)) {
+          long majorCInterval = ((long)store.getMajorCompactionInterval()) * 60 * 1000;
+          if (timestamp < (System.currentTimeMillis() - majorCInterval)) {
+            isTarget = true;
+          }
+        }
+        if (!isTarget) {
+          continue;
+        }
+        
+        // if the file is still being read, do not delete or rename it
+        TrackedReference<Hoplog> used = hoplogReadersController.getInactiveHoplog(name);
+        if (used != null) {
+          if (used.inUse() && logger.isDebugEnabled()) {
+            logger.debug("{}Optimizer: found active expired hoplog:" + name, logPrefix);
+          } else if (logger.isDebugEnabled()) {
+            logger.debug("{}Optimizer: found open expired hoplog:" + name, logPrefix);
+          }
+          continue;
+        }
+        
+        if (logger.isDebugEnabled()) {
+          logger.debug("{}Delete target identified " + marker.getPath(), logPrefix);
+        }
+        
+        deleteTargets.add(marker);
+        Path hoplogPath = new Path(bucketPath, name);
+        if (store.getFileSystem().exists(hoplogPath)) {
+          FileStatus hoplog = store.getFileSystem().getFileStatus(hoplogPath);
+          deleteTargets.add(hoplog);
+        }
+      }
+    }
+    return deleteTargets;
+  }
+
+  /**
+   * Returns a list of hoplogs present in the bucket's directory; expected to be called during
+   * hoplog set initialization.
+   */
+  List<Hoplog> identifyAndLoadSortedOplogs(boolean countSize) throws IOException {
+    FileSystem fs = store.getFileSystem();
+    if (! fs.exists(bucketPath)) {
+      return new ArrayList<Hoplog>();
+    }
+    
+    FileStatus allFiles[] = fs.listStatus(bucketPath);
+    ArrayList<FileStatus> validFiles = new ArrayList<FileStatus>();
+    for (FileStatus file : allFiles) {
+      // All hoplog files contribute to disk usage
+      Matcher matcher = HOPLOG_NAME_PATTERN.matcher(file.getPath().getName());
+      if (! matcher.matches()) {
+        // not a hoplog
+        continue;
+      }
+      
+      // account for the disk used by this file
+      if (countSize) {
+        incrementDiskUsage(file.getLen());
+      }
+      
+      // All valid hoplog files must match the regex
+      matcher = SORTED_HOPLOG_PATTERN.matcher(file.getPath().getName());
+      if (matcher.matches()) {
+        validFiles.add(file);
+      }
+    }
+    
+    FileStatus[] markers = getExpiryMarkers();
+    FileStatus[] validHoplogs = filterValidHoplogs(
+        validFiles.toArray(new FileStatus[validFiles.size()]), markers);
+
+    ArrayList<Hoplog> results = new ArrayList<Hoplog>();
+    if (validHoplogs == null || validHoplogs.length == 0) {
+      return results;
+    }
+
+    for (int i = 0; i < validHoplogs.length; i++) {
+      // Skip directories
+      if (validHoplogs[i].isDirectory()) {
+        continue;
+      }
+
+      final Path p = validHoplogs[i].getPath();
+      // skip empty file
+      if (fs.getFileStatus(p).getLen() <= 0) {
+        continue;
+      }
+
+      Hoplog hoplog = new HFileSortedOplog(store, p, store.getBlockCache(), stats, store.getStats());
+      results.add(hoplog);
+    }
+
+    return results;
+  }
+
+  private static int findMaxSequenceNumber(List<Hoplog> hoplogs) throws IOException {
+    int maxSeq = 0;
+    for (Hoplog hoplog : hoplogs) {
+      maxSeq = Math.max(maxSeq, getSequenceNumber(hoplog));
+    }
+    return maxSeq;
+  }
+
+  /**
+   * @return the sequence number associated with a hoplog file
+   */
+  static int getSequenceNumber(Hoplog hoplog) {
+    Matcher matcher = SORTED_HOPLOG_PATTERN.matcher(hoplog.getFileName());
+    boolean matched = matcher.find();
+    assert matched;
+    return Integer.valueOf(matcher.group(3));
+  }
+
+  protected FileStatus[] getExpiryMarkers() throws IOException {
+    FileSystem fs = store.getFileSystem();
+    if (hoplogReadersController.hoplogs == null
+        || hoplogReadersController.hoplogs.size() == 0) {
+      // there are no hoplogs in the system. Maybe the bucket does not exist
+      // at all.
+      if (!fs.exists(bucketPath)) {
+        if (logger.isDebugEnabled())
+          logger.debug("{}This bucket is unused, skipping expired hoplog check", logPrefix);
+        return null;
+      }
+    }
+    
+    FileStatus files[] = FSUtils.listStatus(fs, bucketPath, new PathFilter() {
+      @Override
+      public boolean accept(Path file) {
+        // All expired hoplogs end with the expiry extension and must match the valid file regex
+        String fileName = file.getName();
+        if (! fileName.endsWith(EXPIRED_HOPLOG_EXTENSION)) {
+          return false;
+        }
+        fileName = truncateExpiryExtension(fileName);
+        Matcher matcher = SORTED_HOPLOG_PATTERN.matcher(fileName);
+        return matcher.find();
+      }
+
+    });
+    return files;
+  }
+  
+  @Override
+  public void clear() throws IOException {
+    //Suspend compaction while we are doing the clear. This
+    //aborts the currently in progress compaction.
+    getCompactor().suspend();
+    
+    // while compaction is suspended, clear method marks hoplogs for deletion
+    // only. Files will be removed by cleanup thread after active gets and
+    // iterations are completed
+    String user = logger.isDebugEnabled() ? "clear" : null;
+    List<TrackedReference<Hoplog>> oplogs = null;
+    try {
+      oplogs = hoplogReadersController.getTrackedSortedOplogList(user);
+      markSortedOplogForDeletion(oplogs, true);
+    } finally {
+      if (oplogs != null) {
+        hoplogReadersController.releaseHoplogs(oplogs, user);
+      }
+      //Resume compaction
+      getCompactor().resume();
+    }
+  }
+
+  /**
+   * Performs the following activities
+   * <UL>
+   * <LI>Submits compaction requests as needed
+   * <LI>Deletes tmp files which the system failed to removed earlier
+   */
+  @Override
+  public void performMaintenance() throws IOException {
+    long startTime = System.currentTimeMillis();
+    
+    if (logger.isDebugEnabled())
+      logger.debug("{}Executing bucket maintenance", logPrefix);
+
+    submitCompactionRequests();
+    hoplogReadersController.closeInactiveHoplogs();
+    initiateCleanup();
+    
+    cleanupTmpFiles();
+    
+    if (logger.isDebugEnabled()) {
+      logger.debug("{}Time spent in bucket maintenance (in ms): "
+          + (System.currentTimeMillis() - startTime), logPrefix);
+    }
+  }
+
+  @Override
+  public Future<CompactionStatus> forceCompaction(boolean isMajor) {
+    CompactionRequest request = new CompactionRequest(regionFolder, bucketId,
+        getCompactor(), isMajor, true/*force*/);
+    return HDFSCompactionManager.getInstance(store).submitRequest(request);
+  }
+
+  private Future<CompactionStatus> forceCompactionOnVersionUpgrade() {
+    CompactionRequest request = new CompactionRequest(regionFolder, bucketId, getCompactor(), true, true, true);
+    return HDFSCompactionManager.getInstance(store).submitRequest(request);
+  }
+
+  @Override
+  public long getLastMajorCompactionTimestamp() {
+    long ts = 0;
+    String user = logger.isDebugEnabled() ? "StoredProc" : null;
+    List<TrackedReference<Hoplog>> hoplogs = hoplogReadersController.getTrackedSortedOplogList(user);
+    try {
+      for (TrackedReference<Hoplog> hoplog : hoplogs) {
+        String fileName = hoplog.get().getFileName();
+        Matcher file = HOPLOG_NAME_PATTERN.matcher(fileName);
+        if (file.matches() && fileName.endsWith(MAJOR_HOPLOG_EXTENSION)) {
+          ts = getHoplogTimestamp(file);
+          break;
+        }
+      }
+    } finally {
+      hoplogReadersController.releaseHoplogs(hoplogs, user);
+    }
+    if (logger.isDebugEnabled()) {
+      logger.debug("{}HDFS: for bucket:"+getRegionBucketStr()+" returning last major compaction timestamp "+ts, logPrefix);
+    }
+    return ts;
+  }
+
+  private void initOldTmpFiles() throws IOException {
+    FileSystem fs = store.getFileSystem();
+    if (! fs.exists(bucketPath)) {
+      return;
+    }
+    
+    oldTmpFiles = new LinkedList<FileStatus>(Arrays.asList(fs.listStatus(bucketPath, new TmpFilePathFilter())));
+  }
+  
+  private void cleanupTmpFiles() throws IOException {
+    if(oldTmpFiles == null && tmpFiles == null) {
+      return;
+    }
+    
+    if (oldTmpFiles != null) {
+      FileSystem fs = store.getFileSystem();
+      long now = System.currentTimeMillis();
+      for (Iterator<FileStatus> itr = oldTmpFiles.iterator(); itr.hasNext();) {
+        FileStatus file = itr.next();
+        // delete only the tmp files that are older than the expiration window
+        if (file.getModificationTime() + TMP_FILE_EXPIRATION_TIME_MS < now) {
+          if (logger.isDebugEnabled()) {
+            logger.debug("{}Deleting temporary file:" + file.getPath(), logPrefix);
+          }
+          fs.delete(file.getPath(), false);
+          itr.remove();
+        }
+      }
+    }
+    if (tmpFiles != null) {
+      for (Hoplog so : tmpFiles.keySet()) {
+        if (logger.isDebugEnabled()) {
+          logger.debug("{}Deleting temporary file:" + so.getFileName(), logPrefix);
+        }
+        deleteTmpFile(null, so);
+      }
+    }
+  }
+  
+  /**
+   * Executes tiered compaction of hoplog files. One instance of the compactor per bucket will exist
+   */
+  protected class HoplogCompactor implements Compactor {
+    private volatile boolean suspend = false;
+    
+    // the following boolean will be used to synchronize minor compaction
+    private AtomicBoolean isMinorCompactionActive = new AtomicBoolean(false);
+    // the following boolean will be used to synchronize major compaction
+    private AtomicBoolean isMajorCompactionActive = new AtomicBoolean(false);
+    // the following integer tracks the max sequence number amongst the
+    // target files being major compacted. This value will be used to prevent
+    // concurrent MajorC and minorC. MinorC is preempted in case of an
+    // overlap. This object is also used as a lock. The lock is acquired before
+    // identifying compaction targets and before marking targets for expiry
+    final AtomicInteger maxMajorCSeqNum = new AtomicInteger(-1);
+
+    @Override
+    public void suspend() {
+      long wait = Long.getLong(HoplogConfig.SUSPEND_MAX_WAIT_MS, HoplogConfig.SUSPEND_MAX_WAIT_MS_DEFAULT);
+      this.suspend=true;
+      //this forces the compact method to finish.
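+      // Poll every 50 ms until the in-progress major or minor compaction
+      // finishes, or until the configured wait budget is exhausted.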
+      while (isMajorCompactionActive.get() || isMinorCompactionActive.get()) {
+        if (wait < 0) {
+          wait = Long.getLong(HoplogConfig.SUSPEND_MAX_WAIT_MS, HoplogConfig.SUSPEND_MAX_WAIT_MS_DEFAULT);
+          String act = isMajorCompactionActive.get() ? "MajorC" : "MinorC";
+          logger.warn(LocalizedMessage.create(LocalizedStrings.HOPLOG_SUSPEND_OF_0_FAILED_IN_1, new Object[] {act, wait}));
+          break;
+        }
+        try {
+          TimeUnit.MILLISECONDS.sleep(50);
+          wait -= 50;
+        } catch (InterruptedException e) {
+          break;
+        }
+      }
+    }
+    
+    @Override
+    public void resume() {
+      this.suspend = false;
+    }
+    
+    @Override
+    public boolean isBusy(boolean isMajor) {
+      if (isMajor) {
+        return isMajorCompactionActive.get();
+      } else {
+        return isMinorCompactionActive.get();
+      }
+    }
+    
+    /**
+     * compacts hoplogs. The method takes a minor or major compaction "lock" to
+     * prevent concurrent execution of compaction cycles. A possible improvement
+     * is to allow parallel execution of minor compaction if the sets of
+     * hoplogs being compacted are disjoint.
+     */
+    @Override
+    public boolean compact(boolean isMajor, boolean isForced) throws IOException {
+      if(suspend) {
+        return false;
+      }
+
+      String extension = null;
+      IOOperation compactionStats = null;
+      long startTime = 0; 
+      final AtomicBoolean lock;
+      Hoplog compactedHoplog = null;
+      List<TrackedReference<Hoplog>> targets = null;
+      String user = logger.isDebugEnabled() ? (isMajor ? "MajorC" : "MinorC") : null;
+      
+      if (isMajor) {
+        lock = isMajorCompactionActive;
+        extension = MAJOR_HOPLOG_EXTENSION;
+        compactionStats = stats.getMajorCompaction();
+      } else {
+        lock = isMinorCompactionActive;
+        extension = MINOR_HOPLOG_EXTENSION;
+        compactionStats = stats.getMinorCompaction();
+      }
+
+      // final check before beginning compaction. Return if compaction is active
+      if (! lock.compareAndSet(false, true)) {
+        if (isMajor) {
+          if (logger.isDebugEnabled())
+            logger.debug("{}Major compaction already active. Ignoring new request", logPrefix);
+        } else {
+          if (logger.isDebugEnabled())
+            logger.debug("Minor compaction already active. Ignoring new request", logPrefix);
+        }
+        return false;
+      }
+      
+      try {
+        if(suspend) {
+          return false;
+        }
+        
+        // variables for updating stats
+        startTime = compactionStats.begin();
+        
+        int seqNum = -1;
+        int lastKnownMajorCSeqNum;
+        synchronized (maxMajorCSeqNum) {
+          lastKnownMajorCSeqNum = maxMajorCSeqNum.get();
+          targets = hoplogReadersController.getTrackedSortedOplogList(user);
+          getCompactionTargets(isMajor, targets, lastKnownMajorCSeqNum);
+          if (targets != null && targets.size() > 0) {
+            targets = Collections.unmodifiableList(targets);
+            seqNum = getSequenceNumber(targets.get(0).get());
+            if (isMajor) {
+              maxMajorCSeqNum.set(seqNum);
+            }
+          }
+        }
+        
+        if (targets == null || targets.isEmpty() || (!isMajor && targets.size() == 1 && !isForced)) {
+          if (logger.isDebugEnabled()){
+            logger.debug("{}Skipping compaction, too few hoplops to compact. Major?" + isMajor, logPrefix);
+          }
+            
+          compactionStats.end(0, startTime);
+          return true;
+        }
+        
+        // In case we only have one major compacted file, we don't need to run major compaction to
+        // generate a copy of the same content
+        if (targets.size() == 1 && !isForced) {
+          String hoplogName = targets.get(0).get().getFileName();
+          if (hoplogName.endsWith(MAJOR_HOPLOG_EXTENSION)){
+            if (logger.isDebugEnabled()){
+              logger.debug("{}Skipping compaction, no need to compact a major compacted file. Major?" + isMajor, logPrefix);
+            }
+            compactionStats.end(0, startTime);
+            return true;
+          }
+        }
+        
+        if (logger.isDebugEnabled()) {
+          for (TrackedReference<Hoplog> target : targets) {
+            fineLog("Target:", target, " size:", target.get().getSize());
+          }
+        }
+        
+        // Create a temporary hoplog for the compacted hoplog. The compacted
+        // hoplog will have the same seq number as the youngest target file. Any
+        // hoplog younger than the target hoplogs will have a higher sequence number
+        compactedHoplog = getTmpSortedOplog(seqNum, extension);
+        
+        long byteCount;
+        try {
+          byteCount = fillCompactionHoplog(isMajor, targets, compactedHoplog, lastKnownMajorCSeqNum);
+          compactionStats.end(byteCount, startTime);
+        } catch (InterruptedException e) {
+          if (logger.isDebugEnabled())
+            logger.debug("{}Compaction execution suspended", logPrefix);
+          compactionStats.error(startTime);
+          return false;
+        } catch (ForceReattemptException e) {
+          if (logger.isDebugEnabled())
+            logger.debug("{}Compaction execution suspended", logPrefix);
+          compactionStats.error(startTime);
+          return false;
+        }
+        
+        // creation of the compacted hoplog completed, it is time to use it for
+        // reading. Before using it, make sure minorC and majorC were not
+        // executing on overlapping sets of files. All targets can be marked for
+        // expiration. Notify listener if configured. Update bucket size
+        synchronized (maxMajorCSeqNum) {
+          if (!isMajor && isMinorMajorOverlap(targets, maxMajorCSeqNum.get())) {
+            // MajorC is higher priority. In case of any overlap kill minorC
+            if (logger.isDebugEnabled())
+              logger.debug("{}Interrupting MinorC for a concurrent MajorC", logPrefix);
+            compactionStats.error(startTime);
+            return false;
+          }
+          addSortedOplog(compactedHoplog, true, false);
+          markSortedOplogForDeletion(targets, true);
+        }
+      } catch (IOException e) {
+        compactionStats.error(startTime);
+        throw e;
+      } finally {
+        if (isMajor) {
+          maxMajorCSeqNum.set(-1);
+        }
+        lock.set(false);
+        hoplogReadersController.releaseHoplogs(targets, user);
+      }
+      
+      incrementDiskUsage(compactedHoplog.getSize());
+      reEstimateBucketSize();
+      
+      notifyCompactionListeners(isMajor);
+      return true;
+    }
+
+    /**
+     * Major compaction compacts all files. The seq number of the youngest file
+     * being MajorCed is known. If MinorC is operating on any file with a seq
+     * number less than or equal to this number, there is an overlap.
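+     * For example, if MajorC covers files up to seq number 10 and one of the
+     * MinorC targets has seq number 7, the two cycles overlap and MinorC must yield.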
+     * @param num the max seq number covered by the in-progress major compaction, or -1 if none
+     */
+    boolean isMinorMajorOverlap(List<TrackedReference<Hoplog>> targets, int num) {
+      if (num < 0 || targets == null || targets.isEmpty()) {
+        return false;
+      }
+
+      for (TrackedReference<Hoplog> hop : targets) {
+        if (getSequenceNumber(hop.get()) <= num) {
+          return true;
+        }
+      }
+      
+      return false;
+    }
+
+    /**
+     * Iterates over targets and writes eligible targets to the output hoplog.
+     * Handles creation of iterators and writer and closing it in case of
+     * errors.
+     */
+    public long fillCompactionHoplog(boolean isMajor,
+        List<TrackedReference<Hoplog>> targets, Hoplog output, int majorCSeqNum)
+        throws IOException, InterruptedException, ForceReattemptException {
+
+      HoplogWriter writer = null;
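+      // HyperLogLog sketch of the distinct non-deleted keys written to the
+      // output hoplog; it is passed to buildMetaData() and stored in the hoplog
+      // metadata when the writer is closed.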
+      ICardinality localHLL = new HyperLogLog(HLL_CONSTANT);
+      HoplogSetIterator mergedIter = null;
+      int byteCount = 0;
+      
+      try {
+        // create a merged iterator over the targets and write entries into
+        // output hoplog
+        mergedIter = new HoplogSetIterator(targets);
+        writer = output.createWriter(mergedIter.getRemainingEntryCount());
+
+        boolean interrupted = false;
+        for (; mergedIter.hasNext(); ) {
+          if (suspend) {
+            interrupted = true;
+            break;
+          } else if (!isMajor &&  maxMajorCSeqNum.get() > majorCSeqNum) {
+            // A new major compaction cycle is starting, quit minorC to avoid
+            // duplicate work and missing deletes
+            if (logger.isDebugEnabled())
+              logger.debug("{}Preempting MinorC, new MajorC cycle detected ", logPrefix);
+            interrupted = true;
+            break;
+          }
+
+          mergedIter.nextBB();
+          
+          ByteBuffer k = mergedIter.getKeyBB();
+          ByteBuffer v = mergedIter.getValueBB();
+          
+          boolean isDeletedEntry = isDeletedEntry(v.array(), v.arrayOffset());
+          if (isMajor && isDeletedEntry) {
+            // it is major compaction, time to ignore deleted entries
+            continue;
+          }
+
+          if (!isDeletedEntry) {
+            int hash = MurmurHash.hash(k.array(), k.arrayOffset(), k.remaining(), -1);
+            localHLL.offerHashed(hash);
+          }
+
+          writer.append(k, v);
+          byteCount += (k.remaining() + v.remaining());
+        }
+
+        mergedIter.close();
+        mergedIter = null;
+
+        writer.close(buildMetaData(localHLL));
+        writer = null;
+
+        if (interrupted) {
+          // If we suspended compaction operations, delete the partially written
+          // file and return.
+          output.delete();
+          throw new InterruptedException();
+        }
+        
+        // ping secondaries before making the file a legitimate file to ensure 
+        // that in case of split brain, no other vm has taken over as primary. #50110. 
+        pingSecondaries();
+        
+        makeLegitimate(output);
+        return byteCount;
+      } catch (IOException e) {
+        e = handleWriteHdfsIOError(writer, output, e);
+        writer = null;
+        throw e;
+      } catch (ForceReattemptException e) {
+        output.delete();
+        throw e;
+      }finally {
+        if (mergedIter != null) {
+          mergedIter.close();
+        }
+
+        if (writer != null) {
+          writer.close();
+        }
+      }
+    }
+
+    /**
+     * Identifies compaction targets. For major compaction all sorted oplogs
+     * will be iterated on. For minor compaction, a smaller, policy-driven set
+     * of targets is selected.
+     */
+    protected void getCompactionTargets(boolean major,
+        List<TrackedReference<Hoplog>> targets, int majorCSeqNum) {
+      if (!major) {
+        getMinorCompactionTargets(targets, majorCSeqNum);
+      }
+    }
+
+    /**
+     * Builds the list of oplogs most suitable for minor compaction. The
+     * algorithm selects the m smallest oplogs which are not bigger than the
+     * configured maximum size. The targets list is emptied if no valid
+     * candidates are found.
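+     * For example, if the store were configured with an input file count
+     * minimum of 3 and maximum of 6, every window of 3 to 6 consecutive small
+     * hoplogs (oldest first) is scored with computeGain() and the window with
+     * the highest gain becomes the target set.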
+     */
+    void getMinorCompactionTargets(List<TrackedReference<Hoplog>> targets, int majorCSeqNum) 
+    {
+      List<TrackedReference<Hoplog>> omittedHoplogs = new ArrayList<TrackedReference<Hoplog>>();
+
+      // reverse the order of hoplogs in list. the oldest file becomes the first file.
+      Collections.reverse(targets);
+
+      // hoplog greater than this size will not be minor-compacted
+      final long MAX_COMPACTION_FILE_SIZE;
+      // maximum number of files to be included in any compaction cycle
+      final int MAX_FILE_COUNT_COMPACTION;
+      // minimum number of files that must be present for compaction to be worthwhile
+      final int MIN_FILE_COUNT_COMPACTION;
+      
+      MAX_COMPACTION_FILE_SIZE = ((long)store.getInputFileSizeMax()) * 1024 *1024;
+      MAX_FILE_COUNT_COMPACTION = store.getInputFileCountMax();
+      MIN_FILE_COUNT_COMPACTION = store.getInputFileCountMin();
+
+      try {
+        // skip until the first file smaller than the max compaction file size
+        // is found. And if MajorC is active, move to a file which is also
+        // outside the scope of MajorC
+        for (Iterator<TrackedReference<Hoplog>> iterator = targets.iterator(); iterator.hasNext();) {
+          TrackedReference<Hoplog> oplog = iterator.next();
+          if (majorCSeqNum >= getSequenceNumber(oplog.get())) {
+            iterator.remove();
+            omittedHoplogs.add(oplog);
+            if (logger.isDebugEnabled()){
+              fineLog("Overlap with MajorC, excluding hoplog " + oplog.get());
+            }
+            continue;
+          }
+          
+          if (oplog.get().getSize() > MAX_COMPACTION_FILE_SIZE || oplog.get().getFileName().endsWith(MAJOR_HOPLOG_EXTENSION)) {
+            // big file will not be included for minor compaction
+            // major compacted file will not be converted to minor compacted file
+            iterator.remove();
+            omittedHoplogs.add(oplog);
+            if (logger.isDebugEnabled()) {
+              fineLog("Excluding big hoplog from minor cycle:",
+                  oplog.get(), " size:", oplog.get().getSize(), " limit:",
+                  MAX_COMPACTION_FILE_SIZE);
+            }
+          } else {
+            // first small hoplog found, skip the loop
+            break;
+          }
+        }
+
+        // If there are too few files no need to perform compaction
+        if (targets.size() < MIN_FILE_COUNT_COMPACTION) {
+          if (logger.isDebugEnabled()){
+            logger.debug("{}Too few hoplogs for minor cycle:" + targets.size(), logPrefix);
+          }
+          omittedHoplogs.addAll(targets);
+          targets.clear();
+          return;
+        }
+        
+        float maxGain = Float.MIN_VALUE;
+        int bestFrom = -1; 
+        int bestTo = -1; 
+        
+        // for listSize=5 list, minFile=3; maxIndex=5-3. 
+        // so from takes values 0,1,2
+        int maxIndexForFrom = targets.size() - MIN_FILE_COUNT_COMPACTION;
+        for (int from = 0; from <= maxIndexForFrom ; from++) {
+          // for listSize=6 list, minFile=3, maxFile=5; minTo=0+3-1, maxTo=0+5-1
+          // so to takes values 2,3,4
+          int minIndexForTo = from + MIN_FILE_COUNT_COMPACTION - 1;
+          int maxIndexForTo = Math.min(from + MAX_FILE_COUNT_COMPACTION, targets.size());
+          
+          for (int i = minIndexForTo; i < maxIndexForTo; i++) {
+            Float gain = computeGain(from, i, targets);
+            if (gain == null) {
+              continue;
+            }
+            
+            if (gain > maxGain) {
+              maxGain = gain;
+              bestFrom = from;
+              bestTo = i;
+            }
+          }
+        }
+        
+        if (bestFrom == -1) {
+          if (logger.isDebugEnabled())
+            logger.debug("{}Failed to find optimal target set for MinorC", logPrefix);
+          omittedHoplogs.addAll(targets);
+          targets.clear();
+          return;
+        }
+
+        if (logger.isDebugEnabled()) {
+          fineLog("MinorCTarget optimal result from:", bestFrom, " to:", bestTo);
+        }
+
+        // remove hoplogs that do not fall in the bestFrom-bestTo range
+        int i = 0;
+        for (Iterator<TrackedReference<Hoplog>> iter = targets.iterator(); iter.hasNext();) {
+          TrackedReference<Hoplog> hop = iter.next();
+          if (i < bestFrom || i > bestTo) {
+            iter.remove();
+            omittedHoplogs.add(hop);
+          }
+          i++;
+        }
+      } finally {
+        // release readers of targets not included in the compaction cycle 
+        String user = logger.isDebugEnabled() ? "MinorC" : null;
+        hoplogReadersController.releaseHoplogs(omittedHoplogs, user);
+      }
+      
+      // restore the order, youngest file is the first file again
+      Collections.reverse(targets);
+    }
+
+    @Override
+    public HDFSStore getHdfsStore() {
+      return store;
+    }
+  }
+  
+  Float computeGain(int from, int to, List<TrackedReference<Hoplog>> targets) {
+    double SIZE_64K = 64.0 * 1024;
+    // TODO the base for log should depend on the average number of keys an index block will contain
+    double LOG_BASE = Math.log(AVG_NUM_KEYS_PER_INDEX_BLOCK);
+    
+    long totalSize = 0;
+    double costBefore = 0f;
+    for (int i = from; i <= to; i++) {
+      long size = targets.get(i).get().getSize();
+      if (size == 0) {
+        continue;
+      }
+      totalSize += size;
+      
+      // For each hoplog file, the read cost is the number of index block reads
+      // plus 1 data block read. Index blocks on average contain N keys and are
+      // organized in an N-ary tree structure. Hence the number of index block
+      // reads will be logBaseN(number of data blocks)
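+      // For illustration, if AVG_NUM_KEYS_PER_INDEX_BLOCK were 256, a 64 MB
+      // hoplog would span 1024 blocks of 64 KB, giving a read cost of
+      // ceil(log_256(1024)) + 1 = 2 + 1 = 3 block reads.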
+      costBefore += Math.ceil(Math.max(1.0, Math.log(size / SIZE_64K) / LOG_BASE)) + 1;
+    }
+    
+    // if the first file is relatively too large, this set is a bad candidate for compaction
+    long firstFileSize = targets.get(from).get().getSize();
+    if (firstFileSize > (totalSize - firstFileSize) * RATIO) {
+      if (logger.isDebugEnabled()){
+        fineLog("First file too big:", firstFileSize, " totalSize:", totalSize);
+      }
+      return null;
+    }
+        
+    // compute size in MB so that the value of gain has a manageable number of decimals
+    long totalSizeInMb = totalSize / 1024 / 1024;
+    if (totalSizeInMb == 0) {
+      // the files are too small, just return the cost. The more we compact,
+      // the better it is
+      if (logger.isDebugEnabled()) {
+        logger.debug("{}Total size too small:" +totalSize, logPrefix);
+      }
+      return (float) costBefore;
+    }
+    
+    double costAfter = Math.ceil(Math.log(totalSize / SIZE_64K) / LOG_BASE) + 1;
+    return (float) ((costBefore - costAfter) / totalSizeInMb);
+  }
+  
+  /**
+   * Hoplog readers are accessed asynchronously. There could be a window in
+   * which, while a hoplog is being iterated on, it gets compacted and becomes
+   * expired or inactive. The reader of the hoplog must not be closed till the
+   * iterator completes. All such scenarios are managed by this class. It keeps
+   * all the readers, active and inactive, along with a reference counter for
+   * each reader. An inactive reader is closed once its reference count goes
+   * down to 0.
+   * 
+   * One important point: only the compaction process makes a hoplog inactive.
+   * The compaction process in a bucket is single threaded, so compaction itself
+   * will not face a race condition. Read and scan operations on the bucket are
+   * affected, however, so the reference counter is incremented for each read and scan.
+   * 
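+   * A typical reader first tracks a hoplog via trackHoplog() or
+   * getTrackedSortedOplogList(), which increments its reference count, and then
+   * calls releaseHoplog() when done; if the hoplog has become inactive and its
+   * count drops to 0, the underlying reader is closed.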
+   */
+  private class HoplogReadersController implements HoplogReaderActivityListener {
+    private Integer maxOpenFilesLimit;
+
+    // sorted collection of all the active oplog files associated with this bucket. A set is used
+    // instead of a queue because new files created as part of compaction may need to be inserted
+    // between existing hoplogs; a compacted file is such a case and should not be treated as newest.
+    private final ConcurrentSkipListSet<TrackedReference<Hoplog>> hoplogs;
+    
+    // list of all the hoplogs that have been compacted and need to be closed
+    // once the reference count reduces to 0
+    private final ConcurrentHashSet<TrackedReference<Hoplog>> inactiveHoplogs;
+    
+    // ReadWriteLock on the list of oplogs to allow for consistent reads and scans
+    // while the hoplog set changes. A write lock is needed on completion of
+    // compaction, addition of a new hoplog, or on receiving an update message from
+    // other GF nodes
+    private final ReadWriteLock hoplogRWLock = new ReentrantReadWriteLock(true);
+
+    // tracks the number of active readers for hoplogs of this bucket
+    private AtomicInteger activeReaderCount = new AtomicInteger(0);
+    
+    public HoplogReadersController() {
+      HoplogComparator comp = new HoplogComparator();
+      hoplogs = new ConcurrentSkipListSet<TrackedReference<Hoplog>>(comp) {
+        private static final long serialVersionUID = 1L;
+
+        @Override
+        public boolean add(TrackedReference<Hoplog> e) {
+          // increment number of hoplogs active for this bucket
+          boolean result =  super.add(e);
+          if (result)
+            stats.incActiveFiles(1);
+          return result;
+        }
+        
+        @Override
+        public boolean remove(Object o) {
+          // decrement the number of hoplogs active for this bucket
+          boolean result =  super.remove(o);
+          if (result)
+            stats.incActiveFiles(-1);
+          return result;
+        }
+      };
+      
+      inactiveHoplogs = new ConcurrentHashSet<TrackedReference<Hoplog>>() {
+        private static final long serialVersionUID = 1L;
+        
+        @Override
+        public boolean add(TrackedReference<Hoplog> e) {
+          boolean result =  super.add(e);
+          if (result)
+            stats.incInactiveFiles(1);
+          return result;
+        }
+        
+        @Override
+        public boolean remove(Object o) {
+          boolean result =  super.remove(o);
+          if (result)
+            stats.incInactiveFiles(-1);
+          return result;
+        }
+      };
+      
+      maxOpenFilesLimit = Integer.getInteger(
+          HoplogConfig.BUCKET_MAX_OPEN_HFILES_CONF,
+          HoplogConfig.BUCKET_MAX_OPEN_HFILES_DEFAULT);
+    }
+    
+    Hoplog getOldestHoplog() {
+      if (hoplogs.isEmpty()) {
+        return null;
+      }
+      return hoplogs.last().get();
+    }
+
+    /**
+     * locks the sorted oplogs collection and performs the add operation
+     * @return true if the addition was successful
+     */
+    private boolean addSortedOplog(Hoplog so) throws IOException {
+      if (logger.isDebugEnabled()) {
+        logger.debug("{}Try add " + so, logPrefix);
+      }
+      hoplogRWLock.writeLock().lock();
+      try {
+        int size = hoplogs.size();
+        boolean result = hoplogs.add(new TrackedReference<Hoplog>(so));
+        so.setReaderActivityListener(this);
+        if (logger.isDebugEnabled()){
+          fineLog("Added: ", so, " Before:", size, " After:", hoplogs.size());
+        }
+        return result;
+      } finally {
+        hoplogRWLock.writeLock().unlock();
+      }
+    }
+    
+    /**
+     * locks the sorted oplogs collection, performs the remove operation and also updates readers
+     */
+    private void removeSortedOplog(TrackedReference<Hoplog> so) throws IOException {
+      if (logger.isDebugEnabled()) {
+        logger.debug("Try remove " + so, logPrefix);
+      }
+      hoplogRWLock.writeLock().lock();
+      try {
+        int size = hoplogs.size();
+        boolean result = hoplogs.remove(so);
+        if (result) {
+          inactiveHoplogs.add(so);
+          if (logger.isDebugEnabled()) {
+            fineLog("Removed: ", so, " Before:", size, " After:", hoplogs.size());
+          }
+        } else {
+          if (inactiveHoplogs.contains(so)) {
+            if (logger.isDebugEnabled()) {
+              logger.debug("{}Found a missing active hoplog in inactive list." + so, logPrefix);
+            }
+          } else {
+            so.get().close();
+            logger.warn(LocalizedMessage.create(LocalizedStrings.HOPLOG_MISSING_IN_BUCKET_FORCED_CLOSED, so.get()));
+          }
+        }
+      } finally {
+        hoplogRWLock.writeLock().unlock();
+      }
+    }
+    
+    private  void closeInactiveHoplogs() throws IOException {
+      hoplogRWLock.writeLock().lock();
+      try {
+        for (TrackedReference<Hoplog> hoplog : inactiveHoplogs) {
+          if (logger.isDebugEnabled()){
+            logger.debug("{}Try close inactive " + hoplog, logPrefix);
+          }
+
+          if (!hoplog.inUse()) {
+            int size = inactiveHoplogs.size();            
+            inactiveHoplogs.remove(hoplog);
+            closeReaderAndSuppressError(hoplog.get(), true);
+            if (logger.isDebugEnabled()){
+              fineLog("Closed inactive: ", hoplog.get(), " Before:", size,
+                  " After:", inactiveHoplogs.size());
+            }
+          }
+        }
+      } finally {
+        hoplogRWLock.writeLock().unlock();
+      }
+    }
+    
+    /**
+     * @param target
+     *          name of the hoplog file
+     * @return trackedReference if target exists in inactive hoplog list.
+     * @throws IOException
+     */
+    TrackedReference<Hoplog> getInactiveHoplog(String target) throws IOException {
+      hoplogRWLock.writeLock().lock();
+      try {
+        for (TrackedReference<Hoplog> hoplog : inactiveHoplogs) {
+          if (hoplog.get().getFileName().equals(target)) {
+            if (logger.isDebugEnabled()) {
+              logger.debug("{}Target found in inactive hoplogs list: " + hoplog, logPrefix);
+            }
+            return hoplog;
+          }
+        }
+        if (logger.isDebugEnabled()){
+          logger.debug("{}Target not found in inactive hoplogs list: " + target, logPrefix);
+        }
+        return null;
+      } finally {
+        hoplogRWLock.writeLock().unlock();
+      }
+    }
+    
+    /**
+     * force closes all readers
+     */
+    public void close() throws IOException {
+      hoplogRWLock.writeLock().lock();
+      try {
+        for (TrackedReference<Hoplog> hoplog : hoplogs) {
+          closeReaderAndSuppressError(hoplog.get(), true);
+        }
+        
+        for (TrackedReference<Hoplog> hoplog : inactiveHoplogs) {
+          closeReaderAndSuppressError(hoplog.get(), true);
+        }
+      } finally {
+        hoplogs.clear();
+        inactiveHoplogs.clear();
+        hoplogRWLock.writeLock().unlock();
+      }
+    }
+    
+    /**
+     * locks hoplogs to create a snapshot of active hoplogs. The reference count
+     * of each reader is incremented to keep it from getting closed
+     * 
+     * @return ordered list of sorted oplogs
+     */
+    private List<TrackedReference<Hoplog>> getTrackedSortedOplogList(String user) {
+      List<TrackedReference<Hoplog>> oplogs = new ArrayList<TrackedReference<Hoplog>>();
+      hoplogRWLock.readLock().lock();
+      try {
+        for (TrackedReference<Hoplog> oplog : hoplogs) {
+          oplog.increment(user);
+          oplogs.add(oplog);
+          if (logger.isDebugEnabled()) {
+            logger.debug("{}Track ref " + oplog, logPrefix);
+          }
+        }
+      } finally {
+        hoplogRWLock.readLock().unlock();
+      }
+      return oplogs;
+    }
+
+    private TrackedReference<Hoplog> trackHoplog(Hoplog hoplog, String user) {
+      hoplogRWLock.readLock().lock();
+      try {
+        for (TrackedReference<Hoplog> oplog : hoplogs) {
+          if (oplog.get().getFileName().equals(hoplog.getFileName())) {
+            oplog.increment(user);
+            if (logger.isDebugEnabled()) {
+              logger.debug("{}Track " + oplog, logPrefix);
+            }
+            return oplog;
+          }
+        }
+      } finally {
+        hoplogRWLock.readLock().unlock();
+      }
+      throw new NoSuchElementException(hoplog.getFileName());
+    }
+    
+    public void releaseHoplogs(List<TrackedReference<Hoplog>> targets, String user) {
+      if (targets == null) {
+        return;
+      }
+      
+      for (int i = targets.size() - 1; i >= 0; i--) {
+        TrackedReference<Hoplog> hoplog = targets.get(i);
+        releaseHoplog(hoplog, user);
+      }
+    }
+
+    public void releaseHoplog(TrackedReference<Hoplog> target, String user) {
+      if (target ==  null) {
+        return;
+      }
+      
+      target.decrement(user);
+      if (logger.isDebugEnabled()) {
+        logger.debug("{}Try release " + target, logPrefix);
+      }
+      if (target.inUse()) {
+        return;
+      }
+      
+      // there are no users of this hoplog. if it is inactive close it.
+      hoplogRWLock.writeLock().lock();
+      try {
+        if (!target.inUse()) {
+          if (inactiveHoplogs.contains(target) ) {
+            int sizeBefore = inactiveHoplogs.size();
+            inactiveHoplogs.remove(target);
+            closeReaderAndSuppressError(target.get(), true);
+            if (logger.isDebugEnabled()) {
+              fineLog("Closed inactive: ", target, " totalBefore:", sizeBefore,
+                  " totalAfter:", inactiveHoplogs.size());
+            }
+          } else if (hoplogs.contains(target)) {
+            closeExcessReaders();              
+          }
+        }
+      } catch (IOException e) {
+        logger.warn(LocalizedMessage.create(LocalizedStrings.HOPLOG_IO_ERROR, 
+            "Close reader: " + target.get().getFileName()), e);
+      } finally {
+        hoplogRWLock.writeLock().unlock();
+      }
+    }
+
+    /*
+     * detects if the total number of open hdfs readers is more than the
+     * configured max file limit. In case the limit is exceeded, some readers
+     * need to be closed to avoid a datanode receiver overflow error.
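+     * For example, with the default limit of 72 open files per bucket, readers
+     * of the oldest unused hoplogs are closed until the active reader count
+     * drops back to the limit or below.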
+     */
+    private void closeExcessReaders() throws IOException {
+      if (logger.isDebugEnabled()) {
+        logger.debug("{}Close excess readers. Size:" + hoplogs.size()
+            + " activeReaders:" + activeReaderCount.get() + " limit:"
+            + maxOpenFilesLimit, logPrefix);
+      }
+
+      if (hoplogs.size() <= maxOpenFilesLimit) {
+        return;
+      }
+      
+      if (activeReaderCount.get() <= maxOpenFilesLimit) {
+        return;
+      }
+      
+      for (TrackedReference<Hoplog> hoplog : hoplogs.descendingSet()) {
+        if (!hoplog.inUse() && !hoplog.get().isClosed()) {
+          hoplog.get().close(false);
+          if (logger.isDebugEnabled()) {
+            logger.debug("{}Excess reader closed " + hoplog, logPrefix);
+          }
+        }
+        
+        if (activeReaderCount.get() <= maxOpenFilesLimit) {
+          return;
+        }
+      }
+    }
+
+    @Override
+    public void readerCreated() {
+      activeReaderCount.incrementAndGet();
+      stats.incActiveReaders(1);
+      if (logger.isDebugEnabled())
+        logger.debug("{}ActiveReader++", logPrefix);
+    }
+
+    @Override
+    public void readerClosed() {
+      activeReaderCount.decrementAndGet(); 
+      stats.incActiveReaders(-1);
+      if (logger.isDebugEnabled())
+        logger.debug("{}ActiveReader--", logPrefix);
+    }
+  }
+
+  /**
+   * returns an ordered list of oplogs, FOR TESTING ONLY
+   */
+  public List<TrackedReference<Hoplog>> getSortedOplogs() throws IOException {
+    List<TrackedReference<Hoplog>> oplogs = new ArrayList<TrackedReference<Hoplog>>();
+    for (TrackedReference<Hoplog> oplog : hoplogReadersController.hoplogs) {
+        oplogs.add(oplog);
+    }
+    return oplogs;
+  }
+
+  /**
+   * Merged iterator on a list of hoplogs. 
+   */
+  public class BucketIterator implements HoplogIterator<byte[], SortedHoplogPersistedEvent> {
+    // list of hoplogs to be iterated on.
+    final List<TrackedReference<Hoplog>> hoplogList;
+    HoplogSetIterator mergedIter;
+
+    public BucketIterator(List<TrackedReference<Hoplog>> hoplogs) throws IOException {
+      this.hoplogList = hoplogs;
+      try {
+        mergedIter = new HoplogSetIterator(this.hoplogList);
+        if (logger.isDebugEnabled()) {
+          for (TrackedReference<Hoplog> hoplog : hoplogs) {
+            logger.debug("{}BucketIter target hop:" + hoplog.get().getFileName(), logPrefix);
+          }
+        }
+      } catch (IllegalArgumentException e) {
+        if (IOException.class.isAssignableFrom(e.getCause().getClass())) {
+          throw handleIOError((IOException) e.getCause());
+        } else {
+          throw e;
+        }
+      } catch (IOException e) {
+        throw handleIOError(e);
+      } catch (HDFSIOException e) {
+        throw handleIOError(e);
+      } 
+    }
+
+    @Override
+    public boolean hasNext() {
+      return mergedIter.hasNext();
+    }
+
+    @Override
+    public byte[] next() throws IOException {
+      try {
+        return HFileSortedOplog.byteBufferToArray(mergedIter.next());
+      } catch (IllegalArgumentException e) {
+        if (IOException.class.isAssignableFrom(e.getCause().getClass())) {
+          throw handleIOError((IOException) e.getCause());
+        } else {
+          throw e;
+        }
+      } catch (IOException e) {
+        throw handleIOError(e);
+      }  
+    }
+
+    @Override
+    public byte[] getKey() {
+      // merged iterator returns a byte[]. This needs to be deserialized to the object which was
+      // provided during flush operation
+      return HFileSortedOplog.byteBufferToArray(mergedIter.getKey());
+    }
+
+    @Override
+    public SortedHoplogPersistedEvent getValue() {
+      // merged iterator returns a byte[]. This needs to be deserialized to the
+      // object which was provided during flush operation
+      try {
+        return deserializeValue(HFileSortedOplog.byteBufferToArray(mergedIter.getValue()));
+      } catch (IOException e) {
+        throw new HDFSIOException("Failed to deserialize byte while iterating on partition", e);
+      }
+    }
+
+    @Override
+    public void remove() {
+      mergedIter.remove();
+    }
+
+    @Override
+    public void close() {
+      // TODO release the closed iterators early
+      String user = logger.isDebugEnabled() ? "Scan" : null;
+      hoplogReadersController.releaseHoplogs(hoplogList, user);
+    }
+  }
+  
+  /**
+   * This utility class is used to filter temporary hoplogs in a bucket
+   * directory
+   * 
+   */
+  private static class TmpFilePathFilter implements PathFilter {
+    @Override
+    public boolean accept(Path path) {
+      Matcher matcher = HOPLOG_NAME_PATTERN.matcher(path.getName());
+      if (matcher.matches() && path.getName().endsWith(TEMP_HOPLOG_EXTENSION)) {
+        return true;
+      }
+      return false;
+    }
+  }
+
+  private void fineLog(Object... strings) {
+    if (logger.isDebugEnabled()) {
+      StringBuffer sb = concatString(strings);
+      logger.debug(logPrefix + sb.toString());
+    }
+  }
+
+  private StringBuffer concatString(Object... strings) {
+    StringBuffer sb = new StringBuffer();
+    for (Object str : strings) {
+      sb.append(str.toString());
+    }
+    return sb;
+  }
+
+  @Override
+  public void compactionCompleted(String region, int bucket, boolean isMajor) {
+    // do nothing for compaction events. Hoplog Organizer depends on addition
+    // and deletion of hoplogs only
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/Hoplog.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/Hoplog.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/Hoplog.java
new file mode 100644
index 0000000..e622749
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/Hoplog.java
@@ -0,0 +1,263 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import com.gemstone.gemfire.internal.hll.ICardinality;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.EnumMap;
+
+
+/**
+ * Ordered sequence file
+ */
+public interface Hoplog extends Closeable, Comparable<Hoplog>  {
+  public static final boolean NOP_WRITE = Boolean.getBoolean("Hoplog.NOP_WRITE");
+  
+  /** the gemfire magic number for sorted oplogs */
+  public static final byte[] MAGIC = new byte[] { 0x47, 0x53, 0x4F, 0x50 };
+
+  /**
+   * @return an instance of the cached reader; creates one if it does not exist
+   * @throws IOException
+   */
+  HoplogReader getReader() throws IOException;
+
+  /**
+   * Creates a new sorted writer.
+   * 
+   * @param keys
+   *          an estimate of the number of keys to be written
+   * @return the writer
+   * @throws IOException
+   *           error creating writer
+   */
+  HoplogWriter createWriter(int keys) throws IOException;
+
+  /**
+   * @param listener listener of reader's activity
+   */
+  void setReaderActivityListener(HoplogReaderActivityListener listener);
+  
+  /**
+   * @return file name
+   */
+  String getFileName();
+
+  /**
+   * @return Entry count estimate for this hoplog
+   */
+  public ICardinality getEntryCountEstimate() throws IOException;
+
+  /**
+   * renames the file to the input name
+   * 
+   * @throws IOException
+   */
+  void rename(String name) throws IOException;
+
+  /**
+   * Deletes the sorted oplog file
+   */
+  void delete() throws IOException;
+  
+  /**
+   * Returns true if the hoplog is closed for reads.
+   * @return true if closed
+   */
+  boolean isClosed();
+  
+  /**
+   * @param clearCache clear this sorted oplog's cache if true
+   * @throws IOException 
+   */
+  void close(boolean clearCache) throws IOException;
+  
+  /**
+   * @return the modification timestamp of the file
+   */
+  long getModificationTimeStamp();
+  
+  /**
+   * @return the size of file
+   */
+  long getSize();
+
+  /**
+   * Reads sorted oplog file.
+   */
+  public interface HoplogReader extends HoplogSetReader<byte[], byte[]> {
+    /**
+     * Returns a byte buffer based view of the value linked to the key
+     */
+    ByteBuffer get(byte[] key) throws IOException;
+
+    /**
+     * @return the bloom filter associated with this sorted oplog file.
+     */
+    BloomFilter getBloomFilter() throws IOException;
+
+    /**
+     * @return number of KV pairs in the file, including tombstone entries
+     */
+    long getEntryCount();
+
+    /**
+     * Returns the {@link ICardinality} implementation that is useful for
+     * estimating the size of this Hoplog.
+     * 
+     * @return the cardinality estimator
+     */
+    ICardinality getCardinalityEstimator();
+  }
+
+  /**
+   * Provides hoplog's reader's activity related events to owners
+   * 
+   */
+  public interface HoplogReaderActivityListener {
+    /**
+     * Invoked when a reader is created and an active reader did not exist
+     * earlier
+     */
+    public void readerCreated();
+    
+    /**
+     * Invoked when an active reader is closed
+     */
+    public void readerClosed();
+  }
+
+  /**
+   * Writes key/value pairs in a sorted oplog file. Each entry that is appended must have a key that
+   * is greater than or equal to the previous key.
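+   *
+   * A typical use obtains a writer sized to the expected entry count from
+   * Hoplog.createWriter(int), appends the entries in sorted key order, and
+   * finally closes the writer with the hoplog metadata via close(EnumMap).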
+   */
+  public interface HoplogWriter extends Closeable {
+    /**
+     * Appends another key and value. The key is expected to be greater than or equal to the last
+     * key that was appended.
+     * @param key
+     * @param value
+     */
+    void append(byte[] key, byte[] value) throws IOException;
+
+    /**
+     * Appends another key and value. The key is expected to be greater than or equal to the last
+     * key that was appended.
+     */
+    void append(ByteBuffer key, ByteBuffer value) throws IOException;
+
+    void close(EnumMap<Meta, byte[]> metadata) throws IOException;
+    
+    /**
+     * flushes all outstanding data into the OS buffers on all DN replicas 
+     * @throws IOException
+     */
+    void hsync() throws IOException;
+    
+    /**
+     * Gets the size of the data that has already been written
+     * to the writer.  
+     * 
+     * @return number of bytes already written to the writer
+     */
+    public long getCurrentSize() throws IOException; 
+  }
+
+  /**
+   * Identifies the gemfire sorted oplog versions.
+   */
+  public enum HoplogVersion {
+    V1;
+
+    /**
+     * Returns the version string as bytes.
+     * 
+     * @return the byte form
+     */
+    public byte[] toBytes() {
+      return name().getBytes();
+    }
+
+    /**
+     * Constructs the version from a byte array.
+     * 
+     * @param version
+     *          the byte form of the version
+     * @return the version enum
+     */
+    public static HoplogVersion fromBytes(byte[] version) {
+      return HoplogVersion.valueOf(new String(version));
+    }
+  }
+
+  /**
+   * Names the available metadata keys that will be stored in the sorted oplog.
+   */
+  public enum Meta {
+    /** identifies the soplog as a gemfire file, required */
+    GEMFIRE_MAGIC,
+
+    /** identifies the soplog version, required */
+    SORTED_OPLOG_VERSION,
+    
+    /** identifies the gemfire version the soplog was created with */
+    GEMFIRE_VERSION,
+
+    /** identifies the statistics data */
+    STATISTICS,
+
+    /** identifies the embedded comparator types */
+    COMPARATORS,
+    
+    /** identifies the pdx type data, optional */
+    PDX,
+
+    /**
+     * identifies the hyperLogLog byte[] which estimates the cardinality for
+     * only one hoplog
+     */
+    LOCAL_CARDINALITY_ESTIMATE,
+
+    /**
+     * represents the hyperLogLog byte[] after upgrading the constant from
+     * 0.1 to 0.03 (in gfxd 1.4)
+     */
+    LOCAL_CARDINALITY_ESTIMATE_V2
+    ;
+
+    /**
+     * Converts the metadata name to bytes.
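+     * For example, GEMFIRE_MAGIC.toBytes() returns the bytes of the string
+     * "gemfire.GEMFIRE_MAGIC".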
+     */
+    public byte[] toBytes() {
+      return ("gemfire." + name()).getBytes();
+    }
+
+    /**
+     * Converts the byte form of the name to an enum.
+     * 
+     * @param key
+     *          the key as bytes
+     * @return the enum form
+     */
+    public static Meta fromBytes(byte[] key) {
+      return Meta.valueOf(new String(key).substring("gemfire.".length()));
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogConfig.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogConfig.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogConfig.java
new file mode 100644
index 0000000..7b8415e
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogConfig.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+
+/**
+ * This interface defines all the hoplog configuration related constants. Keeping
+ * them in one location simplifies searching for a constant.
+ * 
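+ * Several of these constants name Java system properties that are read together
+ * with their matching defaults, for example
+ * Integer.getInteger(HoplogConfig.BUCKET_MAX_OPEN_HFILES_CONF,
+ * HoplogConfig.BUCKET_MAX_OPEN_HFILES_DEFAULT).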
+ */
+public interface HoplogConfig {
+  // Max number of open files per bucket. By default each region has 113
+  // buckets. A typical hdfs deployment has 5 DNs, each allowing 4096 open
+  // files. The intent is to use around 40% of these, hence the default
+  // value is 72
+  public static final String BUCKET_MAX_OPEN_HFILES_CONF = "hoplog.bucket.max.open.files";
+  public static final Integer BUCKET_MAX_OPEN_HFILES_DEFAULT = 72;
+  
+  public static final String HFILE_BLOCK_SIZE_CONF = "hoplog.hfile.block.size";
+  
+  // Region maintenance activity interval. Default is 2 minutes
+  public static final String JANITOR_INTERVAL_SECS = "hoplog.janitor.interval.secs";
+  public static final long JANITOR_INTERVAL_SECS_DEFAULT = 120L;
+  
+  // Maximum number of milliseconds to wait for suspension action to complete
+  public static final String SUSPEND_MAX_WAIT_MS = "hoplog.suspend.max.wait.ms";
+  public static final long SUSPEND_MAX_WAIT_MS_DEFAULT = 1000L;
+  
+  // Compaction request queue capacity configuration
+  public static final String COMPCATION_QUEUE_CAPACITY = "hoplog.compaction.queue.capacity";
+  public static final int COMPCATION_QUEUE_CAPACITY_DEFAULT = 500;
+  
+  // Compaction file ratio configuration
+  public static final String COMPACTION_FILE_RATIO = "hoplog.compaction.file.ratio";
+  public static final float COMPACTION_FILE_RATIO_DEFAULT = 1.3f;
+  
+  //Amount of time before deleting old temporary files
+  public static final String TMP_FILE_EXPIRATION = "hoplog.tmp.file.expiration.ms";
+  public static final long TMP_FILE_EXPIRATION_DEFAULT = 10 * 60 * 1000;
+  
+  // If this property is set to true, GF will let the DFS client cache FS objects
+  public static final String USE_FS_CACHE = "hoplog.use.fs.cache";
+
+  // If set, the hdfs store will be able to connect to the local file system
+  public static final String ALLOW_LOCAL_HDFS_PROP = "hoplog.ALLOW_LOCAL_HDFS";
+  
+  // The following constants are used to read kerberos authentication related
+  // configuration. Currently these configurations are provided via the client
+  // config file supplied when the hdfs store is created
+  public static final String KERBEROS_PRINCIPAL = "gemfirexd.kerberos.principal";
+  public static final String KERBEROS_KEYTAB_FILE= "gemfirexd.kerberos.keytab.file";
+  public static final String PERFORM_SECURE_HDFS_CHECK_PROP = "gemfire.PERFORM_SECURE_HDFS_CHECK";
+  
+  // name of the clean up interval file that is exposed to MapReduce jobs
+  public static final String CLEAN_UP_INTERVAL_FILE_NAME = "cleanUpInterval";
+  // Compression settings
+  public static final String COMPRESSION = "hoplog.compression.algorithm";
+  public static final String COMPRESSION_DEFAULT = "NONE";
+  
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogListener.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogListener.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogListener.java
new file mode 100644
index 0000000..7c3de03
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HoplogListener.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
+
+import java.io.IOException;
+
+/**
+ * Defines an observer of asynchronous operations on sorted oplog files associated with a bucket.
+ */
+public interface HoplogListener {
+  /**
+   * Notifies creation of new sorted oplog files. A new file will be created after compaction or
+   * other bucket maintenance activities
+   * 
+   * @throws IOException
+   */
+  void hoplogCreated(String regionFolder, int bucketId, Hoplog... oplogs) throws IOException;
+
+  /**
+   * Notifies file deletion. A file becomes redundant after compaction or other bucket maintenance
+   * activities
+   * @throws IOException 
+   */
+  void hoplogDeleted(String regionFolder, int bucketId, Hoplog... oplogs) throws IOException;
+  
+  /**
+   * Notifies completion of a hoplog compaction cycle. 
+   * @param region Region on which compaction was performed
+   * @param bucket bucket id
+   * @param isMajor true if major compaction was executed
+   */
+  void compactionCompleted(String region, int bucket, boolean isMajor);
+}



[15/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/cache/wan/GatewaySender.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/cache/wan/GatewaySender.java b/geode-core/src/main/java/com/gemstone/gemfire/cache/wan/GatewaySender.java
index c5b5d3a..74efd51 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/cache/wan/GatewaySender.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/cache/wan/GatewaySender.java
@@ -96,6 +96,8 @@ public interface GatewaySender {
 
   public static final int DEFAULT_DISPATCHER_THREADS = 5;
   
+  public static final int DEFAULT_HDFS_DISPATCHER_THREADS = 5;
+  
   public static final OrderPolicy DEFAULT_ORDER_POLICY = OrderPolicy.KEY;
   /**
    * The default maximum amount of memory (MB) to allow in the queue before

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/DSFIDFactory.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/DSFIDFactory.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/DSFIDFactory.java
index bd78f5a..77f24a3 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/DSFIDFactory.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/DSFIDFactory.java
@@ -52,6 +52,7 @@ import com.gemstone.gemfire.cache.client.internal.locator.LocatorStatusRequest;
 import com.gemstone.gemfire.cache.client.internal.locator.LocatorStatusResponse;
 import com.gemstone.gemfire.cache.client.internal.locator.QueueConnectionRequest;
 import com.gemstone.gemfire.cache.client.internal.locator.QueueConnectionResponse;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSGatewayEventImpl;
 import com.gemstone.gemfire.cache.query.QueryService;
 import com.gemstone.gemfire.cache.query.internal.CqEntry;
 import com.gemstone.gemfire.cache.query.internal.CumulativeNonDistinctResults;
@@ -1022,6 +1023,8 @@ public final class DSFIDFactory implements DataSerializableFixedID {
         RemoteFetchVersionMessage.FetchVersionReplyMessage.class);
     registerDSFID(RELEASE_CLEAR_LOCK_MESSAGE, ReleaseClearLockMessage.class);
     registerDSFID(PR_TOMBSTONE_MESSAGE, PRTombstoneMessage.class);
+    registerDSFID(HDFS_GATEWAY_EVENT_IMPL, HDFSGatewayEventImpl.class);
+    
     registerDSFID(REQUEST_RVV_MESSAGE, InitialImageOperation.RequestRVVMessage.class);
     registerDSFID(RVV_REPLY_MESSAGE, InitialImageOperation.RVVReplyMessage.class);
     registerDSFID(SNAPPY_COMPRESSED_CACHED_DESERIALIZABLE, SnappyCompressedCachedDeserializable.class);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/DataSerializableFixedID.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/DataSerializableFixedID.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/DataSerializableFixedID.java
index 7427f90..5d52346 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/DataSerializableFixedID.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/DataSerializableFixedID.java
@@ -103,6 +103,7 @@ public interface DataSerializableFixedID extends SerializationVersions {
   public static final short JOIN_RESPONSE = -143;
   public static final short JOIN_REQUEST = -142;
 
+  public static final short HDFS_GATEWAY_EVENT_IMPL = -141;
   public static final short SNAPPY_COMPRESSED_CACHED_DESERIALIZABLE = -140;
   
   public static final short GATEWAY_EVENT_IMPL = -136;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes.java
index f8740db..9b0446f 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes.java
@@ -33,6 +33,7 @@ import com.gemstone.gemfire.cache.CacheLoader;
 import com.gemstone.gemfire.cache.CacheLoaderException;
 import com.gemstone.gemfire.cache.CacheWriter;
 import com.gemstone.gemfire.cache.CacheWriterException;
+import com.gemstone.gemfire.cache.CustomEvictionAttributes;
 import com.gemstone.gemfire.cache.CustomExpiry;
 import com.gemstone.gemfire.cache.DataPolicy;
 import com.gemstone.gemfire.cache.Declarable;
@@ -49,7 +50,10 @@ import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.RegionEvent;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache.SubscriptionAttributes;
+import com.gemstone.gemfire.compression.CompressionException;
 import com.gemstone.gemfire.compression.Compressor;
+import com.gemstone.gemfire.internal.InternalDataSerializer;
+import com.gemstone.gemfire.internal.Version;
 import com.gemstone.gemfire.internal.cache.EvictionAttributesImpl;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
 
@@ -104,6 +108,8 @@ public class RemoteRegionAttributes implements RegionAttributes,
   private String[] gatewaySendersDescs;
   private boolean isGatewaySenderEnabled = false;
   private String[] asyncEventQueueDescs;
+  private String hdfsStoreName;
+  private boolean hdfsWriteOnly;
   private String compressorDesc;
   private boolean offHeap;
 
@@ -155,6 +161,8 @@ public class RemoteRegionAttributes implements RegionAttributes,
     this.isDiskSynchronous = attr.isDiskSynchronous();
     this.gatewaySendersDescs = getDescs(attr.getGatewaySenderIds().toArray());
     this.asyncEventQueueDescs = getDescs(attr.getAsyncEventQueueIds().toArray());
+    this.hdfsStoreName = attr.getHDFSStoreName();
+    this.hdfsWriteOnly = attr.getHDFSWriteOnly();
     this.compressorDesc = getDesc(attr.getCompressor());
     this.offHeap = attr.getOffHeap();
   }
@@ -411,6 +419,7 @@ public class RemoteRegionAttributes implements RegionAttributes,
   
     DataSerializer.writeString(this.compressorDesc, out);
     out.writeBoolean(this.offHeap);
+    DataSerializer.writeString(this.hdfsStoreName, out);
   }
   
   public void fromData(DataInput in) throws IOException, ClassNotFoundException {
@@ -459,6 +468,7 @@ public class RemoteRegionAttributes implements RegionAttributes,
   
     this.compressorDesc = DataSerializer.readString(in);
     this.offHeap = in.readBoolean();
+    this.hdfsStoreName = DataSerializer.readString(in);
   }
   
   private String[] getDescs(Object[] l) {
@@ -626,6 +636,15 @@ public class RemoteRegionAttributes implements RegionAttributes,
     return this.evictionAttributes;
   }
 
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public CustomEvictionAttributes getCustomEvictionAttributes() {
+    // TODO: HDFS: no support for custom eviction attributes from remote yet
+    return null;
+  }
+
   public boolean getCloningEnabled() {
     // TODO Auto-generated method stub
     return this.cloningEnable;
@@ -634,6 +653,12 @@ public class RemoteRegionAttributes implements RegionAttributes,
   public String getDiskStoreName() {
     return this.diskStoreName;
   }
+  public String getHDFSStoreName() {
+    return this.hdfsStoreName;
+  }
+  public boolean getHDFSWriteOnly() {
+    return this.hdfsWriteOnly;
+  }
   public boolean isDiskSynchronous() {
     return this.isDiskSynchronous;
   }
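
The RemoteRegionAttributes hunks follow the usual rule for evolving a DataSerializable wire format: the new hdfsStoreName field is appended at the very end of toData and read back last in fromData, leaving the existing field layout untouched. A stripped-down sketch of that symmetry, with a hypothetical class and only three fields, looks like this:

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    import com.gemstone.gemfire.DataSerializable;
    import com.gemstone.gemfire.DataSerializer;

    // Illustrative only: shows the append-at-the-end discipline used above.
    public class ExampleAttributes implements DataSerializable {

      private String compressorDesc; // pre-existing field
      private boolean offHeap;       // pre-existing field
      private String hdfsStoreName;  // newly appended field

      public void toData(DataOutput out) throws IOException {
        DataSerializer.writeString(this.compressorDesc, out);
        out.writeBoolean(this.offHeap);
        // new data goes last, so the older part of the stream is unchanged
        DataSerializer.writeString(this.hdfsStoreName, out);
      }

      public void fromData(DataInput in) throws IOException, ClassNotFoundException {
        this.compressorDesc = DataSerializer.readString(in);
        this.offHeap = in.readBoolean();
        this.hdfsStoreName = DataSerializer.readString(in);
      }
    }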

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractBucketRegionQueue.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractBucketRegionQueue.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractBucketRegionQueue.java
index 92eaa01..1f8da88 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractBucketRegionQueue.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractBucketRegionQueue.java
@@ -34,6 +34,8 @@ import com.gemstone.gemfire.cache.Operation;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.RegionDestroyedException;
 import com.gemstone.gemfire.cache.TimeoutException;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSBucketRegionQueue;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSGatewayEventImpl;
 import com.gemstone.gemfire.internal.cache.lru.LRUStatistics;
 import com.gemstone.gemfire.internal.cache.versions.RegionVersionVector;
 import com.gemstone.gemfire.internal.cache.versions.VersionSource;
@@ -457,8 +459,17 @@ public abstract class AbstractBucketRegionQueue extends BucketRegion {
     }
     waitIfQueueFull();
     
+    int sizeOfHdfsEvent = -1;
     try {
-
+      if (this instanceof HDFSBucketRegionQueue) {
+        // need to fetch the size before the event is inserted in the queue.
+        // fix for #50016
+        if (this.getBucketAdvisor().isPrimary()) {
+          HDFSGatewayEventImpl hdfsEvent = (HDFSGatewayEventImpl)event.getValue();
+          sizeOfHdfsEvent = hdfsEvent.getSizeOnHDFSInBytes(!((HDFSBucketRegionQueue)this).isBucketSorted);
+        }
+      }
+      
       didPut = virtualPut(event, false, false, null, false, startPut, true);
       
       checkReadiness();
@@ -481,7 +492,7 @@ public abstract class AbstractBucketRegionQueue extends BucketRegion {
       destroyKey(key);
       didPut = false;
     } else {
-      addToEventQueue(key, didPut, event);
+      addToEventQueue(key, didPut, event, sizeOfHdfsEvent);
     }
     return didPut;
   }
@@ -511,7 +522,8 @@ public abstract class AbstractBucketRegionQueue extends BucketRegion {
   }
   
   protected abstract void clearQueues();
-  protected abstract void addToEventQueue(Object key, boolean didPut, EntryEventImpl event);
+  protected abstract void addToEventQueue(Object key, boolean didPut, EntryEventImpl event, 
+      int sizeOfHdfsEvent);
   
   @Override
   public void afterAcquiringPrimaryState() {
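
The queue change above computes the HDFS event's estimated size before virtualPut hands the value to the region, then threads that number through to addToEventQueue. Stripped of the Geode types, the pattern is just "measure while you still own the value, account after the put succeeds"; the SizedQueue class below is a self-contained illustration, not Geode code.

    import java.util.concurrent.ConcurrentLinkedQueue;
    import java.util.concurrent.atomic.AtomicLong;

    // Minimal illustration of measure-before-enqueue.
    public class SizedQueue {
      private final ConcurrentLinkedQueue<byte[]> queue = new ConcurrentLinkedQueue<>();
      private final AtomicLong queuedBytes = new AtomicLong();

      public boolean offer(byte[] value) {
        int size = value.length;             // measure first ...
        boolean didPut = queue.offer(value); // ... then hand the value over
        if (didPut) {
          queuedBytes.addAndGet(size);       // accounting uses the captured size
        }
        return didPut;
      }

      public long queuedBytes() {
        return queuedBytes.get();
      }
    }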

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegion.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegion.java
index d37f025..10644cb 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegion.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegion.java
@@ -32,6 +32,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.CopyOnWriteArraySet;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.logging.log4j.Logger;
@@ -45,6 +46,7 @@ import com.gemstone.gemfire.cache.CacheLoaderException;
 import com.gemstone.gemfire.cache.CacheStatistics;
 import com.gemstone.gemfire.cache.CacheWriter;
 import com.gemstone.gemfire.cache.CacheWriterException;
+import com.gemstone.gemfire.cache.CustomEvictionAttributes;
 import com.gemstone.gemfire.cache.CustomExpiry;
 import com.gemstone.gemfire.cache.DataPolicy;
 import com.gemstone.gemfire.cache.DiskWriteAttributes;
@@ -52,6 +54,7 @@ import com.gemstone.gemfire.cache.EntryExistsException;
 import com.gemstone.gemfire.cache.EntryNotFoundException;
 import com.gemstone.gemfire.cache.EvictionAttributes;
 import com.gemstone.gemfire.cache.EvictionAttributesMutator;
+import com.gemstone.gemfire.cache.EvictionCriteria;
 import com.gemstone.gemfire.cache.ExpirationAction;
 import com.gemstone.gemfire.cache.ExpirationAttributes;
 import com.gemstone.gemfire.cache.MembershipAttributes;
@@ -97,6 +100,7 @@ import com.gemstone.gemfire.internal.logging.LogService;
 import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
 import com.gemstone.gemfire.internal.util.ArrayUtils;
 import com.gemstone.gemfire.pdx.internal.PeerTypeRegistration;
+import com.google.common.util.concurrent.Service.State;
 
 /**
  * Takes care of RegionAttributes, AttributesMutator, and some no-brainer method
@@ -232,6 +236,8 @@ public abstract class AbstractRegion implements Region, RegionAttributes,
 
   protected EvictionAttributesImpl evictionAttributes = new EvictionAttributesImpl();
 
+  protected CustomEvictionAttributes customEvictionAttributes;
+
   /** The membership attributes defining required roles functionality */
   protected MembershipAttributes membershipAttributes;
 
@@ -254,6 +260,10 @@ public abstract class AbstractRegion implements Region, RegionAttributes,
   
   protected String poolName;
   
+  protected String hdfsStoreName;
+  
+  protected boolean hdfsWriteOnly;
+  
   protected Compressor compressor;
   
   /**
@@ -888,6 +898,16 @@ public abstract class AbstractRegion implements Region, RegionAttributes,
     return this.subscriptionAttributes;
   }
   
+  @Override
+  public final String getHDFSStoreName() {
+    return this.hdfsStoreName;
+  }
+  
+  @Override
+  public final boolean getHDFSWriteOnly() {
+    return this.hdfsWriteOnly;
+  }
+  
   /**
    * Get IndexManger for region
    */
@@ -1708,6 +1728,7 @@ public abstract class AbstractRegion implements Region, RegionAttributes,
       this.setEvictionController(this.evictionAttributes
           .createEvictionController(this, attrs.getOffHeap()));
     }
+    this.customEvictionAttributes = attrs.getCustomEvictionAttributes();
     storeCacheListenersField(attrs.getCacheListeners());
     assignCacheLoader(attrs.getCacheLoader());
     assignCacheWriter(attrs.getCacheWriter());
@@ -1765,6 +1786,8 @@ public abstract class AbstractRegion implements Region, RegionAttributes,
             + "when multiuser-authentication is true.");
       }
     }
+    this.hdfsStoreName = attrs.getHDFSStoreName();
+    this.hdfsWriteOnly = attrs.getHDFSWriteOnly();
 
     this.diskStoreName = attrs.getDiskStoreName();
     this.isDiskSynchronous = attrs.isDiskSynchronous();
@@ -1830,12 +1853,52 @@ public abstract class AbstractRegion implements Region, RegionAttributes,
     return this.evictionAttributes;
   }
 
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public CustomEvictionAttributes getCustomEvictionAttributes() {
+    return this.customEvictionAttributes;
+  }
+
   public EvictionAttributesMutator getEvictionAttributesMutator()
   {
     return this.evictionAttributes;
   }
 
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public CustomEvictionAttributes setCustomEvictionAttributes(long newStart,
+      long newInterval) {
+    checkReadiness();
+
+    if (this.customEvictionAttributes == null) {
+      throw new IllegalArgumentException(
+          LocalizedStrings.AbstractRegion_NO_CUSTOM_EVICTION_SET
+              .toLocalizedString(getFullPath()));
+    }
+
+    if (newStart == 0) {
+      newStart = this.customEvictionAttributes.getEvictorStartTime();
+    }
+    this.customEvictionAttributes = new CustomEvictionAttributesImpl(
+        this.customEvictionAttributes.getCriteria(), newStart, newInterval,
+        newStart == 0 && newInterval == 0);
+
+//    if (this.evService == null) {
+//      initilializeCustomEvictor();
+//    } else {// we are changing the earlier one which is already started.
+//      EvictorService service = getEvictorTask();
+//      service.changeEvictionInterval(newInterval);
+//      if (newStart != 0)
+//        service.changeStartTime(newStart);
+//    }
 
+    return this.customEvictionAttributes;
+  }
+  
   public void setEvictionController(LRUAlgorithm evictionController)
   {
     this.evictionController = evictionController;
@@ -1974,6 +2037,7 @@ public abstract class AbstractRegion implements Region, RegionAttributes,
   
   /**
   * @since 8.1
+  * property used to find region operations that reach out to HDFS multiple times
   */
   @Override
   public ExtensionPoint<Region<?, ?>> getExtensionPoint() {
@@ -1983,4 +2047,87 @@ public abstract class AbstractRegion implements Region, RegionAttributes,
   public boolean getOffHeap() {
     return this.offHeap;
   }
+  /**
+   * property used to find region operations that reach out to HDFS multiple times
+   */
+  private static final boolean DEBUG_HDFS_CALLS = Boolean.getBoolean("DebugHDFSCalls");
+
+  /**
+   * throws exception if region operation goes out to HDFS multiple times
+   */
+  private static final boolean THROW_ON_MULTIPLE_HDFS_CALLS = Boolean.getBoolean("throwOnMultipleHDFSCalls");
+
+  private ThreadLocal<CallLog> logHDFSCalls = DEBUG_HDFS_CALLS ? new ThreadLocal<CallLog>() : null;
+
+  public void hdfsCalled(Object key) {
+    if (!DEBUG_HDFS_CALLS) {
+      return;
+    }
+    logHDFSCalls.get().addStack(new Throwable());
+    logHDFSCalls.get().setKey(key);
+  }
+  public final void operationStart() {
+    if (!DEBUG_HDFS_CALLS) {
+      return;
+    }
+    if (logHDFSCalls.get() == null) {
+      logHDFSCalls.set(new CallLog());
+      //InternalDistributedSystem.getLoggerI18n().warning(LocalizedStrings.DEBUG, "SWAP:operationStart", new Throwable());
+    } else {
+      logHDFSCalls.get().incNestedCall();
+      //InternalDistributedSystem.getLoggerI18n().warning(LocalizedStrings.DEBUG, "SWAP:incNestedCall:", new Throwable());
+    }
+  }
+  public final void operationCompleted() {
+    if (!DEBUG_HDFS_CALLS) {
+      return;
+    }
+    //InternalDistributedSystem.getLoggerI18n().warning(LocalizedStrings.DEBUG, "SWAP:operationCompleted", new Throwable());
+    if (logHDFSCalls.get() != null && logHDFSCalls.get().decNestedCall() < 0) {
+      logHDFSCalls.get().assertCalls();
+      logHDFSCalls.set(null);
+    }
+  }
+
+  public static class CallLog {
+    private List<Throwable> stackTraces = new ArrayList<Throwable>();
+    private Object key;
+    private int nestedCall = 0;
+    public void incNestedCall() {
+      nestedCall++;
+    }
+    public int decNestedCall() {
+      return --nestedCall;
+    }
+    public void addStack(Throwable stack) {
+      this.stackTraces.add(stack);
+    }
+    public void setKey(Object key) {
+      this.key = key;
+    }
+    public void assertCalls() {
+      if (stackTraces.size() > 1) {
+        Throwable firstTrace = new Throwable();
+        Throwable lastTrace = firstTrace;
+        for (Throwable t : this.stackTraces) {
+          lastTrace.initCause(t);
+          lastTrace = t;
+        }
+        if (THROW_ON_MULTIPLE_HDFS_CALLS) {
+          throw new RuntimeException("SWAP:For key:"+key+" HDFS get called more than once: ", firstTrace);
+        } else {
+          InternalDistributedSystem.getLoggerI18n().warning(LocalizedStrings.DEBUG, "SWAP:For key:"+key+" HDFS get called more than once: ", firstTrace);
+        }
+      }
+    }
+  }
+
+  public EvictionCriteria getEvictionCriteria() {
+    EvictionCriteria criteria = null;
+    if (this.customEvictionAttributes != null
+        && !this.customEvictionAttributes.isEvictIncoming()) {
+      criteria = this.customEvictionAttributes.getCriteria();
+    }
+    return criteria;
+  }
 }
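
The DEBUG_HDFS_CALLS machinery added to AbstractRegion is a thread-local accounting trick: operationStart and operationCompleted bracket a region operation with a nesting counter, hdfsCalled records a stack trace each time the operation reaches out to HDFS, and more than one recorded stack per top-level operation is reported (or thrown). A condensed, standalone sketch of the same idea, with generic names and the debug switch left out, is:

    import java.util.ArrayList;
    import java.util.List;

    // Standalone sketch of the thread-local call-counting pattern used by CallLog.
    public class CallTracker {
      private static final ThreadLocal<CallTracker> CURRENT = new ThreadLocal<>();

      private final List<Throwable> stacks = new ArrayList<>();
      private int nesting = 0;

      public static void operationStart() {
        CallTracker t = CURRENT.get();
        if (t == null) {
          CURRENT.set(new CallTracker());
        } else {
          t.nesting++;                   // nested operation on the same thread
        }
      }

      public static void expensiveCall() {
        CallTracker t = CURRENT.get();
        if (t != null) {
          t.stacks.add(new Throwable()); // remember where this call came from
        }
      }

      public static void operationCompleted() {
        CallTracker t = CURRENT.get();
        if (t != null && t.nesting-- == 0) {   // outermost operation finished
          if (t.stacks.size() > 1) {
            System.err.println("expensive call made " + t.stacks.size() + " times");
          }
          CURRENT.remove();
        }
      }
    }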

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionEntry.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionEntry.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionEntry.java
index 46a851d..b936e3f 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionEntry.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionEntry.java
@@ -870,7 +870,15 @@ public abstract class AbstractRegionEntry implements RegionEntry,
         removeEntry = true;
       }
 
-      if (removeEntry) {
+      // See #47887: we do not insert a tombstone for evicted HDFS
+      // entries since the value is still present in HDFS.
+      // Check whether we have to evict or just destroy.
+      boolean forceRemoveEntry = 
+          (event.isEviction() || event.isExpiration()) 
+          && event.getRegion().isUsedForPartitionedRegionBucket()
+          && event.getRegion().getPartitionedRegion().isHDFSRegion();
+
+      if (removeEntry || forceRemoveEntry) {
         boolean isThisTombstone = isTombstone();
         if(inTokenMode && !event.getOperation().isEviction()) {
           setValue(region, Token.DESTROYED);  
@@ -1390,7 +1398,27 @@ public abstract class AbstractRegionEntry implements RegionEntry,
   /**
    * {@inheritDoc}
    */
+  @Override
+  public final boolean isMarkedForEviction() {
+    return areAnyBitsSet(MARKED_FOR_EVICTION);
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public final void setMarkedForEviction() {
+    setBits(MARKED_FOR_EVICTION);
+  }
 
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public final void clearMarkedForEviction() {
+    clearBits(~MARKED_FOR_EVICTION);
+  }
+  
   @Override
   public final synchronized void decRefCount(NewLRUClockHand lruList, LocalRegion lr) {
     if (TXManagerImpl.decRefCount(this)) {
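
The new MARKED_FOR_EVICTION accessors piggyback on the entry's packed state bits (setBits, clearBits, areAnyBitsSet), which keeps the extra flag free of per-entry object overhead. A self-contained sketch of that bit-flag idiom follows; the constants and the FlaggedEntry class are illustrative and do not reflect the real RegionEntry state layout.

    import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;

    // Sketch of packing boolean entry state into one int, in the spirit of the
    // MARKED_FOR_EVICTION flag above. Bit values are made up.
    public class FlaggedEntry {
      private static final int TOMBSTONE           = 0x1;
      private static final int MARKED_FOR_EVICTION = 0x2;

      private volatile int state;
      private static final AtomicIntegerFieldUpdater<FlaggedEntry> STATE =
          AtomicIntegerFieldUpdater.newUpdater(FlaggedEntry.class, "state");

      private void setBits(int bits) {
        int cur;
        do {
          cur = state;
        } while (!STATE.compareAndSet(this, cur, cur | bits));
      }

      // takes the mask of bits to KEEP, which is why the caller passes ~flag
      private void clearBits(int keepMask) {
        int cur;
        do {
          cur = state;
        } while (!STATE.compareAndSet(this, cur, cur & keepMask));
      }

      public boolean isMarkedForEviction() { return (state & MARKED_FOR_EVICTION) != 0; }
      public void setMarkedForEviction()   { setBits(MARKED_FOR_EVICTION); }
      public void clearMarkedForEviction() { clearBits(~MARKED_FOR_EVICTION); }
    }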

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java
index 75a1e32..3286373 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java
@@ -18,6 +18,7 @@
 package com.gemstone.gemfire.internal.cache;
 
 
+import java.io.IOException;
 import java.lang.reflect.Method;
 import java.util.Collection;
 import java.util.HashSet;
@@ -35,6 +36,7 @@ import com.gemstone.gemfire.InvalidDeltaException;
 import com.gemstone.gemfire.cache.CacheRuntimeException;
 import com.gemstone.gemfire.cache.CacheWriter;
 import com.gemstone.gemfire.cache.CacheWriterException;
+import com.gemstone.gemfire.cache.CustomEvictionAttributes;
 import com.gemstone.gemfire.cache.DiskAccessException;
 import com.gemstone.gemfire.cache.EntryExistsException;
 import com.gemstone.gemfire.cache.EntryNotFoundException;
@@ -81,6 +83,9 @@ import com.gemstone.gemfire.internal.offheap.annotations.Retained;
 import com.gemstone.gemfire.internal.offheap.annotations.Unretained;
 import com.gemstone.gemfire.internal.sequencelog.EntryLogger;
 import com.gemstone.gemfire.internal.util.concurrent.CustomEntryConcurrentHashMap;
+import com.gemstone.gemfire.pdx.PdxInstance;
+import com.gemstone.gemfire.pdx.PdxSerializationException;
+import com.gemstone.gemfire.pdx.internal.ConvertableToBytes;
 
 /**
  * Abstract implementation of {@link RegionMap}that has all the common
@@ -298,6 +303,10 @@ public abstract class AbstractRegionMap implements RegionMap {
 
   public RegionEntry getEntry(Object key) {
     RegionEntry re = (RegionEntry)_getMap().get(key);
+    if (re != null && re.isMarkedForEviction()) {
+      // entry has been faulted in from HDFS
+      return null;
+    }
     return re;
   }
 
@@ -328,12 +337,16 @@ public abstract class AbstractRegionMap implements RegionMap {
   @Override
   public final RegionEntry getOperationalEntryInVM(Object key) {
     RegionEntry re = (RegionEntry)_getMap().get(key);
+    if (re != null && re.isMarkedForEviction()) {
+      // entry has been faulted in from HDFS
+      return null;
+    }
     return re;
   }
  
 
   public final void removeEntry(Object key, RegionEntry re, boolean updateStat) {
-    if (re.isTombstone() && _getMap().get(key) == re){
+    if (re.isTombstone() && _getMap().get(key) == re && !re.isMarkedForEviction()){
       logger.fatal(LocalizedMessage.create(LocalizedStrings.AbstractRegionMap_ATTEMPT_TO_REMOVE_TOMBSTONE), new Exception("stack trace"));
       return; // can't remove tombstones except from the tombstone sweeper
     }
@@ -349,7 +362,7 @@ public abstract class AbstractRegionMap implements RegionMap {
       EntryEventImpl event, final LocalRegion owner,
       final IndexUpdater indexUpdater) {
     boolean success = false;
-    if (re.isTombstone()&& _getMap().get(key) == re) {
+    if (re.isTombstone()&& _getMap().get(key) == re && !re.isMarkedForEviction()) {
       logger.fatal(LocalizedMessage.create(LocalizedStrings.AbstractRegionMap_ATTEMPT_TO_REMOVE_TOMBSTONE), new Exception("stack trace"));
       return; // can't remove tombstones except from the tombstone sweeper
     }
@@ -358,6 +371,18 @@ public abstract class AbstractRegionMap implements RegionMap {
         indexUpdater.onEvent(owner, event, re);
       }
 
+      //This is messy, but custom eviction calls removeEntry
+      //rather than re.destroy, presumably to avoid firing callbacks, etc.
+      //However, removePhase1 still needs to be called on the entry
+      //in order to remove it from disk.
+      if(event.isCustomEviction() && !re.isRemoved()) {
+        try {
+          re.removePhase1(owner, false);
+        } catch (RegionClearedException e) {
+          //that's ok, we were just trying to do the evict-incoming eviction
+        }
+      }
+      
       if (_getMap().remove(key, re)) {
         re.removePhase2();
         success = true;
@@ -1144,7 +1169,7 @@ public abstract class AbstractRegionMap implements RegionMap {
                         // transaction conflict (caused by eviction) when the entry
                         // is being added to transaction state.
                         if (isEviction) {
-                          if (!confirmEvictionDestroy(oldRe)) {
+                          if (!confirmEvictionDestroy(oldRe) || (owner.getEvictionCriteria() != null && !owner.getEvictionCriteria().doEvict(event))) {
                             opCompleted = false;
                             return opCompleted;
                           }
@@ -1399,7 +1424,7 @@ public abstract class AbstractRegionMap implements RegionMap {
                   // See comment above about eviction checks
                   if (isEviction) {
                     assert expectedOldValue == null;
-                    if (!confirmEvictionDestroy(re)) {
+                    if (!confirmEvictionDestroy(re) || (owner.getEvictionCriteria() != null && !owner.getEvictionCriteria().doEvict(event))) {
                       opCompleted = false;
                       return opCompleted;
                     }
@@ -1481,6 +1506,12 @@ public abstract class AbstractRegionMap implements RegionMap {
                   }
                 } // !isRemoved
                 else { // already removed
+                  if (owner.isHDFSReadWriteRegion() && re.isRemovedPhase2()) {
+                    // For HDFS region there may be a race with eviction
+                    // so retry the operation. fixes bug 49150
+                    retry = true;
+                    continue;
+                  }
                   if (re.isTombstone() && event.getVersionTag() != null) {
                     // if we're dealing with a tombstone and this is a remote event
                     // (e.g., from cache client update thread) we need to update
@@ -2654,7 +2685,11 @@ public abstract class AbstractRegionMap implements RegionMap {
       boolean onlyExisting, boolean returnTombstone) {
     Object key = event.getKey();
     RegionEntry retVal = null;
-    retVal = getEntry(event);
+    if (event.isFetchFromHDFS()) {
+      retVal = getEntry(event);
+    } else {
+      retVal = getEntryInVM(key);
+    }
     if (onlyExisting) {
       if (!returnTombstone && (retVal != null && retVal.isTombstone())) {
         return null;
@@ -2953,6 +2988,47 @@ public abstract class AbstractRegionMap implements RegionMap {
                   else if (re != null && owner.isUsedForPartitionedRegionBucket()) {
                   BucketRegion br = (BucketRegion)owner;
                   CachePerfStats stats = br.getPartitionedRegion().getCachePerfStats();
+                  long startTime= stats.startCustomEviction();
+                  CustomEvictionAttributes csAttr = br.getCustomEvictionAttributes();
+                  // No need to update indexes if entry was faulted in but operation did not succeed. 
+                  if (csAttr != null && (csAttr.isEvictIncoming() || re.isMarkedForEviction())) {
+                    
+                    if (csAttr.getCriteria().doEvict(event)) {
+                      stats.incEvictionsInProgress();
+                      // set the flag on event saying the entry should be evicted 
+                      // and not indexed
+                      @Released EntryEventImpl destroyEvent = EntryEventImpl.create (owner, Operation.DESTROY, event.getKey(),
+                          null/* newValue */, null, false, owner.getMyId());
+                      try {
+
+                      destroyEvent.setOldValueFromRegion();
+                      destroyEvent.setCustomEviction(true);
+                      destroyEvent.setPossibleDuplicate(event.isPossibleDuplicate());
+                      if(logger.isDebugEnabled()) {
+                        logger.debug("Evicting the entry " + destroyEvent);
+                      }
+                      if(result != null) {
+                        removeEntry(event.getKey(),re, true, destroyEvent,owner, indexUpdater);
+                      }
+                      else{
+                        removeEntry(event.getKey(),re, true, destroyEvent,owner, null);
+                      }
+                      //mark the region entry for this event as evicted 
+                      event.setEvicted();
+                      stats.incEvictions();
+                      if(logger.isDebugEnabled()) {
+                        logger.debug("Evicted the entry " + destroyEvent);
+                      }
+                      //removeEntry(event.getKey(), re);
+                      } finally {
+                        destroyEvent.release();
+                        stats.decEvictionsInProgress();
+                      }
+                    } else {
+                      re.clearMarkedForEviction();
+                    }
+                  }
+                  stats.endCustomEviction(startTime);
                 }
               } // try
             }
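
The AbstractRegionMap changes gate eviction destroys on the region's EvictionCriteria (doEvict must agree before the destroy proceeds) and, for evict-incoming regions, run the same predicate against the event that was just applied. Reduced to its control flow, with Criteria and the surrounding class standing in for the Geode interfaces, the decision looks like this:

    // Illustrative predicate-gated eviction, mirroring the doEvict(event) checks above.
    interface Criteria<E> {
      boolean doEvict(E event);
    }

    class EvictionGate<E> {
      private final Criteria<E> criteria; // null means no custom criteria configured

      EvictionGate(Criteria<E> criteria) {
        this.criteria = criteria;
      }

      /** Returns true only if the entry may actually be evicted for this event. */
      boolean confirmEviction(E event, boolean entryStillInUse) {
        if (entryStillInUse) {
          return false;                 // analogue of confirmEvictionDestroy()
        }
        // without criteria every candidate is evictable; with criteria the user
        // expression gets the final say, as in mapDestroy above
        return criteria == null || criteria.doEvict(event);
      }
    }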

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketAdvisor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketAdvisor.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketAdvisor.java
index c241c6b..3038059 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketAdvisor.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketAdvisor.java
@@ -1316,6 +1316,7 @@ public class BucketAdvisor extends CacheDistributionAdvisor  {
             ((BucketRegion)br).processPendingSecondaryExpires();
           }
           if (br instanceof BucketRegionQueue) { // Shouldn't it be AbstractBucketRegionQueue
+            // i.e. this stat is not getting incremented for HDFSBucketRegionQueue!!
             BucketRegionQueue brq = (BucketRegionQueue)br;
             brq.incQueueSize(brq.size());
           }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketRegion.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketRegion.java
index f5ae0fb..6e4f426 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketRegion.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketRegion.java
@@ -26,6 +26,7 @@ import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.Lock;
 
 import org.apache.logging.log4j.Logger;
@@ -34,6 +35,7 @@ import com.gemstone.gemfire.CancelException;
 import com.gemstone.gemfire.CopyHelper;
 import com.gemstone.gemfire.DataSerializer;
 import com.gemstone.gemfire.DeltaSerializationException;
+import com.gemstone.gemfire.GemFireIOException;
 import com.gemstone.gemfire.InternalGemFireError;
 import com.gemstone.gemfire.InvalidDeltaException;
 import com.gemstone.gemfire.SystemFailure;
@@ -41,16 +43,20 @@ import com.gemstone.gemfire.cache.CacheClosedException;
 import com.gemstone.gemfire.cache.CacheException;
 import com.gemstone.gemfire.cache.CacheWriter;
 import com.gemstone.gemfire.cache.CacheWriterException;
+import com.gemstone.gemfire.cache.CustomEvictionAttributes;
 import com.gemstone.gemfire.cache.DiskAccessException;
 import com.gemstone.gemfire.cache.EntryNotFoundException;
 import com.gemstone.gemfire.cache.EvictionAction;
 import com.gemstone.gemfire.cache.EvictionAlgorithm;
 import com.gemstone.gemfire.cache.EvictionAttributes;
+import com.gemstone.gemfire.cache.EvictionCriteria;
 import com.gemstone.gemfire.cache.ExpirationAction;
 import com.gemstone.gemfire.cache.Operation;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.RegionDestroyedException;
 import com.gemstone.gemfire.cache.TimeoutException;
+import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer;
 import com.gemstone.gemfire.cache.partition.PartitionListener;
 import com.gemstone.gemfire.cache.query.internal.IndexUpdater;
 import com.gemstone.gemfire.distributed.DistributedMember;
@@ -84,11 +90,13 @@ import com.gemstone.gemfire.internal.cache.versions.VersionSource;
 import com.gemstone.gemfire.internal.cache.versions.VersionStamp;
 import com.gemstone.gemfire.internal.cache.versions.VersionTag;
 import com.gemstone.gemfire.internal.cache.wan.GatewaySenderEventImpl;
+import com.gemstone.gemfire.internal.cache.wan.parallel.ConcurrentParallelGatewaySenderQueue;
 import com.gemstone.gemfire.internal.concurrent.Atomics;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
 import com.gemstone.gemfire.internal.logging.LogService;
 import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
 import com.gemstone.gemfire.internal.logging.log4j.LogMarker;
+import com.gemstone.gemfire.internal.offheap.StoredObject;
 import com.gemstone.gemfire.internal.offheap.annotations.Released;
 import com.gemstone.gemfire.internal.offheap.annotations.Retained;
 import com.gemstone.gemfire.internal.offheap.annotations.Unretained;
@@ -225,6 +233,8 @@ implements Bucket
     return eventSeqNum;
   }
 
+  protected final AtomicReference<HoplogOrganizer> hoplog = new AtomicReference<HoplogOrganizer>();
+  
   public BucketRegion(String regionName, RegionAttributes attrs,
       LocalRegion parentRegion, GemFireCacheImpl cache,
       InternalRegionArguments internalRegionArgs) {
@@ -882,6 +892,12 @@ implements Bucket
 
     beginLocalWrite(event);
     try {
+      // increment the tailKey so that invalidate operations are written to HDFS
+      if (this.partitionedRegion.hdfsStoreName != null) {
+        /* MergeGemXDHDFSToGFE Disabled this while porting. Is this required? */
+        //assert this.partitionedRegion.isLocalParallelWanEnabled();
+        handleWANEvent(event);
+      }
       // which performs the local op.
       // The ARM then calls basicInvalidatePart2 with the entry synchronized.
       if ( !hasSeenEvent(event) ) {
@@ -1136,6 +1152,20 @@ implements Bucket
       if (this.partitionedRegion.isParallelWanEnabled()) {
         handleWANEvent(event);
       }
+      // In GemFire EVICT_DESTROY is not distributed, so in order to remove the entry
+      // from memory, allow the destroy to proceed. fixes #49784
+      if (event.isLoadedFromHDFS() && !getBucketAdvisor().isPrimary()) {
+        if (logger.isDebugEnabled()) {
+          logger.debug("Put the destroy event in HDFS queue on secondary "
+              + "and return as event is HDFS loaded " + event);
+        }
+        notifyGatewaySender(EnumListenerEvent.AFTER_DESTROY, event);
+        return;
+      } else {
+        if (logger.isDebugEnabled()) {
+          logger.debug("Going ahead with the destroy on GemFire system");
+        }
+      }
       // This call should invoke AbstractRegionMap (aka ARM) destroy method
       // which calls the CacheWriter, then performs the local op.
       // The ARM then calls basicDestroyPart2 with the entry synchronized.
@@ -1334,7 +1364,39 @@ implements Bucket
   }
 
   @Override
+  public boolean isHDFSRegion() {
+    return this.partitionedRegion.isHDFSRegion();
+  }
+
+  @Override
+  public boolean isHDFSReadWriteRegion() {
+    return this.partitionedRegion.isHDFSReadWriteRegion();
+  }
+
+  @Override
+  protected boolean isHDFSWriteOnly() {
+    return this.partitionedRegion.isHDFSWriteOnly();
+  }
+
+  @Override
   public int sizeEstimate() {
+    if (isHDFSReadWriteRegion()) {
+      try {
+        checkForPrimary();
+        ConcurrentParallelGatewaySenderQueue q = getHDFSQueue();
+        if (q == null) return 0;
+        int hdfsBucketRegionSize = q.getBucketRegionQueue(
+            partitionedRegion, getId()).size();
+        int hoplogEstimate = (int) getHoplogOrganizer().sizeEstimate();
+        if (logger.isDebugEnabled()) {
+          logger.debug("for bucket " + getName() + " estimateSize returning "
+                  + (hdfsBucketRegionSize + hoplogEstimate));
+        }
+        return hdfsBucketRegionSize + hoplogEstimate;
+      } catch (ForceReattemptException e) {
+        throw new PrimaryBucketException(e.getLocalizedMessage(), e);
+      }
+    }
     return size();
   }
 
@@ -1391,14 +1453,14 @@ implements Bucket
    *                 if there is a serialization problem
    * see LocalRegion#getDeserializedValue(RegionEntry, KeyInfo, boolean, boolean,  boolean, EntryEventImpl, boolean, boolean, boolean)
    */
-  private RawValue getSerialized(Object key,
-                                 boolean updateStats,
-                                 boolean doNotLockEntry,
-                                 EntryEventImpl clientEvent,
-                                 boolean returnTombstones)
+  private RawValue getSerialized(Object key, boolean updateStats, boolean doNotLockEntry, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) 
       throws EntryNotFoundException, IOException {
     RegionEntry re = null;
-    re = this.entries.getEntry(key);
+    if (allowReadFromHDFS) {
+      re = this.entries.getEntry(key);
+    } else {
+      re = this.entries.getOperationalEntryInVM(key);
+    }
     if (re == null) {
       return NULLVALUE;
     }
@@ -1442,18 +1504,13 @@ implements Bucket
    * 
    * @param keyInfo
    * @param generateCallbacks
-   * @param clientEvent holder for the entry's version information
+   * @param clientEvent holder for the entry's version information 
    * @param returnTombstones TODO
    * @return serialized (byte) form
    * @throws IOException if the result is not serializable
    * @see LocalRegion#get(Object, Object, boolean, EntryEventImpl)
    */
-  public RawValue getSerialized(KeyInfo keyInfo,
-                                boolean generateCallbacks,
-                                boolean doNotLockEntry,
-                                ClientProxyMembershipID requestingClient,
-                                EntryEventImpl clientEvent,
-                                boolean returnTombstones) throws IOException {
+  public RawValue getSerialized(KeyInfo keyInfo, boolean generateCallbacks, boolean doNotLockEntry, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) throws IOException {
     checkReadiness();
     checkForNoAccess();
     CachePerfStats stats = getCachePerfStats();
@@ -1463,7 +1520,7 @@ implements Bucket
     try {
       RawValue valueBytes = NULLVALUE;
       boolean isCreate = false;
-      RawValue result = getSerialized(keyInfo.getKey(), true, doNotLockEntry, clientEvent, returnTombstones);
+      RawValue result = getSerialized(keyInfo.getKey(), true, doNotLockEntry, clientEvent, returnTombstones, allowReadFromHDFS);
       isCreate = result == NULLVALUE || (result.getRawValue() == Token.TOMBSTONE && !returnTombstones);
       miss = (result == NULLVALUE || Token.isInvalid(result.getRawValue()));
       if (miss) {
@@ -1475,7 +1532,7 @@ implements Bucket
             return REQUIRES_ENTRY_LOCK;
           }
           Object value = nonTxnFindObject(keyInfo, isCreate,
-              generateCallbacks, result.getRawValue(), true, true, requestingClient, clientEvent, false);
+              generateCallbacks, result.getRawValue(), true, true, requestingClient, clientEvent, false, allowReadFromHDFS);
           if (value != null) {
             result = new RawValue(value);
           }
@@ -2414,8 +2471,36 @@ implements Bucket
   }
 
   public void beforeAcquiringPrimaryState() {
+    try {
+      createHoplogOrganizer();
+    } catch (IOException e) {
+      // 48990: when HDFS was down, gemfirexd should still start normally
+      logger.warn(LocalizedStrings.HOPLOG_NOT_STARTED_YET, e);
+    } catch(Throwable e) {
+      /*MergeGemXDHDFSToGFE changed this code to checkReadiness*/
+      // SystemFailure.checkThrowable(e);
+      this.checkReadiness();
+      //49333 - no matter what, we should elect a primary.
+      logger.error(LocalizedStrings.LocalRegion_UNEXPECTED_EXCEPTION, e);
+    }
+  }
+
+  public HoplogOrganizer<?> createHoplogOrganizer() throws IOException {
+    if (getPartitionedRegion().isHDFSRegion()) {
+      HoplogOrganizer<?> organizer = hoplog.get();
+      if (organizer != null) {
+        // hoplog was recreated by another thread
+        return organizer;
+      }
+
+      HoplogOrganizer hdfs = hoplog.getAndSet(getPartitionedRegion().hdfsManager.create(getId()));
+      assert hdfs == null;
+      return hoplog.get();
+    } else {
+      return null;
+    }
   }
-
+  
   public void afterAcquiringPrimaryState() {
     
   }
@@ -2423,13 +2508,105 @@ implements Bucket
    * Invoked when a primary bucket is demoted.
    */
   public void beforeReleasingPrimaryLockDuringDemotion() {
+    releaseHoplogOrganizer();
   }
 
+  protected void releaseHoplogOrganizer() {
+    // release resources during a clean transition
+    HoplogOrganizer hdfs = hoplog.getAndSet(null);
+    if (hdfs != null) {
+      getPartitionedRegion().hdfsManager.close(getId());
+    }
+  }
+  
+  public HoplogOrganizer<?> getHoplogOrganizer() throws HDFSIOException {
+    HoplogOrganizer<?> organizer = hoplog.get();
+    if (organizer == null) {
+      synchronized (getBucketAdvisor()) {
+        checkForPrimary();
+        try {
+          organizer = createHoplogOrganizer();
+        } catch (IOException e) {
+          throw new HDFSIOException("Failed to create Hoplog organizer", e);
+        }
+        if (organizer == null) {
+          throw new HDFSIOException("Hoplog organizer is not available for " + this);
+        }
+      }
+    }
+    return organizer;
+  }
+  
   @Override
   public RegionAttributes getAttributes() {
     return this;
   }
 
+  @Override
+  public void hdfsCalled(Object key) {
+    this.partitionedRegion.hdfsCalled(key);
+  }
+
+  @Override
+  protected void clearHDFSData() {
+    //clear the HDFS data if present
+    if (getPartitionedRegion().isHDFSReadWriteRegion()) {
+      // Clear the queue
+      ConcurrentParallelGatewaySenderQueue q = getHDFSQueue();
+      if (q == null) return;
+      q.clear(getPartitionedRegion(), this.getId());
+      HoplogOrganizer organizer = hoplog.get();
+      if (organizer != null) {
+        try {
+          organizer.clear();
+        } catch (IOException e) {
+          throw new GemFireIOException(LocalizedStrings.HOPLOG_UNABLE_TO_DELETE_HDFS_DATA.toLocalizedString(), e);
+        }
+      }
+    }
+  }
+  
+  public EvictionCriteria getEvictionCriteria() {
+    return this.partitionedRegion.getEvictionCriteria();
+  }
+  
+  public CustomEvictionAttributes getCustomEvictionAttributes() {
+    return this.partitionedRegion.getCustomEvictionAttributes();
+  }
+  
+  /**
+   * @return true if the evict destroy was done; false if it was not needed
+   */
+  public boolean customEvictDestroy(Object key)
+  {
+    checkReadiness();
+    @Released final EntryEventImpl event = 
+          generateCustomEvictDestroyEvent(key);
+    event.setCustomEviction(true);
+    boolean locked = false;
+    try {
+      locked = beginLocalWrite(event);
+      return mapDestroy(event,
+                        false, // cacheWrite
+                        true,  // isEviction
+                        null); // expectedOldValue
+    }
+    catch (CacheWriterException error) {
+      throw new Error(LocalizedStrings.LocalRegion_CACHE_WRITER_SHOULD_NOT_HAVE_BEEN_CALLED_FOR_EVICTDESTROY.toLocalizedString(), error);
+    }
+    catch (TimeoutException anotherError) {
+      throw new Error(LocalizedStrings.LocalRegion_NO_DISTRIBUTED_LOCK_SHOULD_HAVE_BEEN_ATTEMPTED_FOR_EVICTDESTROY.toLocalizedString(), anotherError);
+    }
+    catch (EntryNotFoundException yetAnotherError) {
+      throw new Error(LocalizedStrings.LocalRegion_ENTRYNOTFOUNDEXCEPTION_SHOULD_BE_MASKED_FOR_EVICTDESTROY.toLocalizedString(), yetAnotherError);
+    } finally {
+      if (locked) {
+        endLocalWrite(event);
+      }
+      event.release();
+    }
+  }
+
   public boolean areSecondariesPingable() {
     
     Set<InternalDistributedMember> hostingservers = this.partitionedRegion.getRegionAdvisor()
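
BucketRegion now ties a per-bucket HoplogOrganizer to the primary lifecycle: it is created lazily while the bucket is (or is becoming) primary and handed back to the HDFS manager on demotion via getAndSet(null). The AtomicReference idiom behind that, shown in isolation with a generic resource type and a compareAndSet-based create step (the patch itself serializes creation on the bucket advisor and uses getAndSet plus an assert), is:

    import java.util.concurrent.atomic.AtomicReference;

    // Condensed sketch of the hoplog lifecycle: lazily create one resource per
    // bucket while primary, release it on demotion. Names are illustrative.
    public class PrimaryScopedResource<R> {
      public interface Factory<T> { T create() throws Exception; }

      private final AtomicReference<R> ref = new AtomicReference<>();

      public R getOrCreate(Factory<R> factory) throws Exception {
        R existing = ref.get();
        if (existing != null) {
          return existing;             // already created by another thread
        }
        R created = factory.create();
        if (ref.compareAndSet(null, created)) {
          return created;
        }
        return ref.get();              // another thread won the race
      }

      public R release() {
        return ref.getAndSet(null);    // mirror of releaseHoplogOrganizer()
      }
    }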

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketRegionQueue.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketRegionQueue.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketRegionQueue.java
index 0243cde..0facd93 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketRegionQueue.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/BucketRegionQueue.java
@@ -441,7 +441,7 @@ public class BucketRegionQueue extends AbstractBucketRegionQueue {
     }
   }
 
-  protected void addToEventQueue(Object key, boolean didPut, EntryEventImpl event) {
+  protected void addToEventQueue(Object key, boolean didPut, EntryEventImpl event, int sizeOfHDFSEvent) {
     if (didPut) {
       if (this.initialized) {
         this.eventSeqNumQueue.add(key);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CacheDistributionAdvisor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CacheDistributionAdvisor.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CacheDistributionAdvisor.java
index 4a34771..6f673c7 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CacheDistributionAdvisor.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CacheDistributionAdvisor.java
@@ -38,6 +38,8 @@ import com.gemstone.gemfire.cache.InterestPolicy;
 import com.gemstone.gemfire.cache.RegionDestroyedException;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache.SubscriptionAttributes;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl;
+import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
 import com.gemstone.gemfire.distributed.Role;
 import com.gemstone.gemfire.distributed.internal.DistributionAdvisor;
 import com.gemstone.gemfire.distributed.internal.DistributionManager;
@@ -1226,16 +1228,30 @@ public class CacheDistributionAdvisor extends DistributionAdvisor  {
       public boolean include(final Profile profile) {
         if (profile instanceof CacheProfile) {
           final CacheProfile cp = (CacheProfile)profile;
-          if (allAsyncEventIds.equals(cp.asyncEventQueueIds)) {
+          /* Since HDFS queues are created only when a region is created, this check is
+           * unnecessary. It also causes a problem because the hdfs queue is not
+           * created on an accessor. Hence HDFS queues are excluded from this check. */
+          Set<String> allAsyncEventIdsNoHDFS = removeHDFSQueues(allAsyncEventIds);
+          Set<String> profileQueueIdsNoHDFS = removeHDFSQueues(cp.asyncEventQueueIds);
+          if (allAsyncEventIdsNoHDFS.equals(profileQueueIdsNoHDFS)) {
             return true;
           }else{
-            differAsycnQueueIds.add(allAsyncEventIds);
-            differAsycnQueueIds.add(cp.asyncEventQueueIds);
+            differAsycnQueueIds.add(allAsyncEventIdsNoHDFS);
+            differAsycnQueueIds.add(profileQueueIdsNoHDFS);
             return false;
           }
         }
         return false;
       }
+      private Set<String> removeHDFSQueues(Set<String> queueIds){
+        Set<String> queueIdsWithoutHDFSQueues = new HashSet<String>();
+        for (String queueId: queueIds){
+          if (!queueId.startsWith(HDFSStoreFactoryImpl.DEFAULT_ASYNC_QUEUE_ID_FOR_HDFS)){
+            queueIdsWithoutHDFSQueues.add(queueId);
+          }
+        }
+        return queueIdsWithoutHDFSQueues;
+      }
     });
     return differAsycnQueueIds;
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CachePerfStats.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CachePerfStats.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CachePerfStats.java
index ad84963..382c537 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CachePerfStats.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CachePerfStats.java
@@ -156,6 +156,13 @@ public class CachePerfStats {
   protected static final int compressionPreCompressedBytesId;
   protected static final int compressionPostCompressedBytesId;
   
+  protected static final int evictByCriteria_evictionsId;// total actual evictions (entries evicted)
+  protected static final int evictByCriteria_evictionTimeId;// total eviction time including product + user expr. 
+  protected static final int evictByCriteria_evictionsInProgressId;
+  protected static final int evictByCriteria_evaluationsId;// total eviction attempts
+  protected static final int evictByCriteria_evaluationTimeId;// time taken to evaluate user expression.
+  
+
   /** The Statistics object that we delegate most behavior to */
   protected final Statistics stats;
 
@@ -514,6 +521,12 @@ public class CachePerfStats {
     compressionDecompressionsId = type.nameToId("decompressions");
     compressionPreCompressedBytesId = type.nameToId("preCompressedBytes");
     compressionPostCompressedBytesId = type.nameToId("postCompressedBytes");
+    
+    evictByCriteria_evictionsId = type.nameToId("evictByCriteria_evictions");
+    evictByCriteria_evictionTimeId = type.nameToId("evictByCriteria_evictionTime"); 
+    evictByCriteria_evictionsInProgressId = type.nameToId("evictByCriteria_evictionsInProgress");
+    evictByCriteria_evaluationsId= type.nameToId("evictByCriteria_evaluations");
+    evictByCriteria_evaluationTimeId = type.nameToId("evictByCriteria_evaluationTime");
   }
   
   ////////////////////////  Constructors  ////////////////////////
@@ -1341,4 +1354,66 @@ public class CachePerfStats {
       stats.incLong(exportTimeId, getStatTime() - start);
     }
   }
+  
+  // used for the case of evict on incoming
+  public long startCustomEviction() {
+    return NanoTimer.getTime();
+  }
+
+  // used for the case of evict on incoming
+  public void endCustomEviction(long start) {
+    long ts = NanoTimer.getTime();
+    stats.incLong(evictByCriteria_evictionTimeId, ts - start);
+  }
+
+  public void incEvictionsInProgress() {
+    this.stats.incLong(evictByCriteria_evictionsInProgressId, 1);
+  }
+
+  public void decEvictionsInProgress() {
+    this.stats.incLong(evictByCriteria_evictionsInProgressId, -1);
+  }
+
+  public void incEvictions() {
+    this.stats.incLong(evictByCriteria_evictionsId, 1);
+  }
+
+  public void incEvaluations() {
+    this.stats.incLong(evictByCriteria_evaluationsId, 1);
+  }
+
+  public void incEvaluations(int delta) {
+    this.stats.incLong(evictByCriteria_evaluationsId, delta);
+  }
+  
+  public long startEvaluation() {
+    return NanoTimer.getTime();
+  }
+
+  public void endEvaluation(long start, long notEvaluationTime) {
+    long ts = NanoTimer.getTime();
+    long totalTime = ts - start;
+    long evaluationTime = totalTime - notEvaluationTime;
+    stats.incLong(evictByCriteria_evaluationTimeId, evaluationTime);
+  }
+
+  public long getEvictions() {
+    return stats.getLong(evictByCriteria_evictionsId);
+  }
+
+  public long getEvictionsInProgress() {
+    return stats.getLong(evictByCriteria_evictionsInProgressId);
+  }
+
+  public long getEvictionsTime() {
+    return stats.getLong(evictByCriteria_evictionTimeId);
+  }
+
+  public long getEvaluations() {
+    return stats.getLong(evictByCriteria_evaluationsId);
+  }
+
+  public long getEvaluationTime() {
+    return stats.getLong(evictByCriteria_evaluationTimeId);
+  }
 }
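
The evictByCriteria_* counters follow the usual CachePerfStats shape: a start method samples a timer, the matching end method adds the elapsed nanoseconds to a long stat, and an in-progress gauge is incremented and decremented around the work. The trimmed stand-in below shows how such a pair is typically wrapped around an eviction; the EvictionStats class and its AtomicLong fields are illustrative, while the real counters live in a Statistics instance and use NanoTimer.

    import java.util.concurrent.atomic.AtomicLong;

    // Minimal stand-in for the start/end + inProgress statistics pattern above.
    public class EvictionStats {
      private final AtomicLong evictions = new AtomicLong();
      private final AtomicLong evictionTimeNanos = new AtomicLong();
      private final AtomicLong evictionsInProgress = new AtomicLong();

      public long startCustomEviction() {
        return System.nanoTime();
      }

      public void endCustomEviction(long start) {
        evictionTimeNanos.addAndGet(System.nanoTime() - start);
      }

      public void runEviction(Runnable evictOneEntry) {
        long start = startCustomEviction();
        evictionsInProgress.incrementAndGet();
        try {
          evictOneEntry.run();
          evictions.incrementAndGet();      // counts an actual eviction
        } finally {
          evictionsInProgress.decrementAndGet();
          endCustomEviction(start);
        }
      }
    }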

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ColocationHelper.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ColocationHelper.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ColocationHelper.java
index 72edc10..1441144 100755
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ColocationHelper.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/ColocationHelper.java
@@ -107,6 +107,9 @@ public class ColocationHelper {
   }
     private static PartitionedRegion getColocatedPR(
       final PartitionedRegion partitionedRegion, final String colocatedWith) {
+    logger.info(LocalizedMessage.create(
+        LocalizedStrings.HOPLOG_0_COLOCATE_WITH_REGION_1_NOT_INITIALIZED_YET,
+        new Object[] { partitionedRegion.getFullPath(), colocatedWith }));
     PartitionedRegion colocatedPR = (PartitionedRegion) partitionedRegion
         .getCache().getPartitionedRegion(colocatedWith, false);
     assert colocatedPR != null;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CustomEvictionAttributesImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CustomEvictionAttributesImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CustomEvictionAttributesImpl.java
new file mode 100644
index 0000000..0c82f97
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/CustomEvictionAttributesImpl.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.internal.cache;
+
+import com.gemstone.gemfire.cache.CustomEvictionAttributes;
+import com.gemstone.gemfire.cache.EvictionCriteria;
+
+/**
+ * Concrete instance of {@link CustomEvictionAttributes}.
+ * 
+ * @since gfxd 1.0
+ */
+public final class CustomEvictionAttributesImpl extends
+    CustomEvictionAttributes {
+
+  public CustomEvictionAttributesImpl(EvictionCriteria<?, ?> criteria,
+      long startTime, long interval, boolean evictIncoming) {
+    super(criteria, startTime, interval, evictIncoming);
+  }
+}
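
CustomEvictionAttributesImpl is deliberately a thin, immutable holder; changing the eviction schedule therefore means building a fresh instance, which is exactly what setCustomEvictionAttributes in AbstractRegion does earlier in this patch. The generic sketch below shows that copy-on-reschedule shape with made-up names (and, for simplicity, without the 0/0 evict-incoming recalculation the real method performs):

    // Illustrative immutable schedule object with a copy-on-reschedule method.
    public final class EvictionSchedule {
      private final long startTime;
      private final long interval;
      private final boolean evictIncoming;

      public EvictionSchedule(long startTime, long interval, boolean evictIncoming) {
        this.startTime = startTime;
        this.interval = interval;
        this.evictIncoming = evictIncoming;
      }

      public long getStartTime()       { return startTime; }
      public long getInterval()        { return interval; }
      public boolean isEvictIncoming() { return evictIncoming; }

      /** Builds a copy with a new schedule instead of mutating this instance. */
      public EvictionSchedule reschedule(long newStart, long newInterval) {
        return new EvictionSchedule(newStart != 0 ? newStart : this.startTime,
            newInterval, this.evictIncoming);
      }
    }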

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistTXState.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistTXState.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistTXState.java
index cafdb80..f8475ae 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistTXState.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistTXState.java
@@ -145,7 +145,7 @@ public class DistTXState extends TXState {
               } 
             } 
           } // end if primary
-        }
+        } // end non-hdfs buckets
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedCacheOperation.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedCacheOperation.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedCacheOperation.java
index 6a7b4f2..a6d2488 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedCacheOperation.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedCacheOperation.java
@@ -863,6 +863,8 @@ public abstract class DistributedCacheOperation {
 
     private final static int INHIBIT_NOTIFICATIONS_MASK = 0x400;
 
+	protected final static short FETCH_FROM_HDFS = 0x200;
+    
     protected final static short IS_PUT_DML = 0x100;
 
     public boolean needsRouting;
@@ -1365,6 +1367,7 @@ public abstract class DistributedCacheOperation {
       if ((extBits & INHIBIT_NOTIFICATIONS_MASK) != 0) {
         this.inhibitAllNotifications = true;
 	  if (this instanceof PutAllMessage) {
+        ((PutAllMessage) this).setFetchFromHDFS((extBits & FETCH_FROM_HDFS) != 0);
         ((PutAllMessage) this).setPutDML((extBits & IS_PUT_DML) != 0);
       }
       }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedPutAllOperation.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedPutAllOperation.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedPutAllOperation.java
index b6aa1b6..2817fdd 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedPutAllOperation.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedPutAllOperation.java
@@ -856,6 +856,7 @@ public class DistributedPutAllOperation extends AbstractUpdateOperation
     PutAllMessage msg = new PutAllMessage();
     msg.eventId = event.getEventId();
     msg.context = event.getContext();
+	msg.setFetchFromHDFS(event.isFetchFromHDFS());
     msg.setPutDML(event.isPutDML());
     return msg;
   }
@@ -870,7 +871,7 @@ public class DistributedPutAllOperation extends AbstractUpdateOperation
   public PutAllPRMessage createPRMessagesNotifyOnly(int bucketId) {
     final EntryEventImpl event = getBaseEvent();
     PutAllPRMessage prMsg = new PutAllPRMessage(bucketId, putAllDataSize, true,
-        event.isPossibleDuplicate(), !event.isGenerateCallbacks(), event.getCallbackArgument(), false /*isPutDML*/);
+        event.isPossibleDuplicate(), !event.isGenerateCallbacks(), event.getCallbackArgument(), false /*fetchFromHDFS*/, false /*isPutDML*/);
     if (event.getContext() != null) {
       prMsg.setBridgeContext(event.getContext());
     }
@@ -899,7 +900,7 @@ public class DistributedPutAllOperation extends AbstractUpdateOperation
       PutAllPRMessage prMsg = (PutAllPRMessage)prMsgMap.get(bucketId);
       if (prMsg == null) {
         prMsg = new PutAllPRMessage(bucketId.intValue(), putAllDataSize, false,
-            event.isPossibleDuplicate(), !event.isGenerateCallbacks(), event.getCallbackArgument(), event.isPutDML());
+            event.isPossibleDuplicate(), !event.isGenerateCallbacks(), event.getCallbackArgument(), event.isFetchFromHDFS(), event.isPutDML());
         prMsg.setTransactionDistributed(event.getRegion().getCache().getTxManager().isDistributed());
 
         // set dpao's context(original sender) into each PutAllMsg
@@ -1076,6 +1077,9 @@ public class DistributedPutAllOperation extends AbstractUpdateOperation
 
     protected EventID eventId = null;
     
+    // By default, fetchFromHDFS == true;
+    private transient boolean fetchFromHDFS = true;
+    
     private transient boolean isPutDML = false;
 
     protected static final short HAS_BRIDGE_CONTEXT = UNRESERVED_FLAGS_START;
@@ -1133,11 +1137,12 @@ public class DistributedPutAllOperation extends AbstractUpdateOperation
      *          the region the entry is put in
      */
     public void doEntryPut(PutAllEntryData entry, DistributedRegion rgn,
-        boolean requiresRegionContext, boolean isPutDML) {
+        boolean requiresRegionContext, boolean fetchFromHDFS, boolean isPutDML) {
       @Released EntryEventImpl ev = PutAllMessage.createEntryEvent(entry, getSender(), 
           this.context, rgn,
           requiresRegionContext, this.possibleDuplicate,
           this.needsRouting, this.callbackArg, true, skipCallbacks);
+      ev.setFetchFromHDFS(fetchFromHDFS);
       ev.setPutDML(isPutDML);
       // we don't need to set old value here, because the msg is from remote. local old value will get from next step
       try {
@@ -1232,7 +1237,7 @@ public class DistributedPutAllOperation extends AbstractUpdateOperation
               logger.debug("putAll processing {} with {} sender={}", putAllData[i], putAllData[i].versionTag, sender);
             }
             putAllData[i].setSender(sender);
-            doEntryPut(putAllData[i], rgn, requiresRegionContext,  isPutDML);
+            doEntryPut(putAllData[i], rgn, requiresRegionContext,  fetchFromHDFS, isPutDML);
           }
         }
       }, ev.getEventId());
@@ -1361,6 +1366,10 @@ public class DistributedPutAllOperation extends AbstractUpdateOperation
       return Arrays.asList(ops);
     }
     
+    public void setFetchFromHDFS(boolean val) {
+      this.fetchFromHDFS = val;
+    }
+    
     public void setPutDML(boolean val) {
       this.isPutDML = val;
     }
@@ -1368,6 +1377,9 @@ public class DistributedPutAllOperation extends AbstractUpdateOperation
     @Override
     protected short computeCompressedExtBits(short bits) {
       bits = super.computeCompressedExtBits(bits);
+      if (fetchFromHDFS) {
+        bits |= FETCH_FROM_HDFS;
+      }
       if (isPutDML) {
         bits |= IS_PUT_DML;
       }
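
Taken together, the changes above carry the fetchFromHDFS hint from the originating EntryEventImpl into each PutAllMessage and PutAllPRMessage, across the wire via the extension bits, and back onto the per-entry events in doEntryPut. As a hedged illustration only (the cache and region wiring is assumed, not part of this commit), a caller that wants to skip the HDFS read of the old value would flag the event before the operation is distributed, mirroring the EntryEventImpl.create(...) call used in HDFSEntriesSetJUnitTest later in this commit:

    // FetchFromHdfsUsageSketch.java -- illustrative only, not part of this commit.
    import com.gemstone.gemfire.cache.Operation;
    import com.gemstone.gemfire.distributed.DistributedMember;
    import com.gemstone.gemfire.internal.cache.EntryEventImpl;
    import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
    import com.gemstone.gemfire.internal.cache.LocalRegion;

    final class FetchFromHdfsUsageSketch {
      static EntryEventImpl newPutEvent(GemFireCacheImpl cache, LocalRegion region,
                                        Object key, Object value) {
        // Same argument order as the EntryEventImpl.create(...) call in
        // HDFSEntriesSetJUnitTest below.
        EntryEventImpl event = EntryEventImpl.create(region, Operation.CREATE,
            key, value, null, false, (DistributedMember) cache.getMyId());
        // Skip the HDFS lookup of the old value; DistributedPutAllOperation
        // copies this flag into the PutAllMessage/PutAllPRMessage it builds.
        event.setFetchFromHDFS(false);
        return event;
      }
      // Note: production callers also release the event when done with it
      // (see the @Released/@Retained annotations in the hunks above).
    }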

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedRegion.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedRegion.java
index 226d914..addba8e 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedRegion.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedRegion.java
@@ -17,6 +17,8 @@
 
 package com.gemstone.gemfire.internal.cache;
 
+import static com.gemstone.gemfire.internal.offheap.annotations.OffHeapIdentifier.ABSTRACT_REGION_ENTRY_FILL_IN_VALUE;
+
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.ArrayList;
@@ -111,6 +113,8 @@ import com.gemstone.gemfire.internal.cache.versions.ConcurrentCacheModificationE
 import com.gemstone.gemfire.internal.cache.versions.RegionVersionVector;
 import com.gemstone.gemfire.internal.cache.versions.VersionSource;
 import com.gemstone.gemfire.internal.cache.versions.VersionTag;
+import com.gemstone.gemfire.internal.cache.wan.AbstractGatewaySender;
+import com.gemstone.gemfire.internal.cache.wan.AbstractGatewaySenderEventProcessor;
 import com.gemstone.gemfire.internal.cache.wan.AsyncEventQueueConfigurationException;
 import com.gemstone.gemfire.internal.cache.wan.GatewaySenderConfigurationException;
 import com.gemstone.gemfire.internal.cache.wan.parallel.ConcurrentParallelGatewaySenderQueue;
@@ -1260,6 +1264,8 @@ public class DistributedRegion extends LocalRegion implements
   private final Set<DistributedMember> memoryThresholdReachedMembers =
     new CopyOnWriteArraySet<DistributedMember>();
 
+  private ConcurrentParallelGatewaySenderQueue hdfsQueue;
+
   /** Sets and returns giiMissingRequiredRoles */
   private boolean checkInitialImageForReliability(
       InternalDistributedMember imageTarget,
@@ -2418,16 +2424,9 @@ public class DistributedRegion extends LocalRegion implements
   /** @return the deserialized value */
   @Override
   @Retained
-  protected Object findObjectInSystem(KeyInfo keyInfo,
-                                      boolean isCreate,
-                                      TXStateInterface txState,
-                                      boolean generateCallbacks,
-                                      Object localValue,
-                                      boolean disableCopyOnRead,
-                                      boolean preferCD,
-                                      ClientProxyMembershipID requestingClient,
-                                      EntryEventImpl clientEvent,
-                                      boolean returnTombstones)
+  protected Object findObjectInSystem(KeyInfo keyInfo, boolean isCreate,
+      TXStateInterface txState, boolean generateCallbacks, Object localValue, boolean disableCopyOnRead,
+      boolean preferCD, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS)
       throws CacheLoaderException, TimeoutException
   {
     checkForLimitedOrNoAccess();
@@ -2546,6 +2545,18 @@ public class DistributedRegion extends LocalRegion implements
     }
   }
   
+  protected ConcurrentParallelGatewaySenderQueue getHDFSQueue() {
+    if (this.hdfsQueue == null) {
+      String asyncQId = this.getPartitionedRegion().getHDFSEventQueueName();
+      final AsyncEventQueueImpl asyncQ =  (AsyncEventQueueImpl)this.getCache().getAsyncEventQueue(asyncQId);
+      final AbstractGatewaySender gatewaySender = (AbstractGatewaySender)asyncQ.getSender();
+      AbstractGatewaySenderEventProcessor ep = gatewaySender.getEventProcessor();
+      if (ep == null) return null;
+      hdfsQueue = (ConcurrentParallelGatewaySenderQueue)ep.getQueue();
+    }
+    return hdfsQueue;
+  }
+
   /** hook for subclasses to note that a cache load was performed
    * @see BucketRegion#performedLoad
    */

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/EntryEventImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/EntryEventImpl.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/EntryEventImpl.java
index e241622..2b826ce 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/EntryEventImpl.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/EntryEventImpl.java
@@ -193,8 +193,16 @@ public class EntryEventImpl
   /** version tag for concurrency checks */
   protected VersionTag versionTag;
 
+  /** boolean to indicate whether this operation should fetch the existing value from HDFS; when false the fetch is skipped as an optimization*/
+  private transient boolean fetchFromHDFS = true;
+  
   private transient boolean isPutDML = false;
 
+  /** boolean to indicate that the RegionEntry for this event was loaded from HDFS*/
+  private transient boolean loadedFromHDFS = false;
+  
+  private transient boolean isCustomEviction = false;
+  
   /** boolean to indicate that the RegionEntry for this event has been evicted*/
   private transient boolean isEvicted = false;
   
@@ -650,6 +658,14 @@ public class EntryEventImpl
     return this.op.isEviction();
   }
 
+  public final boolean isCustomEviction() {
+    return this.isCustomEviction;
+  }
+  
+  public final void setCustomEviction(boolean customEvict) {
+    this.isCustomEviction = customEvict;
+  }
+  
   public final void setEvicted() {
     this.isEvicted = true;
   }
@@ -3031,6 +3047,13 @@ public class EntryEventImpl
   public boolean isOldValueOffHeap() {
     return isOffHeapReference(this.oldValue);
   }
+  public final boolean isFetchFromHDFS() {
+    return fetchFromHDFS;
+  }
+
+  public final void setFetchFromHDFS(boolean fetchFromHDFS) {
+    this.fetchFromHDFS = fetchFromHDFS;
+  }
 
   public final boolean isPutDML() {
     return this.isPutDML;
@@ -3039,4 +3062,12 @@ public class EntryEventImpl
   public final void setPutDML(boolean val) {
     this.isPutDML = val;
   }
+
+  public final boolean isLoadedFromHDFS() {
+    return loadedFromHDFS;
+  }
+
+  public final void setLoadedFromHDFS(boolean loadedFromHDFS) {
+    this.loadedFromHDFS = loadedFromHDFS;
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/EvictorService.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/EvictorService.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/EvictorService.java
new file mode 100644
index 0000000..9054d6d
--- /dev/null
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/EvictorService.java
@@ -0,0 +1,284 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.internal.cache;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import com.gemstone.gemfire.cache.CacheClosedException;
+import com.gemstone.gemfire.cache.EvictionCriteria;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.google.common.util.concurrent.AbstractScheduledService;
+import com.gemstone.gemfire.internal.offheap.Releasable;
+/**
+ * Periodically runs custom eviction on a region. EvictorService takes an
+ * absolute start time and a period as input and schedules eviction iterations
+ * at absolute times by computing the delay to the next slot. The time taken to
+ * complete one iteration is accounted for when scheduling the next one; if an
+ * iteration takes longer than the specified period, the next iteration is
+ * scheduled immediately.
+ */
+
+public class EvictorService extends AbstractScheduledService {
+
+  private final EvictionCriteria<Object, Object> criteria;
+
+  // period is always in seconds
+  private long interval;
+
+  private volatile boolean stopScheduling;
+
+  private long nextScheduleTime;
+
+  private GemFireCacheImpl cache;
+
+  private Region region;
+  
+  private volatile ScheduledExecutorService executorService;
+
+  public EvictorService(EvictionCriteria<Object, Object> criteria,
+      long evictorStartTime, long evictorInterval, TimeUnit unit, Region r) {
+    this.criteria = criteria;
+    this.interval = unit.toSeconds(evictorInterval);
+    this.region = r;
+    try {
+      this.cache = GemFireCacheImpl.getExisting();
+    } catch (CacheClosedException cce) {
+      
+    }
+    //TODO: Unless we revisit System.currentTimeMillis or cacheTimeMillis keep the default
+//    long now = (evictorStartTime != 0 ? evictorStartTime
+//        + this.cache.getDistributionManager().getCacheTimeOffset() : this.cache
+//        .getDistributionManager().cacheTimeMillis()) / 1000;
+    long now = this.cache.getDistributionManager().cacheTimeMillis() / 1000;
+    if (this.cache.getLoggerI18n().fineEnabled()) {
+      this.cache.getLoggerI18n().fine(
+          "EvictorService: The startTime(now) is " + now + " evictorStartTime : " + evictorStartTime);
+    }
+    
+    this.nextScheduleTime = now + 10;
+
+    if (this.cache.getLoggerI18n().fineEnabled()) {
+      this.cache.getLoggerI18n().fine(
+          "EvictorService: The startTime is " + this.nextScheduleTime);
+    }
+  }
+
+  @Override
+  protected void runOneIteration() throws Exception {
+    if (this.cache.getLoggerI18n().fineEnabled()) {
+      this.cache.getLoggerI18n()
+          .fine(
+              "EvictorService: Running the iteration at "
+                  + cache.cacheTimeMillis());
+    }
+    if (stopScheduling || checkCancelled(cache)) {
+      stopScheduling(); // make sure no further iterations are scheduled
+      if (this.cache.getLoggerI18n().fineEnabled()) {
+        this.cache
+            .getLoggerI18n()
+            .fine(
+                "EvictorService: Abort eviction since stopScheduling OR cancel in progress. Evicted 0 entries ");
+      }
+      return;
+    }
+    CachePerfStats stats = ((LocalRegion)this.region).getCachePerfStats();
+    long startEvictionTime = stats.startCustomEviction();
+    int evicted = 0;
+    long startEvaluationTime = stats.startEvaluation();
+    Iterator<Entry<Object, Object>> keysItr = null;
+    long totalIterationsTime = 0;
+    
+    keysItr = this.criteria.getKeysToBeEvicted(this.cache
+        .getDistributionManager().cacheTimeMillis(), this.region);
+    try {
+    stats.incEvaluations(this.region.size());
+    // stop mid-iteration if we have been asked to stop scheduling
+    // or if the cache is closing
+    while (keysItr.hasNext() && !stopScheduling && !checkCancelled(cache)) {
+      Map.Entry<Object, Object> entry = keysItr.next();
+      long startIterationTime = this.cache
+          .getDistributionManager().cacheTimeMillis();
+      Object routingObj = entry.getValue();
+      if (this.cache.getLoggerI18n().fineEnabled()) {
+        this.cache.getLoggerI18n().fine(
+            "EvictorService: Going to evict the following entry " + entry);
+      }
+      if (this.region instanceof PartitionedRegion) {
+        try {
+          PartitionedRegion pr = (PartitionedRegion)this.region;
+          stats.incEvictionsInProgress();
+          int bucketId = PartitionedRegionHelper.getHashKey(pr, routingObj);
+          BucketRegion br = pr.getDataStore().getLocalBucketById(bucketId);
+          // This has to be called on BucketRegion directly and not on the PR as
+          // PR doesn't allow operation on Secondary buckets.
+          if (br != null) {
+            if (this.cache.getLoggerI18n().fineEnabled()) {
+              this.cache.getLoggerI18n().fine(
+                  "EvictorService: Going to evict the following entry " + entry
+                      + " from bucket " + br);
+            }
+            if (br.getBucketAdvisor().isPrimary()) {
+              boolean succ = false;
+              try {
+                succ = br.customEvictDestroy(entry.getKey());
+              } catch (PrimaryBucketException e) {
+                if (this.cache.getLoggerI18n().fineEnabled()) {
+                  this.cache.getLoggerI18n().warning(
+                      LocalizedStrings.EVICTORSERVICE_CAUGHT_EXCEPTION_0, e);
+                }
+              }
+              
+              if (succ)
+                evicted++;
+              if (this.cache.getLoggerI18n().fineEnabled()) {
+                this.cache.getLoggerI18n()
+                    .fine(
+                        "EvictorService: Evicted the following entry " + entry
+                            + " from bucket " + br + " successfully " + succ);
+              }
+            }
+          }
+          stats.incEvictions();
+        } catch (Exception e) {
+          if (this.cache.getLoggerI18n().fineEnabled()) {
+            this.cache.getLoggerI18n().warning(
+                LocalizedStrings.EVICTORSERVICE_CAUGHT_EXCEPTION_0, e);
+          }
+          // TODO: refine the exception handling:
+          // - check whether the bucket is still present
+          // - log a warning if the entry could not be evicted
+          // - log any other exception
+        } finally {
+          stats.decEvictionsInProgress();
+          long endIterationTime = this.cache
+              .getDistributionManager().cacheTimeMillis();
+          totalIterationsTime += (endIterationTime - startIterationTime);
+        }
+      }
+    }
+    } finally {
+      if (keysItr instanceof Releasable) {
+        ((Releasable)keysItr).release();
+      }
+    }
+    stats.endEvaluation(startEvaluationTime, totalIterationsTime);    
+    
+    if (this.cache.getLoggerI18n().fineEnabled()) {
+      this.cache.getLoggerI18n().fine(
+          "EvictorService: Completed an iteration at time "
+              + this.cache.getDistributionManager().cacheTimeMillis() / 1000
+              + ". Evicted " + evicted + " entries.");
+    }
+    stats.endCustomEviction(startEvictionTime);
+  }
+
+  private boolean checkCancelled(GemFireCacheImpl cache) {
+    if (cache.getCancelCriterion().cancelInProgress() != null) {
+      return true;
+    }
+    return false;
+  }
+
+  @Override
+  protected Scheduler scheduler() {
+    return new CustomScheduler() {
+      @Override
+      protected Schedule getNextSchedule() throws Exception {
+        // get the current time in seconds from DM.
+        // it takes care of clock skew etc in different VMs
+        long now = cache.getDistributionManager().cacheTimeMillis() / 1000;
+        if (cache.getLoggerI18n().fineEnabled()) {
+          cache.getLoggerI18n().fine("EvictorService: Now is " + now);
+        }
+        long delay = 0;
+        if (now < nextScheduleTime) {
+          delay = nextScheduleTime - now;
+        }
+        nextScheduleTime += interval;
+        // calculate the next immediate time i.e. schedule time in seconds
+        // set the schedule.delay to that scheduletime - currenttime
+        if (cache.getLoggerI18n().fineEnabled()) {
+          cache.getLoggerI18n().fine(
+              "EvictorService: Returning the next schedule with delay " + delay
+                  + " next schedule is at : " + nextScheduleTime);
+        }
+
+        return new Schedule(delay, TimeUnit.SECONDS);
+      }
+    };
+  }
+
+  /**
+   * Region.destroy and Region.close should make sure to call this method so
+   * that no further eviction iterations are scheduled.
+   */
+  public void stopScheduling() {
+    this.stopScheduling = true;
+  }
+
+  // Called when the service is stopped (via stop()); shuts down the executor
+  // and releases the region and cache references.
+  @Override
+  protected void shutDown() throws Exception {
+    this.executorService.shutdownNow();
+    this.region= null;
+    this.cache = null;
+  }
+
+  // Called when the service starts; no initialization is currently required.
+  @Override
+  protected void startUp() throws Exception {
+
+  }
+
+  public void changeEvictionInterval(long newInterval) {
+    this.interval = newInterval / 1000;
+    if (cache.getLoggerI18n().fineEnabled()) {
+      cache.getLoggerI18n().fine(
+          "EvictorService: New interval is " + this.interval);
+    }
+  }
+
+  public void changeStartTime(long newStart) {
+    this.nextScheduleTime = newStart / 1000;
+    if (cache.getLoggerI18n().fineEnabled()) {
+      cache.getLoggerI18n().fine("EvictorService: New start time is " + this.nextScheduleTime);
+    }
+  }
+  
+  // Capture the executor so shutDown() can terminate it.
+  @Override
+  protected ScheduledExecutorService executor() {
+    this.executorService = super.executor();
+    return this.executorService;
+  }
+
+}
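
The scheduling contract described in the class javadoc (keep iterations on an absolute timeline, and start the next one immediately if the previous iteration overran its period) boils down to the delay computation in getNextSchedule() above. A stand-alone sketch of that arithmetic, using assumed example times in seconds (the class below is illustrative, not Geode code):

    // ScheduleDelaySketch.java -- illustrative only; mirrors getNextSchedule() above.
    public final class ScheduleDelaySketch {

      // Delay (in seconds) until the next iteration, given the current time
      // and the previously planned slot on the absolute timeline.
      static long delaySeconds(long nowSecs, long nextScheduleTimeSecs) {
        // Past the planned slot (the last iteration overran its period): run
        // again immediately. Otherwise wait until the slot arrives.
        return (nowSecs < nextScheduleTimeSecs) ? nextScheduleTimeSecs - nowSecs : 0;
      }

      public static void main(String[] args) {
        long interval = 60;      // period in seconds
        long nextSlot = 1000;    // planned start of the next iteration

        // On-time case: now = 970, so the next run waits 30 seconds.
        System.out.println(delaySeconds(970, nextSlot));  // 30
        nextSlot += interval;                             // slot advances to 1060

        // Overrun case: the previous iteration finished at now = 1075, which is
        // past the 1060 slot, so the next run starts with zero delay.
        System.out.println(delaySeconds(1075, nextSlot)); // 0
        nextSlot += interval;                             // slot still advances, to 1120
      }
    }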


[09/25] incubator-geode git commit: GEODE-10: Reinstating HDFS persistence code

Posted by up...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
new file mode 100644
index 0000000..a1c9eb1
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
@@ -0,0 +1,520 @@
+ /*=========================================================================
+   * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+   * This product is protected by U.S. and international copyright
+   * and intellectual property laws. Pivotal products are covered by
+   * one or more patents listed at http://www.pivotal.io/patents.
+   *=========================================================================
+   */
+
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import junit.framework.TestCase;
+
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.CacheXmlException;
+import com.gemstone.gemfire.cache.DiskStoreFactory;
+import com.gemstone.gemfire.cache.EvictionAttributes;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueImpl;
+import com.gemstone.gemfire.cache.hdfs.HDFSStore;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.LocalRegion;
+import com.gemstone.gemfire.internal.cache.control.HeapMemoryMonitor;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.junit.experimental.categories.Category;
+
+/**
+ * A test class for testing the configuration option for HDFS 
+ * 
+ * @author Hemant Bhanawat
+ * @author Ashvin Agrawal
+ */
+@Category({IntegrationTest.class, HoplogTest.class})
+public class HDFSConfigJUnitTest extends TestCase {
+  private GemFireCacheImpl c;
+
+  public HDFSConfigJUnitTest() {
+    super();
+  }
+
+  @Override
+  public void setUp() {
+    System.setProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP, "true");
+    this.c = createCache();
+    AbstractHoplogOrganizer.JUNIT_TEST_RUN = true;
+  }
+
+  @Override
+  public void tearDown() {
+    this.c.close();
+  }
+    
+    public void testHDFSStoreCreation() throws Exception {
+      this.c.close();
+      this.c = createCache();
+      try {
+        HDFSStoreFactory hsf = this.c.createHDFSStoreFactory();
+        HDFSStore store = hsf.create("myHDFSStore");
+        RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+        Region r1 = rf1.setHDFSStoreName("myHDFSStore").create("r1");
+       
+        r1.put("k1", "v1");
+        
+        assertTrue("Mismatch in attributes, actual.batchsize: " + store.getBatchSize() + " and expected batchsize: 32", store.getBatchSize()== 32);
+        assertTrue("Mismatch in attributes, actual.isPersistent: " + store.getBufferPersistent() + " and expected isPersistent: false", store.getBufferPersistent()== false);
+        assertEquals(false, r1.getAttributes().getHDFSWriteOnly());
+        assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getDiskStoreName() + " and expected getDiskStoreName: null", store.getDiskStoreName()== null);
+        assertTrue("Mismatch in attributes, actual.getFileRolloverInterval: " + store.getWriteOnlyFileRolloverInterval() + " and expected getFileRolloverInterval: 3600", store.getWriteOnlyFileRolloverInterval() == 3600);
+        assertTrue("Mismatch in attributes, actual.getMaxFileSize: " + store.getWriteOnlyFileRolloverSize() + " and expected getMaxFileSize: 256MB", store.getWriteOnlyFileRolloverSize() == 256);
+        this.c.close();
+        
+        
+        this.c = createCache();
+        hsf = this.c.createHDFSStoreFactory();
+        hsf.create("myHDFSStore");
+        
+        r1 = this.c.createRegionFactory(RegionShortcut.PARTITION_WRITEONLY_HDFS_STORE).setHDFSStoreName("myHDFSStore")
+              .create("r1");
+       
+        r1.put("k1", "v1");
+        assertTrue("Mismatch in attributes, actual.batchsize: " + store.getBatchSize() + " and expected batchsize: 32", store.getBatchSize()== 32);
+        assertTrue("Mismatch in attributes, actual.isPersistent: " + store.getBufferPersistent() + " and expected isPersistent: false", store.getBufferPersistent()== false);
+        assertTrue("Mismatch in attributes, actual.isRandomAccessAllowed: " + r1.getAttributes().getHDFSWriteOnly() + " and expected isRandomAccessAllowed: true", r1.getAttributes().getHDFSWriteOnly()== true);
+        assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getDiskStoreName() + " and expected getDiskStoreName: null", store.getDiskStoreName()== null);
+        assertTrue("Mismatch in attributes, actual.batchInterval: " + store.getBatchInterval() + " and expected batchsize: 60000", store.getBatchInterval()== 60000);
+        assertTrue("Mismatch in attributes, actual.isDiskSynchronous: " + store.getSynchronousDiskWrite() + " and expected isDiskSynchronous: true", store.getSynchronousDiskWrite()== true);
+        
+        this.c.close();
+
+        this.c = createCache();
+        
+        File directory = new File("HDFS" + "_disk_"
+            + System.currentTimeMillis());
+        directory.mkdir();
+        File[] dirs1 = new File[] { directory };
+        DiskStoreFactory dsf = this.c.createDiskStoreFactory();
+        dsf.setDiskDirs(dirs1);
+        dsf.create("mydisk");
+        
+        
+        hsf = this.c.createHDFSStoreFactory();
+        hsf.setBatchSize(50);
+        hsf.setDiskStoreName("mydisk");
+        hsf.setBufferPersistent(true);
+        hsf.setBatchInterval(50);
+        hsf.setSynchronousDiskWrite(false);
+        hsf.setHomeDir("/home/hemant");
+        hsf.setNameNodeURL("mymachine");
+        hsf.setWriteOnlyFileRolloverSize(1);
+        hsf.setWriteOnlyFileRolloverInterval(10);
+        hsf.create("myHDFSStore");
+        
+        
+        r1 = this.c.createRegionFactory(RegionShortcut.PARTITION_WRITEONLY_HDFS_STORE).setHDFSStoreName("myHDFSStore")
+            .setHDFSWriteOnly(true).create("r1");
+       
+        r1.put("k1", "v1");
+        store = c.findHDFSStore(r1.getAttributes().getHDFSStoreName());
+        
+        assertTrue("Mismatch in attributes, actual.batchsize: " + store.getBatchSize() + " and expected batchsize: 50", store.getBatchSize()== 50);
+        assertTrue("Mismatch in attributes, actual.isPersistent: " + store.getBufferPersistent() + " and expected isPersistent: true", store.getBufferPersistent()== true);
+        assertTrue("Mismatch in attributes, actual.isRandomAccessAllowed: " + r1.getAttributes().getHDFSWriteOnly() + " and expected isRandomAccessAllowed: true", r1.getAttributes().getHDFSWriteOnly()== true);
+        assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getDiskStoreName() + " and expected getDiskStoreName: mydisk", store.getDiskStoreName().equals("mydisk"));
+        assertTrue("Mismatch in attributes, actual.HDFSStoreName: " + r1.getAttributes().getHDFSStoreName() + " and expected HDFSStoreName: myHDFSStore", r1.getAttributes().getHDFSStoreName().equals("myHDFSStore"));
+        assertTrue("Mismatch in attributes, actual.getHomeDir: " + ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getHomeDir() + " and expected getHomeDir: /home/hemant", ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getHomeDir().equals("/home/hemant"));
+        assertTrue("Mismatch in attributes, actual.getNameNodeURL: " + ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getNameNodeURL() + " and expected getNameNodeURL: mymachine", ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getNameNodeURL().equals("mymachine"));
+        assertTrue("Mismatch in attributes, actual.batchInterval: " + store.getBatchInterval() + " and expected batchInterval: 50", store.getBatchInterval() == 50);
+        assertTrue("Mismatch in attributes, actual.isDiskSynchronous: " + store.getSynchronousDiskWrite() + " and expected isPersistent: false", store.getSynchronousDiskWrite()== false);
+        assertTrue("Mismatch in attributes, actual.getFileRolloverInterval: " + store.getWriteOnlyFileRolloverInterval() + " and expected getFileRolloverInterval: 10", store.getWriteOnlyFileRolloverInterval() == 10);
+        assertTrue("Mismatch in attributes, actual.getMaxFileSize: " + store.getWriteOnlyFileRolloverSize() + " and expected getMaxFileSize: 1MB", store.getWriteOnlyFileRolloverSize() == 1);
+        this.c.close();
+      } finally {
+        this.c.close();
+      }
+    }
+       
+    public void testCacheXMLParsing() throws Exception {
+      try {
+        this.c.close();
+
+        Region r1 = null;
+
+        // use a cache.xml to recover
+        this.c = createCache();
+        ByteArrayOutputStream baos = new ByteArrayOutputStream();
+        PrintWriter pw = new PrintWriter(new OutputStreamWriter(baos), true); 
+        pw.println("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
+//      pw.println("<?xml version=\"1.0\"?>");
+//      pw.println("<!DOCTYPE cache PUBLIC");
+//      pw.println("  \"-//GemStone Systems, Inc.//GemFire Declarative Caching 7.5//EN\"");
+//      pw.println("  \"http://www.gemstone.com/dtd/cache7_5.dtd\">");
+        pw.println("<cache ");
+        pw.println("xmlns=\"http://schema.pivotal.io/gemfire/cache\"");
+        pw.println("xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"");
+        pw.println(" xsi:schemaLocation=\"http://schema.pivotal.io/gemfire/cache http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd\"");
+        pw.println("version=\"9.0\">");
+
+        pw.println("  <hdfs-store name=\"myHDFSStore\" namenode-url=\"mynamenode\"  home-dir=\"mypath\" />");
+        pw.println("  <region name=\"r1\" refid=\"PARTITION_HDFS\">");
+        pw.println("    <region-attributes hdfs-store-name=\"myHDFSStore\"/>");
+        pw.println("  </region>");
+        pw.println("</cache>");
+        pw.close();
+        byte[] bytes = baos.toByteArray();  
+        this.c.loadCacheXml(new ByteArrayInputStream(bytes));
+        
+        r1 = this.c.getRegion("/r1");
+        HDFSStoreImpl store = c.findHDFSStore(r1.getAttributes().getHDFSStoreName());
+        r1.put("k1", "v1");
+        assertTrue("Mismatch in attributes, actual.batchsize: " + store.getBatchSize() + " and expected batchsize: 32", store.getBatchSize()== 32);
+        assertTrue("Mismatch in attributes, actual.isPersistent: " + store.getBufferPersistent() + " and expected isPersistent: false", store.getBufferPersistent()== false);
+        assertEquals(false, r1.getAttributes().getHDFSWriteOnly());
+        assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getDiskStoreName() + " and expected getDiskStoreName: null", store.getDiskStoreName()== null);
+        assertTrue("Mismatch in attributes, actual.getFileRolloverInterval: " + store.getWriteOnlyFileRolloverInterval() + " and expected getFileRolloverInterval: 3600", store.getWriteOnlyFileRolloverInterval() == 3600);
+        assertTrue("Mismatch in attributes, actual.getMaxFileSize: " + store.getWriteOnlyFileRolloverSize() + " and expected getMaxFileSize: 256MB", store.getWriteOnlyFileRolloverSize() == 256);
+        
+        this.c.close();
+        
+        // use a cache.xml to recover
+        this.c = createCache();
+        baos = new ByteArrayOutputStream();
+        pw = new PrintWriter(new OutputStreamWriter(baos), true);
+        pw.println("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
+//      pw.println("<?xml version=\"1.0\"?>");
+//      pw.println("<!DOCTYPE cache PUBLIC");
+//      pw.println("  \"-//GemStone Systems, Inc.//GemFire Declarative Caching 7.5//EN\"");
+//      pw.println("  \"http://www.gemstone.com/dtd/cache7_5.dtd\">");
+        pw.println("<cache ");
+        pw.println("xmlns=\"http://schema.pivotal.io/gemfire/cache\"");
+        pw.println("xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"");
+        pw.println(" xsi:schemaLocation=\"http://schema.pivotal.io/gemfire/cache http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd\"");
+        pw.println("version=\"9.0\">");
+        pw.println("  <hdfs-store name=\"myHDFSStore\" namenode-url=\"mynamenode\"  home-dir=\"mypath\" />");
+        pw.println("  <region name=\"r1\" refid=\"PARTITION_WRITEONLY_HDFS_STORE\">");
+        pw.println("    <region-attributes hdfs-store-name=\"myHDFSStore\"/>");
+        pw.println("  </region>");
+        pw.println("</cache>");
+        pw.close();
+        bytes = baos.toByteArray();  
+        this.c.loadCacheXml(new ByteArrayInputStream(bytes));
+        
+        r1 = this.c.getRegion("/r1");
+        store = c.findHDFSStore(r1.getAttributes().getHDFSStoreName());
+        r1.put("k1", "v1");
+        assertTrue("Mismatch in attributes, actual.batchsize: " + store.getBatchSize() + " and expected batchsize: 32", store.getBatchSize()== 32);
+        assertTrue("Mismatch in attributes, actual.isPersistent: " + store.getBufferPersistent() + " and expected isPersistent: false", store.getBufferPersistent()== false);
+        assertTrue("Mismatch in attributes, actual.isRandomAccessAllowed: " + r1.getAttributes().getHDFSWriteOnly() + " and expected isRandomAccessAllowed: false", r1.getAttributes().getHDFSWriteOnly()== false);
+        assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getDiskStoreName() + " and expected getDiskStoreName: null", store.getDiskStoreName()== null);
+        
+        this.c.close();
+        
+        // use a cache.xml to recover
+        this.c = createCache();
+        baos = new ByteArrayOutputStream();
+        pw = new PrintWriter(new OutputStreamWriter(baos), true);
+        pw.println("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
+//        pw.println("<?xml version=\"1.0\"?>");
+//        pw.println("<!DOCTYPE cache PUBLIC");
+//        pw.println("  \"-//GemStone Systems, Inc.//GemFire Declarative Caching 7.5//EN\"");
+//        pw.println("  \"http://www.gemstone.com/dtd/cache7_5.dtd\">");
+        pw.println("<cache ");
+        pw.println("xmlns=\"http://schema.pivotal.io/gemfire/cache\"");
+        pw.println("xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"");
+        pw.println(" xsi:schemaLocation=\"http://schema.pivotal.io/gemfire/cache http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd\"");
+        pw.println("version=\"9.0\">");
+
+        pw.println("  <disk-store name=\"mydiskstore\"/>");
+        pw.println("  <hdfs-store name=\"myHDFSStore\" namenode-url=\"mynamenode\"  home-dir=\"mypath\" max-write-only-file-size=\"1\" write-only-file-rollover-interval=\"10\" ");
+        pw.println("    batch-size=\"151\" buffer-persistent =\"true\" disk-store=\"mydiskstore\" synchronous-disk-write=\"false\" batch-interval=\"50\"");
+        pw.println("  />");
+        pw.println("  <region name=\"r1\" refid=\"PARTITION_WRITEONLY_HDFS_STORE\">");
+        pw.println("    <region-attributes hdfs-store-name=\"myHDFSStore\" hdfs-write-only=\"false\">");
+        pw.println("    </region-attributes>");
+        pw.println("  </region>");
+        pw.println("</cache>");
+        pw.close();
+        bytes = baos.toByteArray();
+        this.c.loadCacheXml(new ByteArrayInputStream(bytes));
+        
+        r1 = this.c.getRegion("/r1");
+        store = c.findHDFSStore(r1.getAttributes().getHDFSStoreName());
+        r1.put("k1", "v1");
+        assertTrue("Mismatch in attributes, actual.batchsize: " + store.getBatchSize() + " and expected batchsize: 151", store.getBatchSize()== 151);
+        assertTrue("Mismatch in attributes, actual.isPersistent: " + store.getBufferPersistent() + " and expected isPersistent: true", store.getBufferPersistent()== true);
+        assertTrue("Mismatch in attributes, actual.isRandomAccessAllowed: " + r1.getAttributes().getHDFSWriteOnly() + " and expected isRandomAccessAllowed: true", r1.getAttributes().getHDFSWriteOnly()== false);
+        assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getDiskStoreName() + " and expected getDiskStoreName: mydiskstore", store.getDiskStoreName().equals("mydiskstore"));
+        assertTrue("Mismatch in attributes, actual.HDFSStoreName: " + r1.getAttributes().getHDFSStoreName() + " and expected getDiskStoreName: myHDFSStore", r1.getAttributes().getHDFSStoreName().equals("myHDFSStore"));
+        assertTrue("Mismatch in attributes, actual.getFolderPath: " + ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getHomeDir() + " and expected getDiskStoreName: mypath", ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getHomeDir().equals("mypath"));
+        assertTrue("Mismatch in attributes, actual.getNamenode: " + ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getNameNodeURL()+ " and expected getDiskStoreName: mynamenode", ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getNameNodeURL().equals("mynamenode"));
+        assertTrue("Mismatch in attributes, actual.batchInterval: " + store.getBatchInterval() + " and expected batchsize: 50", store.getBatchInterval()== 50);
+        assertTrue("Mismatch in attributes, actual.isDiskSynchronous: " + store.getSynchronousDiskWrite() + " and expected isDiskSynchronous: false", store.getSynchronousDiskWrite()== false);
+        assertTrue("Mismatch in attributes, actual.getFileRolloverInterval: " + store.getWriteOnlyFileRolloverInterval() + " and expected getFileRolloverInterval: 10", store.getWriteOnlyFileRolloverInterval() == 10);
+        assertTrue("Mismatch in attributes, actual.getMaxFileSize: " + store.getWriteOnlyFileRolloverSize() + " and expected getMaxFileSize: 1MB", store.getWriteOnlyFileRolloverSize() == 1);
+        
+        this.c.close();
+      } finally {
+          this.c.close();
+      }
+    }
+   
+  /**
+   * Validates if hdfs store conf is getting completely and correctly parsed
+   */
+  public void testHdfsStoreConfFullParsing() {
+    String conf = createStoreConf("123");
+    this.c.loadCacheXml(new ByteArrayInputStream(conf.getBytes()));
+    HDFSStoreImpl store = ((GemFireCacheImpl)this.c).findHDFSStore("store");
+    assertEquals("namenode url mismatch.", "url", store.getNameNodeURL());
+    assertEquals("home-dir mismatch.", "dir", store.getHomeDir());
+    assertEquals("hdfs-client-config-file mismatch.", "client", store.getHDFSClientConfigFile());
+    assertEquals("read-cache-size mismatch.", 24.5f, store.getBlockCacheSize());
+    
+    assertFalse("compaction auto-compact mismatch.", store.getMinorCompaction());
+    assertTrue("compaction auto-major-compact mismatch.", store.getMajorCompaction());
+    assertEquals("compaction max-concurrency", 23, store.getMinorCompactionThreads());
+    assertEquals("compaction max-major-concurrency", 27, store.getMajorCompactionThreads());
+    assertEquals("compaction purge-interval", 711, store.getPurgeInterval());
+  }
+  
+  /**
+   * Validates that the config defaults are set even with minimum XML configuration 
+   */
+  public void testHdfsStoreConfMinParse() {
+    this.c.loadCacheXml(new ByteArrayInputStream(XML_MIN_CONF.getBytes()));
+    HDFSStoreImpl store = ((GemFireCacheImpl)this.c).findHDFSStore("store");
+    assertEquals("namenode url mismatch.", "url", store.getNameNodeURL());
+    assertEquals("home-dir mismatch.", "gemfire", store.getHomeDir());
+    
+    assertTrue("compaction auto-compact mismatch.", store.getMinorCompaction());
+    assertTrue("compaction auto-major-compact mismatch.", store.getMajorCompaction());
+    assertEquals("compaction max-input-file-size mismatch.", 512, store.getInputFileSizeMax());
+    assertEquals("compaction min-input-file-count.", 4, store.getInputFileCountMin());
+    assertEquals("compaction max-iteration-size.", 10, store.getInputFileCountMax());
+    assertEquals("compaction max-concurrency", 10, store.getMinorCompactionThreads());
+    assertEquals("compaction max-major-concurrency", 2, store.getMajorCompactionThreads());
+    assertEquals("compaction major-interval", 720, store.getMajorCompactionInterval());
+    assertEquals("compaction cleanup-interval", 30, store.getPurgeInterval());
+  }
+  
+  /**
+   * Validates that cache creation fails if a compaction configuration is
+   * provided which is not applicable to the selected compaction strategy
+   */
+  public void testHdfsStoreInvalidCompactionConf() {
+    String conf = createStoreConf("123");
+    try {
+      this.c.loadCacheXml(new ByteArrayInputStream(conf.getBytes()));
+      // expected
+    } catch (CacheXmlException e) {
+      fail();
+    }
+  }
+  
+  /**
+   * Validates that HDFSStoreFactory rejects invalid or inconsistent
+   * configuration values with an IllegalArgumentException
+   */
+  public void testInvalidConfigCheck() throws Exception {
+    this.c.close();
+
+    this.c = createCache();
+
+    HDFSStoreFactory hsf; 
+    hsf = this.c.createHDFSStoreFactory();
+    
+    try {
+      hsf.setInputFileSizeMax(-1);
+      fail("validation failed");
+    } catch (IllegalArgumentException e) {
+      //expected
+    }
+    try {
+      hsf.setInputFileCountMin(-1);
+      fail("validation failed");
+    } catch (IllegalArgumentException e) {
+      //expected
+    }
+    try {
+      hsf.setInputFileCountMax(-1);
+      fail("validation failed");
+    } catch (IllegalArgumentException e) {
+      //expected
+    }
+    try {
+      hsf.setMinorCompactionThreads(-1);
+      fail("validation failed");
+    } catch (IllegalArgumentException e) {
+      //expected
+    }
+    try {
+      hsf.setMajorCompactionInterval(-1);
+      fail("validation failed");
+    } catch (IllegalArgumentException e) {
+      //expected
+    }
+    try {
+      hsf.setMajorCompactionThreads(-1);
+      fail("validation failed");
+    } catch (IllegalArgumentException e) {
+      //expected
+    }
+    try {
+      hsf.setPurgeInterval(-1);
+      fail("validation failed");
+    } catch (IllegalArgumentException e) {
+      //expected
+    }
+    try {
+      hsf.setInputFileCountMin(2);
+      hsf.setInputFileCountMax(1);
+      hsf.create("test");
+      fail("validation failed");
+    } catch (IllegalArgumentException e) {
+      //expected
+    }
+    try {
+      hsf.setInputFileCountMax(1);
+      hsf.setInputFileCountMin(2);
+      hsf.create("test");
+      fail("validation failed");
+    } catch (IllegalArgumentException e) {
+      //expected
+    }
+  }
+  
+  /**
+   * Validates cache creation fails if invalid integer size configuration is provided
+   * @throws Exception
+   */
+  public void testHdfsStoreConfInvalidInt() throws Exception {
+    String conf = createStoreConf("NOT_INTEGER");
+    try {
+      this.c.loadCacheXml(new ByteArrayInputStream(conf.getBytes()));
+      fail();
+    } catch (CacheXmlException e) {
+      // expected
+    }
+  }
+  
+
+  private static String XML_MIN_CONF = "<?xml version=\"1.0\" encoding=\"UTF-8\"?> \n"
+  + "<cache \n"
+  + "xmlns=\"http://schema.pivotal.io/gemfire/cache\"\n"
+  + "xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n"
+  + " xsi:schemaLocation=\"http://schema.pivotal.io/gemfire/cache http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd\"\n"
+  + "version=\"9.0\">" +
+          "  <hdfs-store name=\"store\" namenode-url=\"url\" />" +
+          "</cache>";
+   
+  private static String XML_FULL_CONF = "<?xml version=\"1.0\" encoding=\"UTF-8\"?> \n"
+                                        + "<cache \n"
+                                        + "xmlns=\"http://schema.pivotal.io/gemfire/cache\"\n"
+                                        + "xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n"
+                                        + " xsi:schemaLocation=\"http://schema.pivotal.io/gemfire/cache http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd\"\n"
+                                        + "version=\"9.0\">"
+      + "  <hdfs-store name=\"store\" namenode-url=\"url\" "
+      + "              home-dir=\"dir\" "
+      + "              read-cache-size=\"24.5\" "
+      + "              max-write-only-file-size=\"FILE_SIZE_CONF\" "
+      + "              minor-compaction-threads = \"23\""
+      + "              major-compaction-threads = \"27\""
+      + "              major-compaction=\"true\" "
+      + "              minor-compaction=\"false\" "
+      + "              major-compaction-interval=\"781\" "
+      + "              purge-interval=\"711\" hdfs-client-config-file=\"client\" />\n"
+      + "</cache>";
+  // potential replacement targets
+  String FILE_SIZE_CONF_SUBSTRING = "FILE_SIZE_CONF";
+  
+  private String createStoreConf(String fileSize) {
+    String result = XML_FULL_CONF;
+    
+    String replaceWith = (fileSize == null) ? "123" : fileSize;
+    result = result.replaceFirst(FILE_SIZE_CONF_SUBSTRING, replaceWith);
+
+    return result;
+  }
+  
+  public void _testBlockCacheConfiguration() throws Exception {
+    this.c.close();
+    this.c = createCache();
+    try {
+      HDFSStoreFactory hsf = this.c.createHDFSStoreFactory();
+      
+      //Configure a block cache to cache about 20 blocks.
+      long heapSize = HeapMemoryMonitor.getTenuredPoolMaxMemory();
+      int blockSize = StoreFile.DEFAULT_BLOCKSIZE_SMALL;
+      int blockCacheSize = 5 * blockSize;
+      int entrySize = blockSize / 2;
+      
+      
+      float percentage = 100 * (float) blockCacheSize / (float) heapSize;
+      hsf.setBlockCacheSize(percentage);
+      HDFSStoreImpl store = (HDFSStoreImpl) hsf.create("myHDFSStore");
+      RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+      //Create a region that evicts everything
+      LocalRegion r1 = (LocalRegion) rf1.setHDFSStoreName("myHDFSStore").setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(1)).create("r1");
+     
+      //Populate about ten times our block cache size worth of data
+      //We want to try to cache at least 5 blocks worth of index and metadata
+      byte[] value = new byte[entrySize];
+      int numEntries = 10 * blockCacheSize / entrySize;
+      for(int i = 0; i < numEntries; i++) {
+        r1.put(i, value);
+      }
+
+      //Wait for the events to be written to HDFS.
+      Set<String> queueIds = r1.getAsyncEventQueueIds();
+      assertEquals(1, queueIds.size());
+      AsyncEventQueueImpl queue = (AsyncEventQueueImpl) c.getAsyncEventQueue(queueIds.iterator().next());
+      long end = System.nanoTime() + TimeUnit.SECONDS.toNanos(120);
+      while(queue.size() > 0 && System.nanoTime() < end) {
+        Thread.sleep(10);
+      }
+      assertEquals(0, queue.size());
+      
+      
+      Thread.sleep(10000);
+
+      //Do some reads to cache some blocks. Note that this doesn't
+      //end up caching data blocks, just index and bloom filters blocks.
+      for(int i = 0; i < numEntries; i++) {
+        r1.get(i);
+      }
+      
+      long statSize = store.getStats().getBlockCache().getBytesCached();
+      assertTrue("Block cache stats expected to be near " + blockCacheSize + " was " + statSize, 
+          blockCacheSize / 2  < statSize &&
+          statSize <=  2 * blockCacheSize);
+      
+      long currentSize = store.getBlockCache().getCurrentSize();
+      assertTrue("Block cache size expected to be near " + blockCacheSize + " was " + currentSize, 
+          blockCacheSize / 2  < currentSize &&
+          currentSize <= 2 * blockCacheSize);
+      
+    } finally {
+      this.c.close();
+    }
+  }
+
+  protected GemFireCacheImpl createCache() {
+    return (GemFireCacheImpl) new CacheFactory().set("mcast-port", "0").set("log-level", "info")
+    .create();
+  }
+}
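
For reference, the disabled _testBlockCacheConfiguration above sizes the block cache as a percentage of the tenured heap before handing it to setBlockCacheSize(). The stand-alone sketch below redoes that arithmetic with assumed example values (an 8 KB block size and a 1 GB tenured pool are illustrative, not taken from the test):

    // BlockCachePercentageSketch.java -- illustrative arithmetic only.
    public final class BlockCachePercentageSketch {
      public static void main(String[] args) {
        long heapSize = 1024L * 1024 * 1024; // assumed tenured pool size: 1 GB
        int blockSize = 8 * 1024;            // assumed block size: 8 KB
        int blockCacheSize = 5 * blockSize;  // cache roughly five blocks, as in the test
        int entrySize = blockSize / 2;       // each value spans about half a block

        // setBlockCacheSize() takes a percentage of heap, so convert bytes to percent.
        float percentage = 100 * (float) blockCacheSize / (float) heapSize;
        int numEntries = 10 * blockCacheSize / entrySize; // write ~10x the cache size

        System.out.printf("block cache = %d bytes -> %.6f%% of heap, %d entries%n",
            blockCacheSize, percentage, numEntries);
        // With these numbers: 40960 bytes -> about 0.003815% of heap, 100 entries.
      }
    }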

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
new file mode 100644
index 0000000..75dfa93
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
@@ -0,0 +1,227 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.fs.Path;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.Operation;
+import com.gemstone.gemfire.cache.PartitionAttributesFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.cache.asyncqueue.internal.ParallelAsyncEventQueueImpl;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
+import com.gemstone.gemfire.cache.hdfs.internal.SortedListForAsyncQueueJUnitTest.KeyValue;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.internal.cache.BucketRegion;
+import com.gemstone.gemfire.internal.cache.CachedDeserializable;
+import com.gemstone.gemfire.internal.cache.EntryEventImpl;
+import com.gemstone.gemfire.internal.cache.EnumListenerEvent;
+import com.gemstone.gemfire.internal.cache.EventID;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.LocalRegion.IteratorType;
+import com.gemstone.gemfire.internal.cache.PartitionedRegion;
+import com.gemstone.gemfire.internal.cache.wan.GatewaySenderAttributes;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+@SuppressWarnings("rawtypes")
+@Category({IntegrationTest.class, HoplogTest.class})
+public class HDFSEntriesSetJUnitTest extends TestCase {
+  private GemFireCacheImpl cache;
+  private HDFSStoreImpl store;
+  private PartitionedRegion region;
+  private BucketRegion bucket;
+  private HDFSParallelGatewaySenderQueue queue;
+  
+  private HDFSBucketRegionQueue brq;
+  private HoplogOrganizer hdfs;
+  
+  public void setUp() throws Exception {
+    System.setProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP, "true");
+    cache = (GemFireCacheImpl) new CacheFactory()
+        .set("mcast-port", "0")
+        .set("log-level", "info")
+        .create();
+    
+    HDFSStoreFactory hsf = this.cache.createHDFSStoreFactory();
+    hsf.setHomeDir("hoplogs");
+    store = (HDFSStoreImpl) hsf.create("test");
+
+    PartitionAttributesFactory paf = new PartitionAttributesFactory();
+    paf.setTotalNumBuckets(1);
+    
+    RegionFactory rf = cache.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+    region = (PartitionedRegion) rf.setHDFSStoreName("test").setPartitionAttributes(paf.create()).create("test");
+    
+    // prime the region so buckets get created
+    region.put("test", "test");
+    GatewaySenderAttributes g = new GatewaySenderAttributes();
+    g.isHDFSQueue = true;
+    g.id = "HDFSEntriesSetJUnitTest_Queue";
+    ParallelAsyncEventQueueImpl gatewaySender = new ParallelAsyncEventQueueImpl(cache, g);
+    Set<Region> set = new HashSet<Region>();
+    set.add(region);
+    
+    queue = new HDFSParallelGatewaySenderQueue(gatewaySender, set, 0, 1);
+    brq = (HDFSBucketRegionQueue)((PartitionedRegion) queue.getRegion()).getDataStore().getLocalBucketById(0);
+    bucket = region.getDataStore().getLocalBucketById(0);
+        
+    HdfsRegionManager mgr = HDFSRegionDirector.getInstance().manageRegion(region, "test", null);
+    hdfs =  mgr.<SortedHoplogPersistedEvent>create(0);
+    AbstractHoplogOrganizer.JUNIT_TEST_RUN = true;
+  }
+  
+  public void tearDown() throws Exception {
+    store.getFileSystem().delete(new Path("hoplogs"), true);
+    hdfs.close();
+    
+    cache.close();
+  }
+  
+  public void testEmptyIterator() throws Exception {
+    checkIteration(Collections.<String>emptyList(), new KeyValue[] { }, new KeyValue[] { });
+  }
+  
+  public void testQueueOnlyIterator() throws Exception {
+    KeyValue[] qvals = new KeyValue[] {
+      new KeyValue("K0", "0"),
+      new KeyValue("K1", "1"),
+      new KeyValue("K2", "2"),
+      new KeyValue("K3", "3"),
+      new KeyValue("K4", "4")
+    };
+    checkIteration(getExpected(), qvals, new KeyValue[] { });
+  }
+  
+  public void testHdfsOnlyIterator() throws Exception {
+    KeyValue[] hvals = new KeyValue[] {
+      new KeyValue("K0", "0"),
+      new KeyValue("K1", "1"),
+      new KeyValue("K2", "2"),
+      new KeyValue("K3", "3"),
+      new KeyValue("K4", "4")
+    };
+    checkIteration(getExpected(), new KeyValue[] { }, hvals);
+  }
+  
+  public void testUnevenIterator() throws Exception {
+    KeyValue[] qvals = new KeyValue[] {
+        new KeyValue("K0", "0"),
+        new KeyValue("K2", "2"),
+      };
+
+    KeyValue[] hvals = new KeyValue[] {
+      new KeyValue("K1", "1"),
+      new KeyValue("K3", "3"),
+      new KeyValue("K4", "4")
+    };
+    
+    checkIteration(getExpected(), qvals, hvals);
+  }
+
+  public void testEitherOrIterator() throws Exception {
+    KeyValue[] qvals = new KeyValue[] {
+        new KeyValue("K0", "0"),
+        new KeyValue("K2", "2"),
+        new KeyValue("K4", "4")
+      };
+
+    KeyValue[] hvals = new KeyValue[] {
+      new KeyValue("K1", "1"),
+      new KeyValue("K3", "3")
+    };
+    
+    checkIteration(getExpected(), qvals, hvals);
+  }
+
+  public void testDuplicateIterator() throws Exception {
+    KeyValue[] qvals = new KeyValue[] {
+        new KeyValue("K0", "0"),
+        new KeyValue("K1", "1"),
+        new KeyValue("K2", "2"),
+        new KeyValue("K3", "3"),
+        new KeyValue("K4", "4"),
+        new KeyValue("K4", "4")
+      };
+
+    KeyValue[] hvals = new KeyValue[] {
+        new KeyValue("K0", "0"),
+        new KeyValue("K1", "1"),
+        new KeyValue("K2", "2"),
+        new KeyValue("K3", "3"),
+        new KeyValue("K4", "4"),
+        new KeyValue("K4", "4")
+    };
+    
+    checkIteration(getExpected(), qvals, hvals);
+  }
+
+  private List<String> getExpected() {
+    List<String> expected = new ArrayList<String>();
+    expected.add("0");
+    expected.add("1");
+    expected.add("2");
+    expected.add("3");
+    expected.add("4");
+    return expected;
+  }
+  
+  private void checkIteration(List<String> expected, KeyValue[] qvals, KeyValue[] hvals) 
+  throws Exception {
+    int seq = 0;
+    List<PersistedEventImpl> evts = new ArrayList<PersistedEventImpl>();
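+    // persist the HDFS-side key/values as events and flush them via the hoplog organizer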
+    for (KeyValue kv : hvals) {
+      evts.add(new SortedHDFSQueuePersistedEvent(getNewEvent(kv.key, kv.value, seq++)));
+    }
+    hdfs.flush(evts.iterator(), evts.size());
+
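+    // the queue-side key/values are only added to the gateway sender queue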
+    for (KeyValue kv : qvals) {
+      queue.put(getNewEvent(kv.key, kv.value, seq++));
+    }
+
+    List<String> actual = new ArrayList<String>();
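+    // HDFSEntriesSet should merge the bucket, the queue and HDFS into a single values iterator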
+    Iterator vals = new HDFSEntriesSet(bucket, brq, hdfs, IteratorType.VALUES, null).iterator();
+    while (vals.hasNext()) {
+      Object val = vals.next();
+      if(val instanceof CachedDeserializable) {
+        val = ((CachedDeserializable) val).getDeserializedForReading();
+      }
+      actual.add((String) val);
+    }
+    
+    assertEquals(expected, actual);
+  }
+  
+  private HDFSGatewayEventImpl getNewEvent(Object key, Object value, long seq) throws Exception {
+    EntryEventImpl evt = EntryEventImpl.create(region, Operation.CREATE,
+        key, value, null, false, (DistributedMember) cache.getMyId());
+    
+    evt.setEventId(new EventID(cache.getDistributedSystem()));
+    HDFSGatewayEventImpl event = new HDFSGatewayEventImpl(EnumListenerEvent.AFTER_CREATE, evt, null, true, 0);
+    event.setShadowKey(seq);
+    
+    return event;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HdfsStoreMutatorJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HdfsStoreMutatorJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HdfsStoreMutatorJUnitTest.java
new file mode 100644
index 0000000..b8cbb0d
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HdfsStoreMutatorJUnitTest.java
@@ -0,0 +1,191 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
+import com.gemstone.gemfire.cache.hdfs.internal.hoplog.BaseHoplogTestCase;
+import com.gemstone.gemfire.test.junit.categories.HoplogTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+@Category({IntegrationTest.class, HoplogTest.class})
+public class HdfsStoreMutatorJUnitTest extends BaseHoplogTestCase {
+  public void testMutatorInitialState() {
+    HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
+    assertEquals(-1, mutator.getWriteOnlyFileRolloverInterval());
+    assertEquals(-1, mutator.getWriteOnlyFileRolloverSize());
+    
+    assertEquals(-1, mutator.getInputFileCountMax());
+    assertEquals(-1, mutator.getInputFileSizeMax());
+    assertEquals(-1, mutator.getInputFileCountMin());
+    assertEquals(-1, mutator.getMinorCompactionThreads());
+    assertNull(mutator.getMinorCompaction());
+    
+    assertEquals(-1, mutator.getMajorCompactionInterval());
+    assertEquals(-1, mutator.getMajorCompactionThreads());
+    assertNull(mutator.getMajorCompaction());
+    
+    assertEquals(-1, mutator.getPurgeInterval());
+    
+    assertEquals(-1, mutator.getBatchSize());
+    assertEquals(-1, mutator.getBatchInterval());
+  }
+  
+  public void testMutatorSetInvalidValue() {
+    HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
+
+    try {
+      mutator.setWriteOnlyFileRolloverInterval(-3);
+      fail();
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+    try {
+      mutator.setWriteOnlyFileRolloverSize(-5);
+      fail();
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+    
+    try {
+      mutator.setInputFileCountMin(-1);
+      fail();
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+    try {
+      mutator.setInputFileCountMax(-1);
+      fail();
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+    try {
+      mutator.setInputFileSizeMax(-1);
+      fail();
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+    try {
+      mutator.setMinorCompactionThreads(-9);
+      fail();
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+    try {
+      mutator.setMajorCompactionInterval(-6);
+      fail();
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+    try {
+      mutator.setMajorCompactionThreads(-1);
+      fail();
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+    try {
+      mutator.setPurgeInterval(-4);
+      fail();
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+/*    try {
+      qMutator.setBatchSizeMB(-985);
+      fail();
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+    try {
+      qMutator.setBatchTimeInterval(-695);
+      fail();
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+*/    
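+    // an input file count minimum larger than the maximum must be rejected on alter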
+    try {
+      mutator.setInputFileCountMin(10);
+      mutator.setInputFileCountMax(5);
+      hdfsStore.alter(mutator);
+      fail();
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+  }
+  
+  public void testMutatorReturnsUpdatedValues() {
+    HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
+    
+    mutator.setWriteOnlyFileRolloverInterval(121);
+    mutator.setWriteOnlyFileRolloverSize(234);
+    
+    mutator.setInputFileCountMax(87);
+    mutator.setInputFileSizeMax(45);
+    mutator.setInputFileCountMin(34);
+    mutator.setMinorCompactionThreads(843);
+    mutator.setMinorCompaction(false);
+
+    mutator.setMajorCompactionInterval(26);
+    mutator.setMajorCompactionThreads(92);
+    mutator.setMajorCompaction(false);
+    
+    mutator.setPurgeInterval(328);
+    
+    mutator.setBatchSize(985);
+    mutator.setBatchInterval(695);
+    
+    assertEquals(121, mutator.getWriteOnlyFileRolloverInterval());
+    assertEquals(234, mutator.getWriteOnlyFileRolloverSize());
+    
+    assertEquals(87, mutator.getInputFileCountMax());
+    assertEquals(45, mutator.getInputFileSizeMax());
+    assertEquals(34, mutator.getInputFileCountMin());
+    assertEquals(843, mutator.getMinorCompactionThreads());
+    assertFalse(mutator.getMinorCompaction());
+    
+    assertEquals(26, mutator.getMajorCompactionInterval());
+    assertEquals(92, mutator.getMajorCompactionThreads());
+    assertFalse(mutator.getMajorCompaction());
+    
+    assertEquals(328, mutator.getPurgeInterval());
+    
+    assertEquals(985, mutator.getBatchSize());
+    assertEquals(695, mutator.getBatchInterval());
+    
+    // repeat the cycle once more
+    mutator.setWriteOnlyFileRolloverInterval(14);
+    mutator.setWriteOnlyFileRolloverSize(56);
+    
+    mutator.setInputFileCountMax(93);
+    mutator.setInputFileSizeMax(85);
+    mutator.setInputFileCountMin(64);
+    mutator.setMinorCompactionThreads(59);
+    mutator.setMinorCompaction(true);
+    
+    mutator.setMajorCompactionInterval(26);
+    mutator.setMajorCompactionThreads(92);
+    mutator.setMajorCompaction(false);
+    
+    mutator.setPurgeInterval(328);
+    
+    assertEquals(14, mutator.getWriteOnlyFileRolloverInterval());
+    assertEquals(56, mutator.getWriteOnlyFileRolloverSize());
+    
+    assertEquals(93, mutator.getInputFileCountMax());
+    assertEquals(85, mutator.getInputFileSizeMax());
+    assertEquals(64, mutator.getInputFileCountMin());
+    assertEquals(59, mutator.getMinorCompactionThreads());
+    assertTrue(mutator.getMinorCompaction());
+    
+    assertEquals(26, mutator.getMajorCompactionInterval());
+    assertEquals(92, mutator.getMajorCompactionThreads());
+    assertFalse(mutator.getMajorCompaction());
+    
+    assertEquals(328, mutator.getPurgeInterval());
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9f3f10fd/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionRecoveryDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionRecoveryDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionRecoveryDUnitTest.java
new file mode 100644
index 0000000..290f8d1
--- /dev/null
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionRecoveryDUnitTest.java
@@ -0,0 +1,415 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.cache.hdfs.internal;
+
+import java.io.File;
+import java.io.IOException;
+
+import com.gemstone.gemfire.cache.AttributesFactory;
+import com.gemstone.gemfire.cache.DataPolicy;
+import com.gemstone.gemfire.cache.EvictionAction;
+import com.gemstone.gemfire.cache.EvictionAttributes;
+import com.gemstone.gemfire.cache.PartitionAttributesFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
+import com.gemstone.gemfire.cache30.CacheTestCase;
+import com.gemstone.gemfire.internal.FileUtil;
+import com.gemstone.gemfire.test.dunit.AsyncInvocation;
+import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.SerializableCallable;
+import com.gemstone.gemfire.test.dunit.VM;
+
+/**
+ * A class for testing recovery after restart of a GemFire cluster that has
+ * HDFS regions.
+ * 
+ * @author Hemant Bhanawat
+ */
+@SuppressWarnings({ "serial", "deprecation", "rawtypes" })
+public class RegionRecoveryDUnitTest extends CacheTestCase {
+  public RegionRecoveryDUnitTest(String name) {
+    super(name);
+  }
+
+  private static String homeDir = null;
+
+  @Override
+  public void preTearDownCacheTestCase() throws Exception {
+    for (int h = 0; h < Host.getHostCount(); h++) {
+      Host host = Host.getHost(h);
+      SerializableCallable cleanUp = cleanUpStores();
+      for (int v = 0; v < host.getVMCount(); v++) {
+        VM vm = host.getVM(v);
+        vm.invoke(cleanUp);
+      }
+    }
+    super.preTearDownCacheTestCase();
+  }
+
+  public SerializableCallable cleanUpStores() throws Exception {
+    SerializableCallable cleanUp = new SerializableCallable() {
+      public Object call() throws Exception {
+        if (homeDir != null) {
+          // Each VM will try to delete the same directory. But that's okay as
+          // the subsequent invocations will be no-ops.
+          FileUtil.delete(new File(homeDir));
+          homeDir = null;
+        }
+        return 0;
+      }
+    };
+    return cleanUp;
+  }
+
+  /**
+   * Tests a basic restart of the system. Events that have made it to HDFS should
+   * be read back. The async queue is not persisted, so we wait until the async
+   * queue has persisted its items to HDFS.
+   * 
+   * @throws Exception
+   */
+  public void testBasicRestart() throws Exception {
+    disconnectFromDS();
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+    VM vm2 = host.getVM(2);
+    VM vm3 = host.getVM(3);
+
+    // Going two levels up to avoid home directories getting created in the
+    // VM-specific directory. This avoids failures in those tests where
+    // datastores are restarted and bucket ownership changes between VMs.
+    homeDir = "../../testBasicRestart";
+    String uniqueName = "testBasicRestart";
+
+    createServerRegion(vm0, 11, 1, 500, 500, homeDir, uniqueName);
+    createServerRegion(vm1, 11, 1, 500, 500, homeDir, uniqueName);
+    createServerRegion(vm2, 11, 1, 500, 500, homeDir, uniqueName);
+    createServerRegion(vm3, 11, 1, 500, 500, homeDir, uniqueName);
+
+    doPuts(vm0, uniqueName, 1, 50);
+    doPuts(vm1, uniqueName, 40, 100);
+    doPuts(vm2, uniqueName, 40, 100);
+    doPuts(vm3, uniqueName, 90, 150);
+
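+    // close the caches; the sleep inside cacheClose(vm, true) gives the async
+    // queue time to flush its entries to HDFS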
+    cacheClose(vm0, true);
+    cacheClose(vm1, true);
+    cacheClose(vm2, true);
+    cacheClose(vm3, true);
+
+    createServerRegion(vm0, 11, 1, 500, 500, homeDir, uniqueName);
+    createServerRegion(vm1, 11, 1, 500, 500, homeDir, uniqueName);
+    createServerRegion(vm2, 11, 1, 500, 500, homeDir, uniqueName);
+    createServerRegion(vm3, 11, 1, 500, 500, homeDir, uniqueName);
+
+    verifyGetsForValue(vm0, uniqueName, 1, 50, false);
+    verifyGetsForValue(vm1, uniqueName, 40, 100, false);
+    verifyGetsForValue(vm2, uniqueName, 40, 100, false);
+    verifyGetsForValue(vm3, uniqueName, 90, 150, false);
+
+    cacheClose(vm0, false);
+    cacheClose(vm1, false);
+    cacheClose(vm2, false);
+    cacheClose(vm3, false);
+
+    disconnectFromDS();
+
+  }
+
+  /**
+   * Servers are stopped and restarted. Disabled due to bug 48067.
+   */
+  public void testPersistedAsyncQueue_Restart() throws Exception {
+    disconnectFromDS();
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+    VM vm2 = host.getVM(2);
+    VM vm3 = host.getVM(3);
+
+    // Going two levels up to avoid home directories getting created in the
+    // VM-specific directory. This avoids failures in those tests where
+    // datastores are restarted and bucket ownership changes between VMs.
+    homeDir = "../../testPersistedAsyncQueue_Restart";
+    String uniqueName = "testPersistedAsyncQueue_Restart";
+
+    // create cache and region
+    createPersistedServerRegion(vm0, 11, 1, 2000, 5, homeDir, uniqueName);
+    createPersistedServerRegion(vm1, 11, 1, 2000, 5, homeDir, uniqueName);
+    createPersistedServerRegion(vm2, 11, 1, 2000, 5, homeDir, uniqueName);
+    createPersistedServerRegion(vm3, 11, 1, 2000, 5, homeDir, uniqueName);
+
+    // do some puts
+    AsyncInvocation a0 = doAsyncPuts(vm0, uniqueName, 1, 50);
+    AsyncInvocation a1 = doAsyncPuts(vm1, uniqueName, 40, 100);
+    AsyncInvocation a2 = doAsyncPuts(vm2, uniqueName, 40, 100);
+    AsyncInvocation a3 = doAsyncPuts(vm3, uniqueName, 90, 150);
+
+    a3.join();
+    a2.join();
+    a1.join();
+    a0.join();
+
+    // close the cache
+    cacheClose(vm0, true);
+    cacheClose(vm1, true);
+    cacheClose(vm2, true);
+    cacheClose(vm3, true);
+
+    // recreate the cache and regions
+    a3 = createAsyncPersistedServerRegion(vm3, 11, 1, 2000, 5, homeDir, uniqueName);
+    a2 = createAsyncPersistedServerRegion(vm2, 11, 1, 2000, 5, homeDir, uniqueName);
+    a1 = createAsyncPersistedServerRegion(vm1, 11, 1, 2000, 5, homeDir, uniqueName);
+    a0 = createAsyncPersistedServerRegion(vm0, 11, 1, 2000, 5, homeDir, uniqueName);
+
+    a3.join();
+    a2.join();
+    a1.join();
+    a0.join();
+
+    // these gets should probably fetch the data from the async queue
+    verifyGetsForValue(vm0, uniqueName, 1, 50, false);
+    verifyGetsForValue(vm1, uniqueName, 40, 100, false);
+    verifyGetsForValue(vm2, uniqueName, 40, 100, false);
+    verifyGetsForValue(vm3, uniqueName, 90, 150, false);
+
+    // these gets wait for some time before fetching the data. This ensures
+    // that the reads are done from HDFS
+    verifyGetsForValue(vm0, uniqueName, 1, 50, true);
+    verifyGetsForValue(vm1, uniqueName, 40, 100, true);
+    verifyGetsForValue(vm2, uniqueName, 40, 100, true);
+    verifyGetsForValue(vm3, uniqueName, 90, 150, true);
+
+    cacheClose(vm0, false);
+    cacheClose(vm1, false);
+    cacheClose(vm2, false);
+    cacheClose(vm3, false);
+
+    disconnectFromDS();
+  }
+
+  /**
+   * Stops a single server. A different node becomes primary for the buckets on
+   * the stopped node, and gets from the remaining members should still succeed.
+   * Disabled due to bug 48067.
+   * 
+   */
+  public void testPersistedAsyncQueue_ServerRestart() throws Exception {
+    disconnectFromDS();
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+    VM vm2 = host.getVM(2);
+    VM vm3 = host.getVM(3);
+
+    // Going two levels up to avoid home directories getting created in the
+    // VM-specific directory. This avoids failures in those tests where
+    // datastores are restarted and bucket ownership changes between VMs.
+    homeDir = "../../testPAQ_ServerRestart";
+    String uniqueName = "testPAQ_ServerRestart";
+
+    createPersistedServerRegion(vm0, 11, 1, 2000, 5, homeDir, uniqueName);
+    createPersistedServerRegion(vm1, 11, 1, 2000, 5, homeDir, uniqueName);
+    createPersistedServerRegion(vm2, 11, 1, 2000, 5, homeDir, uniqueName);
+    createPersistedServerRegion(vm3, 11, 1, 2000, 5, homeDir, uniqueName);
+
+    AsyncInvocation a0 = doAsyncPuts(vm0, uniqueName, 1, 50);
+    AsyncInvocation a1 = doAsyncPuts(vm1, uniqueName, 50, 75);
+    AsyncInvocation a2 = doAsyncPuts(vm2, uniqueName, 75, 100);
+    AsyncInvocation a3 = doAsyncPuts(vm3, uniqueName, 100, 150);
+
+    a3.join();
+    a2.join();
+    a1.join();
+    a0.join();
+
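+    // stop a single member; the remaining members take over its buckets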
+    cacheClose(vm0, false);
+
+    // these gets should probably fetch the data from the async queue
+    verifyGetsForValue(vm1, uniqueName, 1, 50, false);
+    verifyGetsForValue(vm2, uniqueName, 40, 100, false);
+    verifyGetsForValue(vm3, uniqueName, 70, 150, false);
+
+    // these gets wait for some time before fetching the data. This ensures
+    // that the reads are done from HDFS
+    verifyGetsForValue(vm2, uniqueName, 1, 100, true);
+    verifyGetsForValue(vm3, uniqueName, 40, 150, true);
+
+    cacheClose(vm1, false);
+    cacheClose(vm2, false);
+    cacheClose(vm3, false);
+
+    disconnectFromDS();
+  }
+
+  private int createPersistedServerRegion(final VM vm, final int totalnumOfBuckets,
+      final int batchSize, final int batchInterval, final int maximumEntries, 
+      final String folderPath, final String uniqueName) throws IOException {
+    
+    return (Integer) vm.invoke(new PersistedRegionCreation(vm, totalnumOfBuckets,
+      batchSize, batchInterval, maximumEntries, folderPath, uniqueName));
+  }
+  private AsyncInvocation createAsyncPersistedServerRegion(final VM vm, final int totalnumOfBuckets,
+      final int batchSize, final int batchInterval, final int maximumEntries, final String folderPath, 
+      final String uniqueName) throws IOException {
+    
+    return (AsyncInvocation) vm.invokeAsync(new PersistedRegionCreation(vm, totalnumOfBuckets,
+      batchSize, batchInterval, maximumEntries, folderPath, uniqueName));
+  }
+  
+  class PersistedRegionCreation extends SerializableCallable {
+    private VM vm;
+    private int totalnumOfBuckets;
+    private int batchSize;
+    private int maximumEntries;
+    private String folderPath;
+    private String uniqueName;
+    private int batchInterval;
+
+    PersistedRegionCreation(final VM vm, final int totalnumOfBuckets,
+        final int batchSize, final int batchInterval, final int maximumEntries,
+        final String folderPath, final String uniqueName) throws IOException {
+      this.vm = vm;
+      this.totalnumOfBuckets = totalnumOfBuckets;
+      this.batchSize = batchSize;
+      this.maximumEntries = maximumEntries;
+      this.folderPath = new File(folderPath).getCanonicalPath();
+      this.uniqueName = uniqueName;
+      this.batchInterval = batchInterval;
+    }
+
+    public Object call() throws Exception {
+
+      AttributesFactory af = new AttributesFactory();
+      af.setDataPolicy(DataPolicy.HDFS_PARTITION);
+      PartitionAttributesFactory paf = new PartitionAttributesFactory();
+      paf.setTotalNumBuckets(totalnumOfBuckets);
+      paf.setRedundantCopies(1);
+
+      af.setPartitionAttributes(paf.create());
+
+      HDFSStoreFactory hsf = getCache().createHDFSStoreFactory();
+      hsf.setHomeDir(folderPath);
+      homeDir = folderPath; // for clean-up in preTearDownCacheTestCase()
+      hsf.setBatchSize(batchSize);
+      hsf.setBatchInterval(batchInterval);
+      hsf.setBufferPersistent(true);
+      hsf.setDiskStoreName(uniqueName + vm.getPid());
+
+      getCache().createDiskStoreFactory().create(uniqueName + vm.getPid());
+
+      af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(maximumEntries, EvictionAction.LOCAL_DESTROY));
+      af.setHDFSStoreName(uniqueName);
+      af.setHDFSWriteOnly(false);
+
+      hsf.create(uniqueName);
+
+      createRootRegion(uniqueName, af.create());
+
+      return 0;
+    }
+  };
+
+  private int createServerRegion(final VM vm, final int totalnumOfBuckets,
+      final int batchSize, final int batchInterval, final int maximumEntries,
+      final String folderPath, final String uniqueName) {
+    SerializableCallable createRegion = new SerializableCallable() {
+      public Object call() throws Exception {
+        AttributesFactory af = new AttributesFactory();
+        af.setDataPolicy(DataPolicy.HDFS_PARTITION);
+        PartitionAttributesFactory paf = new PartitionAttributesFactory();
+        paf.setTotalNumBuckets(totalnumOfBuckets);
+        paf.setRedundantCopies(1);
+        af.setPartitionAttributes(paf.create());
+
+        HDFSStoreFactory hsf = getCache().createHDFSStoreFactory();
+        homeDir = new File(folderPath).getCanonicalPath();
+        hsf.setHomeDir(homeDir);
+        hsf.setBatchSize(batchSize);
+        hsf.setBatchInterval(batchInterval);
+        hsf.setBufferPersistent(false);
+        hsf.setMaxMemory(1);
+        hsf.create(uniqueName);
+        af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(maximumEntries, EvictionAction.LOCAL_DESTROY));
+
+        af.setHDFSWriteOnly(false);
+        af.setHDFSStoreName(uniqueName);
+        createRootRegion(uniqueName, af.create());
+
+        return 0;
+      }
+    };
+
+    return (Integer) vm.invoke(createRegion);
+  }
+
+  private void cacheClose(VM vm, final boolean sleep) {
+    vm.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        if (sleep)
+          Thread.sleep(2000);
+        getCache().getLogger().info("Cache close in progress ");
+        getCache().close();
+        getCache().getDistributedSystem().disconnect();
+        getCache().getLogger().info("Cache closed");
+        return null;
+      }
+    });
+
+  }
+
+  private void doPuts(VM vm, final String regionName, final int start, final int end) throws Exception {
+    vm.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        Region r = getRootRegion(regionName);
+        getCache().getLogger().info("Putting entries ");
+        for (int i = start; i < end; i++) {
+          r.put("K" + i, "V" + i);
+        }
+        return null;
+      }
+
+    });
+  }
+
+  private AsyncInvocation doAsyncPuts(VM vm, final String regionName,
+      final int start, final int end) throws Exception {
+    return vm.invokeAsync(new SerializableCallable() {
+      public Object call() throws Exception {
+        Region r = getRootRegion(regionName);
+        getCache().getLogger().info("Putting entries ");
+        for (int i = start; i < end; i++) {
+          r.put("K" + i, "V" + i);
+        }
+        return null;
+      }
+
+    });
+  }
+
+  private void verifyGetsForValue(VM vm, final String regionName, final int start, final int end, final boolean sleep) throws Exception {
+    vm.invoke(new SerializableCallable() {
+      public Object call() throws Exception {
+        if (sleep) {
+          Thread.sleep(2000);
+        }
+        getCache().getLogger().info("Getting entries ");
+        Region r = getRootRegion(regionName);
+        for (int i = start; i < end; i++) {
+          String k = "K" + i;
+          Object s = r.get(k);
+          String v = "V" + i;
+          assertTrue("The expected key " + v+ " didn't match the received value " + s, v.equals(s));
+        }
+        return null;
+      }
+
+    });
+
+  }
+}