Posted to commits@geode.apache.org by as...@apache.org on 2015/10/21 17:58:48 UTC
[01/15] incubator-geode git commit: GEODE-429: Remove HdfsStore parser in cache xml
Repository: incubator-geode
Updated Branches:
refs/heads/feature/GEODE-409 ef5d9e2d6 -> 07d55bda1
GEODE-429: Remove HdfsStore parser in cache xml
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/12318e9c
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/12318e9c
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/12318e9c
Branch: refs/heads/feature/GEODE-409
Commit: 12318e9cf862795e46540fdf72836fd8cbba262d
Parents: 7f25197
Author: Ashvin Agrawal <as...@apache.org>
Authored: Mon Oct 19 14:36:25 2015 -0700
Committer: Ashvin Agrawal <as...@apache.org>
Committed: Wed Oct 21 08:55:22 2015 -0700
----------------------------------------------------------------------
.../hdfs/internal/HDFSConfigJUnitTest.java | 524 -------------------
1 file changed, 524 deletions(-)
----------------------------------------------------------------------
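For context, the deleted HDFSConfigJUnitTest exercised the cache-xml parsing path that this change removes. A condensed sketch of that path, with names taken from the diff below ("cache" stands for the GemFireCacheImpl created in setUp; the hdfs-store element and the HDFSStore APIs no longer exist after this change, so this is illustrative only):

    // Condensed from testHdfsStoreConfMinParse in the deleted file below.
    String xml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
        + "<cache xmlns=\"http://schema.pivotal.io/gemfire/cache\" version=\"9.0\">"
        + "  <hdfs-store name=\"store\" namenode-url=\"url\" />"
        + "</cache>";
    cache.loadCacheXml(new ByteArrayInputStream(xml.getBytes()));
    HDFSStoreImpl store = cache.findHDFSStore("store");
    assertEquals("url", store.getNameNodeURL());   // parsed attribute
    assertEquals("gemfire", store.getHomeDir());   // default home-dir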
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/12318e9c/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
deleted file mode 100644
index 26e6c73..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
+++ /dev/null
@@ -1,524 +0,0 @@
- /*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.OutputStreamWriter;
-import java.io.PrintWriter;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-
-import junit.framework.TestCase;
-
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.CacheXmlException;
-import com.gemstone.gemfire.cache.DiskStoreFactory;
-import com.gemstone.gemfire.cache.EvictionAttributes;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionFactory;
-import com.gemstone.gemfire.cache.RegionShortcut;
-import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueImpl;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.internal.cache.control.HeapMemoryMonitor;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-import org.apache.hadoop.hbase.regionserver.StoreFile;
-import org.junit.experimental.categories.Category;
-
-/**
- * A test class for testing the configuration option for HDFS
- *
- * @author Hemant Bhanawat
- * @author Ashvin Agrawal
- */
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSConfigJUnitTest extends TestCase {
- private GemFireCacheImpl c;
-
- public HDFSConfigJUnitTest() {
- super();
- }
-
- @Override
- public void setUp() {
- System.setProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP, "true");
- this.c = createCache();
- AbstractHoplogOrganizer.JUNIT_TEST_RUN = true;
- }
-
- @Override
- public void tearDown() {
- this.c.close();
- }
-
- public void testHDFSStoreCreation() throws Exception {
- this.c.close();
- this.c = createCache();
- try {
- HDFSStoreFactory hsf = this.c.createHDFSStoreFactory();
- HDFSStore store = hsf.create("myHDFSStore");
- RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
-// rf1.setHDFSStoreName("myHDFSStore");
- Region r1 = rf1.create("r1");
-
- r1.put("k1", "v1");
-
- assertTrue("Mismatch in attributes, actual.batchsize: " + store.getBatchSize() + " and expected batchsize: 32", store.getBatchSize()== 32);
- assertTrue("Mismatch in attributes, actual.isPersistent: " + store.getBufferPersistent() + " and expected isPersistent: false", store.getBufferPersistent()== false);
- assertEquals(false, r1.getAttributes().getHDFSWriteOnly());
- assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getDiskStoreName() + " and expected getDiskStoreName: null", store.getDiskStoreName()== null);
- assertTrue("Mismatch in attributes, actual.getFileRolloverInterval: " + store.getWriteOnlyFileRolloverInterval() + " and expected getFileRolloverInterval: 3600", store.getWriteOnlyFileRolloverInterval() == 3600);
- assertTrue("Mismatch in attributes, actual.getMaxFileSize: " + store.getWriteOnlyFileRolloverSize() + " and expected getMaxFileSize: 256MB", store.getWriteOnlyFileRolloverSize() == 256);
- this.c.close();
-
-
- this.c = createCache();
- hsf = this.c.createHDFSStoreFactory();
- hsf.create("myHDFSStore");
-
- RegionFactory<Object, Object> rf = this.c.createRegionFactory(RegionShortcut.PARTITION);
-// rf.setHDFSStoreName("myHDFSStore");
- r1 = rf.create("r1");
-
- r1.put("k1", "v1");
- assertTrue("Mismatch in attributes, actual.batchsize: " + store.getBatchSize() + " and expected batchsize: 32", store.getBatchSize()== 32);
- assertTrue("Mismatch in attributes, actual.isPersistent: " + store.getBufferPersistent() + " and expected isPersistent: false", store.getBufferPersistent()== false);
- assertTrue("Mismatch in attributes, actual.isRandomAccessAllowed: " + r1.getAttributes().getHDFSWriteOnly() + " and expected isRandomAccessAllowed: true", r1.getAttributes().getHDFSWriteOnly()== true);
- assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getDiskStoreName() + " and expected getDiskStoreName: null", store.getDiskStoreName()== null);
- assertTrue("Mismatch in attributes, actual.batchInterval: " + store.getBatchInterval() + " and expected batchsize: 60000", store.getBatchInterval()== 60000);
- assertTrue("Mismatch in attributes, actual.isDiskSynchronous: " + store.getSynchronousDiskWrite() + " and expected isDiskSynchronous: true", store.getSynchronousDiskWrite()== true);
-
- this.c.close();
-
- this.c = createCache();
-
- File directory = new File("HDFS" + "_disk_"
- + System.currentTimeMillis());
- directory.mkdir();
- File[] dirs1 = new File[] { directory };
- DiskStoreFactory dsf = this.c.createDiskStoreFactory();
- dsf.setDiskDirs(dirs1);
- dsf.create("mydisk");
-
-
- hsf = this.c.createHDFSStoreFactory();
- hsf.setBatchSize(50);
- hsf.setDiskStoreName("mydisk");
- hsf.setBufferPersistent(true);
- hsf.setBatchInterval(50);
- hsf.setSynchronousDiskWrite(false);
- hsf.setHomeDir("/home/hemant");
- hsf.setNameNodeURL("mymachine");
- hsf.setWriteOnlyFileRolloverSize(1);
- hsf.setWriteOnlyFileRolloverInterval(10);
- hsf.create("myHDFSStore");
-
-
- rf = this.c.createRegionFactory(RegionShortcut.PARTITION);
-// rf.setHDFSStoreName("myHDFSStore").setHDFSWriteOnly(true);
- r1 = rf.create("r1");
-
- r1.put("k1", "v1");
- store = c.findHDFSStore(r1.getAttributes().getHDFSStoreName());
-
- assertTrue("Mismatch in attributes, actual.batchsize: " + store.getBatchSize() + " and expected batchsize: 50", store.getBatchSize()== 50);
- assertTrue("Mismatch in attributes, actual.isPersistent: " + store.getBufferPersistent() + " and expected isPersistent: true", store.getBufferPersistent()== true);
- assertTrue("Mismatch in attributes, actual.isRandomAccessAllowed: " + r1.getAttributes().getHDFSWriteOnly() + " and expected isRandomAccessAllowed: true", r1.getAttributes().getHDFSWriteOnly()== true);
- assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getDiskStoreName() + " and expected getDiskStoreName: mydisk", store.getDiskStoreName()== "mydisk");
- assertTrue("Mismatch in attributes, actual.HDFSStoreName: " + r1.getAttributes().getHDFSStoreName() + " and expected getDiskStoreName: myHDFSStore", r1.getAttributes().getHDFSStoreName()== "myHDFSStore");
- assertTrue("Mismatch in attributes, actual.getFolderPath: " + ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getHomeDir() + " and expected getDiskStoreName: /home/hemant", ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getHomeDir()== "/home/hemant");
- assertTrue("Mismatch in attributes, actual.getNamenode: " + ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getNameNodeURL()+ " and expected getDiskStoreName: mymachine", ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getNameNodeURL()== "mymachine");
- assertTrue("Mismatch in attributes, actual.batchInterval: " + store.getBatchInterval() + " and expected batchsize: 50 ", store.getBatchSize()== 50);
- assertTrue("Mismatch in attributes, actual.isDiskSynchronous: " + store.getSynchronousDiskWrite() + " and expected isPersistent: false", store.getSynchronousDiskWrite()== false);
- assertTrue("Mismatch in attributes, actual.getFileRolloverInterval: " + store.getWriteOnlyFileRolloverInterval() + " and expected getFileRolloverInterval: 10", store.getWriteOnlyFileRolloverInterval() == 10);
- assertTrue("Mismatch in attributes, actual.getMaxFileSize: " + store.getWriteOnlyFileRolloverSize() + " and expected getMaxFileSize: 1MB", store.getWriteOnlyFileRolloverSize() == 1);
- this.c.close();
- } finally {
- this.c.close();
- }
- }
-
- public void testCacheXMLParsing() throws Exception {
- try {
- this.c.close();
-
- Region r1 = null;
-
- // use a cache.xml to recover
- this.c = createCache();
- ByteArrayOutputStream baos = new ByteArrayOutputStream();
- PrintWriter pw = new PrintWriter(new OutputStreamWriter(baos), true);
- pw.println("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
-// pw.println("<?xml version=\"1.0\"?>");
-// pw.println("<!DOCTYPE cache PUBLIC");
-// pw.println(" \"-//GemStone Systems, Inc.//GemFire Declarative Caching 7.5//EN\"");
-// pw.println(" \"http://www.gemstone.com/dtd/cache7_5.dtd\">");
- pw.println("<cache ");
- pw.println("xmlns=\"http://schema.pivotal.io/gemfire/cache\"");
- pw.println("xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"");
- pw.println(" xsi:schemaLocation=\"http://schema.pivotal.io/gemfire/cache http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd\"");
- pw.println("version=\"9.0\">");
-
- pw.println(" <hdfs-store name=\"myHDFSStore\" namenode-url=\"mynamenode\" home-dir=\"mypath\" />");
- pw.println(" <region name=\"r1\" refid=\"PARTITION_HDFS\">");
- pw.println(" <region-attributes hdfs-store-name=\"myHDFSStore\"/>");
- pw.println(" </region>");
- pw.println("</cache>");
- pw.close();
- byte[] bytes = baos.toByteArray();
- this.c.loadCacheXml(new ByteArrayInputStream(bytes));
-
- r1 = this.c.getRegion("/r1");
- HDFSStoreImpl store = c.findHDFSStore(r1.getAttributes().getHDFSStoreName());
- r1.put("k1", "v1");
- assertTrue("Mismatch in attributes, actual.batchsize: " + store.getBatchSize() + " and expected batchsize: 32", store.getBatchSize()== 32);
- assertTrue("Mismatch in attributes, actual.isPersistent: " + store.getBufferPersistent() + " and expected isPersistent: false", store.getBufferPersistent()== false);
- assertEquals(false, r1.getAttributes().getHDFSWriteOnly());
- assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getDiskStoreName() + " and expected getDiskStoreName: null", store.getDiskStoreName()== null);
- assertTrue("Mismatch in attributes, actual.getFileRolloverInterval: " + store.getWriteOnlyFileRolloverInterval() + " and expected getFileRolloverInterval: 3600", store.getWriteOnlyFileRolloverInterval() == 3600);
- assertTrue("Mismatch in attributes, actual.getMaxFileSize: " + store.getWriteOnlyFileRolloverSize() + " and expected getMaxFileSize: 256MB", store.getWriteOnlyFileRolloverSize() == 256);
-
- this.c.close();
-
- // use a cache.xml to recover
- this.c = createCache();
- baos = new ByteArrayOutputStream();
- pw = new PrintWriter(new OutputStreamWriter(baos), true);
- pw.println("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
-// pw.println("<?xml version=\"1.0\"?>");
-// pw.println("<!DOCTYPE cache PUBLIC");
-// pw.println(" \"-//GemStone Systems, Inc.//GemFire Declarative Caching 7.5//EN\"");
-// pw.println(" \"http://www.gemstone.com/dtd/cache7_5.dtd\">");
- pw.println("<cache ");
- pw.println("xmlns=\"http://schema.pivotal.io/gemfire/cache\"");
- pw.println("xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"");
- pw.println(" xsi:schemaLocation=\"http://schema.pivotal.io/gemfire/cache http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd\"");
- pw.println("version=\"9.0\">");
- pw.println(" <hdfs-store name=\"myHDFSStore\" namenode-url=\"mynamenode\" home-dir=\"mypath\" />");
- pw.println(" <region name=\"r1\" refid=\"PARTITION_WRITEONLY_HDFS_STORE\">");
- pw.println(" <region-attributes hdfs-store-name=\"myHDFSStore\"/>");
- pw.println(" </region>");
- pw.println("</cache>");
- pw.close();
- bytes = baos.toByteArray();
- this.c.loadCacheXml(new ByteArrayInputStream(bytes));
-
- r1 = this.c.getRegion("/r1");
- store = c.findHDFSStore(r1.getAttributes().getHDFSStoreName());
- r1.put("k1", "v1");
- assertTrue("Mismatch in attributes, actual.batchsize: " + store.getBatchSize() + " and expected batchsize: 32", store.getBatchSize()== 32);
- assertTrue("Mismatch in attributes, actual.isPersistent: " + store.getBufferPersistent() + " and expected isPersistent: false", store.getBufferPersistent()== false);
- assertTrue("Mismatch in attributes, actual.isRandomAccessAllowed: " + r1.getAttributes().getHDFSWriteOnly() + " and expected isRandomAccessAllowed: false", r1.getAttributes().getHDFSWriteOnly()== false);
- assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getDiskStoreName() + " and expected getDiskStoreName: null", store.getDiskStoreName()== null);
-
- this.c.close();
-
- // use a cache.xml to recover
- this.c = createCache();
- baos = new ByteArrayOutputStream();
- pw = new PrintWriter(new OutputStreamWriter(baos), true);
- pw.println("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
-// pw.println("<?xml version=\"1.0\"?>");
-// pw.println("<!DOCTYPE cache PUBLIC");
-// pw.println(" \"-//GemStone Systems, Inc.//GemFire Declarative Caching 7.5//EN\"");
-// pw.println(" \"http://www.gemstone.com/dtd/cache7_5.dtd\">");
- pw.println("<cache ");
- pw.println("xmlns=\"http://schema.pivotal.io/gemfire/cache\"");
- pw.println("xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"");
- pw.println(" xsi:schemaLocation=\"http://schema.pivotal.io/gemfire/cache http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd\"");
- pw.println("version=\"9.0\">");
-
- pw.println(" <disk-store name=\"mydiskstore\"/>");
- pw.println(" <hdfs-store name=\"myHDFSStore\" namenode-url=\"mynamenode\" home-dir=\"mypath\" max-write-only-file-size=\"1\" write-only-file-rollover-interval=\"10\" ");
- pw.println(" batch-size=\"151\" buffer-persistent =\"true\" disk-store=\"mydiskstore\" synchronous-disk-write=\"false\" batch-interval=\"50\"");
- pw.println(" />");
- pw.println(" <region name=\"r1\" refid=\"PARTITION_WRITEONLY_HDFS_STORE\">");
- pw.println(" <region-attributes hdfs-store-name=\"myHDFSStore\" hdfs-write-only=\"false\">");
- pw.println(" </region-attributes>");
- pw.println(" </region>");
- pw.println("</cache>");
- pw.close();
- bytes = baos.toByteArray();
- this.c.loadCacheXml(new ByteArrayInputStream(bytes));
-
- r1 = this.c.getRegion("/r1");
- store = c.findHDFSStore(r1.getAttributes().getHDFSStoreName());
- r1.put("k1", "v1");
- assertTrue("Mismatch in attributes, actual.batchsize: " + store.getBatchSize() + " and expected batchsize: 151", store.getBatchSize()== 151);
- assertTrue("Mismatch in attributes, actual.isPersistent: " + store.getBufferPersistent() + " and expected isPersistent: true", store.getBufferPersistent()== true);
- assertTrue("Mismatch in attributes, actual.isRandomAccessAllowed: " + r1.getAttributes().getHDFSWriteOnly() + " and expected isRandomAccessAllowed: true", r1.getAttributes().getHDFSWriteOnly()== false);
- assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getDiskStoreName() + " and expected getDiskStoreName: mydiskstore", store.getDiskStoreName().equals("mydiskstore"));
- assertTrue("Mismatch in attributes, actual.HDFSStoreName: " + r1.getAttributes().getHDFSStoreName() + " and expected getDiskStoreName: myHDFSStore", r1.getAttributes().getHDFSStoreName().equals("myHDFSStore"));
- assertTrue("Mismatch in attributes, actual.getFolderPath: " + ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getHomeDir() + " and expected getDiskStoreName: mypath", ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getHomeDir().equals("mypath"));
- assertTrue("Mismatch in attributes, actual.getNamenode: " + ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getNameNodeURL()+ " and expected getDiskStoreName: mynamenode", ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getNameNodeURL().equals("mynamenode"));
- assertTrue("Mismatch in attributes, actual.batchInterval: " + store.getBatchInterval() + " and expected batchsize: 50", store.getBatchInterval()== 50);
- assertTrue("Mismatch in attributes, actual.isDiskSynchronous: " + store.getSynchronousDiskWrite() + " and expected isDiskSynchronous: false", store.getSynchronousDiskWrite()== false);
- assertTrue("Mismatch in attributes, actual.getFileRolloverInterval: " + store.getWriteOnlyFileRolloverInterval() + " and expected getFileRolloverInterval: 10", store.getWriteOnlyFileRolloverInterval() == 10);
- assertTrue("Mismatch in attributes, actual.getMaxFileSize: " + store.getWriteOnlyFileRolloverSize() + " and expected getMaxFileSize: 1MB", store.getWriteOnlyFileRolloverSize() == 1);
-
- this.c.close();
- } finally {
- this.c.close();
- }
- }
-
- /**
- * Validates if hdfs store conf is getting completely and correctly parsed
- */
- public void testHdfsStoreConfFullParsing() {
- String conf = createStoreConf("123");
- this.c.loadCacheXml(new ByteArrayInputStream(conf.getBytes()));
- HDFSStoreImpl store = ((GemFireCacheImpl)this.c).findHDFSStore("store");
- assertEquals("namenode url mismatch.", "url", store.getNameNodeURL());
- assertEquals("home-dir mismatch.", "dir", store.getHomeDir());
- assertEquals("hdfs-client-config-file mismatch.", "client", store.getHDFSClientConfigFile());
- assertEquals("read-cache-size mismatch.", 24.5f, store.getBlockCacheSize());
-
- assertFalse("compaction auto-compact mismatch.", store.getMinorCompaction());
- assertTrue("compaction auto-major-compact mismatch.", store.getMajorCompaction());
- assertEquals("compaction max-concurrency", 23, store.getMinorCompactionThreads());
- assertEquals("compaction max-major-concurrency", 27, store.getMajorCompactionThreads());
- assertEquals("compaction major-interval", 711, store.getPurgeInterval());
- }
-
- /**
- * Validates that the config defaults are set even with minimum XML configuration
- */
- public void testHdfsStoreConfMinParse() {
- this.c.loadCacheXml(new ByteArrayInputStream(XML_MIN_CONF.getBytes()));
- HDFSStoreImpl store = ((GemFireCacheImpl)this.c).findHDFSStore("store");
- assertEquals("namenode url mismatch.", "url", store.getNameNodeURL());
- assertEquals("home-dir mismatch.", "gemfire", store.getHomeDir());
-
- assertTrue("compaction auto-compact mismatch.", store.getMinorCompaction());
- assertTrue("compaction auto-major-compact mismatch.", store.getMajorCompaction());
- assertEquals("compaction max-input-file-size mismatch.", 512, store.getInputFileSizeMax());
- assertEquals("compaction min-input-file-count.", 4, store.getInputFileCountMin());
- assertEquals("compaction max-iteration-size.", 10, store.getInputFileCountMax());
- assertEquals("compaction max-concurrency", 10, store.getMinorCompactionThreads());
- assertEquals("compaction max-major-concurrency", 2, store.getMajorCompactionThreads());
- assertEquals("compaction major-interval", 720, store.getMajorCompactionInterval());
- assertEquals("compaction cleanup-interval", 30, store.getPurgeInterval());
- }
-
- /**
- * Validates that cache creation fails if a compaction configuration is
- * provided which is not applicable to the selected compaction strategy
- */
- public void testHdfsStoreInvalidCompactionConf() {
- String conf = createStoreConf("123");
- try {
- this.c.loadCacheXml(new ByteArrayInputStream(conf.getBytes()));
- // expected
- } catch (CacheXmlException e) {
- fail();
- }
- }
-
- /**
- * Validates that cache creation fails if a compaction configuration is
- * provided which is not applicable to the selected compaction strategy
- */
- public void testInvalidConfigCheck() throws Exception {
- this.c.close();
-
- this.c = createCache();
-
- HDFSStoreFactory hsf;
- hsf = this.c.createHDFSStoreFactory();
-
- try {
- hsf.setInputFileSizeMax(-1);
- fail("validation failed");
- } catch (IllegalArgumentException e) {
- //expected
- }
- try {
- hsf.setInputFileCountMin(-1);
- fail("validation failed");
- } catch (IllegalArgumentException e) {
- //expected
- }
- try {
- hsf.setInputFileCountMax(-1);
- //expected
- fail("validation failed");
- } catch (IllegalArgumentException e) {
- }
- try {
- hsf.setMinorCompactionThreads(-1);
- fail("validation failed");
- } catch (IllegalArgumentException e) {
- //expected
- }
- try {
- hsf.setMajorCompactionInterval(-1);
- fail("validation failed");
- } catch (IllegalArgumentException e) {
- //expected
- }
- try {
- hsf.setMajorCompactionThreads(-1);
- fail("validation failed");
- } catch (IllegalArgumentException e) {
- //expected
- }
- try {
- hsf.setPurgeInterval(-1);
- fail("validation failed");
- } catch (IllegalArgumentException e) {
- //expected
- }
- try {
- hsf.setInputFileCountMin(2);
- hsf.setInputFileCountMax(1);
- hsf.create("test");
- fail("validation failed");
- } catch (IllegalArgumentException e) {
- //expected
- }
- try {
- hsf.setInputFileCountMax(1);
- hsf.setInputFileCountMin(2);
- hsf.create("test");
- fail("validation failed");
- } catch (IllegalArgumentException e) {
- //expected
- }
- }
-
- /**
- * Validates cache creation fails if invalid integer size configuration is provided
- * @throws Exception
- */
- public void testHdfsStoreConfInvalidInt() throws Exception {
- String conf = createStoreConf("NOT_INTEGER");
- try {
- this.c.loadCacheXml(new ByteArrayInputStream(conf.getBytes()));
- fail();
- } catch (CacheXmlException e) {
- // expected
- }
- }
-
-
- private static String XML_MIN_CONF = "<?xml version=\"1.0\" encoding=\"UTF-8\"?> \n"
- + "<cache \n"
- + "xmlns=\"http://schema.pivotal.io/gemfire/cache\"\n"
- + "xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n"
- + " xsi:schemaLocation=\"http://schema.pivotal.io/gemfire/cache http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd\"\n"
- + "version=\"9.0\">" +
- " <hdfs-store name=\"store\" namenode-url=\"url\" />" +
- "</cache>";
-
- private static String XML_FULL_CONF = "<?xml version=\"1.0\" encoding=\"UTF-8\"?> \n"
- + "<cache \n"
- + "xmlns=\"http://schema.pivotal.io/gemfire/cache\"\n"
- + "xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n"
- + " xsi:schemaLocation=\"http://schema.pivotal.io/gemfire/cache http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd\"\n"
- + "version=\"9.0\">"
- + " <hdfs-store name=\"store\" namenode-url=\"url\" "
- + " home-dir=\"dir\" "
- + " read-cache-size=\"24.5\" "
- + " max-write-only-file-size=\"FILE_SIZE_CONF\" "
- + " minor-compaction-threads = \"23\""
- + " major-compaction-threads = \"27\""
- + " major-compaction=\"true\" "
- + " minor-compaction=\"false\" "
- + " major-compaction-interval=\"781\" "
- + " purge-interval=\"711\" hdfs-client-config-file=\"client\" />\n"
- + "</cache>";
- // potential replacement targets
- String FILE_SIZE_CONF_SUBSTRING = "FILE_SIZE_CONF";
-
- private String createStoreConf(String fileSize) {
- String result = XML_FULL_CONF;
-
- String replaceWith = (fileSize == null) ? "123" : fileSize;
- result = result.replaceFirst(FILE_SIZE_CONF_SUBSTRING, replaceWith);
-
- return result;
- }
-
- public void _testBlockCacheConfiguration() throws Exception {
- this.c.close();
- this.c = createCache();
- try {
- HDFSStoreFactory hsf = this.c.createHDFSStoreFactory();
-
- //Configure a block cache to cache about 20 blocks.
- long heapSize = HeapMemoryMonitor.getTenuredPoolMaxMemory();
- int blockSize = StoreFile.DEFAULT_BLOCKSIZE_SMALL;
- int blockCacheSize = 5 * blockSize;
- int entrySize = blockSize / 2;
-
-
- float percentage = 100 * (float) blockCacheSize / (float) heapSize;
- hsf.setBlockCacheSize(percentage);
- HDFSStoreImpl store = (HDFSStoreImpl) hsf.create("myHDFSStore");
- RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
- //Create a region that evicts everything
-// rf1.setHDFSStoreName("myHDFSStore");
- LocalRegion r1 = (LocalRegion) rf1.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(1)).create("r1");
-
- //Populate about many times our block cache size worth of data
- //We want to try to cache at least 5 blocks worth of index and metadata
- byte[] value = new byte[entrySize];
- int numEntries = 10 * blockCacheSize / entrySize;
- for(int i = 0; i < numEntries; i++) {
- r1.put(i, value);
- }
-
- //Wait for the events to be written to HDFS.
- Set<String> queueIds = r1.getAsyncEventQueueIds();
- assertEquals(1, queueIds.size());
- AsyncEventQueueImpl queue = (AsyncEventQueueImpl) c.getAsyncEventQueue(queueIds.iterator().next());
- long end = System.nanoTime() + TimeUnit.SECONDS.toNanos(120);
- while(queue.size() > 0 && System.nanoTime() < end) {
- Thread.sleep(10);
- }
- assertEquals(0, queue.size());
-
-
- Thread.sleep(10000);
-
- //Do some reads to cache some blocks. Note that this doesn't
- //end up caching data blocks, just index and bloom filters blocks.
- for(int i = 0; i < numEntries; i++) {
- r1.get(i);
- }
-
- long statSize = store.getStats().getBlockCache().getBytesCached();
- assertTrue("Block cache stats expected to be near " + blockCacheSize + " was " + statSize,
- blockCacheSize / 2 < statSize &&
- statSize <= 2 * blockCacheSize);
-
- long currentSize = store.getBlockCache().getCurrentSize();
- assertTrue("Block cache size expected to be near " + blockCacheSize + " was " + currentSize,
- blockCacheSize / 2 < currentSize &&
- currentSize <= 2 * blockCacheSize);
-
- } finally {
- this.c.close();
- }
- }
-
- protected GemFireCacheImpl createCache() {
- return (GemFireCacheImpl) new CacheFactory().set("mcast-port", "0").set("log-level", "info")
- .create();
- }
-}
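The deleted test also covered the programmatic configuration path and its eager validation; a condensed sketch, again with API names exactly as they appear in the diff above (all removed by this change):

    // Condensed from testHDFSStoreCreation and testInvalidConfigCheck above.
    HDFSStoreFactory hsf = cache.createHDFSStoreFactory();
    hsf.setNameNodeURL("mymachine");
    hsf.setHomeDir("/home/hemant");
    hsf.setBatchSize(50);
    hsf.setBufferPersistent(true);
    HDFSStore store = hsf.create("myHDFSStore");
    // Negative values were rejected at set time:
    try {
      hsf.setPurgeInterval(-1);
      fail("validation failed");
    } catch (IllegalArgumentException e) {
      // expected
    }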
[02/15] incubator-geode git commit: GEODE-429: Remove hdfsStore gfsh commands
Posted by as...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunctionJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunctionJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunctionJUnitTest.java
deleted file mode 100644
index f3c66b0..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunctionJUnitTest.java
+++ /dev/null
@@ -1,364 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-
-package com.gemstone.gemfire.management.internal.cli.functions;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.apache.logging.log4j.Logger;
-import org.jmock.Expectations;
-import org.jmock.Mockery;
-import org.jmock.lib.legacy.ClassImposteriser;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.execute.ResultSender;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.management.internal.cli.util.HDFSStoreNotFoundException;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-/**
- * The DescribeHDFSStoreFunctionJUnitTest test suite class tests the contract
- * and functionality of the DescribeHDFSStoreFunction class. </p>
- *
- * @author Namrata Thanvi
- * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl
- * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder
- * @see com.gemstone.gemfire.management.internal.cli.functions.DescribeHDFSStoreFunction
- * @see org.jmock.Expectations
- * @see org.jmock.Mockery
- * @see org.junit.Assert
- * @see org.junit.Test
- */
-@SuppressWarnings( { "unused" })
-@Category({IntegrationTest.class, HoplogTest.class})
-public class DescribeHDFSStoreFunctionJUnitTest {
-
- private static final Logger logger = LogService.getLogger();
-
- private Mockery mockContext;
-
- @Before
- public void setup() {
- mockContext = new Mockery() {
- {
- setImposteriser(ClassImposteriser.INSTANCE);
- }
- };
- }
-
- @After
- public void tearDown() {
- mockContext.assertIsSatisfied();
- mockContext = null;
- }
-
- @Test
- public void testExecute() throws Throwable {
- final String hdfsStoreName = "mockHdfsStore";
- final String memberId = "mockMemberId";
- final String memberName = "mockMemberName";
-
- final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-
- final HDFSStoreImpl mockHdfsStore = createMockHDFSStore(hdfsStoreName, "hdfs://localhost:9000", "testDir", 1024, 20, .25f,
- null, 20, 20, null, false, 0, 1024, false, false, true, 20, 20, 10, 100);
-
- final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
- final LogService mockLogService = mockContext.mock(LogService.class, "LogService");
-
- final TestResultSender testResultSender = new TestResultSender();
-
- mockContext.checking(new Expectations() {
- {
- oneOf(mockCache).findHDFSStore(hdfsStoreName);
- will(returnValue(mockHdfsStore));
- oneOf(mockMember).getName();
- will(returnValue(memberName));
- oneOf(mockFunctionContext).getArguments();
- will(returnValue(hdfsStoreName));
- oneOf(mockFunctionContext).getResultSender();
- will(returnValue(testResultSender));
- }
- });
-
- final DescribeHDFSStoreFunction function = createDescribeHDFSStoreFunction(mockCache, mockMember);
-
- function.execute(mockFunctionContext);
-
- final List<?> results = testResultSender.getResults();
-
- assertNotNull(results);
- assertEquals(1, results.size());
-
- final HDFSStoreConfigHolder hdfsStoreDetails = (HDFSStoreConfigHolder)results.get(0);
-
- assertNotNull(hdfsStoreDetails);
- assertEquals(hdfsStoreName, hdfsStoreDetails.getName());
- assertEquals("hdfs://localhost:9000", hdfsStoreDetails.getNameNodeURL());
- assertEquals("testDir", hdfsStoreDetails.getHomeDir());
- assertEquals(1024, hdfsStoreDetails.getWriteOnlyFileRolloverSize());
- assertEquals(20, hdfsStoreDetails.getWriteOnlyFileRolloverInterval());
- assertFalse(hdfsStoreDetails.getMinorCompaction());
- assertEquals("0.25", Float.toString(hdfsStoreDetails.getBlockCacheSize()));
- assertNull(hdfsStoreDetails.getHDFSClientConfigFile());
- assertTrue(hdfsStoreDetails.getMajorCompaction());
- assertEquals(20, hdfsStoreDetails.getMajorCompactionInterval());
- assertEquals(20, hdfsStoreDetails.getMajorCompactionThreads());
- assertEquals(10, hdfsStoreDetails.getMinorCompactionThreads());
- assertEquals(100, hdfsStoreDetails.getPurgeInterval());
-
- assertEquals(20, hdfsStoreDetails.getBatchSize());
- assertEquals(20, hdfsStoreDetails.getBatchInterval());
- assertNull(hdfsStoreDetails.getDiskStoreName());
- assertFalse(hdfsStoreDetails.getSynchronousDiskWrite());
- assertEquals(0, hdfsStoreDetails.getDispatcherThreads());
- assertEquals(1024, hdfsStoreDetails.getMaxMemory());
- assertFalse(hdfsStoreDetails.getBufferPersistent());
- }
-
-
- @Test
- public void testExecuteOnMemberHavingANonGemFireCache() throws Throwable {
- final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-
- final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final TestResultSender testResultSender = new TestResultSender();
-
- mockContext.checking(new Expectations() {{
- exactly(0).of(mockFunctionContext).getResultSender();
- will(returnValue(testResultSender));
-
- }});
-
- final DescribeHDFSStoreFunction function = createDescribeHDFSStoreFunction(mockCache , mockMember);
-
- function.execute(mockFunctionContext);
-
- final List<?> results = testResultSender.getResults();
-
- assertNotNull(results);
- assertTrue(results.isEmpty());
- }
-
-
- @Test(expected = HDFSStoreNotFoundException.class)
- public void testExecuteThrowingResourceNotFoundException() throws Throwable{
- final String hdfsStoreName = "testHdfsStore";
- final String memberId = "mockMemberId";
- final String memberName = "mockMemberName";
-
- final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
-
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-
- final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
-
- final TestResultSender testResultSender = new TestResultSender();
-
- mockContext.checking(new Expectations() {{
- oneOf(mockCache).findHDFSStore(hdfsStoreName);
- will(returnValue(null));
- oneOf(mockMember).getName();
- will(returnValue(memberName));
- oneOf(mockFunctionContext).getArguments();
- will(returnValue(hdfsStoreName));
- oneOf(mockFunctionContext).getResultSender();
- will(returnValue(testResultSender));
- }});
-
- final DescribeHDFSStoreFunction function = createDescribeHDFSStoreFunction(mockCache,mockMember);
-
- function.execute(mockFunctionContext);
-
- try {
- testResultSender.getResults();
- }
- catch (HDFSStoreNotFoundException e) {
- assertEquals(String.format("A hdfs store with name (%1$s) was not found on member (%2$s).",
- hdfsStoreName, memberName), e.getMessage());
- throw e;
- }
- }
-
-
- @Test(expected = RuntimeException.class)
- public void testExecuteThrowingRuntimeException() throws Throwable {
- final String hdfsStoreName = "testHdfsStore";
- final String memberId = "mockMemberId";
- final String memberName = "mockMemberName";
-
- final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
-
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-
- final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
-
- final TestResultSender testResultSender = new TestResultSender();
-
- mockContext.checking(new Expectations() {{
- oneOf(mockCache).findHDFSStore(hdfsStoreName);
- will(throwException(new RuntimeException("ExpectedStrings")));
- oneOf(mockMember).getName();
- will(returnValue(memberName));
- oneOf(mockFunctionContext).getArguments();
- will(returnValue(hdfsStoreName));
- oneOf(mockFunctionContext).getResultSender();
- will(returnValue(testResultSender));
- }});
-
- final DescribeHDFSStoreFunction function = createDescribeHDFSStoreFunction(mockCache, mockMember);
-
- function.execute(mockFunctionContext);
-
- try {
- testResultSender.getResults();
- }
- catch (RuntimeException e) {
- assertEquals("ExpectedStrings", e.getMessage());
- throw e;
- }
- }
-
-
- protected HDFSStoreImpl createMockHDFSStore(final String storeName, final String namenode, final String homeDir,
- final int maxFileSize, final int fileRolloverInterval, final float blockCachesize, final String clientConfigFile,
- final int batchSize, final int batchInterval, final String diskStoreName, final boolean syncDiskwrite,
- final int dispatcherThreads, final int maxMemory, final boolean bufferPersistent, final boolean minorCompact,
- final boolean majorCompact, final int majorCompactionInterval, final int majorCompactionThreads,
- final int minorCompactionThreads, final int purgeInterval) {
-
- final HDFSStoreImpl mockHdfsStore = mockContext.mock(HDFSStoreImpl.class, storeName);
-
- mockContext.checking(new Expectations() {
- {
- oneOf(mockHdfsStore).getMajorCompaction();
- will(returnValue(majorCompact));
- oneOf(mockHdfsStore).getMajorCompactionInterval();
- will(returnValue(majorCompactionInterval));
- oneOf(mockHdfsStore).getMajorCompactionThreads();
- will(returnValue(majorCompactionThreads));
- oneOf(mockHdfsStore).getMinorCompactionThreads();
- will(returnValue(minorCompactionThreads));
- oneOf(mockHdfsStore).getPurgeInterval();
- will(returnValue(purgeInterval));
- oneOf(mockHdfsStore).getInputFileCountMax();
- will(returnValue(10));
- oneOf(mockHdfsStore).getInputFileSizeMax();
- will(returnValue(1024));
- oneOf(mockHdfsStore).getInputFileCountMin();
- will(returnValue(2));
- oneOf(mockHdfsStore).getBatchSize();
- will(returnValue(batchSize));
- oneOf(mockHdfsStore).getBatchInterval();
- will(returnValue(batchInterval));
- oneOf(mockHdfsStore).getDiskStoreName();
- will(returnValue(diskStoreName));
- oneOf(mockHdfsStore).getSynchronousDiskWrite();
- will(returnValue(syncDiskwrite));
- oneOf(mockHdfsStore).getBufferPersistent();
- will(returnValue(bufferPersistent));
- oneOf(mockHdfsStore).getDispatcherThreads();
- will(returnValue(dispatcherThreads));
- oneOf(mockHdfsStore).getMaxMemory();
- will(returnValue(maxMemory));
- oneOf(mockHdfsStore).getName();
- will(returnValue(storeName));
- oneOf(mockHdfsStore).getNameNodeURL();
- will(returnValue(namenode));
- oneOf(mockHdfsStore).getHomeDir();
- will(returnValue(homeDir));
- oneOf(mockHdfsStore).getWriteOnlyFileRolloverSize();
- will(returnValue(maxFileSize));
- oneOf(mockHdfsStore).getWriteOnlyFileRolloverInterval();
- will(returnValue(fileRolloverInterval));
- oneOf(mockHdfsStore).getMinorCompaction();
- will(returnValue(minorCompact));
- oneOf(mockHdfsStore).getBlockCacheSize();
- will(returnValue(blockCachesize));
- allowing(mockHdfsStore).getHDFSClientConfigFile();
- will(returnValue(clientConfigFile));
- }
- });
- return mockHdfsStore;
- }
-
- protected TestDescribeHDFSStoreFunction createDescribeHDFSStoreFunction(final Cache cache, DistributedMember member) {
- return new TestDescribeHDFSStoreFunction(cache, member);
- }
-
- protected static class TestDescribeHDFSStoreFunction extends DescribeHDFSStoreFunction {
- private static final long serialVersionUID = 1L;
-
- private final Cache cache;
-
- private final DistributedMember member;
-
- public TestDescribeHDFSStoreFunction(final Cache cache, DistributedMember member) {
- this.cache = cache;
- this.member = member;
- }
-
- @Override
- protected Cache getCache() {
- return this.cache;
- }
-
- @Override
- protected DistributedMember getDistributedMemberId(Cache cache) {
- return member;
- }
- }
-
- protected static class TestResultSender implements ResultSender {
-
- private final List<Object> results = new LinkedList<Object>();
-
- private Throwable t;
-
- protected List<Object> getResults() throws Throwable {
- if (t != null) {
- throw t;
- }
- return Collections.unmodifiableList(results);
- }
-
- public void lastResult(final Object lastResult) {
- results.add(lastResult);
- }
-
- public void sendResult(final Object oneResult) {
- results.add(oneResult);
- }
-
- public void sendException(final Throwable t) {
- this.t = t;
- }
- }
-
-}
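All of the deleted CLI-function tests in this series follow the same jMock 2 pattern; a condensed sketch with class and method names as they appear in these diffs ("function" stands for the Test*Function subclass each test builds):

    // Shared pattern: mock the function context, record expectations,
    // run the function, then inspect the captured results.
    Mockery mockContext = new Mockery() {{
      setImposteriser(ClassImposteriser.INSTANCE); // allows mocking concrete classes
    }};
    final FunctionContext mockFunctionContext =
        mockContext.mock(FunctionContext.class, "FunctionContext");
    final TestResultSender testResultSender = new TestResultSender();
    mockContext.checking(new Expectations() {{
      oneOf(mockFunctionContext).getArguments();
      will(returnValue("mockHdfsStore"));
      oneOf(mockFunctionContext).getResultSender();
      will(returnValue(testResultSender));
    }});
    function.execute(mockFunctionContext);
    assertEquals(1, testResultSender.getResults().size());
    mockContext.assertIsSatisfied();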
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunctionJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunctionJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunctionJUnitTest.java
deleted file mode 100644
index 08e18ec..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunctionJUnitTest.java
+++ /dev/null
@@ -1,305 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-
-package com.gemstone.gemfire.management.internal.cli.functions;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.apache.logging.log4j.Logger;
-import org.jmock.Expectations;
-import org.jmock.Mockery;
-import org.jmock.lib.legacy.ClassImposteriser;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheClosedException;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.execute.ResultSender;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-/**
- * The DestroyHDFSStoreFunctionJUnitTest test suite class tests the contract and
- * functionality of the DestroyHDFSStoreFunction class. </p>
- *
- * @author Namrata Thanvi
- * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl
- * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder
- * @see com.gemstone.gemfire.management.internal.cli.functions.DestroyHDFSStoreFunction
- * @see org.jmock.Expectations
- * @see org.jmock.Mockery
- * @see org.junit.Assert
- * @see org.junit.Test
- */
-@SuppressWarnings( { "unused" })
-@Category({IntegrationTest.class, HoplogTest.class})
-public class DestroyHDFSStoreFunctionJUnitTest {
-
- private static final Logger logger = LogService.getLogger();
-
- private Mockery mockContext;
-
- @Before
- public void setup() {
- mockContext = new Mockery() {
- {
- setImposteriser(ClassImposteriser.INSTANCE);
- }
- };
- }
-
- @After
- public void tearDown() {
- mockContext.assertIsSatisfied();
- mockContext = null;
- }
-
- @Test
- public void testExecute() throws Throwable {
- final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
- final HDFSStoreImpl mockHdfsStore = mockContext.mock(HDFSStoreImpl.class, "HDFSStoreImpl");
- final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
-
- final String hdfsStoreName = "mockHdfsStore";
- final String memberId = "mockMemberId";
- final String memberName = "mockMemberName";
- final TestResultSender testResultSender = new TestResultSender();
- final DestroyHDFSStoreFunction function = createDestroyHDFSStoreFunction(mockCache, mockMember, xmlEntity);
-
- mockContext.checking(new Expectations() {
- {
- oneOf(mockCache).findHDFSStore(hdfsStoreName);
- will(returnValue(mockHdfsStore));
- one(mockHdfsStore).destroy();
- oneOf(mockMember).getId();
- will(returnValue(memberId));
- exactly(2).of(mockMember).getName();
- will(returnValue(memberName));
- oneOf(mockFunctionContext).getArguments();
- will(returnValue(hdfsStoreName));
- oneOf(mockFunctionContext).getResultSender();
- will(returnValue(testResultSender));
- }
- });
-
- function.execute(mockFunctionContext);
-
- final List<?> results = testResultSender.getResults();
-
- assertNotNull(results);
- assertEquals(1, results.size());
-
- final CliFunctionResult result = (CliFunctionResult)results.get(0);
- assertEquals(memberName, result.getMemberIdOrName());
- assertEquals("Success", result.getMessage());
-
- }
-
- @Test
- @SuppressWarnings("unchecked")
- public void testExecuteOnMemberHavingNoHDFSStore() throws Throwable {
- final String hdfsStoreName = "mockHdfsStore";
- final String memberId = "mockMemberId";
- final String memberName = "mockMemberName";
-
- final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
- final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
-
- final TestResultSender testResultSender = new TestResultSender();
- final DestroyHDFSStoreFunction function = createDestroyHDFSStoreFunction(mockCache, mockMember, xmlEntity);
-
- mockContext.checking(new Expectations() {
- {
- oneOf(mockCache).findHDFSStore(hdfsStoreName);
- will(returnValue(null));
- oneOf(mockMember).getId();
- will(returnValue(memberId));
- exactly(2).of(mockMember).getName();
- will(returnValue(memberName));
- oneOf(mockFunctionContext).getArguments();
- will(returnValue(hdfsStoreName));
- oneOf(mockFunctionContext).getResultSender();
- will(returnValue(testResultSender));
- }
- });
-
- function.execute(mockFunctionContext);
-
- final List<?> results = testResultSender.getResults();
-
- assertNotNull(results);
- assertEquals(1, results.size());
-
- final CliFunctionResult result = (CliFunctionResult)results.get(0);
- assertEquals(memberName, result.getMemberIdOrName());
- assertEquals("Hdfs store not found on this member", result.getMessage());
- }
-
- @Test
- public void testExecuteOnMemberWithNoCache() throws Throwable {
- final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "MockFunctionContext");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
- final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
-
- final String hdfsStoreName = "mockHdfsStore";
-
- final TestResultSender testResultSender = new TestResultSender();
- final DestroyHDFSStoreFunction function = new TestDestroyHDFSStoreFunction(mockCache, mockMember, xmlEntity) {
- private static final long serialVersionUID = 1L;
-
- @Override
- protected Cache getCache() {
- throw new CacheClosedException("Expected");
- }
- };
-
- mockContext.checking(new Expectations() {
- {
- oneOf(mockFunctionContext).getArguments();
- will(returnValue(hdfsStoreName));
- oneOf(mockFunctionContext).getResultSender();
- will(returnValue(testResultSender));
- }
- });
-
- function.execute(mockFunctionContext);
- final List<?> results = testResultSender.getResults();
-
- assertNotNull(results);
- assertEquals(1, results.size());
-
- final CliFunctionResult result = (CliFunctionResult)results.get(0);
- assertEquals("", result.getMemberIdOrName());
- assertNull(result.getMessage());
- }
-
- @Test
- public void testExecuteHandleRuntimeException() throws Throwable {
- final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
- final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
-
- final String hdfsStoreName = "mockHdfsStore";
- final String memberId = "mockMemberId";
- final String memberName = "mockMemberName";
-
- final TestResultSender testResultSender = new TestResultSender();
- final DestroyHDFSStoreFunction function = createDestroyHDFSStoreFunction(mockCache, mockMember, xmlEntity);
-
- mockContext.checking(new Expectations() {
- {
- oneOf(mockMember).getId();
- will(returnValue(memberId));
- exactly(2).of(mockMember).getName();
- will(returnValue(memberName));
- oneOf(mockFunctionContext).getArguments();
- will(returnValue(hdfsStoreName));
- oneOf(mockCache).findHDFSStore(hdfsStoreName);
- will(throwException(new RuntimeException("expected")));
- oneOf(mockFunctionContext).getResultSender();
- will(returnValue(testResultSender));
- }
- });
-
- function.execute(mockFunctionContext);
- final List<?> results = testResultSender.getResults();
-
- assertNotNull(results);
- assertEquals(1, results.size());
-
- final CliFunctionResult result = (CliFunctionResult)results.get(0);
- assertEquals(memberName, result.getMemberIdOrName());
- assertEquals("expected", result.getThrowable().getMessage());
-
- }
-
- protected TestDestroyHDFSStoreFunction createDestroyHDFSStoreFunction(final Cache cache, DistributedMember member,
- XmlEntity xml) {
- return new TestDestroyHDFSStoreFunction(cache, member, xml);
- }
-
- protected static class TestDestroyHDFSStoreFunction extends DestroyHDFSStoreFunction {
- private static final long serialVersionUID = 1L;
-
- private final Cache cache;
-
- private final DistributedMember member;
-
- private final XmlEntity xml;
-
- public TestDestroyHDFSStoreFunction(final Cache cache, DistributedMember member, XmlEntity xml) {
- this.cache = cache;
- this.member = member;
- this.xml = xml;
- }
-
- @Override
- protected Cache getCache() {
- return this.cache;
- }
-
- @Override
- protected DistributedMember getDistributedMember(Cache cache) {
- return member;
- }
-
- @Override
- protected XmlEntity getXMLEntity(String storeName) {
- return xml;
- }
-
- }
-
- protected static class TestResultSender implements ResultSender {
-
- private final List<Object> results = new LinkedList<Object>();
-
- private Throwable t;
-
- protected List<Object> getResults() throws Throwable {
- if (t != null) {
- throw t;
- }
- return Collections.unmodifiableList(results);
- }
-
- public void lastResult(final Object lastResult) {
- results.add(lastResult);
- }
-
- public void sendResult(final Object oneResult) {
- results.add(oneResult);
- }
-
- public void sendException(final Throwable t) {
- this.t = t;
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunctionJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunctionJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunctionJUnitTest.java
deleted file mode 100644
index 11bc430..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunctionJUnitTest.java
+++ /dev/null
@@ -1,319 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-
-package com.gemstone.gemfire.management.internal.cli.functions;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Set;
-
-import org.jmock.Expectations;
-import org.jmock.Mockery;
-import org.jmock.lib.legacy.ClassImposteriser;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheClosedException;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.execute.ResultSender;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.management.internal.cli.functions.ListHDFSStoresFunction.HdfsStoreDetails;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-/**
- * The ListHDFSStoreFunctionJUnitTest test suite class tests the contract and functionality of the
- * ListHDFSStoreFunction.
- * </p>
- * @author Namrata Thanvi
- * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl
- * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder
- * @see com.gemstone.gemfire.management.internal.cli.functions.ListHDFSStoresFunction
- * @see org.jmock.Expectations
- * @see org.jmock.Mockery
- * @see org.junit.Assert
- * @see org.junit.Test
- */
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class ListHDFSStoresFunctionJUnitTest {
- private Mockery mockContext;
-
- @Before
- public void setup() {
- mockContext = new Mockery() {
- {
- setImposteriser(ClassImposteriser.INSTANCE);
- }
- };
- }
-
- @After
- public void tearDown() {
- mockContext.assertIsSatisfied();
- mockContext = null;
- }
-
-
- @Test
- public void testExecute() throws Throwable {
- final String memberId = "mockMemberId";
- final String memberName = "mockMemberName";
- final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
- final TestResultSender testResultSender = new TestResultSender();
-
- final HDFSStoreImpl mockHdfsStoreOne = mockContext.mock(HDFSStoreImpl.class, "HDFSStoreOne");
- final HDFSStoreImpl mockHdfsStoreTwo = mockContext.mock(HDFSStoreImpl.class, "HDFSStoreTwo");
- final HDFSStoreImpl mockHdfsStoreThree = mockContext.mock(HDFSStoreImpl.class, "HDFSStoreThree");
-
- final List<HDFSStoreImpl> mockHdfsStores = new ArrayList<HDFSStoreImpl>();
-
- mockHdfsStores.add(mockHdfsStoreOne);
- mockHdfsStores.add(mockHdfsStoreTwo);
- mockHdfsStores.add(mockHdfsStoreThree);
-
- final List<String> storeNames = new ArrayList<String>();
- storeNames.add("hdfsStoreOne");
- storeNames.add("hdfsStoreTwo");
- storeNames.add("hdfsStoreThree");
-
- mockContext.checking(new Expectations() {
- {
- oneOf(mockCache).getHDFSStores();
- will(returnValue(mockHdfsStores));
- exactly(3).of(mockMember).getId();
- will(returnValue(memberId));
- exactly(3).of(mockMember).getName();
- will(returnValue(memberName));
- oneOf(mockHdfsStoreOne).getName();
- will(returnValue(storeNames.get(0)));
- oneOf(mockHdfsStoreTwo).getName();
- will(returnValue(storeNames.get(1)));
- oneOf(mockHdfsStoreThree).getName();
- will(returnValue(storeNames.get(2)));
- oneOf(mockFunctionContext).getResultSender();
- will(returnValue(testResultSender));
- }
- });
-
- final ListHDFSStoresFunction function = createListHDFSStoresFunction(mockCache, mockMember);
-
- function.execute(mockFunctionContext);
-
- final List<?> results = testResultSender.getResults();
-
- assertNotNull(results);
- assertEquals(1, results.size());
-
- final Set<HdfsStoreDetails> listHdfsStoreFunctionresults = (Set<HdfsStoreDetails>)results.get(0);
-
- assertNotNull(listHdfsStoreFunctionresults);
- assertEquals(3, listHdfsStoreFunctionresults.size());
-
- Collections.sort(storeNames);
-
- for (HdfsStoreDetails listHdfsStoreFunctionresult : listHdfsStoreFunctionresults) {
- assertTrue(storeNames.contains(listHdfsStoreFunctionresult.getStoreName()));
- assertTrue(storeNames.remove(listHdfsStoreFunctionresult.getStoreName()));
- assertEquals(memberId, listHdfsStoreFunctionresult.getMemberId());
- assertEquals(memberName, listHdfsStoreFunctionresult.getMemberName());
- }
- }
-
-
- @Test(expected = CacheClosedException.class)
- public void testExecuteOnMemberWithNoCache() throws Throwable {
- final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "MockFunctionContext");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final TestListHDFSStoresFunction testListHdfsStoresFunction =
- new TestListHDFSStoresFunction(mockContext.mock(Cache.class, "MockCache"), mockMember) {
- @Override protected Cache getCache() {
- throw new CacheClosedException("Expected");
- }
- };
-
- final TestResultSender testResultSender = new TestResultSender();
-
- mockContext.checking(new Expectations() {{
- oneOf(mockFunctionContext).getResultSender();
- will(returnValue(testResultSender));
- }});
-
- testListHdfsStoresFunction.execute(mockFunctionContext);
-
- try {
- testResultSender.getResults();
- }
- catch (CacheClosedException expected) {
- assertEquals("Expected", expected.getMessage());
- throw expected;
- }
- }
-
- @Test
- @SuppressWarnings("unchecked")
- public void testExecuteOnMemberHavingNoHDFSStores() throws Throwable {
- final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
-
- final TestResultSender testResultSender = new TestResultSender();
-
- mockContext.checking(new Expectations() {{
- oneOf(mockCache).getHDFSStores();
- will(returnValue(Collections.emptyList()));
- oneOf(mockFunctionContext).getResultSender();
- will(returnValue(testResultSender));
- }});
-
- final ListHDFSStoresFunction function = createListHDFSStoresFunction(mockCache, mockMember);
-
- function.execute(mockFunctionContext);
-
- final List<?> results = testResultSender.getResults();
-
- assertNotNull(results);
- assertEquals(1, results.size());
-
- final Set<HdfsStoreDetails> hdfsStoreDetails = (Set<HdfsStoreDetails>) results.get(0);
-
- assertNotNull(hdfsStoreDetails);
- assertTrue(hdfsStoreDetails.isEmpty());
- }
-
- @Test
- @SuppressWarnings("unchecked")
- public void testExecuteOnMemberWithANonGemFireCache() throws Throwable {
- final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-
- final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
-
- final TestResultSender testResultSender = new TestResultSender();
-
- mockContext.checking(new Expectations() {
- {
- oneOf(mockFunctionContext).getResultSender();
- will(returnValue(testResultSender));
- }
- });
-
- final ListHDFSStoresFunction function = createListHDFSStoresFunction(mockCache, null);
-
- function.execute(mockFunctionContext);
-
- final List<?> results = testResultSender.getResults();
-
- assertNotNull(results);
- assertEquals(1, results.size());
-
- final Set<HdfsStoreDetails> hdfsStoreDetails = (Set<HdfsStoreDetails>)results.get(0);
-
- assertNotNull(hdfsStoreDetails);
- assertTrue(hdfsStoreDetails.isEmpty());
- }
-
-
- @Test(expected = RuntimeException.class)
- public void testExecuteThrowsRuntimeException() throws Throwable {
- final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
-
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-
- final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
-
- final TestResultSender testResultSender = new TestResultSender();
-
- mockContext.checking(new Expectations() {
- {
- oneOf(mockCache).getHDFSStores();
- will(throwException(new RuntimeException("expected")));
- oneOf(mockFunctionContext).getResultSender();
- will(returnValue(testResultSender));
- }
- });
-
- final ListHDFSStoresFunction function = createListHDFSStoresFunction(mockCache, mockMember);
-
- function.execute(mockFunctionContext);
-
- try {
- testResultSender.getResults();
- } catch (Throwable throwable) {
- assertTrue(throwable instanceof RuntimeException);
- assertEquals("expected", throwable.getMessage());
- throw throwable;
- }
- }
-
- protected ListHDFSStoresFunction createListHDFSStoresFunction(final Cache cache, DistributedMember member) {
- return new TestListHDFSStoresFunction(cache, member);
- }
-
- protected static class TestListHDFSStoresFunction extends ListHDFSStoresFunction {
- private static final long serialVersionUID = 1L;
-
- private final Cache cache;
-
- DistributedMember member;
-
- @Override
- protected DistributedMember getDistributedMemberId(Cache cache) {
- return member;
- }
-
- public TestListHDFSStoresFunction(final Cache cache, DistributedMember member) {
- assert cache != null: "The Cache cannot be null!";
- this.cache = cache;
- this.member = member;
- }
-
- @Override
- protected Cache getCache() {
- return cache;
- }
- }
-
- protected static class TestResultSender implements ResultSender {
-
- private final List<Object> results = new LinkedList<Object>();
-
- private Throwable t;
-
- protected List<Object> getResults() throws Throwable {
- if (t != null) {
- throw t;
- }
- return Collections.unmodifiableList(results);
- }
-
- public void lastResult(final Object lastResult) {
- results.add(lastResult);
- }
-
- public void sendResult(final Object oneResult) {
- results.add(oneResult);
- }
-
- public void sendException(final Throwable t) {
- this.t = t;
- }
- }
-}
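
For readers skimming this removal: every suite in this batch drives its mocks through jMock's Mockery/Expectations API, as above. A minimal self-contained sketch of that pattern follows; the Service interface and the returned store name are hypothetical stand-ins, not Geode APIs:

    import static org.junit.Assert.assertEquals;

    import org.jmock.Expectations;
    import org.jmock.Mockery;

    public class MockeryPatternSketch {
      interface Service { String name(); }  // hypothetical collaborator

      public static void main(String[] args) {
        Mockery context = new Mockery();
        final Service mock = context.mock(Service.class, "Service");
        // declare the expected interaction and its canned return value
        context.checking(new Expectations() {{
          oneOf(mock).name();
          will(returnValue("hdfsStoreOne"));
        }});
        assertEquals("hdfsStoreOne", mock.name());
        context.assertIsSatisfied();  // fails unless name() was called exactly once
      }
    }
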
[14/15] incubator-geode git commit: GEODE-429: Remove HdfsStore Junit
and Dunits
Posted by as...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManagerJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManagerJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManagerJUnitTest.java
deleted file mode 100644
index 011d82b..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManagerJUnitTest.java
+++ /dev/null
@@ -1,449 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSCompactionManager.CompactionRequest;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer.Compactor;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSCompactionManagerJUnitTest extends BaseHoplogTestCase {
- /**
- * Tests queueing of major and minor compaction requests in their respective queues
- */
- public void testMinMajCompactionIsolation() throws Exception {
- // no-op compactor
- Compactor compactor = new AbstractCompactor() {
- Object minor = new Object();
- Object major = new Object();
- public boolean compact(boolean isMajor, boolean isForced) throws IOException {
- try {
- if (isMajor) {
- synchronized (major) {
- major.wait();
- }
- } else {
- synchronized (minor) {
- minor.wait();
- }
- }
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- return true;
- }
- };
-
- // enable compaction; the no-op compactor blocks on wait(), so each request stays active in its pool
- HDFSCompactionManager instance = HDFSCompactionManager.getInstance(hdfsStore);
- alterMinorCompaction(hdfsStore, true);
- alterMajorCompaction(hdfsStore, true);
-
- assertEquals(0, instance.getMinorCompactor().getActiveCount());
- assertEquals(0, instance.getMajorCompactor().getActiveCount());
-
- // minor request
- CompactionRequest cr = new CompactionRequest("region", 0, compactor, false);
- HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
- // major request
- cr = new CompactionRequest("region", 0, compactor, true);
- HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
-
- // wait for the requests to be picked up by the compactor threads
- TimeUnit.MILLISECONDS.sleep(50);
- assertEquals(1, instance.getMinorCompactor().getActiveCount());
- assertEquals(1, instance.getMajorCompactor().getActiveCount());
- }
-
- /**
- * Tests compaction pause. Once compaction is stopped, new requests
- * are rejected.
- */
- public void testAlterAutoMinorCompaction() throws Exception {
- // each compaction execution increments the counter by 1, tracking how many tasks have run
- final AtomicInteger totalExecuted = new AtomicInteger(0);
- Compactor compactor = new AbstractCompactor() {
- public boolean compact(boolean isMajor, boolean isForced) throws IOException {
- totalExecuted.incrementAndGet();
- return true;
- }
- };
-
- // compaction is enabled. submit two requests; after some time both should have executed
- alterMinorCompaction(hdfsStore, true);
- CompactionRequest cr = new CompactionRequest("region", 0, compactor, false);
- HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
- cr = new CompactionRequest("region", 1, compactor, false);
- HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
-
- int totalWait = 20;
- while (totalWait > 0 && 2 != totalExecuted.get()) {
- // wait for operations to complete. The execution will terminate as soon as possible
- System.out.println("waiting one small cycle for dummy request to complete");
- TimeUnit.MILLISECONDS.sleep(50);
- totalWait--;
- }
- assertEquals(2, totalExecuted.get());
-
- // so compaction works. now disable compaction and submit requests until one is rejected;
- // the execution counter should not increase
- alterMinorCompaction(hdfsStore, false);
- boolean success = false;
- int i = 0;
- do {
- cr = new CompactionRequest("region", ++i, compactor, false);
- success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
- } while (success);
-
- TimeUnit.MILLISECONDS.sleep(500);
- assertEquals(2, totalExecuted.get());
- }
- public void testAlterAutoMajorCompaction() throws Exception {
- // each compaction execution increments the counter by 1, tracking how many tasks have run
- final AtomicInteger totalExecuted = new AtomicInteger(0);
- Compactor compactor = new AbstractCompactor() {
- public boolean compact(boolean isMajor, boolean isForced) throws IOException {
- totalExecuted.incrementAndGet();
- return true;
- }
- };
-
- // compaction is enabled. submit two requests; after some time both should have executed
- alterMajorCompaction(hdfsStore, true);
- CompactionRequest cr = new CompactionRequest("region", 0, compactor, true);
- HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
- cr = new CompactionRequest("region", 1, compactor, true);
- HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
-
- int totalWait = 20;
- while (totalWait > 0 && 2 != totalExecuted.get()) {
- // wait for operations to complete. The execution will terminate as soon as possible
- System.out.println("waiting one small cycle for dummy request to complete");
- TimeUnit.MILLISECONDS.sleep(50);
- totalWait--;
- }
- assertEquals(2, totalExecuted.get());
-
- // so compaction works. now disable compaction and submit requests until one is rejected;
- // the execution counter should not increase
- alterMajorCompaction(hdfsStore, false);
- boolean success = false;
- int i = 0;
- do {
- cr = new CompactionRequest("region", ++i, compactor, true);
- success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
- System.out.println("success: " + success);
- } while (success);
-
- TimeUnit.MILLISECONDS.sleep(500);
- assertEquals(2, totalExecuted.get());
- }
-
- /**
- * Tests that duplicate compaction requests do not cause rejection
- */
- public void testDuplicateRequests() throws Exception {
- final AtomicBoolean barrierOpen = new AtomicBoolean(false);
- class TestCompactor extends AbstractCompactor {
- AtomicBoolean busy = new AtomicBoolean(false);
- public boolean compact(boolean isMajor, boolean isForced) throws IOException {
- synchronized (barrierOpen) {
- busy.set(true);
- if (barrierOpen.get()) {
- return false;
- }
- try {
- barrierOpen.wait();
- } catch (InterruptedException e) {
- return false;
- }
- busy.set(false);
- }
- return true;
- }
- public boolean isBusy(boolean isMajor) {return busy.get();}
- };
-
- System.setProperty(HoplogConfig.COMPCATION_QUEUE_CAPACITY, "10");
-
- alterMinorCompaction(hdfsStore, true);
- alterMajorCompaction(hdfsStore, true);
- // capacity is 10, thread count is 2, so only the first 12 requests will be
- // accepted
- for (int i = 0; i < 15; i++) {
- CompactionRequest cr = new CompactionRequest("region", i, new TestCompactor(), true);
- boolean success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
- if (success) {
- assertTrue("failed for " + i, i < 12);
- } else {
- assertTrue("failed for " + i, i >= 12);
- }
- }
-
- synchronized (barrierOpen) {
- barrierOpen.set(true);
- barrierOpen.notifyAll();
- }
- TimeUnit.MILLISECONDS.sleep(100);
- barrierOpen.set(false);
-
- HDFSCompactionManager.getInstance(hdfsStore).reset();
- TestCompactor compactor = new TestCompactor();
- for (int i = 0; i < 10; i++) {
- TimeUnit.MILLISECONDS.sleep(20);
- CompactionRequest cr = new CompactionRequest("region", 0, compactor, true);
- boolean success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
- if (success) {
- assertTrue("failed for " + i, i < 2);
- } else {
- assertTrue("failed for " + i, i > 0);
- }
- }
- }
-
- public void testForceCompactionWithAutoDisabled() throws Exception {
- HoplogOrganizer<? extends PersistedEventImpl> organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- items.add(new TestEvent(("1"), ("1-1")));
- organizer.flush(items.iterator(), items.size());
-
- items.clear();
- items.add(new TestEvent(("2"), ("2-1")));
- organizer.flush(items.iterator(), items.size());
-
- FileStatus[] files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
- assertEquals(2, files.length);
- files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION);
- assertEquals(0, files.length);
-
- CompactionRequest cr = new CompactionRequest(getName(), 0, organizer.getCompactor(), true);
- HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
- TimeUnit.MILLISECONDS.sleep(500);
-
- files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION);
- assertEquals(0, files.length);
- files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
- assertEquals(0, files.length);
-
- organizer.forceCompaction(true);
- TimeUnit.MILLISECONDS.sleep(500);
-
- files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION);
- assertEquals(1, files.length);
- files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
- assertEquals(2, files.length);
- }
-
- /**
- * Tests that a forced major compaction completes on version upgrade even when there is only one hoplog
- */
- public void testForceCompaction() throws Exception {
- HoplogOrganizer<? extends PersistedEventImpl> organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- items.add(new TestEvent(("1"), ("1-1")));
- organizer.flush(items.iterator(), items.size());
-
- items.clear();
- items.add(new TestEvent(("2"), ("2-1")));
- organizer.flush(items.iterator(), items.size());
-
- FileStatus[] files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
- assertEquals(2, files.length);
- files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION);
- assertEquals(0, files.length);
-
- // isForced is true for user submitted compaction requests (through system procedure)
- // we do not want to compact an already compacted file
- CompactionRequest cr = new CompactionRequest(getName(), 0, organizer.getCompactor(), true, true/*isForced*/);
- Future<CompactionStatus> status = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
- status.get(); // block until the forced compaction completes
-
- files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION);
- assertEquals(1, files.length);
- files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
- assertEquals(2, files.length);
-
- // second request to force compact does not do anything
- status = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
- status.get().equals(false);
-
- files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION);
- assertEquals(1, files.length);
- files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
- assertEquals(2, files.length);
-
- // upon version upgrade force compaction is allowed
- cr = new CompactionRequest(getName(), 0, organizer.getCompactor(), true, true, true);
- status = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
- status.get(); // version upgrade allows the forced compaction to run again
-
- files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION);
- assertEquals(2, files.length);
- files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
- assertEquals(3, files.length); // + 1 for old major hoplog
- }
-
- /**
- * Test successful sequential submission
- */
- public void testSameBucketSeqRequest() throws Exception {
- final AtomicInteger counter = new AtomicInteger(0);
- Compactor compactor = new AbstractCompactor() {
- public boolean compact(boolean isMajor, boolean isForced) throws IOException {
- counter.set(1);
- return true;
- }
- };
-
- HDFSCompactionManager.getInstance(hdfsStore).reset();
- alterMinorCompaction(hdfsStore, true);
- alterMajorCompaction(hdfsStore, true);
- CompactionRequest cr = new CompactionRequest("region", 0, compactor, false);
- assertEquals(0, counter.get());
- boolean success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
- assertEquals(true, success);
- while (!counter.compareAndSet(1, 0)) {
- TimeUnit.MILLISECONDS.sleep(20);
- }
-
- assertEquals(0, counter.get());
- success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
- assertEquals(true, success);
- for (int i = 0; i < 10; i++) {
- TimeUnit.MILLISECONDS.sleep(20);
- if (counter.get() == 1) {
- break;
- }
- }
- assertEquals(1, counter.get());
- }
-
- public void testAlterMinorThreadsIncrease() throws Exception {
- doAlterCompactionThreads(false, false);
- }
- public void testAlterMinorThreadsDecrease() throws Exception {
- doAlterCompactionThreads(false, true);
- }
- public void testAlterMajorThreadsIncrease() throws Exception {
- doAlterCompactionThreads(true, false);
- }
- public void testAlterMajorThreadsDecrease() throws Exception {
- doAlterCompactionThreads(true, true);
- }
-
- public void doAlterCompactionThreads(final boolean testMajor, boolean decrease) throws Exception {
- final AtomicBoolean barrierOpen = new AtomicBoolean(false);
- final AtomicInteger counter = new AtomicInteger(0);
- class TestCompactor extends AbstractCompactor {
- public boolean compact(boolean isMajor, boolean isForced) throws IOException {
- synchronized (barrierOpen) {
- if ((testMajor && !isMajor) || (!testMajor && isMajor)) {
- return true;
- }
- if (barrierOpen.get()) {
- return false;
- }
- try {
- barrierOpen.wait();
- } catch (InterruptedException e) {
- return false;
- }
- counter.incrementAndGet();
- }
- return true;
- }
- };
-
- System.setProperty(HoplogConfig.COMPCATION_QUEUE_CAPACITY, "1");
-
- HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
- int defaultThreadCount = 10;
- if (testMajor) {
- alterMajorCompaction(hdfsStore, true);
- defaultThreadCount = 2;
- mutator.setMajorCompactionThreads(15);
- if (decrease) {
- mutator.setMajorCompactionThreads(1);
- }
- } else {
- alterMinorCompaction(hdfsStore, true);
- mutator.setMinorCompactionThreads(15);
- if (decrease) {
- mutator.setMinorCompactionThreads(1);
- }
- }
-
- // capacity is 1, thread count is 10 or 2, so only the first 11 or 3 requests will be
- // accepted
- cache.getLogger().info("<ExpectedException action=add>java.util.concurrent.RejectedExecutionException</ExpectedException>");
- for (int i = 0; i < 15; i++) {
- CompactionRequest cr = new CompactionRequest("region", i, new TestCompactor(), testMajor);
- boolean success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
- if (success) {
- assertTrue("failed for " + i, i <= defaultThreadCount);
- } else {
- assertTrue("failed for " + i, i > defaultThreadCount);
- }
- }
-
- TimeUnit.MILLISECONDS.sleep(500);
- assertEquals(0, counter.get());
- synchronized (barrierOpen) {
- barrierOpen.set(true);
- barrierOpen.notifyAll();
- }
- TimeUnit.MILLISECONDS.sleep(500);
- assertEquals(defaultThreadCount, counter.get());
-
- hdfsStore.alter(mutator);
-
- counter.set(0);
- barrierOpen.set(false);
- for (int i = 0; i < 15; i++) {
- TimeUnit.MILLISECONDS.sleep(100);
- CompactionRequest cr = new CompactionRequest("region", i, new TestCompactor(), testMajor);
- boolean success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
- if (decrease) {
- if (i > 3) {
- assertFalse("failed for " + i, success);
- }
- } else {
- assertTrue("failed for " + i, success);
- }
- }
- TimeUnit.MILLISECONDS.sleep(500);
- synchronized (barrierOpen) {
- barrierOpen.set(true);
- barrierOpen.notifyAll();
- }
- TimeUnit.MILLISECONDS.sleep(500);
- if (decrease) {
- assertTrue(counter.get() < 4);
- } else {
- assertEquals(15, counter.get());
- }
-
- cache.getLogger().info("<ExpectedException action=remove>java.util.concurrent.RejectedExecutionException</ExpectedException>");
- }
-}
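
A note on the removed manager tests: they wait for asynchronous compactions with bounded sleep loops rather than fixed sleeps. The idiom, extracted as a standalone sketch (class and method names are illustrative, not Geode APIs):

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    final class CompactionAwaiter {
      // Poll up to attempts * 50 ms for the counter to reach the expected value,
      // returning early once it does; the caller asserts on the result.
      static boolean awaitCount(AtomicInteger counter, int expected, int attempts)
          throws InterruptedException {
        while (attempts-- > 0 && counter.get() != expected) {
          TimeUnit.MILLISECONDS.sleep(50);
        }
        return counter.get() == expected;
      }
    }
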
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSRegionDirectorJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSRegionDirectorJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSRegionDirectorJUnitTest.java
deleted file mode 100644
index dc7b987..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSRegionDirectorJUnitTest.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.hdfs.internal.HoplogListenerForRegion;
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSRegionDirectorJUnitTest extends BaseHoplogTestCase {
- public void testDirector() throws Exception {
- int bucketId = 0;
-
- HdfsRegionManager mgr = regionManager;
-
- // no buckets have been created so far.
- assertEquals(0, director.getBucketCount("/" + getName()));
-
- // one bucket created
- mgr.create(bucketId);
- assertEquals(1, director.getBucketCount("/" + getName()));
-
- // close bucket test
- mgr.close(bucketId);
-
- // all buckets have been closed.
- assertEquals(0, director.getBucketCount("/" + getName()));
-
- mgr.create(bucketId);
- assertEquals(1, director.getBucketCount("/" + getName()));
- director.clear("/" + getName());
- try {
- assertEquals(0, director.getBucketCount("/" + getName()));
- fail("The region is no longer managed, hence an exception is expected");
- } catch (IllegalStateException e) {
- // exception expected as the region is no longer managed
- }
- }
-
- public void testCompactionEvents() throws Exception {
- final AtomicInteger counter = new AtomicInteger(0);
- HoplogListener myListener = new HoplogListener() {
- public void hoplogDeleted(String regionFolder, int bucketId, Hoplog... oplogs)
- throws IOException {
- }
- public void hoplogCreated(String regionFolder, int bucketId, Hoplog... oplogs)
- throws IOException {
- }
- public void compactionCompleted(String region, int bucket, boolean isMajor) {
- counter.incrementAndGet();
- }
- };
-
- HoplogListenerForRegion listenerManager = ((LocalRegion)region).getHoplogListener();
- listenerManager.addListener(myListener);
-
- HoplogOrganizer bucket = regionManager.create(0);
- // #1
- ArrayList<PersistedEventImpl> items = new ArrayList<PersistedEventImpl>();
- items.add(new TestEvent("1", "1"));
- bucket.flush(items.iterator(), items.size());
-
- // #2
- items.clear();
- items.add(new TestEvent("2", "1"));
- bucket.flush(items.iterator(), items.size());
-
- // #3
- items.clear();
- items.add(new TestEvent("3", "1"));
- bucket.flush(items.iterator(), items.size());
-
- // #4
- items.clear();
- items.add(new TestEvent("4", "1"));
- bucket.flush(items.iterator(), items.size());
-
- bucket.getCompactor().compact(false, false);
- assertEquals(1, counter.get());
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSStatsJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSStatsJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSStatsJUnitTest.java
deleted file mode 100644
index 1d17232..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSStatsJUnitTest.java
+++ /dev/null
@@ -1,250 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.util.ArrayList;
-
-import org.apache.hadoop.fs.Path;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.SortedHoplogPersistedEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetReader.HoplogIterator;
-import com.gemstone.gemfire.internal.util.BlobHelper;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSStatsJUnitTest extends BaseHoplogTestCase {
- public void testStoreUsageStats() throws Exception {
- HoplogOrganizer bucket = regionManager.create(0);
-
- long oldUsage = 0;
- assertEquals(oldUsage, stats.getStoreUsageBytes());
-
- for (int j = 0; j < 5; j++) {
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- for (int i = 0; i < 100; i++) {
- String key = ("key-" + (j * 100 + i));
- String value = ("value-" + System.nanoTime());
- items.add(new TestEvent(key, value));
- }
- bucket.flush(items.iterator(), 100);
- }
-
- assertTrue(0 < stats.getStoreUsageBytes());
- oldUsage = stats.getStoreUsageBytes();
-
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
- assertEquals(2, stats.getStoreUsageBytes() / oldUsage);
-
- organizer.close();
- assertEquals(1, stats.getStoreUsageBytes() / oldUsage);
- }
-
- public void testWriteStats() throws Exception {
- HoplogOrganizer bucket = regionManager.create(0);
-
- // validate flush stats
- // flush repeatedly to create several hoplogs, then run one compaction cycle
- // 5 hoplogs, 500 keys in total
- assertEquals(0, stats.getFlush().getCount());
- assertEquals(0, stats.getFlush().getBytes());
- assertEquals(0, stats.getActiveFileCount());
- int bytesSent = 0;
- for (int j = 0; j < 5; j++) {
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- for (int i = 0; i < 100; i++) {
- String key = ("key-" + (j * 100 + i));
- String value = ("value-" + System.nanoTime());
- items.add(new TestEvent(key, value));
- bytesSent += (key.getBytes().length + value.getBytes().length);
- }
- bucket.flush(items.iterator(), 100);
-
- // verify the flush stats advance with each batch
- assertEquals(j + 1, stats.getFlush().getCount());
- assertTrue(stats.getFlush().getBytes() > bytesSent);
- assertEquals(j + 1, stats.getActiveFileCount());
- }
-
- // verify compaction stats
- assertEquals(0, stats.getMinorCompaction().getCount());
- assertEquals(0, stats.getMinorCompaction().getBytes());
- assertEquals(0, stats.getInactiveFileCount());
- bucket.getCompactor().compact(false, false);
- assertEquals(1, stats.getMinorCompaction().getCount());
- assertEquals(1, stats.getActiveFileCount());
- assertEquals(0, stats.getInactiveFileCount());
- assertEquals(stats.getMinorCompaction().getBytes(), stats.getFlush()
- .getBytes());
- }
-
- public void testInactiveFileStats() throws Exception {
- // steps
- // create files -> validate active and inactive file count
- // -> increment the reference count by opening a scanner -> compact -> verify active and inactive file count
- HoplogOrganizer bucket = regionManager.create(0);
- assertEquals(0, stats.getActiveFileCount());
- assertEquals(0, stats.getInactiveFileCount());
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- for (int j = 0; j < 5; j++) {
- items.clear();
- for (int i = 0; i < 100; i++) {
- String key = ("key-" + (j * 100 + i));
- String value = ("value-" + System.nanoTime());
- items.add(new TestEvent(key, value));
- }
- bucket.flush(items.iterator(), 100);
- }
-
- assertEquals(5, stats.getActiveFileCount());
- assertEquals(0, stats.getInactiveFileCount());
-
- HoplogIterator<byte[], PersistedEventImpl> scanner = bucket.scan();
- bucket.getCompactor().compact(true, false);
- assertEquals(1, stats.getActiveFileCount());
- assertEquals(5, stats.getInactiveFileCount());
-
- scanner.close();
- assertEquals(1, stats.getActiveFileCount());
- assertEquals(0, stats.getInactiveFileCount());
- }
-
- public void testReadStats() throws Exception {
- HoplogOrganizer<SortedHoplogPersistedEvent> bucket = regionManager.create(0);
-
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- for (int i = 0; i < 100; i++) {
- items.add(new TestEvent("key-" + i, "value-" + System.nanoTime()));
- }
- bucket.flush(items.iterator(), 100);
-
- // validate read stats
- assertEquals(0, stats.getRead().getCount());
- assertEquals(0, stats.getRead().getBytes());
- // number of bytes read must be greater than size of key and value and must be increasing
- int bytesRead = "key-1".getBytes().length + "value=1233232".getBytes().length;
- for (int i = 0; i < 5; i++) {
- long previousRead = stats.getRead().getBytes();
- PersistedEventImpl e = bucket.read(BlobHelper.serializeToBlob("key-" + i));
- assertNotNull(e);
- assertEquals(i + 1, stats.getRead().getCount());
- assertTrue( (bytesRead + previousRead) < stats.getRead().getBytes());
- }
-
- //Make sure the block cache stats are being updated.
-// assertTrue(storeStats.getBlockCache().getMisses() > 0);
-// assertTrue(storeStats.getBlockCache().getBytesCached() > 0);
-// assertTrue(storeStats.getBlockCache().getCached() > 0);
-
- //Do a duplicate read to make sure we get a hit in the cache
-// bucket.read(BlobHelper.serializeToBlob("key-" + 0));
-// assertTrue(storeStats.getBlockCache().getHits() > 0);
- }
-
- public void testBloomStats() throws Exception {
- HoplogOrganizer bucket = regionManager.create(0);
-
- // create 10 hoplogs
- for (int j = 0; j < 5; j++) {
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- for (int i = 0; i < 100; i++) {
- String key = ("key-" + (j * 100 + i));
- String value = ("value-" + System.nanoTime());
- items.add(new TestEvent(key, value));
- }
- bucket.flush(items.iterator(), 100);
- }
-
- // initially the bloom stat is zero
- // reading a key from the newest hoplog (key 400 to 499) increases the bloom count by 1
- // reading a key from the oldest hoplog (key 0 to 99) increases it by 5, one probe per hoplog
- assertEquals(0, stats.getBloom().getCount());
- bucket.read(BlobHelper.serializeToBlob("key-450"));
- assertEquals(1, stats.getBloom().getCount());
- bucket.read(BlobHelper.serializeToBlob("key-50"));
- assertEquals(6, stats.getBloom().getCount());
- }
-
- public void testScanStats() throws Exception {
- HFileSortedOplog hoplog = new HFileSortedOplog(hdfsStore, new Path(
- testDataDir, "H-1-1.hop"),blockCache, stats, storeStats);
- createHoplog(5, hoplog);
-
- // initially scan stats will be zero. creating a scanner should increase
- // scan iteration stats and bytes. On scanner close scan count should be
- // incremented
- assertEquals(0, stats.getScan().getCount());
- assertEquals(0, stats.getScan().getBytes());
- assertEquals(0, stats.getScan().getTime());
- assertEquals(0, stats.getScan().getIterations());
- assertEquals(0, stats.getScan().getIterationTime());
-
- HoplogIterator<byte[], byte[]> scanner = hoplog.getReader().scan();
- assertEquals(0, stats.getScan().getCount());
- int count = 0;
- for (byte[] bs = null; scanner.hasNext(); ) {
- bs = scanner.next();
- count += bs.length + scanner.getValue().length;
- }
- assertEquals(count, stats.getScan().getBytes());
- assertEquals(5, stats.getScan().getIterations());
- assertTrue(0 < stats.getScan().getIterationTime());
- // the scan count stays 0 because scanner.close has not been called yet
- assertEquals(0, stats.getScan().getCount());
- assertEquals(0, stats.getScan().getTime());
- assertEquals(1, stats.getScan().getInProgress());
-
- scanner.close();
- assertEquals(1, stats.getScan().getCount());
- assertTrue(0 < stats.getScan().getTime());
- assertTrue(stats.getScan().getIterationTime() <= stats.getScan().getTime());
- }
-
- /**
- * Validates that two buckets belonging to the same region update the same stats
- */
- public void testRegionBucketShareStats() throws Exception {
- HoplogOrganizer bucket1 = regionManager.create(0);
- HoplogOrganizer bucket2 = regionManager.create(1);
-
- // validate flush stats
- assertEquals(0, stats.getFlush().getCount());
- assertEquals(0, stats.getActiveFileCount());
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- for (int i = 0; i < 100; i++) {
- items.add(new TestEvent("key-" + i, "value-" + System.nanoTime()));
- }
- bucket1.flush(items.iterator(), 100);
- assertEquals(1, stats.getFlush().getCount());
- assertEquals(1, stats.getActiveFileCount());
- items.clear();
-
- for (int i = 0; i < 100; i++) {
- items.add(new TestEvent("key-" + i, "value-" + System.nanoTime()));
- }
- bucket2.flush(items.iterator(), 100);
- assertEquals(2, stats.getFlush().getCount());
- assertEquals(2, stats.getActiveFileCount());
- }
-
- @Override
- protected Cache createCache() {
- CacheFactory cf = new CacheFactory().set("mcast-port", "0")
- .set("log-level", "info")
- .set("enable-time-statistics", "true")
-// .set("statistic-archive-file", "statArchive.gfs")
- ;
- cache = cf.create();
-
- return cache;
- }
-}
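
The createCache() override above is the only non-default cache wiring in the removed stats suite; the time-statistics property is what makes the time-based assertions possible. As a standalone sketch, with property values copied from the deleted test and the archive file left optional:

    import com.gemstone.gemfire.cache.Cache;
    import com.gemstone.gemfire.cache.CacheFactory;

    public class StatsCacheSketch {
      public static void main(String[] args) {
        Cache cache = new CacheFactory()
            .set("mcast-port", "0")                 // standalone member, no multicast
            .set("log-level", "info")
            .set("enable-time-statistics", "true")  // enables the time-based scan/flush stats
            // .set("statistic-archive-file", "statArchive.gfs")  // optional, as in the test
            .create();
        cache.close();
      }
    }
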
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizerJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizerJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizerJUnitTest.java
deleted file mode 100644
index ab1ccac..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizerJUnitTest.java
+++ /dev/null
@@ -1,297 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.UnsortedHoplogPersistedEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetReader.HoplogIterator;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.SequenceFileHoplog.SequenceFileIterator;
-import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerHelper;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-/**
- * Tests hoplog functionality for streaming ingest
- *
- * @author hemantb
- *
- */
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSUnsortedHoplogOrganizerJUnitTest extends BaseHoplogTestCase {
-
- /**
- * Tests flush operation
- */
- public void testFlush() throws Exception {
- int count = 10;
- int bucketId = (int) System.nanoTime();
- HDFSUnsortedHoplogOrganizer organizer = new HDFSUnsortedHoplogOrganizer(regionManager, bucketId);
-
- // flush and create hoplog
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- for (int i = 0; i < count; i++) {
- items.add(new TestEvent(("key-" + i), ("value-" + System.nanoTime())));
- }
-
- organizer.flush(items.iterator(), count);
- organizer.closeCurrentWriter();
-
- // check file existence in bucket directory
- FileStatus[] hoplogs = getBucketHoplogs(getName() + "/" + bucketId,
- HdfsSortedOplogOrganizer.SEQ_HOPLOG_EXTENSION);
-
- // only one hoplog should exists
- assertEquals(1, hoplogs.length);
- readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 0);
- }
-
- public void testAlterRollOverInterval() throws Exception {
- HDFSUnsortedHoplogOrganizer organizer = new HDFSUnsortedHoplogOrganizer(regionManager, 0);
-
- // flush 3 times with small delays. Only one seq file will be created
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- for (int j = 0; j < 3; j++) {
- items.clear();
- for (int i = 0; i < 10; i++) {
- items.add(new TestEvent(("key-" + (i + 10 * j)), ("value-" + System.nanoTime())));
- }
- organizer.flush(items.iterator(), 10);
- TimeUnit.MILLISECONDS.sleep(1100);
- }
- organizer.closeCurrentWriter();
-
- FileStatus[] hoplogs = getBucketHoplogs(getName() + "/" + 0,
- HdfsSortedOplogOrganizer.SEQ_HOPLOG_EXTENSION);
-
- // only one hoplog should exists
- assertEquals(1, hoplogs.length);
- readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 0);
-
- HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
- mutator.setWriteOnlyFileRolloverInterval(1);
- hdfsStore.alter(mutator);
-
- TimeUnit.MILLISECONDS.sleep(1100);
- for (int j = 0; j < 2; j++) {
- items.clear();
- for (int i = 0; i < 10; i++) {
- items.add(new TestEvent(("key-" + (i + 10 * j)), ("value-" + System.nanoTime())));
- }
- organizer.flush(items.iterator(), 10);
- TimeUnit.MILLISECONDS.sleep(1100);
- }
- organizer.closeCurrentWriter();
- hoplogs = getBucketHoplogs(getName() + "/" + 0,
- HdfsSortedOplogOrganizer.SEQ_HOPLOG_EXTENSION);
- assertEquals(3, hoplogs.length);
- }
-
- public void testSequenceFileScan() throws Exception {
- int count = 10000;
- int bucketId = (int) System.nanoTime();
- HDFSUnsortedHoplogOrganizer organizer = new HDFSUnsortedHoplogOrganizer(regionManager, bucketId);
-
- // flush and create hoplog
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- for (int i = 0; i < count; i++) {
- items.add(new TestEvent(("key-" + i), ("value-" + System.nanoTime())));
- }
-
- organizer.flush(items.iterator(), count);
- organizer.closeCurrentWriter();
-
- // check file existence in bucket directory
- FileStatus[] hoplogs = getBucketHoplogs(getName() + "/" + bucketId,
- HdfsSortedOplogOrganizer.SEQ_HOPLOG_EXTENSION);
-
- // only one hoplog should exists
- assertEquals(1, hoplogs.length);
-
- SequenceFileDetails sfd = getSequenceFileDetails(hdfsStore.getFileSystem(), hoplogs[0].getPath());
-
- // End position is before a sync. Should read until sync.
- readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 0, sfd.indexOfKeyBeforeSecondSync ,
- 0, sfd.posBeforeSecondSync);
-
- // Start position is inside header. Should start from first key and go to next sync point.
- readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 0, sfd.indexOfKeyBeforeSecondSync,
- 10, sfd.posAfterFirstSync);
-
- // Start and end position are between two sync markers. Should not read any keys.
- readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 29, 28,
- sfd.posAfterFirstSync, sfd.posBeforeSecondSync - sfd.posAfterFirstSync);
-
- // Start position is after a sync and End position is beyond the file size.
- //Should read all the records after the next sync.
- readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), sfd.indexOfKeyAfterFirstSync, 9999,
- sfd.posBeforeFirstSync, 10000000);
-
- // Should read all the records.
- readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 0, 9999, 0, -1);
- }
-
- class SequenceFileDetails {
- public int posBeforeFirstSync;
- public int indexOfKeyBeforeFirstSync;
-
- public int posAfterFirstSync;
- public int indexOfKeyAfterFirstSync;
-
- public int posBeforeSecondSync;
- public int indexOfKeyBeforeSecondSync;
- }
-
- public SequenceFileDetails getSequenceFileDetails(FileSystem inputFS, Path sequenceFileName) throws Exception {
- SequenceFileDetails fd = new SequenceFileDetails();
- SequenceFileHoplog hoplog = new SequenceFileHoplog(inputFS, sequenceFileName, null);
-
- SequenceFileIterator iter = (SequenceFileIterator) hoplog.getReader().scan();
- int currentkeyStartPos = 0;
- int cursorPos = 0;
- String currentKey = null;
- boolean firstSyncSeen = false;
- try {
- while (iter.hasNext()) {
- iter.next();
- currentkeyStartPos = cursorPos;
- currentKey = ((String)CacheServerHelper.deserialize(iter.getKey()));
- cursorPos = (int)iter.getPosition();
- if (iter.syncSeen()){
- if (firstSyncSeen) {
-
- fd.posBeforeSecondSync = currentkeyStartPos;
- fd.indexOfKeyBeforeSecondSync = Integer.parseInt(currentKey.substring(4));
- break;
- } else {
- fd.posBeforeFirstSync = currentkeyStartPos;
- fd.indexOfKeyBeforeFirstSync = Integer.parseInt(currentKey.substring(4));
-
- fd.posAfterFirstSync = cursorPos;
- fd.indexOfKeyAfterFirstSync = Integer.parseInt(currentKey.substring(4)) + 1;
- firstSyncSeen = true;
- }
- }
- }
-
- } catch (Exception e) {
- fail(e.toString());
- }
- iter.close();
- hoplog.close();
- return fd;
- }
-
- public void testClear() throws Exception {
- int count = 10;
- int bucketId = (int) System.nanoTime();
- HDFSUnsortedHoplogOrganizer organizer = new HDFSUnsortedHoplogOrganizer(regionManager, bucketId);
-
- // flush and create hoplog
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- for (int i = 0; i < count; i++) {
- items.add(new TestEvent(("key-" + i), ("value-" + System.nanoTime())));
- }
- organizer.flush(items.iterator(), count);
- organizer.closeCurrentWriter();
- // check file existence in bucket directory
- FileStatus[] hoplogs = getBucketHoplogs(getName() + "/" + bucketId,
- AbstractHoplogOrganizer.SEQ_HOPLOG_EXTENSION);
- assertEquals(1, hoplogs.length);
- readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 0);
-
-
- // write another batch but do not close the current writer.
- organizer.flush(items.iterator(), count);
-
- organizer.clear();
-
- hoplogs = getBucketHoplogs(getName() + "/" + bucketId,
- AbstractHoplogOrganizer.SEQ_HOPLOG_EXTENSION);
- // check file existence in bucket directory
- FileStatus[] expiredhoplogs = getBucketHoplogs(getName() + "/" + bucketId,
- AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
-
- // two expired hoplogs should exist
- assertEquals(2, expiredhoplogs.length);
- assertEquals(2, hoplogs.length);
- // the expired hoplog names should match the original hoplog names
- assertTrue(expiredhoplogs[0].getPath().getName().equals(hoplogs[0].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) ||
- expiredhoplogs[1].getPath().getName().equals(hoplogs[0].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) );
- assertTrue(expiredhoplogs[0].getPath().getName().equals(hoplogs[1].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) ||
- expiredhoplogs[1].getPath().getName().equals(hoplogs[1].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) );
-
- // A second clear should be harmless and should not create extra files.
- organizer.clear();
- hoplogs = getBucketHoplogs(getName() + "/" + bucketId,
- AbstractHoplogOrganizer.SEQ_HOPLOG_EXTENSION);
- // check file existence in bucket directory
- expiredhoplogs = getBucketHoplogs(getName() + "/" + bucketId,
- AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
-
- // two expired hoplogs should exist
- assertEquals(2, expiredhoplogs.length);
- assertEquals(2, hoplogs.length);
- // the expired hoplog names should match the original hoplog names
- assertTrue(expiredhoplogs[0].getPath().getName().equals(hoplogs[0].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) ||
- expiredhoplogs[1].getPath().getName().equals(hoplogs[0].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) );
- assertTrue(expiredhoplogs[0].getPath().getName().equals(hoplogs[1].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) ||
- expiredhoplogs[1].getPath().getName().equals(hoplogs[1].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) );
-
-
- readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 0);
- readSequenceFile(hdfsStore.getFileSystem(), hoplogs[1].getPath(), 0);
- }
-
- public void readSequenceFile(FileSystem inputFS, Path sequenceFileName, int index) throws IOException {
- readSequenceFile(inputFS, sequenceFileName, index, -1, 0, -1);
- }
- /**
- * Reads the sequence file, assuming its keys and values start from the index
- * given as a parameter.
- *
- */
- public void readSequenceFile(FileSystem inputFS, Path sequenceFileName, int index, int endIndex,
- int startoffset, int length) throws IOException {
- SequenceFileHoplog hoplog = new SequenceFileHoplog(inputFS, sequenceFileName, null);
-
- HoplogIterator<byte[], byte[]> iter = null;
- if (length == -1){
- iter = hoplog.getReader().scan();
- }
- else {
- iter = hoplog.getReader().scan(startoffset, length);
- }
-
- try {
- while (iter.hasNext()) {
- iter.next();
- PersistedEventImpl te = UnsortedHoplogPersistedEvent.fromBytes(iter.getValue());
- String stringkey = (String) CacheServerHelper.deserialize(iter.getKey());
- assertTrue("Expected key: key-" + index + ". Actual key: " + stringkey, stringkey.equals("key-" + index));
- index++;
- }
- if (endIndex != -1)
- assertTrue("The keys should have run until key-" + endIndex + " but they end at key-" + (index - 1), index == endIndex + 1);
- } catch (Exception e) {
- fail(e.toString());
- }
- iter.close();
- hoplog.close();
- }
-
-}
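
The partial-scan cases above lean on Hadoop SequenceFile sync markers: a reader positioned between two markers skips ahead to the next marker before returning records, which is why a start offset inside the header still yields the first key. A hedged sketch against the plain Hadoop API (the file path is hypothetical, and the BytesWritable key/value types are assumed to match the file):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.BytesWritable;
    import org.apache.hadoop.io.SequenceFile;

    public class SyncMarkerSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);
        Path file = new Path("/tmp/example.hop");  // hypothetical sequence file
        SequenceFile.Reader reader = new SequenceFile.Reader(fs, file, conf);
        BytesWritable key = new BytesWritable();
        BytesWritable value = new BytesWritable();
        while (reader.next(key, value)) {
          long pos = reader.getPosition();  // byte offset just past this record
          if (reader.syncSeen()) {
            // a sync marker preceded this record; offset-based scans resume here
            System.out.println("sync marker before record ending at " + pos);
          }
        }
        reader.close();
      }
    }
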
[10/15] incubator-geode git commit: GEODE-429: Remove HDFS
persistence DataPolicy
Posted by as...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSPersistenceBasicDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSPersistenceBasicDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSPersistenceBasicDUnitTest.java
deleted file mode 100644
index 2f0cb3f..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSPersistenceBasicDUnitTest.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.File;
-
-import com.gemstone.gemfire.cache.AttributesFactory;
-import com.gemstone.gemfire.cache.DataPolicy;
-import com.gemstone.gemfire.cache.EvictionAction;
-import com.gemstone.gemfire.cache.EvictionAttributes;
-import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
-
-import dunit.SerializableCallable;
-
-@SuppressWarnings({ "serial", "rawtypes", "deprecation" })
-public class RegionWithHDFSPersistenceBasicDUnitTest extends
- RegionWithHDFSBasicDUnitTest {
-
- public RegionWithHDFSPersistenceBasicDUnitTest(String name) {
- super(name);
- }
-
- @Override
- protected SerializableCallable getCreateRegionCallable(final int totalnumOfBuckets,
- final int batchSizeMB, final int maximumEntries, final String folderPath,
- final String uniqueName, final int batchInterval, final boolean queuePersistent,
- final boolean writeonly, final long timeForRollover, final long maxFileSize) {
- SerializableCallable createRegion = new SerializableCallable() {
- public Object call() throws Exception {
- AttributesFactory af = new AttributesFactory();
- af.setDataPolicy(DataPolicy.HDFS_PERSISTENT_PARTITION);
- PartitionAttributesFactory paf = new PartitionAttributesFactory();
- paf.setTotalNumBuckets(totalnumOfBuckets);
- paf.setRedundantCopies(1);
-
- af.setHDFSStoreName(uniqueName);
-
- af.setPartitionAttributes(paf.create());
- HDFSStoreFactory hsf = getCache().createHDFSStoreFactory();
- // Going two levels up to avoid home directories getting created in a
- // VM-specific directory. This avoids failures in those tests where
- // datastores are restarted and bucket ownership changes between VMs.
- homeDir = new File(tmpDir + "/../../" + folderPath).getCanonicalPath();
- hsf.setHomeDir(homeDir);
- hsf.setBatchSize(batchSizeMB);
- hsf.setBufferPersistent(queuePersistent);
- hsf.setMaxMemory(3);
- hsf.setBatchInterval(batchInterval);
- if (timeForRollover != -1) {
- hsf.setWriteOnlyFileRolloverInterval((int)timeForRollover);
- System.setProperty("gemfire.HDFSRegionDirector.FILE_ROLLOVER_TASK_INTERVAL_SECONDS", "1");
- }
- if (maxFileSize != -1) {
- hsf.setWriteOnlyFileRolloverSize((int) maxFileSize);
- }
- hsf.create(uniqueName);
-
- af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(maximumEntries, EvictionAction.LOCAL_DESTROY));
-
- af.setHDFSWriteOnly(writeonly);
- Region r = createRootRegion(uniqueName, af.create());
- ((LocalRegion)r).setIsTest();
-
- return 0;
- }
- };
- return createRegion;
- }
-}
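
The callable above mixes region attributes with store creation. Isolating just the HDFSStoreFactory wiring, assuming (as the test does) that the Cache exposes createHDFSStoreFactory(); the values below are illustrative, not the DUnit parameters:

    import com.gemstone.gemfire.cache.Cache;
    import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;

    public class HdfsStoreSketch {
      static void createStore(Cache cache) {
        HDFSStoreFactory hsf = cache.createHDFSStoreFactory();
        hsf.setHomeDir("/tmp/hdfs-home");  // hypothetical home directory
        hsf.setBatchSize(32);              // batch size in MB, as in the test parameter
        hsf.setBufferPersistent(true);     // persist the async queue
        hsf.setMaxMemory(3);               // same value the DUnit test hard-codes
        hsf.setBatchInterval(1000);        // illustrative interval
        hsf.create("exampleStore");        // hypothetical store name
      }
    }
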
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsJUnitTest.java
deleted file mode 100644
index 5e2ba4f..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsJUnitTest.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.internal.cache;
-
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-/**
- * Tests region operations when entries are not yet persisted
- * to HDFS but are still in the HDFSAsyncQueue.
- * @author sbawaska
- */
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSQueueRegionOperationsJUnitTest extends
- HDFSRegionOperationsJUnitTest {
-
- @Override
- protected int getBatchTimeInterval() {
- return 50*1000;
- }
-
- @Override
- protected void sleep(String regionPath) {
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
deleted file mode 100644
index 24cd1dc..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.internal.cache;
-
-import java.util.Properties;
-
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.PartitionAttributes;
-import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionFactory;
-import com.gemstone.gemfire.cache.RegionShortcut;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSQueueRegionOperationsOffHeapJUnitTest extends HDFSQueueRegionOperationsJUnitTest {
- static {
- System.setProperty("gemfire.trackOffHeapRefCounts", "true");
- }
-
- @Override
- public void tearDown() throws Exception {
- super.tearDown();
- OffHeapTestUtil.checkOrphans();
- }
- @Override
- protected Region<Integer, String> createRegion(String regionName) {
- RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION);
- PartitionAttributes prAttr = new PartitionAttributesFactory().setTotalNumBuckets(10).create();
- rf.setPartitionAttributes(prAttr);
- rf.setOffHeap(true);
-// rf.setHDFSStoreName(hdfsStore.getName());
- Region<Integer, String> r = rf.create(regionName);
-// addListener(r);
-
- ((PartitionedRegion) r).setQueryHDFS(true);
- return r;
- }
- @Override
- protected Properties getDSProps() {
- Properties props = super.getDSProps();
- props.setProperty("off-heap-memory-size", "50m");
- return props;
- }
-
-}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
deleted file mode 100644
index d96e31b..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
+++ /dev/null
@@ -1,542 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.internal.cache;
-
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.util.Properties;
-import java.util.Random;
-import java.util.Set;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.fs.Path;
-import org.junit.FixMethodOrder;
-import org.junit.experimental.categories.Category;
-import org.junit.runners.MethodSorters;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.DataPolicy;
-import com.gemstone.gemfire.cache.DiskStore;
-import com.gemstone.gemfire.cache.EvictionAction;
-import com.gemstone.gemfire.cache.EvictionAlgorithm;
-import com.gemstone.gemfire.cache.PartitionAttributes;
-import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionAttributes;
-import com.gemstone.gemfire.cache.RegionDestroyedException;
-import com.gemstone.gemfire.cache.RegionFactory;
-import com.gemstone.gemfire.cache.RegionShortcut;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.cardinality.HyperLogLog;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
-import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceType;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-/**
- * Tests that region operations work as expected when data is in HDFS.
- * This test explicitly clears the in-memory ConcurrentHashMaps that
- * back AbstractRegionMap before validating region operations.
- *
- * @author sbawaska
- */
-@FixMethodOrder(MethodSorters.NAME_ASCENDING)
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSRegionOperationsJUnitTest extends TestCase {
-
- protected Cache cache;
- protected HDFSStore hdfsStore;
-
- public void setUp() throws Exception {
- Properties props = getDSProps();
- cache = new CacheFactory(props).create();
- System.setProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP, "true");
- String storeName = getName()+"-store";
- HDFSStoreFactory hsf = cache.createHDFSStoreFactory();
- hsf.setHomeDir(getName()+"-test");
- hsf.setBatchInterval(getBatchTimeInterval());
- hdfsStore = hsf.create(storeName);
- }
-
- protected Properties getDSProps() {
- Properties props = new Properties();
- props.put("mcast-port", "0");
- props.put("locators", "");
- props.put("log-level", "config");
- return props;
- }
-
- public void tearDown() throws Exception {
- for (Region r : cache.rootRegions()) {
- if (r != null) {
- r.close();
- }
- }
-
- if (cache.getRegion(getName()) != null) {
- cache.getRegion(getName()).destroyRegion();
- }
- DiskStore ds = cache.findDiskStore(null);
- if (ds != null) {
- ds.destroy();
- }
-
- ((HDFSStoreImpl)hdfsStore).getFileSystem().delete(new Path(hdfsStore.getHomeDir()), true);
- }
-
- protected int getBatchTimeInterval() {
- return 1000;
- }
-
- protected Region<Integer, String> createRegion(String regionName) {
- RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION);
- PartitionAttributes prAttr = new PartitionAttributesFactory().setTotalNumBuckets(10).create();
- rf.setPartitionAttributes(prAttr);
-// rf.setHDFSStoreName(hdfsStore.getName());
- Region<Integer, String> r = rf.create(regionName);
-
- ((PartitionedRegion) r).setQueryHDFS(true);
- return r;
- }
-
- protected void clearBackingCHM(Region<Integer, String> r) {
- PartitionedRegion pr = (PartitionedRegion)r;
- for (BucketRegion br : pr.getDataStore().getAllLocalBucketRegions()) {
- assertTrue(br.getRegionMap() instanceof HDFSRegionMap);
- ((AbstractRegionMap)br.getRegionMap())._getMap().clear();
- // wait here to make sure that the queue has been flushed
- }
- sleep(pr.getFullPath());
- }
-
- protected void sleep(String regionPath) {
- String qname = HDFSStoreFactoryImpl.getEventQueueName(regionPath);
- GemFireCacheImpl.getExisting().waitForSenderQueueFlush(qname, true, 30);
- }
-
- public void test010PUTDMLSupport() {
- Region<Integer, String> r = createRegion(getName());
- SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + getName());
- assertEquals(0, stats.getRead().getCount());
- for (int i=0; i<100; i++) {
- r.put(i, "value"+i);
- }
- assertEquals(100, stats.getRead().getCount());
- sleep(r.getFullPath());
- clearBackingCHM(r);
- LocalRegion lr = (LocalRegion) r;
- for (int i=0; i<200; i++) {
- EntryEventImpl ev = lr.newPutEntryEvent(i, "value"+i, null);
- lr.validatedPut(ev, System.currentTimeMillis());
- }
- // verify that read count on HDFS does not change
- assertEquals(100, stats.getRead().getCount());
- sleep(r.getFullPath());
- clearBackingCHM(r);
- for (int i=0; i<200; i++) {
- assertEquals("value"+i, r.get(i));
- }
- if (getBatchTimeInterval() > 1000) {
- // reads from async queue
- assertEquals(100, stats.getRead().getCount());
- } else {
- assertEquals(300, stats.getRead().getCount());
- }
- }
-
- public void test020GetOperationalData() throws Exception {
- Region<Integer, String> r = createRegion(getName());
- SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + getName());
- assertEquals(0, stats.getRead().getCount());
- for (int i=0; i<100; i++) {
- r.put(i, "value"+i);
- }
- int expectedReadsFromHDFS = 100;
- assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
- sleep(r.getFullPath());
- clearBackingCHM(r);
- LocalRegion lr = (LocalRegion) r;
- for (int i=0; i<200; i++) {
- if (i < 100) {
- assertEquals("value"+i, r.get(i));
- } else {
- assertNull(r.get(i));
- }
- }
- if (getBatchTimeInterval() > 1000) {
- // reads from async queue
- expectedReadsFromHDFS = 200; // initial 100 + 100 for misses
- } else {
- expectedReadsFromHDFS = 300; // initial 100 + 200 for reads
- }
- assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
- for (int i=0; i<200; i++) {
- assertNull(lr.get(i, null, true, false, false, null, null, false, false/*allowReadFromHDFS*/));
- }
- // no increase in HDFS reads
- assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
-
- /** MergeGemXDHDFSToGFE: have not merged this API because it is not called by any code. */
- // test the dataView API
- //for (int i=0; i<200; i++) {
- // assertNull(lr.getDataView().getLocally(i, null, i%10, lr, true, true, null, null, false, false/*allowReadFromHDFS*/));
- //}
- // no increase in HDFS reads
- assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
- }
-
- public void test030RemoveOperationalData() throws Exception {
- Region<Integer, String> r = createRegion(getName());
- SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + getName());
- assertEquals(0, stats.getRead().getCount());
- for (int i=0; i<100; i++) {
- r.put(i, "value"+i);
- }
- int expectedReadsFromHDFS = 100;
- assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
- sleep(r.getFullPath());
- PartitionedRegion lr = (PartitionedRegion) r;
- for(int i =0; i < 50; i++) {
- lr.getBucketRegion(i).customEvictDestroy(i);
- }
- for (int i=0; i<200; i++) {
- if (i < 100) {
- assertEquals("value"+i, r.get(i));
- } else {
- assertNull(r.get(i));
- }
- }
- if (getBatchTimeInterval() > 1000) {
- // reads from async queue
- expectedReadsFromHDFS = 200; // initial 100 + 100 for misses
- } else {
- expectedReadsFromHDFS = 250; // initial 100 + 100 for misses + 50 for reads of evicted keys
- }
- assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
- for (int i=0; i<50; i++) {
- assertNull(lr.get(i, null, true, false, false, null, null, false, false/*allowReadFromHDFS*/));
- }
- for (int i=50; i<100; i++) {
- assertEquals("value"+i, lr.get(i, null, true, false, false, null,null, false, false/*allowReadFromHDFS*/));
- }
- for (int i=100; i<200; i++) {
- assertNull(lr.get(i, null, true, false, false, null, null, false, false/*allowReadFromHDFS*/));
- }
- // no increase in HDFS reads
- assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
- }
-
- public void _test040NoAutoEviction() throws Exception {
- if (!cache.isClosed()) {
- tearDown();
- cache.close();
- System.setProperty("gemfire.disableAutoEviction", "true");
- setUp();
- }
- Region<Integer, String> r = createRegion(getName());
- System.setProperty("gemfire.disableAutoEviction", "false");
- for (int i =0; i<5; i++) {
- r.put(i, "value"+i);
- }
- PartitionedRegion pr = (PartitionedRegion) r;
- BucketRegion br = pr.getBucketRegion(1);
- assertNotNull(br.getAttributes().getEvictionAttributes());
- assertEquals(EvictionAlgorithm.NONE, br.getAttributes().getEvictionAttributes().getAlgorithm());
-
- GemFireCacheImpl cache = (GemFireCacheImpl) r.getCache();
- assertEquals(0.0f, cache.getResourceManager().getEvictionHeapPercentage());
- }
-
- public void test050LRURegionAttributesForPR() {
- RegionFactory<Integer, String> rf = cache.createRegionFactory();
-// rf.setHDFSStoreName(hdfsStore.getName());
- rf.setDataPolicy(DataPolicy.HDFS_PARTITION);
- verifyLRURegionAttributesForPR(rf.create(getName()));
- }
-
- public void test060LRURegionAttributesForRegionShortcutPR() {
- verifyLRURegionAttributesForPR(createRegion(getName()));
- }
-
- private void verifyLRURegionAttributesForPR(Region r) {
- for (int i =0; i<200; i++) {
- r.put(i, "value"+i);
- }
- RegionAttributes<Integer, String> ra = r.getAttributes();
- assertNotNull(ra.getEvictionAttributes());
- // default eviction action for region shortcut
- assertEquals(EvictionAction.OVERFLOW_TO_DISK, ra.getEvictionAttributes().getAction());
-
- GemFireCacheImpl cache = (GemFireCacheImpl) r.getCache();
- assertEquals(80.0f, cache.getResourceManager().getEvictionHeapPercentage());
- DiskStore ds = cache.findDiskStore(null);
- assertNotNull(ds);
- Set s = cache.getResourceManager().getResourceListeners(ResourceType.HEAP_MEMORY);
- Iterator it = s.iterator();
- boolean regionFound = false;
- while (it.hasNext()) {
- Object o = it.next();
- if (o instanceof PartitionedRegion) {
- PartitionedRegion pr = (PartitionedRegion) o;
- if (getName().equals(pr.getName())) {
- regionFound = true;
- } else {
- continue;
- }
- for (BucketRegion br : pr.getDataStore().getAllLocalBucketRegions()) {
- assertNotNull(br.getAttributes().getEvictionAttributes());
- assertEquals(EvictionAlgorithm.LRU_HEAP, br.getAttributes().getEvictionAttributes().getAlgorithm());
- assertEquals(EvictionAction.OVERFLOW_TO_DISK, br.getAttributes().getEvictionAttributes().getAction());
- }
- }
- }
- assertTrue(regionFound);
-
- }
-
- public void test070SizeEstimate() {
- Region<Integer, String> r = createRegion(getName());
- int size = 226;
- Random rand = new Random();
- for (int i=0; i<size; i++) {
- r.put(rand.nextInt(), "value"+i);
- }
- // size before flush
- LocalRegion lr = (LocalRegion) r;
- long estimate = lr.sizeEstimate();
- double err = Math.abs(estimate - size) / (double) size;
- // on a busy system the flush might start before we call sizeEstimate, so rather than
- // testing for equality, allow an error margin. fixes bug 49381
- assertTrue("size:"+size+" estimate:"+estimate, err < 0.02 * 10); // each bucket can have an error of 0.02
-
- // size after flush
- sleep(r.getFullPath());
- estimate = lr.sizeEstimate();
- err = Math.abs(estimate - size) / (double) size;
- assertTrue("size:"+size+" estimate:"+estimate, err < 0.02 * 10); // each bucket can have an error of 0.02
- }
-
- public void test080PutGet() throws InterruptedException {
- Region<Integer, String> r = createRegion(getName());
- for (int i=0; i<100; i++) {
- r.put(i, "value"+i);
- }
- clearBackingCHM(r);
- for (int i=0; i<100; i++) {
- assertEquals("value"+i, r.get(i));
- }
-
- //Do a put while there are entries in the map
- r.put(0, "value"+0);
-
- r.destroy(1, "value"+1);
- }
-
- public void test090Delete() {
- Region<Integer, String> r = createRegion(getName());
- for (int i=0; i<11; i++) {
- r.put(i, "value"+i);
- }
- clearBackingCHM(r);
- int delKey = 9;
- r.destroy(delKey);
- assertNull(r.get(delKey));
- assertFalse(r.containsKey(delKey));
- }
-
- public void test100Invalidate() {
- Region<Integer, String> r = createRegion(getName());
- for (int i=0; i<100; i++) {
- r.put(i, "value"+i);
- }
- clearBackingCHM(r);
- int invKey = 9;
- r.invalidate(invKey);
- assertNull(r.get(invKey));
- assertTrue(r.containsKey(invKey));
- }
-
- public void test110Size() {
- Region<Integer, String> r = createRegion(getName());
- for (int i=0; i<100; i++) {
- r.put(i, "value"+i);
- }
- clearBackingCHM(r);
- assertEquals(100, r.size());
- r.destroy(45);
- assertEquals(99, r.size());
- r.invalidate(55);
- r.invalidate(65);
- assertEquals(99, r.size());
- }
-
- public void test120KeyIterator() {
- Region<Integer, String> r = createRegion(getName());
- for (int i=0; i<100; i++) {
- r.put(i, "value"+i);
- }
- clearBackingCHM(r);
- Set<Integer> keys = r.keySet();
- int c = 0;
- for (int i : keys) {
-// assertEquals(c, i);
- c++;
- }
- assertEquals(100, c);
- assertEquals(100, keys.size());
- int delKey = 88;
- r.destroy(delKey);
- r.invalidate(39);
- keys = r.keySet();
- c = 0;
- for (int i : keys) {
- if (c == delKey) {
- c++;
- }
-// assertEquals(c, i);
- c++;
- }
- assertEquals(99, keys.size());
- }
-
- public void test130EntriesIterator() {
- Region<Integer, String> r = createRegion(getName());
- for (int i=0; i<100; i++) {
- r.put(i, "value"+i);
- }
- clearBackingCHM(r);
- Set<Entry<Integer, String>> entries = r.entrySet();
- int c = 0;
- for (Entry<Integer, String> e : entries) {
-// assertEquals(c, (int) e.getKey());
- assertEquals("value"+e.getKey(), e.getValue());
- c++;
- }
- assertEquals(100, c);
- assertEquals(100, entries.size());
- int delKey = 88;
- r.destroy(delKey);
- int invKey = 39;
- r.invalidate(invKey);
- entries = r.entrySet();
- c = 0;
- for (Entry<Integer, String> e : entries) {
- if (c == delKey) {
- c++;
- } else if (e.getKey() == invKey) {
-// assertEquals(c, (int) e.getKey());
- assertNull(e.getValue());
- } else {
-// assertEquals(c, (int) e.getKey());
- assertEquals("value"+e.getKey(), e.getValue());
- }
- c++;
- }
- assertEquals(99, entries.size());
- }
-
- public void test140ContainsKey() {
- Region<Integer, String> r = createRegion(getName());
- for (int i=0; i<100; i++) {
- r.put(i, "value"+i);
- }
- clearBackingCHM(r);
- assertTrue(r.containsKey(80));
- r.destroy(80);
- assertFalse(r.containsKey(80));
- r.invalidate(64);
- assertTrue(r.containsKey(64));
- }
-
- public void test150ContainsValue() {
- Region<Integer, String> r = createRegion(getName());
- for (int i=0; i<100; i++) {
- r.put(i, "value"+i);
- }
- clearBackingCHM(r);
- assertTrue(r.containsValue("value45"));
- r.destroy(45);
- assertFalse(r.containsValue("value45"));
- r.invalidate(64);
- assertFalse(r.containsValue("value64"));
- }
-
- public void test160DestroyRegion() {
- Region<Integer, String> r = createRegion(getName());
- for (int i=0; i<100; i++) {
- r.put(i, "value"+i);
- }
- clearBackingCHM(r);
- r.destroyRegion();
- try {
- r.get(3);
- fail("expected exception not thrown");
- } catch (RegionDestroyedException expected) {
- }
- }
-
- public void test170PutIfAbsent() {
- Region<Integer, String> r = createRegion(getName());
- r.put(1, "value1");
- clearBackingCHM(r);
- assertEquals("value1", r.putIfAbsent(1, "value2"));
- }
-
- public void test180Replace() {
- Region<Integer, String> r = createRegion(getName());
- assertNull(r.replace(1, "value"));
- r.put(1, "value1");
- clearBackingCHM(r);
- assertEquals("value1", r.replace(1, "value2"));
- }
-
- public void test190ReplaceKVV() {
- Region<Integer, String> r = createRegion(getName());
- assertFalse(r.replace(1, "oldValue", "newValue"));
- r.put(1, "value1");
- clearBackingCHM(r);
- assertTrue(r.replace(1, "value1", "value2"));
- }
-
- public void test200Accuracy() throws IOException {
- double sum=0.0;
- int iter = 10;
- for (int t=0; t<iter; t++) {
- Random r = new Random();
- HashSet<Integer> vals = new HashSet<Integer>();
- HyperLogLog hll = new HyperLogLog(0.03);
- //HyperLogLog hll = new HyperLogLog(0.1);
- double accuracy = 0.0;
- for (int i = 0; i < 2 * 1000000; i++) {
- int val = r.nextInt();
- vals.add(val);
- hll.offer(val);
- }
- long size = vals.size();
- long est = hll.cardinality();
-
- accuracy = 100.0 * (size - est) / est;
- System.out.printf("Accuracy is %f hll size is %d\n", accuracy, hll.getBytes().length);
- sum+=Math.abs(accuracy);
- }
- double avgAccuracy = sum/(iter*1.0);
- System.out.println("Avg accuracy is:"+avgAccuracy);
- assertTrue(avgAccuracy < 6);
- }
-}
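Aside: the HDFS read counts asserted in test020 and test030 above follow from a
small piece of accounting that the deleted code only hints at. The figures below
are derived from the code itself; the one-read-per-put term is an inference from
test010's first assertion (each put appears to read HDFS once to look up any
prior value for the key):

    test020 (CHM cleared, batch interval <= 1 s):
        300 = 100 (one read per put) + 200 (all 200 gets fall through to HDFS)
    test030 (no clear, keys 0..49 evict-destroyed, batch interval <= 1 s):
        250 = 100 (one read per put) + 50 (re-reads of evicted keys 0..49)
              + 100 (misses for keys 100..199); keys 50..99 stay in memory

In the queue-variant subclass (batch interval 50 s) entries are still in the
async queue, so hits are served from the queue and only the 100 misses add
HDFS reads, giving the 200 expected in that branch.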
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
deleted file mode 100644
index de2aae3..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.internal.cache;
-
-import java.util.Iterator;
-import java.util.Properties;
-
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.PartitionAttributes;
-import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionFactory;
-import com.gemstone.gemfire.cache.RegionShortcut;
-import com.gemstone.gemfire.internal.util.concurrent.CustomEntryConcurrentHashMap;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSRegionOperationsOffHeapJUnitTest extends HDFSRegionOperationsJUnitTest {
- static {
- System.setProperty("gemfire.trackOffHeapRefCounts", "true");
- System.setProperty("gemfire.trackOffHeapFreedRefCounts", "true");
- }
-
- @Override
- protected void clearBackingCHM(Region<Integer, String> r) {
- PartitionedRegion pr = (PartitionedRegion)r;
- for (BucketRegion br : pr.getDataStore().getAllLocalBucketRegions()) {
- assertTrue(br.getRegionMap() instanceof HDFSRegionMap);
- CustomEntryConcurrentHashMap chm = ((AbstractRegionMap)br.getRegionMap())._getMap();
- Iterator it = chm.keySet().iterator();
- while (it.hasNext()) {
- Object key = it.next();
- OffHeapRegionEntry re = (OffHeapRegionEntry) chm.remove(key);
- assert re != null;
- re.release();
- }
- // wait here to make sure that the queue has been flushed
- }
- sleep(pr.getFullPath());
- }
-
- @Override
- public void tearDown() throws Exception {
-
- OffHeapTestUtil.checkOrphans();
- super.tearDown();
- }
- @Override
- protected Region<Integer, String> createRegion(String regionName) {
- RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION);
- PartitionAttributes prAttr = new PartitionAttributesFactory().setTotalNumBuckets(10).create();
- rf.setPartitionAttributes(prAttr);
- rf.setOffHeap(true);
-// rf.setHDFSStoreName(hdfsStore.getName());
- Region<Integer, String> r = rf.create(regionName);
-// addListener(r);
-
- ((PartitionedRegion) r).setQueryHDFS(true);
- return r;
- }
- @Override
- protected Properties getDSProps() {
- Properties props = super.getDSProps();
- props.setProperty("off-heap-memory-size", "50m");
- return props;
- }
-
-
-
-}
[09/15] incubator-geode git commit: GEODE-429: Remove test category HoplogTests
Posted by as...@apache.org.
GEODE-429: Remove test category HoplogTests
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/8fb5edd3
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/8fb5edd3
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/8fb5edd3
Branch: refs/heads/feature/GEODE-409
Commit: 8fb5edd349ac388fec2d5f665119f26244343703
Parents: f2390a1
Author: Ashvin Agrawal <as...@apache.org>
Authored: Mon Oct 19 15:08:18 2015 -0700
Committer: Ashvin Agrawal <as...@apache.org>
Committed: Wed Oct 21 08:55:23 2015 -0700
----------------------------------------------------------------------
.../cache/hdfs/internal/SignalledFlushObserverJUnitTest.java | 7 +++----
.../cache/hdfs/internal/SortedListForAsyncQueueJUnitTest.java | 7 +++----
.../gemstone/gemfire/test/junit/categories/HoplogTest.java | 7 -------
3 files changed, 6 insertions(+), 15 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8fb5edd3/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserverJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserverJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserverJUnitTest.java
index e6b7aa8..92328f8 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserverJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserverJUnitTest.java
@@ -5,15 +5,14 @@ import java.util.concurrent.atomic.AtomicInteger;
import org.junit.experimental.categories.Category;
-import junit.framework.TestCase;
-
import com.gemstone.gemfire.cache.hdfs.internal.FlushObserver.AsyncFlushResult;
import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-@Category({IntegrationTest.class, HoplogTest.class})
+import junit.framework.TestCase;
+
+@Category({IntegrationTest.class})
public class SignalledFlushObserverJUnitTest extends TestCase {
private AtomicInteger events;
private AtomicInteger delivered;
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8fb5edd3/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SortedListForAsyncQueueJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SortedListForAsyncQueueJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SortedListForAsyncQueueJUnitTest.java
index 6fa1ff1..0acaf8e 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SortedListForAsyncQueueJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SortedListForAsyncQueueJUnitTest.java
@@ -17,8 +17,6 @@ import java.util.concurrent.ConcurrentSkipListSet;
import org.junit.experimental.categories.Category;
-import junit.framework.TestCase;
-
import com.gemstone.gemfire.cache.Cache;
import com.gemstone.gemfire.cache.CacheFactory;
import com.gemstone.gemfire.cache.Operation;
@@ -41,16 +39,17 @@ import com.gemstone.gemfire.internal.cache.LocalRegion;
import com.gemstone.gemfire.internal.cache.PartitionedRegion;
import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerHelper;
import com.gemstone.gemfire.internal.cache.wan.GatewaySenderAttributes;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+import junit.framework.TestCase;
+
/**
* A test class for testing the functionality of the sorted async queue.
*
* @author Hemant Bhanawat
*/
-@Category({IntegrationTest.class, HoplogTest.class})
+@Category({IntegrationTest.class})
public class SortedListForAsyncQueueJUnitTest extends TestCase {
public SortedListForAsyncQueueJUnitTest() {
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8fb5edd3/gemfire-junit/src/test/java/com/gemstone/gemfire/test/junit/categories/HoplogTest.java
----------------------------------------------------------------------
diff --git a/gemfire-junit/src/test/java/com/gemstone/gemfire/test/junit/categories/HoplogTest.java b/gemfire-junit/src/test/java/com/gemstone/gemfire/test/junit/categories/HoplogTest.java
deleted file mode 100644
index 08987a5..0000000
--- a/gemfire-junit/src/test/java/com/gemstone/gemfire/test/junit/categories/HoplogTest.java
+++ /dev/null
@@ -1,7 +0,0 @@
-package com.gemstone.gemfire.test.junit.categories;
-/**
- * JUnit Test Category that specifies a test with very narrow and well defined
- * scope. Any complex dependencies and interactions are stubbed or mocked.
- */
-public interface HoplogTest {
-}
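Aside: a JUnit 4 category such as the HoplogTest interface deleted above is
nothing more than a marker type referenced from @Category annotations, which is
why removing it only takes deleting the interface and dropping it from each
@Category list, exactly as the hunks above do. A minimal, self-contained sketch
of the mechanism (all names below are hypothetical, not from this commit):

    import org.junit.Test;
    import org.junit.experimental.categories.Categories;
    import org.junit.experimental.categories.Categories.IncludeCategory;
    import org.junit.experimental.categories.Category;
    import org.junit.runner.RunWith;
    import org.junit.runners.Suite.SuiteClasses;

    // Marker interface: no members, exists only to be named in @Category.
    interface FastTest {
    }

    class ArithmeticTest {
      @Test
      @Category(FastTest.class) // tags this test method with the category
      public void addition() {
      }
    }

    // Suite that runs only the tests tagged with FastTest.
    @RunWith(Categories.class)
    @IncludeCategory(FastTest.class)
    @SuiteClasses(ArithmeticTest.class)
    class FastSuite {
    }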
[04/15] incubator-geode git commit: GEODE-429: Remove hdfsStore gfsh commands
Posted by as...@apache.org.
GEODE-429: Remove hdfsStore gfsh commands
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/7f251978
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/7f251978
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/7f251978
Branch: refs/heads/feature/GEODE-409
Commit: 7f251978c9730c403534a62fb385e922eecc8e5b
Parents: 7bcc1e4
Author: Ashvin Agrawal <as...@apache.org>
Authored: Mon Oct 19 14:01:01 2015 -0700
Committer: Ashvin Agrawal <as...@apache.org>
Committed: Wed Oct 21 08:55:22 2015 -0700
----------------------------------------------------------------------
.../gemfire/internal/redis/RegionProvider.java | 2 +-
.../gemfire/management/cli/ConverterHint.java | 1 -
.../CreateAlterDestroyRegionCommands.java | 12 +-
.../cli/commands/HDFSStoreCommands.java | 695 ---------------
.../cli/converters/HdfsStoreNameConverter.java | 88 --
.../cli/functions/AlterHDFSStoreFunction.java | 228 -----
.../cli/functions/CreateHDFSStoreFunction.java | 124 ---
.../cli/functions/DestroyHDFSStoreFunction.java | 100 ---
.../cli/functions/ListHDFSStoresFunction.java | 102 ---
.../cli/functions/RegionFunctionArgs.java | 66 +-
.../internal/cli/i18n/CliStrings.java | 112 ---
.../HDFSStoreCommandsController.java | 229 -----
.../controllers/ShellCommandsController.java | 28 +-
.../commands/HDFSStoreCommandsJUnitTest.java | 838 -------------------
.../AlterHDFSStoreFunctionJUnitTest.java | 324 -------
.../CreateHDFSStoreFunctionJUnitTest.java | 307 -------
.../DescribeHDFSStoreFunctionJUnitTest.java | 364 --------
.../DestroyHDFSStoreFunctionJUnitTest.java | 305 -------
.../ListHDFSStoresFunctionJUnitTest.java | 319 -------
19 files changed, 17 insertions(+), 4227 deletions(-)
----------------------------------------------------------------------
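Aside: judging from the CliStrings constants and @CliOption keys in the deleted
code below (CREATE_HDFS_STORE, DESCRIBE_HDFS_STORE, LIST_HDFS_STORE,
DESTROY_HDFS_STORE, ALTER_HDFS_STORE and their option keys), the removed
commands surfaced in gfsh roughly as follows; the exact command and option
spellings are inferred from those constants, not quoted from the commit:

    gfsh> create hdfs-store --name=myStore --namenode=hdfs://localhost:8020 --home-dir=myStore-data
    gfsh> describe hdfs-store --name=myStore --member=server1
    gfsh> list hdfs-store
    gfsh> alter hdfs-store --name=myStore --batch-size-mb=64
    gfsh> destroy hdfs-store --name=myStore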
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/internal/redis/RegionProvider.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/redis/RegionProvider.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/redis/RegionProvider.java
index 0240a4c..a01858e 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/redis/RegionProvider.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/redis/RegionProvider.java
@@ -378,7 +378,7 @@ public class RegionProvider implements Closeable {
r = cache.getRegion(key);
if (r != null) return r;
do {
- Result result = cliCmds.createRegion(key, defaultRegionType, null, null, true, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null);
+ Result result = cliCmds.createRegion(key, defaultRegionType, null, null, true, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null);
r = cache.getRegion(key);
if (result.getStatus() == Status.ERROR && r == null) {
String err = "";
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/cli/ConverterHint.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/cli/ConverterHint.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/cli/ConverterHint.java
index f295983..afe8c76 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/cli/ConverterHint.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/management/cli/ConverterHint.java
@@ -41,5 +41,4 @@ public interface ConverterHint {
public static final String LOG_LEVEL = "converter.hint.log.levels";
public static final String STRING_DISABLER = "converter.hint.disable-string-converter";
- public static final String HDFSSTORE_ALL = "converter.hint.cluster.hdfsstore";
}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
index 919d6fe..41cf531 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
@@ -202,14 +202,6 @@ public class CreateAlterDestroyRegionCommands extends AbstractCommandsSupport {
help = CliStrings.CREATE_REGION__GATEWAYSENDERID__HELP)
@CliMetaData (valueSeparator = ",")
String[] gatewaySenderIds,
- @CliOption (key = CliStrings.CREATE_REGION__HDFSSTORE_NAME,
- help = CliStrings.CREATE_REGION__HDFSSTORE_NAME__HELP ,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE)
- String hdfsStoreName,
- @CliOption (key = CliStrings.CREATE_REGION__HDFSSTORE_WRITEONLY,
- help = CliStrings.CREATE_REGION__HDFSSTORE_WRITEONLY__HELP,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE)
- Boolean hdfsWriteOnly,
@CliOption (key = CliStrings.CREATE_REGION__KEYCONSTRAINT,
help = CliStrings.CREATE_REGION__KEYCONSTRAINT__HELP)
String keyConstraint,
@@ -319,7 +311,7 @@ public class CreateAlterDestroyRegionCommands extends AbstractCommandsSupport {
prColocatedWith, prLocalMaxMemory, prRecoveryDelay,
prRedundantCopies, prStartupRecoveryDelay,
prTotalMaxMemory, prTotalNumBuckets,
- offHeap, hdfsStoreName , hdfsWriteOnly, regionAttributes);
+ offHeap, regionAttributes);
if (regionAttributes.getPartitionAttributes() == null && regionFunctionArgs.hasPartitionAttributes()) {
@@ -339,7 +331,7 @@ public class CreateAlterDestroyRegionCommands extends AbstractCommandsSupport {
concurrencyChecksEnabled, cloningEnabled, concurrencyLevel,
prColocatedWith, prLocalMaxMemory, prRecoveryDelay,
prRedundantCopies, prStartupRecoveryDelay,
- prTotalMaxMemory, prTotalNumBuckets, null,compressor, offHeap , hdfsStoreName , hdfsWriteOnly);
+ prTotalMaxMemory, prTotalNumBuckets, null,compressor, offHeap);
if (!regionShortcut.name().startsWith("PARTITION") && regionFunctionArgs.hasPartitionAttributes()) {
throw new IllegalArgumentException(
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommands.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommands.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommands.java
deleted file mode 100644
index 6e573f1..0000000
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommands.java
+++ /dev/null
@@ -1,695 +0,0 @@
-package com.gemstone.gemfire.management.internal.cli.commands;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-import java.util.Set;
-
-import org.springframework.shell.core.annotation.CliAvailabilityIndicator;
-import org.springframework.shell.core.annotation.CliCommand;
-import org.springframework.shell.core.annotation.CliOption;
-
-import com.gemstone.gemfire.SystemFailure;
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.execute.Execution;
-import com.gemstone.gemfire.cache.execute.FunctionInvocationTargetException;
-import com.gemstone.gemfire.cache.execute.ResultCollector;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.cache.execute.AbstractExecution;
-import com.gemstone.gemfire.internal.lang.ClassUtils;
-import com.gemstone.gemfire.management.cli.CliMetaData;
-import com.gemstone.gemfire.management.cli.ConverterHint;
-import com.gemstone.gemfire.management.cli.Result;
-import com.gemstone.gemfire.management.cli.Result.Status;
-import com.gemstone.gemfire.management.internal.cli.CliUtil;
-import com.gemstone.gemfire.management.internal.cli.functions.AlterHDFSStoreFunction;
-import com.gemstone.gemfire.management.internal.cli.functions.AlterHDFSStoreFunction.AlterHDFSStoreAttributes;
-import com.gemstone.gemfire.management.internal.cli.functions.CliFunctionResult;
-import com.gemstone.gemfire.management.internal.cli.functions.CreateHDFSStoreFunction;
-import com.gemstone.gemfire.management.internal.cli.functions.DescribeHDFSStoreFunction;
-import com.gemstone.gemfire.management.internal.cli.functions.DestroyHDFSStoreFunction;
-import com.gemstone.gemfire.management.internal.cli.functions.ListHDFSStoresFunction;
-import com.gemstone.gemfire.management.internal.cli.functions.ListHDFSStoresFunction.HdfsStoreDetails;
-import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
-import com.gemstone.gemfire.management.internal.cli.result.CommandResultException;
-import com.gemstone.gemfire.management.internal.cli.result.CompositeResultData;
-import com.gemstone.gemfire.management.internal.cli.result.ResultBuilder;
-import com.gemstone.gemfire.management.internal.cli.result.ResultDataException;
-import com.gemstone.gemfire.management.internal.cli.result.TabularResultData;
-import com.gemstone.gemfire.management.internal.cli.util.HDFSStoreNotFoundException;
-import com.gemstone.gemfire.management.internal.cli.util.MemberNotFoundException;
-import com.gemstone.gemfire.management.internal.configuration.SharedConfigurationWriter;
-import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
-
-/**
- * The HDFSStoreCommands class encapsulates all GemFire HDFS store commands in Gfsh.
- *
- * @author Namrata Thanvi
- * @see com.gemstone.gemfire.management.internal.cli.commands.AbstractCommandsSupport
- */
-
-
-public class HDFSStoreCommands extends AbstractCommandsSupport {
- @CliCommand (value = CliStrings.CREATE_HDFS_STORE, help = CliStrings.CREATE_HDFS_STORE__HELP)
- @CliMetaData (relatedTopic = CliStrings.TOPIC_GEMFIRE_HDFSSTORE, writesToSharedConfiguration = true)
- public Result createHdfsStore(
- @CliOption (key = CliStrings.CREATE_HDFS_STORE__NAME,
- mandatory = true,
- optionContext = ConverterHint.HDFSSTORE_ALL,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.CREATE_HDFS_STORE__NAME__HELP)
- String hdfsUniqueName,
- @CliOption (key = CliStrings.CREATE_HDFS_STORE__NAMENODE,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.CREATE_HDFS_STORE__NAMENODE__HELP)
- String namenode,
- @CliOption (key = CliStrings.CREATE_HDFS_STORE__HOMEDIR,
- optionContext = ConverterHint.DIR_PATHSTRING,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.CREATE_HDFS_STORE__HOMEDIR__HELP)
- String homeDir,
- @CliOption (key = CliStrings.CREATE_HDFS_STORE__BATCHSIZE,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.CREATE_HDFS_STORE__BATCHSIZE__HELP)
- Integer batchSize,
- @CliOption (key = CliStrings.CREATE_HDFS_STORE__BATCHINTERVAL,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.CREATE_HDFS_STORE__BATCHINTERVAL__HELP)
- Integer batchInterval,
- @CliOption (key = CliStrings.CREATE_HDFS_STORE__READCACHESIZE,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.CREATE_HDFS_STORE__READCACHESIZE__HELP)
- Float readCacheSize,
- @CliOption (key = CliStrings.CREATE_HDFS_STORE__DISPATCHERTHREADS,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.CREATE_HDFS_STORE__DISPATCHERTHREADS__HELP)
- Integer dispatcherThreads,
- @CliOption (key = CliStrings.CREATE_HDFS_STORE__MAXMEMORY,
- mandatory = false,
- unspecifiedDefaultValue =CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.CREATE_HDFS_STORE__MAXMEMORY__HELP)
- Integer maxMemory,
- @CliOption (key = CliStrings.CREATE_HDFS_STORE__BUFFERPERSISTENT,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.CREATE_HDFS_STORE__BUFFERPERSISTENT__HELP)
- Boolean bufferPersistent,
- @CliOption (key = CliStrings.CREATE_HDFS_STORE__SYNCDISKWRITE,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.CREATE_HDFS_STORE__SYNCDISKWRITE__HELP)
- Boolean syncDiskWrite,
- @CliOption (key = CliStrings.CREATE_HDFS_STORE__DISKSTORENAME,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.CREATE_HDFS_STORE__DISKSTORENAME__HELP)
- String diskStoreName,
- @CliOption (key = CliStrings.CREATE_HDFS_STORE__MINORCOMPACT,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.CREATE_HDFS_STORE__MINORCOMPACT__HELP)
- Boolean minorCompact,
- @CliOption (key = CliStrings.CREATE_HDFS_STORE__MINORCOMPACTIONTHREADS,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.CREATE_HDFS_STORE__MINORCOMPACTIONTHREADS__HELP)
- Integer minorCompactionThreads,
- @CliOption (key = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACT,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACT__HELP)
- Boolean majorCompact,
- @CliOption (key = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTINTERVAL,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTINTERVAL__HELP)
- Integer majorCompactionInterval,
- @CliOption (key = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTIONTHREADS,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTIONTHREADS__HELP)
- Integer majorCompactionThreads,
- @CliOption (key = CliStrings.CREATE_HDFS_STORE__PURGEINTERVAL,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.CREATE_HDFS_STORE__PURGEINTERVAL__HELP)
- Integer purgeInterval,
- @CliOption (key = CliStrings.CREATE_HDFS_STORE__WRITEONLYFILESIZE,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.CREATE_HDFS_STORE__WRITEONLYFILESIZE__HELP)
- Integer maxWriteonlyFileSize,
- @CliOption (key = CliStrings.CREATE_HDFS_STORE__FILEROLLOVERINTERVAL,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.CREATE_HDFS_STORE__FILEROLLOVERINTERVAL__HELP)
- Integer fileRolloverInterval,
- @CliOption (key = CliStrings.CREATE_HDFS_STORE__CLIENTCONFIGFILE,
- optionContext = ConverterHint.FILE_PATHSTRING,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.CREATE_HDFS_STORE__CLIENTCONFIGFILE__HELP)
- String clientConfigFile,
- @CliOption(key=CliStrings.CREATE_HDFS_STORE__GROUP,
- help=CliStrings.CREATE_HDFS_STORE__GROUP__HELP,
- optionContext=ConverterHint.MEMBERGROUP)
- @CliMetaData (valueSeparator = ",")
- String[] groups ) {
- try {
-
- return getCreatedHdfsStore(groups, hdfsUniqueName, namenode, homeDir, clientConfigFile, fileRolloverInterval,
- maxWriteonlyFileSize, minorCompact, majorCompact, batchSize, batchInterval, diskStoreName, bufferPersistent,
- dispatcherThreads, syncDiskWrite, readCacheSize, majorCompactionInterval, majorCompactionThreads,
- minorCompactionThreads, purgeInterval, maxMemory);
-
- } catch (VirtualMachineError e) {
- SystemFailure.initiateFailure(e);
- throw e;
-
- } catch (Throwable th) {
- String formattedErrString = CliStrings.format(CliStrings.CREATE_HDFS_STORE__ERROR_WHILE_CREATING_REASON_0,
- new Object[] { th.getMessage() });
- SystemFailure.checkFailure();
- return ResultBuilder.createGemFireErrorResult(formattedErrString);
- }
- }
-
- public Result getCreatedHdfsStore(String[] groups, String hdfsUniqueName, String namenode, String homeDir,
- String clientConfigFile, Integer fileRolloverInterval, Integer maxWriteonlyFileSize, Boolean minorCompact,
- Boolean majorCompact, Integer batchSize, Integer batchInterval, String diskStoreName, Boolean bufferPersistent,
- Integer dispatcherThreads, Boolean syncDiskWrite, Float readCacheSize, Integer majorCompactionInterval,
- Integer majorCompactionThreads, Integer minorCompactionThreads, Integer purgeInterval, Integer maxMemory) {
-
- XmlEntity xmlEntity = null;
-
- Set<DistributedMember> targetMembers = null;
-
- try {
- targetMembers = getGroupMembers(groups);
- } catch (CommandResultException cre) {
- return cre.getResult();
- }
-
- HDFSStoreConfigHolder configHolder = new HDFSStoreConfigHolder();
- configHolder.setName(hdfsUniqueName);
- if (readCacheSize != null)
- configHolder.setBlockCacheSize(readCacheSize);
-
- if (fileRolloverInterval != null)
- configHolder.setWriteOnlyFileRolloverInterval(fileRolloverInterval);
- if (clientConfigFile != null)
- configHolder.setHDFSClientConfigFile(clientConfigFile);
- if (homeDir != null)
- configHolder.setHomeDir(homeDir);
- if (maxWriteonlyFileSize != null)
- configHolder.setWriteOnlyFileRolloverSize(maxWriteonlyFileSize);
- if (namenode != null)
- configHolder.setNameNodeURL(namenode);
-
- if (minorCompact != null)
- configHolder.setMinorCompaction(minorCompact);
- if (majorCompact != null)
- configHolder.setMajorCompaction(majorCompact);
- if (majorCompactionInterval != null)
- configHolder.setMajorCompactionInterval(majorCompactionInterval);
- if (majorCompactionThreads != null)
- configHolder.setMajorCompactionThreads(majorCompactionThreads);
- if (minorCompactionThreads != null)
- configHolder.setMinorCompactionThreads(minorCompactionThreads);
- if (purgeInterval != null)
- configHolder.setPurgeInterval(purgeInterval);
-
- if (batchSize != null)
- configHolder.setBatchSize(batchSize);
- if (batchInterval != null)
- configHolder.setBatchInterval(batchInterval);
- if (diskStoreName != null)
- configHolder.setDiskStoreName(diskStoreName);
- if (syncDiskWrite != null)
- configHolder.setSynchronousDiskWrite(syncDiskWrite);
- if (dispatcherThreads != null)
- configHolder.setDispatcherThreads(dispatcherThreads);
- if (maxMemory != null)
- configHolder.setMaxMemory(maxMemory);
- if (bufferPersistent != null)
- configHolder.setBufferPersistent(bufferPersistent);
-
- ResultCollector<?, ?> resultCollector = getMembersFunctionExecutor(targetMembers)
- .withArgs(configHolder).execute(new CreateHDFSStoreFunction());
-
- List<CliFunctionResult> hdfsStoreCreateResults = CliFunctionResult.cleanResults((List<?>)resultCollector
- .getResult());
-
- TabularResultData tabularResultData = ResultBuilder.createTabularResultData();
-
- Boolean accumulatedData = false;
-
- for (CliFunctionResult hdfsStoreCreateResult : hdfsStoreCreateResults) {
- if (hdfsStoreCreateResult.getThrowable() != null) {
- String memberId = hdfsStoreCreateResult.getMemberIdOrName();
- String errorMsg = hdfsStoreCreateResult.getThrowable().getMessage();
- String errClass = hdfsStoreCreateResult.getThrowable().getClass().getName();
- tabularResultData.accumulate("Member", memberId);
- tabularResultData.accumulate("Result", "ERROR: " + errClass + ": " + errorMsg);
- accumulatedData = true;
- tabularResultData.setStatus(Status.ERROR);
- }
- else if (hdfsStoreCreateResult.isSuccessful()) {
- String memberId = hdfsStoreCreateResult.getMemberIdOrName();
- String successMsg = hdfsStoreCreateResult.getMessage();
- tabularResultData.accumulate("Member", memberId);
- tabularResultData.accumulate("Result", successMsg);
- if (xmlEntity == null) {
- xmlEntity = hdfsStoreCreateResult.getXmlEntity();
- }
- accumulatedData = true;
- }
- }
-
- if (!accumulatedData) {
- return ResultBuilder.createInfoResult("Unable to create hdfs store:" + hdfsUniqueName);
- }
-
- Result result = ResultBuilder.buildResult(tabularResultData);
- if (xmlEntity != null) {
- result.setCommandPersisted((new SharedConfigurationWriter()).addXmlEntity(xmlEntity, groups));
- }
-
- return ResultBuilder.buildResult(tabularResultData);
- }
-
-
- @CliCommand(value = CliStrings.DESCRIBE_HDFS_STORE, help = CliStrings.DESCRIBE_HDFS_STORE__HELP)
- @CliMetaData(shellOnly = false, relatedTopic = { CliStrings.TOPIC_GEMFIRE_HDFSSTORE})
- public Result describeHdfsStore(
- @CliOption(key = CliStrings.DESCRIBE_HDFS_STORE__MEMBER,
- mandatory = true, optionContext = ConverterHint.MEMBERIDNAME,
- help = CliStrings.DESCRIBE_HDFS_STORE__MEMBER__HELP)
- final String memberName,
- @CliOption(key = CliStrings.DESCRIBE_HDFS_STORE__NAME,
- mandatory = true,
- optionContext = ConverterHint.HDFSSTORE_ALL,
- help = CliStrings.DESCRIBE_HDFS_STORE__NAME__HELP)
- final String hdfsStoreName) {
- try{
- return toCompositeResult(getHDFSStoreDescription(memberName , hdfsStoreName));
-
- }catch (HDFSStoreNotFoundException e){
- return ResultBuilder.createShellClientErrorResult(((HDFSStoreNotFoundException)e).getMessage());
- }
- catch (FunctionInvocationTargetException ignore) {
- return ResultBuilder.createGemFireErrorResult(CliStrings.format(CliStrings.COULD_NOT_EXECUTE_COMMAND_TRY_AGAIN,
- CliStrings.DESCRIBE_HDFS_STORE));
-
- } catch (MemberNotFoundException e) {
- return ResultBuilder.createShellClientErrorResult(e.getMessage());
-
- } catch (VirtualMachineError e) {
- SystemFailure.initiateFailure(e);
- throw e;
-
- } catch (Throwable t) {
- SystemFailure.checkFailure();
- return ResultBuilder.createGemFireErrorResult(String.format(CliStrings.DESCRIBE_HDFS_STORE__ERROR_MESSAGE,
- memberName, hdfsStoreName, t));
- }
- }
-
- public HDFSStoreConfigHolder getHDFSStoreDescription(String memberName, String hdfsStoreName) {
-
- final DistributedMember member = getMember(getCache(), memberName);
-
- ResultCollector<?, ?> resultCollector = getMembersFunctionExecutor(Collections.singleton(member))
- .withArgs(hdfsStoreName).execute(new DescribeHDFSStoreFunction());
-
- Object result = ((List<?>)resultCollector.getResult()).get(0);
-
- if (result instanceof HDFSStoreConfigHolder) {
- return (HDFSStoreConfigHolder)result;
- }
- if (result instanceof HDFSStoreNotFoundException) {
- throw (HDFSStoreNotFoundException)result;
- }
- else {
- final Throwable cause = (result instanceof Throwable ? (Throwable)result : null);
- throw new RuntimeException(CliStrings.format(CliStrings.UNEXPECTED_RETURN_TYPE_EXECUTING_COMMAND_ERROR_MESSAGE,
- ClassUtils.getClassName(result), CliStrings.DESCRIBE_HDFS_STORE), cause);
-
- }
- }
-
- public Result toCompositeResult(final HDFSStoreConfigHolder storePrms) {
- final CompositeResultData hdfsStoreCompositeResult = ResultBuilder.createCompositeResultData();
- final CompositeResultData.SectionResultData hdfsStoreSection = hdfsStoreCompositeResult.addSection();
-
- hdfsStoreSection.addData("Hdfs Store Name", storePrms.getName());
- hdfsStoreSection.addData("Name Node URL", storePrms.getNameNodeURL());
- hdfsStoreSection.addData("Home Dir", storePrms.getHomeDir());
- hdfsStoreSection.addData("Block Cache", storePrms.getBlockCacheSize());
- hdfsStoreSection.addData("File RollOver Interval", storePrms.getWriteOnlyFileRolloverInterval());
- hdfsStoreSection.addData("Max WriteOnly File Size", storePrms.getWriteOnlyFileRolloverSize());
-
- hdfsStoreSection.addData("Client Configuration File", storePrms.getHDFSClientConfigFile());
-
- hdfsStoreSection.addData("Disk Store Name", storePrms.getDiskStoreName());
- hdfsStoreSection.addData("Batch Size In MB", storePrms.getBatchSize());
- hdfsStoreSection.addData("Batch Interval Time", storePrms.getBatchInterval());
- hdfsStoreSection.addData("Maximum Memory", storePrms.getMaxMemory());
- hdfsStoreSection.addData("Dispatcher Threads", storePrms.getDispatcherThreads());
- hdfsStoreSection.addData("Buffer Persistence", storePrms.getBufferPersistent());
- hdfsStoreSection.addData("Synchronous Persistence", storePrms.getSynchronousDiskWrite());
-
- hdfsStoreSection.addData("Major Compaction Enabled", storePrms.getMajorCompaction());
- hdfsStoreSection.addData("Major Compaction Threads", storePrms.getMajorCompactionThreads());
- hdfsStoreSection.addData("Major compaction Interval", storePrms.getMajorCompactionInterval());
- hdfsStoreSection.addData("Minor Compaction Enabled", storePrms.getMinorCompaction());
- hdfsStoreSection.addData("Minor Compaction Threads", storePrms.getMinorCompactionThreads());
- hdfsStoreSection.addData("Purge Interval", storePrms.getPurgeInterval());
-
- return ResultBuilder.buildResult(hdfsStoreCompositeResult);
- }
-
- @CliCommand(value = CliStrings.LIST_HDFS_STORE, help = CliStrings.LIST_HDFS_STORE__HELP)
- @CliMetaData(shellOnly = false, relatedTopic = { CliStrings.TOPIC_GEMFIRE_HDFSSTORE })
- public Result listHdfsStore() {
- try {
- Set<DistributedMember> dataMembers = getNormalMembers(getCache());
- if (dataMembers.isEmpty()) {
- return ResultBuilder.createInfoResult(CliStrings.NO_CACHING_MEMBERS_FOUND_MESSAGE);
- }
- return toTabularResult(getHdfsStoreListing(dataMembers));
-
- } catch (FunctionInvocationTargetException ignore) {
- return ResultBuilder.createGemFireErrorResult(
- CliStrings.format(CliStrings.COULD_NOT_EXECUTE_COMMAND_TRY_AGAIN,
- CliStrings.LIST_HDFS_STORE));
-
- } catch (VirtualMachineError e) {
- SystemFailure.initiateFailure(e);
- throw e;
-
- } catch (Throwable t) {
- SystemFailure.checkFailure();
- return ResultBuilder.createGemFireErrorResult(
- String.format(CliStrings.LIST_HDFS_STORE__ERROR_MESSAGE, t.getMessage()));
- }
- }
-
- protected List<HdfsStoreDetails> getHdfsStoreListing(Set<DistributedMember> members) {
-
- final Execution membersFunctionExecutor = getMembersFunctionExecutor(members);
-
- if (membersFunctionExecutor instanceof AbstractExecution) {
- ((AbstractExecution)membersFunctionExecutor).setIgnoreDepartedMembers(true);
- }
-
- final ResultCollector<?, ?> resultCollector = membersFunctionExecutor.execute(new ListHDFSStoresFunction());
- final List<?> results = (List<?>)resultCollector.getResult();
- final List<HdfsStoreDetails> hdfsStoreList = new ArrayList<HdfsStoreDetails>(results.size());
-
- for (final Object result : results) {
- if (result instanceof Set) { // ignore FunctionInvocationTargetExceptions and other Exceptions...
- hdfsStoreList.addAll((Set<HdfsStoreDetails>)result);
- }
- }
-
- Collections.sort(hdfsStoreList, new Comparator<HdfsStoreDetails>() {
- public <T extends Comparable<T>> int compare(final T obj1, final T obj2) {
- return (obj1 == null && obj2 == null ? 0 : (obj1 == null ? 1 : (obj2 == null ? -1 : obj1.compareTo(obj2))));
- }
-
- @Override
- public int compare(HdfsStoreDetails store1, HdfsStoreDetails store2) {
- int comparisonValue = compare(store1.getMemberName(), store2.getMemberName());
- comparisonValue = (comparisonValue != 0 ? comparisonValue : compare(store1.getMemberId(), store2.getMemberId()));
- return (comparisonValue != 0 ? comparisonValue : store1.getStoreName().compareTo(store2.getStoreName()));
- }
- });
-
- return hdfsStoreList;
- }
-
-
- protected Result toTabularResult(final List<HdfsStoreDetails> hdfsStoreList) throws ResultDataException {
- if (!hdfsStoreList.isEmpty()) {
- final TabularResultData hdfsStoreData = ResultBuilder.createTabularResultData();
- for (final HdfsStoreDetails hdfsStoreDetails : hdfsStoreList) {
- hdfsStoreData.accumulate("Member Name", hdfsStoreDetails.getMemberName());
- hdfsStoreData.accumulate("Member Id", hdfsStoreDetails.getMemberId());
- hdfsStoreData.accumulate("Hdfs Store Name", hdfsStoreDetails.getStoreName());
- }
- return ResultBuilder.buildResult(hdfsStoreData);
- }
- else {
- return ResultBuilder.createInfoResult(CliStrings.LIST_HDFS_STORE__HDFS_STORES_NOT_FOUND_MESSAGE);
- }
- }
-
-
- @CliCommand(value=CliStrings.DESTROY_HDFS_STORE, help=CliStrings.DESTROY_HDFS_STORE__HELP)
- @CliMetaData(shellOnly=false, relatedTopic={CliStrings.TOPIC_GEMFIRE_HDFSSTORE}, writesToSharedConfiguration=true)
- public Result destroyHdfsStore(
- @CliOption (key=CliStrings.DESTROY_HDFS_STORE__NAME,
- optionContext=ConverterHint.HDFSSTORE_ALL,
- mandatory=true,
- help=CliStrings.DESTROY_HDFS_STORE__NAME__HELP)
- String hdfsStoreName,
- @CliOption(key=CliStrings.DESTROY_HDFS_STORE__GROUP,
- help=CliStrings.DESTROY_HDFS_STORE__GROUP__HELP,
- optionContext=ConverterHint.MEMBERGROUP)
- @CliMetaData (valueSeparator = ",")
- String[] groups) {
- try {
- return destroyStore(hdfsStoreName,groups);
-
- } catch (FunctionInvocationTargetException ignore) {
- return ResultBuilder.createGemFireErrorResult(CliStrings.format(CliStrings.COULD_NOT_EXECUTE_COMMAND_TRY_AGAIN,
- CliStrings.DESTROY_HDFS_STORE));
-
- } catch (VirtualMachineError e) {
- SystemFailure.initiateFailure(e);
- throw e;
-
- } catch (Throwable th) {
- SystemFailure.checkFailure();
- return ResultBuilder.createGemFireErrorResult(CliStrings.format(
- CliStrings.DESTROY_HDFS_STORE__ERROR_WHILE_DESTROYING_REASON_0, new Object[] { th.getMessage() }));
- }
- }
-
- protected Result destroyStore(String hdfsStoreName, String[] groups) {
- TabularResultData tabularData = ResultBuilder.createTabularResultData();
- boolean accumulatedData = false;
-
- Set<DistributedMember> targetMembers = null;
- try {
- targetMembers = getGroupMembers(groups);
- } catch (CommandResultException cre) {
- return cre.getResult();
- }
-
- ResultCollector<?, ?> rc = getMembersFunctionExecutor(targetMembers)
- .withArgs(hdfsStoreName).execute(new DestroyHDFSStoreFunction());
-
- List<CliFunctionResult> results = CliFunctionResult.cleanResults((List<?>)rc.getResult());
-
- XmlEntity xmlEntity = null;
- for (CliFunctionResult result : results) {
-
- if (result.getThrowable() != null) {
- tabularData.accumulate("Member", result.getMemberIdOrName());
- tabularData.accumulate("Result", "ERROR: " + result.getThrowable().getClass().getName() + ": "
- + result.getThrowable().getMessage());
- accumulatedData = true;
- tabularData.setStatus(Status.ERROR);
- }
- else if (result.getMessage() != null) {
- tabularData.accumulate("Member", result.getMemberIdOrName());
- tabularData.accumulate("Result", result.getMessage());
- accumulatedData = true;
-
- if (xmlEntity == null) {
- xmlEntity = result.getXmlEntity();
- }
- }
- }
-
- if (!accumulatedData) {
- return ResultBuilder.createInfoResult("No matching hdfs stores found.");
- }
-
- Result result = ResultBuilder.buildResult(tabularData);
- if (xmlEntity != null) {
- result.setCommandPersisted((new SharedConfigurationWriter()).deleteXmlEntity(xmlEntity, groups));
- }
-
- return result;
- }
-
- @CliCommand(value=CliStrings.ALTER_HDFS_STORE, help=CliStrings.ALTER_HDFS_STORE__HELP)
- @CliMetaData(shellOnly=false, relatedTopic={CliStrings.TOPIC_GEMFIRE_HDFSSTORE}, writesToSharedConfiguration=true)
- public Result alterHdfsStore(
- @CliOption (key = CliStrings.ALTER_HDFS_STORE__NAME,
- mandatory = true,
- optionContext = ConverterHint.HDFSSTORE_ALL,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.ALTER_HDFS_STORE__NAME__HELP)
- String hdfsUniqueName,
- @CliOption (key = CliStrings.ALTER_HDFS_STORE__BATCHSIZE,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.ALTER_HDFS_STORE__BATCHSIZE__HELP)
- Integer batchSize,
- @CliOption (key = CliStrings.ALTER_HDFS_STORE__BATCHINTERVAL,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.ALTER_HDFS_STORE__BATCHINTERVAL__HELP)
- Integer batchInterval,
- @CliOption (key = CliStrings.ALTER_HDFS_STORE__MINORCOMPACT,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.ALTER_HDFS_STORE__MINORCOMPACT__HELP)
- Boolean minorCompact,
- @CliOption (key = CliStrings.ALTER_HDFS_STORE__MINORCOMPACTIONTHREADS,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.ALTER_HDFS_STORE__MINORCOMPACTIONTHREADS__HELP)
- Integer minorCompactionThreads,
- @CliOption (key = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACT,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACT__HELP)
- Boolean majorCompact,
- @CliOption (key = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTINTERVAL,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTINTERVAL__HELP)
- Integer majorCompactionInterval,
- @CliOption (key = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTIONTHREADS,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTIONTHREADS__HELP)
- Integer majorCompactionThreads,
- @CliOption (key = CliStrings.ALTER_HDFS_STORE__PURGEINTERVAL,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.ALTER_HDFS_STORE__PURGEINTERVAL__HELP)
- Integer purgeInterval,
- @CliOption (key = CliStrings.ALTER_HDFS_STORE__FILEROLLOVERINTERVAL,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.ALTER_HDFS_STORE__FILEROLLOVERINTERVAL__HELP)
- Integer fileRolloverInterval,
- @CliOption (key = CliStrings.ALTER_HDFS_STORE__WRITEONLYFILESIZE,
- mandatory = false,
- unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
- help = CliStrings.ALTER_HDFS_STORE__WRITEONLYFILESIZE__HELP)
- Integer maxWriteonlyFileSize,
- @CliOption(key=CliStrings.ALTER_HDFS_STORE__GROUP,
- help=CliStrings.ALTER_HDFS_STORE__GROUP__HELP,
- optionContext=ConverterHint.MEMBERGROUP)
- @CliMetaData (valueSeparator = ",")
- String[] groups){
- try {
-
- return getAlteredHDFSStore(groups, hdfsUniqueName, batchSize, batchInterval, minorCompact,
- minorCompactionThreads, majorCompact, majorCompactionInterval, majorCompactionThreads, purgeInterval,
- fileRolloverInterval, maxWriteonlyFileSize);
-
- } catch (FunctionInvocationTargetException ignore) {
- return ResultBuilder.createGemFireErrorResult(CliStrings.format(CliStrings.COULD_NOT_EXECUTE_COMMAND_TRY_AGAIN,
- CliStrings.ALTER_HDFS_STORE));
-
- } catch (VirtualMachineError e) {
- SystemFailure.initiateFailure(e);
- throw e;
-
- } catch (Throwable th) {
- SystemFailure.checkFailure();
- return ResultBuilder.createGemFireErrorResult(CliStrings.format(
- CliStrings.ALTER_HDFS_STORE__ERROR_WHILE_ALTERING_REASON_0, new Object[] { th.getMessage() }));
- }
- }
-
-
- protected Result getAlteredHDFSStore(String[] groups, String hdfsUniqueName, Integer batchSize,
- Integer batchInterval, Boolean minorCompact, Integer minorCompactionThreads, Boolean majorCompact,
- Integer majorCompactionInterval, Integer majorCompactionThreads, Integer purgeInterval,
- Integer fileRolloverInterval, Integer maxWriteonlyFileSize) {
-
- Set<DistributedMember> targetMembers = null;
- try {
- targetMembers = getGroupMembers(groups);
- } catch (CommandResultException cre) {
- return cre.getResult();
- }
-
- TabularResultData tabularData = ResultBuilder.createTabularResultData();
-
- AlterHDFSStoreAttributes alterAttributes = new AlterHDFSStoreAttributes(
- hdfsUniqueName, batchSize, batchInterval, minorCompact,
- majorCompact, minorCompactionThreads, majorCompactionInterval,
- majorCompactionThreads, purgeInterval, fileRolloverInterval,
- maxWriteonlyFileSize);
-
- ResultCollector<?, ?> rc = getMembersFunctionExecutor(targetMembers)
- .withArgs(alterAttributes).execute(new AlterHDFSStoreFunction());
-
- List<CliFunctionResult> results = CliFunctionResult.cleanResults((List<?>)rc.getResult());
-
- XmlEntity xmlEntity = null;
-
- for (CliFunctionResult result : results) {
- if (result.getThrowable() != null) {
- tabularData.accumulate("Member", result.getMemberIdOrName());
- tabularData.accumulate("Result", "ERROR: " + result.getThrowable().getClass().getName() + ": "
- + result.getThrowable().getMessage());
- tabularData.setStatus(Status.ERROR);
- }
- else if (result.getMessage() != null) {
- tabularData.accumulate("Member", result.getMemberIdOrName());
- tabularData.accumulate("Result", result.getMessage());
-
- if (xmlEntity == null) {
- xmlEntity = result.getXmlEntity();
- }
- }
- }
-
- Result result = ResultBuilder.buildResult(tabularData);
-
- if (xmlEntity != null) {
- result.setCommandPersisted((new SharedConfigurationWriter()).deleteXmlEntity(xmlEntity, groups));
- }
-
- return result;
- }
-
- @CliAvailabilityIndicator({CliStrings.CREATE_HDFS_STORE, CliStrings.LIST_HDFS_STORE,
- CliStrings.DESCRIBE_HDFS_STORE, CliStrings.ALTER_HDFS_STORE, CliStrings.DESTROY_HDFS_STORE})
- public boolean hdfsStoreCommandsAvailable() {
- // these HDFS store commands are always available in a server VM; in gfsh they require an active connection
- return (!CliUtil.isGfshVM() || (getGfsh() != null && getGfsh().isConnectedAndReady()));
- }
-
- @Override
- protected Set<DistributedMember> getMembers(final Cache cache) {
- return CliUtil.getAllMembers(cache);
- }
-
- protected Set<DistributedMember> getNormalMembers(final Cache cache) {
- return CliUtil.getAllNormalMembers(cache);
- }
-
- protected Set<DistributedMember> getGroupMembers(String[] groups) throws CommandResultException {
- return CliUtil.findAllMatchingMembers(groups, null);
- }
-
-}
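
For context, every command in the file above follows the same fan-out pattern: resolve a set of members, execute a function on them, and fold the per-member CliFunctionResults into a table. A minimal sketch of that pattern, assuming the public FunctionService API of this era (the member selection and the argument wiring here are illustrative, not the command's exact internals):

    import java.util.List;
    import com.gemstone.gemfire.cache.Cache;
    import com.gemstone.gemfire.cache.execute.Execution;
    import com.gemstone.gemfire.cache.execute.FunctionService;
    import com.gemstone.gemfire.cache.execute.ResultCollector;
    import com.gemstone.gemfire.management.internal.cli.functions.DestroyHDFSStoreFunction;

    public class HdfsStoreCommandSketch {
      // Sketch only: fans DestroyHDFSStoreFunction out to all members and
      // collects one result object per member, as destroyStore() does above.
      public static void destroyOnAllMembers(Cache cache, String storeName) {
        Execution execution = FunctionService.onMembers(cache.getDistributedSystem())
            .withArgs(storeName);
        ResultCollector<?, ?> rc = execution.execute(new DestroyHDFSStoreFunction());
        List<?> results = (List<?>) rc.getResult(); // one CliFunctionResult per member
        for (Object result : results) {
          System.out.println(result); // the real command accumulates these into TabularResultData
        }
      }
    }
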
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/converters/HdfsStoreNameConverter.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/converters/HdfsStoreNameConverter.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/converters/HdfsStoreNameConverter.java
deleted file mode 100644
index e595c77..0000000
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/converters/HdfsStoreNameConverter.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * =========================================================================
- * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- * ========================================================================
- */
-package com.gemstone.gemfire.management.internal.cli.converters;
-
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import com.gemstone.gemfire.management.cli.ConverterHint;
-import com.gemstone.gemfire.management.internal.cli.shell.Gfsh;
-
-import org.springframework.shell.core.Completion;
-import org.springframework.shell.core.Converter;
-import org.springframework.shell.core.MethodTarget;
-
-/**
- *
- * @author Namrata Thanvi
- *
- */
-
-public class HdfsStoreNameConverter implements Converter<String> {
-
- @Override
- public boolean supports(Class<?> type, String optionContext) {
- return String.class.equals(type) && ConverterHint.HDFSSTORE_ALL.equals(optionContext);
- }
-
- @Override
- public String convertFromText(String value, Class<?> targetType, String optionContext) {
- return value;
- }
-
- @Override
- public boolean getAllPossibleValues(List<Completion> completions, Class<?> targetType, String existingData,
- String optionContext, MethodTarget target) {
- if (String.class.equals(targetType) && ConverterHint.HDFSSTORE_ALL.equals(optionContext)) {
- Set<String> hdfsStoreNames = getHdfsStoreNames();
-
- for (String hdfsStoreName : hdfsStoreNames) {
- if (existingData == null || hdfsStoreName.startsWith(existingData)) {
- completions.add(new Completion(hdfsStoreName));
- }
- }
- }
-
- return !completions.isEmpty();
- }
-
- private Set<String> getHdfsStoreNames() {
- SortedSet<String> hdfsStoreNames = new TreeSet<String>();
- Gfsh gfsh = Gfsh.getCurrentInstance();
-
- if (gfsh != null && gfsh.isConnectedAndReady()) {
- Map<String, String[]> hdfsStoreInfo = gfsh.getOperationInvoker().getDistributedSystemMXBean()
- .listMemberHDFSStore();
- if (hdfsStoreInfo != null) {
- Set<Entry<String, String[]>> entries = hdfsStoreInfo.entrySet();
-
- for (Entry<String, String[]> entry : entries) {
- String[] value = entry.getValue();
- if (value != null) {
- hdfsStoreNames.addAll(Arrays.asList(value));
- }
- }
-
- }
- }
-
- return hdfsStoreNames;
- }
-
-}
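
For context, the converter above is what backed tab-completion for the --name option of the hdfs-store commands. A rough sketch of how Spring Shell exercises such a converter (standalone; it yields no completions unless a gfsh connection is active, and the "my" prefix is made up):

    package com.gemstone.gemfire.management.internal.cli.converters;

    import java.util.ArrayList;
    import java.util.List;
    import org.springframework.shell.core.Completion;
    import com.gemstone.gemfire.management.cli.ConverterHint;

    public class ConverterSketch {
      public static void main(String[] args) {
        HdfsStoreNameConverter converter = new HdfsStoreNameConverter();
        // The shell calls supports() first, then asks for candidate values.
        List<Completion> completions = new ArrayList<Completion>();
        converter.getAllPossibleValues(completions, String.class, "my",
            ConverterHint.HDFSSTORE_ALL, null);
        for (Completion completion : completions) {
          System.out.println(completion.getValue()); // store names starting with "my"
        }
      }
    }
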
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunction.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunction.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunction.java
deleted file mode 100644
index b5b5341..0000000
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunction.java
+++ /dev/null
@@ -1,228 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.management.internal.cli.functions;
-
-import java.io.Serializable;
-
-import org.apache.logging.log4j.Logger;
-
-import com.gemstone.gemfire.SystemFailure;
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheClosedException;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.execute.FunctionAdapter;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreMutatorImpl;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.InternalEntity;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.InternalCache;
-import com.gemstone.gemfire.internal.cache.xmlcache.CacheXml;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
-
-/**
- * Function used by the 'alter hdfs-store' gfsh command to alter an HDFS store
- * on each member.
- *
- * @author Namrata Thanvi
- */
-
-public class AlterHDFSStoreFunction extends FunctionAdapter implements InternalEntity {
- private static final Logger logger = LogService.getLogger();
-
- private static final String ID = AlterHDFSStoreFunction.class.getName();
-
- private static final long serialVersionUID = 1L;
-
- @Override
- public void execute(FunctionContext context) {
- String memberId = "";
-
- try {
- final AlterHDFSStoreAttributes alterAttributes = (AlterHDFSStoreAttributes)context.getArguments();
- GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
- DistributedMember member = getDistributedMember(cache);
-
- memberId = member.getId();
- // If they set a name use it instead
- if (!member.getName().equals("")) {
- memberId = member.getName();
- }
- HDFSStore hdfsStore = cache.findHDFSStore(alterAttributes.getHdfsUniqueName());
- CliFunctionResult result;
- if (hdfsStore != null) {
- // TODO - Need to verify what all attributes needs to be persisted in
- // cache.xml
- XmlEntity xmlEntity = getXMLEntity(hdfsStore.getName());
- alterHdfsStore(hdfsStore, alterAttributes);
- result = new CliFunctionResult(memberId, xmlEntity, "Success");
- }
- else {
- result = new CliFunctionResult(memberId, false, "Hdfs store not found on this member");
- }
- context.getResultSender().lastResult(result);
-
- } catch (CacheClosedException cce) {
- CliFunctionResult result = new CliFunctionResult(memberId, false, null);
- context.getResultSender().lastResult(result);
-
- } catch (VirtualMachineError e) {
- SystemFailure.initiateFailure(e);
- throw e;
-
- } catch (Throwable th) {
- SystemFailure.checkFailure();
- logger.error("Could not alter hdfs store: {}", th.getMessage(), th);
-
- CliFunctionResult result = new CliFunctionResult(memberId, th, null);
- context.getResultSender().lastResult(result);
- }
-
- }
-
- @Override
- public String getId() {
- return ID;
- }
-
- /**
- * Alters the given HDFSStore with the supplied configuration.
- *
- * @param hdfsStore the store to alter
- * @param alterAttributes the attribute values to apply; null fields are left unchanged
- * @return the altered HDFSStore
- */
-
- protected HDFSStore alterHdfsStore(HDFSStore hdfsStore, AlterHDFSStoreAttributes alterAttributes) {
- HDFSStoreMutator storeMutator = new HDFSStoreMutatorImpl(hdfsStore);
-
- if (alterAttributes.getFileRolloverInterval() != null)
- storeMutator.setWriteOnlyFileRolloverInterval(alterAttributes
- .getFileRolloverInterval());
-
- if (alterAttributes.getMaxWriteonlyFileSize() != null)
- storeMutator.setWriteOnlyFileRolloverSize(alterAttributes.getMaxWriteonlyFileSize());
-
- if (alterAttributes.getMinorCompact() != null)
- storeMutator.setMinorCompaction(alterAttributes.getMinorCompact());
-
- if (alterAttributes.getMajorCompact() != null)
- storeMutator.setMajorCompaction(alterAttributes.getMajorCompact());
-
- if (alterAttributes.getMajorCompactionInterval() != null)
- storeMutator.setMajorCompactionInterval(alterAttributes.getMajorCompactionInterval());
-
- if (alterAttributes.getMajorCompactionThreads() != null)
- storeMutator.setMajorCompactionThreads(alterAttributes.getMajorCompactionThreads());
-
- if (alterAttributes.getMinorCompactionThreads() != null)
- storeMutator.setMinorCompactionThreads(alterAttributes.getMinorCompactionThreads());
-
- if (alterAttributes.getPurgeInterval() != null)
- storeMutator.setPurgeInterval(alterAttributes.getPurgeInterval());
-
- if (alterAttributes.getBatchSize() != null)
- storeMutator.setBatchSize(alterAttributes.getBatchSize());
-
- if (alterAttributes.getBatchInterval() != null)
- storeMutator.setBatchInterval(alterAttributes.getBatchInterval());
-
- hdfsStore.alter(storeMutator);
- return hdfsStore;
- }
-
-
- public static class AlterHDFSStoreAttributes implements Serializable {
- private static final long serialVersionUID = 1L;
- String hdfsUniqueName;
- Integer batchSize, batchInterval;
- Boolean minorCompact, majorCompact;
- Integer minorCompactionThreads, majorCompactionInterval, majorCompactionThreads, purgeInterval;
- Integer fileRolloverInterval, maxWriteonlyFileSize;
-
- public AlterHDFSStoreAttributes(String hdfsUniqueName, Integer batchSize,
- Integer batchInterval, Boolean minorCompact, Boolean majorCompact,
- Integer minorCompactionThreads, Integer majorCompactionInterval,
- Integer majorCompactionThreads, Integer purgeInterval,
- Integer fileRolloverInterval, Integer maxWriteonlyFileSize) {
- this.hdfsUniqueName = hdfsUniqueName;
- this.batchSize = batchSize;
- this.batchInterval = batchInterval;
- this.minorCompact = minorCompact;
- this.majorCompact = majorCompact;
- this.minorCompactionThreads = minorCompactionThreads;
- this.majorCompactionInterval = majorCompactionInterval;
- this.majorCompactionThreads = majorCompactionThreads;
- this.purgeInterval = purgeInterval;
- this.fileRolloverInterval = fileRolloverInterval;
- this.maxWriteonlyFileSize = maxWriteonlyFileSize;
- }
-
- public String getHdfsUniqueName() {
- return hdfsUniqueName;
- }
-
- public Integer getBatchSize() {
- return batchSize;
- }
-
- public Integer getBatchInterval() {
- return batchInterval;
- }
-
- public Boolean getMinorCompact() {
- return minorCompact;
- }
-
- public Boolean getMajorCompact() {
- return majorCompact;
- }
-
- public Integer getMinorCompactionThreads() {
- return minorCompactionThreads;
- }
-
- public Integer getMajorCompactionInterval() {
- return majorCompactionInterval;
- }
-
- public Integer getMajorCompactionThreads() {
- return majorCompactionThreads;
- }
-
- public Integer getPurgeInterval() {
- return purgeInterval;
- }
-
- public Integer getFileRolloverInterval() {
- return fileRolloverInterval;
- }
-
- public Integer getMaxWriteonlyFileSize() {
- return maxWriteonlyFileSize;
- }
-
-
- }
-
-
- protected Cache getCache() {
- return CacheFactory.getAnyInstance();
- }
-
- protected DistributedMember getDistributedMember(Cache cache){
- return ((InternalCache)cache).getMyId();
- }
-
- protected XmlEntity getXMLEntity(String storeName){
- return new XmlEntity(CacheXml.HDFS_STORE, "name", storeName);
- }
-}
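
The null-guarded mutator calls above are what make 'alter hdfs-store' a partial update: only the options the user actually passed reach the store, and everything else keeps its current value. A minimal sketch of the same pattern against the pre-removal API (the store handle and the new value are assumed):

    import com.gemstone.gemfire.cache.hdfs.HDFSStore;
    import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
    import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreMutatorImpl;

    public class AlterSketch {
      // Changes only the batch size; all other attributes are untouched.
      static void setBatchSizeOnly(HDFSStore store, int newBatchSizeMB) {
        HDFSStoreMutator mutator = new HDFSStoreMutatorImpl(store);
        mutator.setBatchSize(newBatchSizeMB);
        store.alter(mutator);
      }
    }
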
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunction.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunction.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunction.java
deleted file mode 100644
index b4e5033..0000000
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunction.java
+++ /dev/null
@@ -1,124 +0,0 @@
-package com.gemstone.gemfire.management.internal.cli.functions;
-
-import org.apache.logging.log4j.Logger;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.SystemFailure;
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheClosedException;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.execute.FunctionAdapter;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.InternalEntity;
-import com.gemstone.gemfire.internal.cache.InternalCache;
-import com.gemstone.gemfire.internal.cache.xmlcache.CacheXml;
-
-import com.gemstone.gemfire.management.internal.cli.CliUtil;
-import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
-
-
-/**
- * Function used by the 'create hdfs-store' gfsh command to create an HDFS store
- * on each member.
- *
- * @author Namrata Thanvi
- */
-
-public class CreateHDFSStoreFunction extends FunctionAdapter implements InternalEntity {
-
- private static final long serialVersionUID = 1L;
-
- private static final Logger logger = LogService.getLogger();
-
- public static final CreateHDFSStoreFunction INSTANCE = new CreateHDFSStoreFunction();
-
- private static final String ID = CreateHDFSStoreFunction.class.getName();
-
- @Override
- public void execute(FunctionContext context) {
- String memberId = "";
- try {
- Cache cache = getCache();
- DistributedMember member = getDistributedMember(cache);
-
- memberId = member.getId();
- if (!member.getName().equals("")) {
- memberId = member.getName();
- }
- HDFSStoreConfigHolder configHolder = (HDFSStoreConfigHolder)context.getArguments();
-
- HDFSStore hdfsStore = createHdfsStore(cache, configHolder);
- // TODO - Need to verify what all attributes needs to be persisted in
- // cache.xml
- XmlEntity xmlEntity = getXMLEntity(hdfsStore.getName());
- context.getResultSender().lastResult(new CliFunctionResult(memberId, xmlEntity, "Success"));
-
- } catch (CacheClosedException cce) {
- context.getResultSender().lastResult(new CliFunctionResult(memberId, false, null));
-
- } catch (VirtualMachineError e) {
- SystemFailure.initiateFailure(e);
- throw e;
-
- } catch (Throwable th) {
- SystemFailure.checkFailure();
- logger.error("Could not create hdfs store: {}", CliUtil.stackTraceAsString(th), th);
- context.getResultSender().lastResult(new CliFunctionResult(memberId, th, th.getMessage()));
- }
- }
-
- @Override
- public String getId() {
- return ID;
- }
-
- /**
- * Creates an HDFSStore with the given configuration.
- *
- * @param cache the cache in which to create the store
- * @param configHolder the configuration for the new store
- * @return the newly created HDFSStore
- */
-
- protected HDFSStore createHdfsStore(Cache cache, HDFSStoreConfigHolder configHolder) {
- HDFSStoreFactory hdfsStoreFactory = cache.createHDFSStoreFactory();
- hdfsStoreFactory.setName(configHolder.getName());
- hdfsStoreFactory.setNameNodeURL(configHolder.getNameNodeURL());
- hdfsStoreFactory.setBlockCacheSize(configHolder.getBlockCacheSize());
- hdfsStoreFactory.setWriteOnlyFileRolloverInterval(configHolder.getWriteOnlyFileRolloverInterval());
- hdfsStoreFactory.setHomeDir(configHolder.getHomeDir());
- hdfsStoreFactory.setHDFSClientConfigFile(configHolder.getHDFSClientConfigFile());
- hdfsStoreFactory.setWriteOnlyFileRolloverSize(configHolder.getWriteOnlyFileRolloverSize());
- hdfsStoreFactory.setMajorCompaction(configHolder.getMajorCompaction());
- hdfsStoreFactory.setMajorCompactionInterval(configHolder.getMajorCompactionInterval());
- hdfsStoreFactory.setMajorCompactionThreads(configHolder.getMajorCompactionThreads());
- hdfsStoreFactory.setMinorCompaction(configHolder.getMinorCompaction());
- hdfsStoreFactory.setMaxMemory(configHolder.getMaxMemory());
- hdfsStoreFactory.setBatchSize(configHolder.getBatchSize());
- hdfsStoreFactory.setBatchInterval(configHolder.getBatchInterval());
- hdfsStoreFactory.setDiskStoreName(configHolder.getDiskStoreName());
- hdfsStoreFactory.setDispatcherThreads(configHolder.getDispatcherThreads());
- hdfsStoreFactory.setMinorCompactionThreads(configHolder.getMinorCompactionThreads());
- hdfsStoreFactory.setPurgeInterval(configHolder.getPurgeInterval());
- hdfsStoreFactory.setSynchronousDiskWrite(configHolder.getSynchronousDiskWrite());
- hdfsStoreFactory.setBufferPersistent(configHolder.getBufferPersistent());
-
- return hdfsStoreFactory.create(configHolder.getName());
- }
-
- protected Cache getCache() {
- return CacheFactory.getAnyInstance();
- }
-
- protected DistributedMember getDistributedMember(Cache cache){
- return ((InternalCache)cache).getMyId();
- }
-
- protected XmlEntity getXMLEntity(String storeName){
- return new XmlEntity(CacheXml.HDFS_STORE, "name", storeName);
- }
-}
-
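
The factory dance above is the whole of store creation: copy each value from the serialized HDFSStoreConfigHolder onto a factory, then create(). A trimmed sketch of the same flow (the store name and NameNode URL are invented; note that a later commit in this same series removes Cache.createHDFSStoreFactory()):

    import com.gemstone.gemfire.cache.Cache;
    import com.gemstone.gemfire.cache.hdfs.HDFSStore;
    import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;

    public class CreateSketch {
      static HDFSStore createStore(Cache cache) {
        HDFSStoreFactory factory = cache.createHDFSStoreFactory();
        factory.setName("sensorStore");                  // invented name
        factory.setNameNodeURL("hdfs://namenode:8020");  // invented cluster URL
        factory.setHomeDir("gemfire/sensorStore");
        return factory.create("sensorStore");
      }
    }
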
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunction.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunction.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunction.java
deleted file mode 100644
index 83f6740..0000000
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunction.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.management.internal.cli.functions;
-
-import org.apache.logging.log4j.Logger;
-
-import com.gemstone.gemfire.SystemFailure;
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheClosedException;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.execute.FunctionAdapter;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.InternalEntity;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.InternalCache;
-import com.gemstone.gemfire.internal.cache.xmlcache.CacheXml;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
-
-/**
- * Function used by the 'destroy hdfs-store' gfsh command to destroy an HDFS
- * store on each member.
- *
- * @author Namrata Thanvi
- */
-
-public class DestroyHDFSStoreFunction extends FunctionAdapter implements InternalEntity {
- private static final Logger logger = LogService.getLogger();
-
- private static final String ID = DestroyHDFSStoreFunction.class.getName();
-
- private static final long serialVersionUID = 1L;
-
- @Override
- public void execute(FunctionContext context) {
- String memberId = "";
- try {
- final String hdfsStoreName = (String)context.getArguments();
- GemFireCacheImpl cache = (GemFireCacheImpl)getCache();
- DistributedMember member = getDistributedMember(cache);
- CliFunctionResult result;
-
- memberId = member.getId();
- if (!member.getName().equals("")) {
- memberId = member.getName();
- }
-
- HDFSStoreImpl hdfsStore = cache.findHDFSStore(hdfsStoreName);
-
- if (hdfsStore != null) {
- hdfsStore.destroy();
- // TODO - Need to verify what all attributes needs to be persisted in cache.xml and how
- XmlEntity xmlEntity = getXMLEntity(hdfsStoreName);
- result = new CliFunctionResult(memberId, xmlEntity, "Success");
- }
- else {
- result = new CliFunctionResult(memberId, false, "Hdfs store not found on this member");
- }
- context.getResultSender().lastResult(result);
-
- } catch (CacheClosedException cce) {
- CliFunctionResult result = new CliFunctionResult(memberId, false, null);
- context.getResultSender().lastResult(result);
-
- } catch (VirtualMachineError e) {
- SystemFailure.initiateFailure(e);
- throw e;
-
- } catch (Throwable th) {
- SystemFailure.checkFailure();
- logger.error("Could not destroy hdfs store: {}", th.getMessage(), th);
- CliFunctionResult result = new CliFunctionResult(memberId, th, null);
- context.getResultSender().lastResult(result);
- }
- }
-
- @Override
- public String getId() {
- return ID;
- }
-
- protected Cache getCache() {
- return CacheFactory.getAnyInstance();
- }
-
- protected DistributedMember getDistributedMember(Cache cache){
- return ((InternalCache)cache).getMyId();
- }
-
- protected XmlEntity getXMLEntity(String storeName){
- return new XmlEntity(CacheXml.HDFS_STORE, "name", storeName);
- }
-}
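
One small idiom recurs in all three functions in this commit: prefer the member's configured name over its generated id when labeling results. A sketch of it pulled out into a helper (the helper itself is hypothetical, not in the source):

    import com.gemstone.gemfire.distributed.DistributedMember;

    public class MemberLabelSketch {
      // getName() returns "" when no member name was configured.
      static String displayLabel(DistributedMember member) {
        String name = member.getName();
        return name.equals("") ? member.getId() : name;
      }
    }
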
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunction.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunction.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunction.java
deleted file mode 100644
index fb947ae..0000000
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunction.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- */
-
-package com.gemstone.gemfire.management.internal.cli.functions;
-
-import java.io.Serializable;
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.logging.log4j.Logger;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.execute.FunctionAdapter;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.InternalEntity;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.InternalCache;
-import com.gemstone.gemfire.internal.logging.LogService;
-
-/**
- * Function used by the 'list hdfs-stores' gfsh command to determine all the
- * HDFS stores that exist for the entire cache, collected from each member
- * across the GemFire distributed system.
- *
- * @author Namrata Thanvi
- */
-
-public class ListHDFSStoresFunction extends FunctionAdapter implements InternalEntity {
-
- private static final long serialVersionUID = 1L;
-
- private static final String ID = ListHDFSStoresFunction.class.getName();
-
- private static final Logger logger = LogService.getLogger();
-
- protected Cache getCache() {
- return CacheFactory.getAnyInstance();
- }
-
- protected DistributedMember getDistributedMemberId(Cache cache){
- return ((InternalCache)cache).getMyId();
- }
-
- public void execute(final FunctionContext context) {
- Set<HdfsStoreDetails> hdfsStores = new HashSet<HdfsStoreDetails>();
- try {
- final Cache cache = getCache();
- if (cache instanceof GemFireCacheImpl) {
- final GemFireCacheImpl gemfireCache = (GemFireCacheImpl)cache;
- final DistributedMember member = getDistributedMemberId(cache);
- for (final HDFSStore store : gemfireCache.getHDFSStores()) {
- hdfsStores.add(new HdfsStoreDetails(store.getName(), member.getId(), member.getName()));
- }
- }
- context.getResultSender().lastResult(hdfsStores);
- } catch (Exception e) {
- context.getResultSender().sendException(e);
- }
- }
-
- @Override
- public String getId() {
- return ID;
- }
-
-
- public static class HdfsStoreDetails implements Serializable {
- private static final long serialVersionUID = 1L;
- private String storeName;
- private String memberId, memberName;
-
- public HdfsStoreDetails(String storeName, String memberId, String memberName) {
- super();
- this.storeName = storeName;
- this.memberId = memberId;
- this.memberName = memberName;
- }
-
- public String getStoreName() {
- return storeName;
- }
-
- public String getMemberId() {
- return memberId;
- }
-
- public String getMemberName() {
- return memberName;
- }
-
-}
-}
-
-
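
Each member replies to ListHDFSStoresFunction with a serializable Set of HdfsStoreDetails; the getHdfsStoreListing() method earlier in this diff flattens and sorts those sets. A toy sketch of the reply shape (store and member names are made up):

    import java.util.HashSet;
    import java.util.Set;
    import com.gemstone.gemfire.management.internal.cli.functions.ListHDFSStoresFunction.HdfsStoreDetails;

    public class ListReplySketch {
      public static void main(String[] args) {
        Set<HdfsStoreDetails> reply = new HashSet<HdfsStoreDetails>();
        reply.add(new HdfsStoreDetails("sensorStore", "host1(server1:1234)", "server1"));
        reply.add(new HdfsStoreDetails("eventStore", "host1(server1:1234)", "server1"));
        System.out.println(reply.size() + " stores reported"); // prints: 2 stores reported
      }
    }
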
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs.java
index bd5e196..f7b2b4d 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs.java
@@ -70,48 +70,9 @@ public class RegionFunctionArgs implements Serializable {
private final boolean isSetCompressor;
private Boolean offHeap;
private final boolean isSetOffHeap;
- private String hdfsStoreName;
- private Boolean isSetHdfsWriteOnly = false;
- private Boolean hdfsWriteOnly;
-
private RegionAttributes<?, ?> regionAttributes;
public RegionFunctionArgs(String regionPath,
- RegionShortcut regionShortcut, String useAttributesFrom,
- boolean skipIfExists, String keyConstraint, String valueConstraint,
- Boolean statisticsEnabled,
- RegionFunctionArgs.ExpirationAttrs entryExpirationIdleTime,
- RegionFunctionArgs.ExpirationAttrs entryExpirationTTL,
- RegionFunctionArgs.ExpirationAttrs regionExpirationIdleTime,
- RegionFunctionArgs.ExpirationAttrs regionExpirationTTL, String diskStore,
- Boolean diskSynchronous, Boolean enableAsyncConflation,
- Boolean enableSubscriptionConflation, String[] cacheListeners,
- String cacheLoader, String cacheWriter, String[] asyncEventQueueIds,
- String[] gatewaySenderIds, Boolean concurrencyChecksEnabled,
- Boolean cloningEnabled, Integer concurrencyLevel, String prColocatedWith,
- Integer prLocalMaxMemory, Long prRecoveryDelay,
- Integer prRedundantCopies, Long prStartupRecoveryDelay,
- Long prTotalMaxMemory, Integer prTotalNumBuckets, Integer evictionMax,
- String compressor, Boolean offHeap , String hdfsStoreName , Boolean hdfsWriteOnly) {
- this(regionPath, regionShortcut, useAttributesFrom, skipIfExists,
- keyConstraint, valueConstraint, statisticsEnabled,
- entryExpirationIdleTime, entryExpirationTTL,
- regionExpirationIdleTime, regionExpirationTTL, diskStore,
- diskSynchronous, enableAsyncConflation,
- enableSubscriptionConflation, cacheListeners, cacheLoader,
- cacheWriter, asyncEventQueueIds, gatewaySenderIds,
- concurrencyChecksEnabled, cloningEnabled, concurrencyLevel,
- prColocatedWith, prLocalMaxMemory, prRecoveryDelay,
- prRedundantCopies, prStartupRecoveryDelay, prTotalMaxMemory,
- prTotalNumBuckets, evictionMax, compressor, offHeap);
- this.isSetHdfsWriteOnly = hdfsWriteOnly != null;
- if (isSetHdfsWriteOnly) {
- this.hdfsWriteOnly = hdfsWriteOnly;
- }
- if (hdfsStoreName != null)
- this.hdfsStoreName = hdfsStoreName;
- }
- public RegionFunctionArgs(String regionPath,
RegionShortcut regionShortcut, String useAttributesFrom,
boolean skipIfExists, String keyConstraint, String valueConstraint,
Boolean statisticsEnabled,
@@ -219,8 +180,7 @@ public class RegionFunctionArgs implements Serializable {
Integer prLocalMaxMemory, Long prRecoveryDelay,
Integer prRedundantCopies, Long prStartupRecoveryDelay,
Long prTotalMaxMemory, Integer prTotalNumBuckets,
- Boolean offHeap, String hdfsStoreName , Boolean hdfsWriteOnly ,
- RegionAttributes<?, ?> regionAttributes) {
+ Boolean offHeap, RegionAttributes<?, ?> regionAttributes) {
this(regionPath, null, useAttributesFrom, skipIfExists, keyConstraint,
valueConstraint, statisticsEnabled, entryExpirationIdleTime,
entryExpirationTTL, regionExpirationIdleTime, regionExpirationTTL,
@@ -230,7 +190,7 @@ public class RegionFunctionArgs implements Serializable {
concurrencyChecksEnabled, cloningEnabled, concurrencyLevel,
prColocatedWith, prLocalMaxMemory, prRecoveryDelay,
prRedundantCopies, prStartupRecoveryDelay,
- prTotalMaxMemory, prTotalNumBuckets, null, null, offHeap , hdfsStoreName , hdfsWriteOnly);
+ prTotalMaxMemory, prTotalNumBuckets, null, null, offHeap);
this.regionAttributes = regionAttributes;
}
@@ -277,28 +237,6 @@ public class RegionFunctionArgs implements Serializable {
}
/**
- * @return the hdfsStoreName
- */
- public String getHDFSStoreName() {
- return this.hdfsStoreName;
- }
-
- /**
- * @return the hdfsWriteOnly
- */
- public Boolean getHDFSWriteOnly() {
- return this.hdfsWriteOnly;
- }
-
- /**
- * @return the isSetHDFSWriteOnly
- */
- public Boolean isSetHDFSWriteOnly() {
- return this.isSetHdfsWriteOnly;
- }
-
-
- /**
* @return the valueConstraint
*/
public String getValueConstraint() {
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/i18n/CliStrings.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/i18n/CliStrings.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/i18n/CliStrings.java
index a4561bf..5ae8e82 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/i18n/CliStrings.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/i18n/CliStrings.java
@@ -11,10 +11,7 @@ package com.gemstone.gemfire.management.internal.cli.i18n;
import java.text.MessageFormat;
import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueueFactory;
import com.gemstone.gemfire.cache.server.CacheServer;
-import com.gemstone.gemfire.cache.wan.GatewayEventFilter;
-import com.gemstone.gemfire.cache.wan.GatewayEventSubstitutionFilter;
import com.gemstone.gemfire.distributed.internal.DistributionConfig;
import com.gemstone.gemfire.distributed.internal.SharedConfiguration;
import com.gemstone.gemfire.internal.cache.xmlcache.CacheXml;
@@ -104,8 +101,6 @@ public class CliStrings {
public static final String TOPIC_SHARED_CONFIGURATION = "Cluster Configuration";
public static final String TOPIC_SHARED_CONFIGURATION_HELP = "Configuration for cluster and various groups. It consists of cache.xml, gemfire properties and deployed jars.\nChanges made via gfsh commands are persisted to the locator hosting the cluster configuration service.";
public static final String TOPIC_CHANGELOGLEVEL = "User can change the log-level for a member at run time and generate log contents as needed";
- public static final String TOPIC_GEMFIRE_HDFSSTORE = "HDFS Store";
- public static final String TOPIC_GEMFIRE_HDFSSTORE__DESC = "HDFS stores are used to persist data to the Hadoop Distributed File System as a backup to your in-memory copy or as overflow storage when eviction criteria are specified.";
/*-*************************************************************************
* ********* String Constants other than command name, options & help ******
@@ -680,114 +675,7 @@ public class CliStrings {
public static final String CREATE_REGION__OFF_HEAP = "off-heap";
public static final String CREATE_REGION__OFF_HEAP__HELP = "Causes the values of the region to be stored in off-heap memory. The default is on heap.";
- public static final String CREATE_REGION__HDFSSTORE_NAME = "hdfs-store";
- public static final String CREATE_REGION__HDFSSTORE_NAME__HELP = "HDFS store to be used by this region. \"list hdfs-stores\" can be used to display existing HDFS stores.";
- public static final String CREATE_REGION__HDFSSTORE_WRITEONLY = "hdfs-write-only";
- public static final String CREATE_REGION__HDFSSTORE_WRITEONLY__HELP = "HDFS write-only mode will be used. All data will be persisted in the HDFS store, and users can access the stored data only through the MapReduce API.";
- /* hdfsstore commands */
- public static final String CREATE_HDFS_STORE ="create hdfs-store";
- public static final String CREATE_HDFS_STORE__HELP = "Create an HDFS store to persist region data on the specified Hadoop cluster.";
- public static final String CREATE_HDFS_STORE__NAME = "name";
- public static final String CREATE_HDFS_STORE__NAME__HELP = "Name of the store.";
- public static final String CREATE_HDFS_STORE__NAMENODE = "namenode";
- public static final String CREATE_HDFS_STORE__NAMENODE__HELP = "The URL of the Hadoop NameNode for your HDFS cluster. HDFSStore persists data on an HDFS cluster identified by the cluster's NameNode URL or NameNode service URL. The NameNode URL can also be provided via hdfs-site.xml.";
- public static final String CREATE_HDFS_STORE__HOMEDIR = "home-dir";
- public static final String CREATE_HDFS_STORE__HOMEDIR__HELP = "The HDFS directory path in which HDFSStore stores files. The value must not contain the NameNode URL.";
- public static final String CREATE_HDFS_STORE__READCACHESIZE = "read-cache-size";
- public static final String CREATE_HDFS_STORE__READCACHESIZE__HELP = "The maximum amount of memory in megabytes used by the HDFSStore read cache.";
- public static final String CREATE_HDFS_STORE__BATCHSIZE = "batch-size";
- public static final String CREATE_HDFS_STORE__BATCHSIZE__HELP = "HDFSStore buffer data is persisted on HDFS in batches; the batch size defines the maximum size (in megabytes) of each batch that is written to HDFS.";
- public static final String CREATE_HDFS_STORE__BATCHINTERVAL = "batch-interval";
- public static final String CREATE_HDFS_STORE__BATCHINTERVAL__HELP = "The maximum time that can elapse between writing batches to HDFS.";
- public static final String CREATE_HDFS_STORE__MAXMEMORY = "max-memory";
- public static final String CREATE_HDFS_STORE__MAXMEMORY__HELP = "The maximum amount of memory in megabytes used by HDFSStore.";
- public static final String CREATE_HDFS_STORE__DISPATCHERTHREADS = "dispatcher-threads";
- public static final String CREATE_HDFS_STORE__DISPATCHERTHREADS__HELP = "The maximum number of threads (per region) used to write batches to HDFS.";
- public static final String CREATE_HDFS_STORE__BUFFERPERSISTENT = "buffer-persistent";
- public static final String CREATE_HDFS_STORE__BUFFERPERSISTENT__HELP = "Configure whether HDFSStore in-memory buffer data that has not yet been persisted to HDFS should be persisted to a local disk to prevent data loss.";
- public static final String CREATE_HDFS_STORE__SYNCDISKWRITE = "synchronous-disk-write";
- public static final String CREATE_HDFS_STORE__SYNCDISKWRITE__HELP = "Enable or disable synchronous writes to the local DiskStore.";
- public static final String CREATE_HDFS_STORE__DISKSTORENAME = "disk-store-name";
- public static final String CREATE_HDFS_STORE__DISKSTORENAME__HELP = "The named DiskStore to use for any local disk persistence needs of HDFSStore.";
- public static final String CREATE_HDFS_STORE__MINORCOMPACT = "minor-compact";
- public static final String CREATE_HDFS_STORE__MINORCOMPACT__HELP = "Minor compaction reorganizes data in files to optimize read performance and reduce the number of files created on HDFS.";
-
- public static final String CREATE_HDFS_STORE__MINORCOMPACTIONTHREADS = "minor-compaction-threads";
- public static final String CREATE_HDFS_STORE__MINORCOMPACTIONTHREADS__HELP = "The maximum number of threads that GemFire uses to perform minor compaction in this HDFS store.";
- public static final String CREATE_HDFS_STORE__MAJORCOMPACT = "major-compact";
- public static final String CREATE_HDFS_STORE__MAJORCOMPACT__HELP = "Major compaction removes old values of a key and deleted records from the HDFS files.";
- public static final String CREATE_HDFS_STORE__MAJORCOMPACTINTERVAL = "major-compaction-interval";
- public static final String CREATE_HDFS_STORE__MAJORCOMPACTINTERVAL__HELP = "The interval between two major compactions.";
- public static final String CREATE_HDFS_STORE__MAJORCOMPACTIONTHREADS = "major-compaction-threads";
- public static final String CREATE_HDFS_STORE__MAJORCOMPACTIONTHREADS__HELP = "The maximum number of threads that GemFire uses to perform major compaction in this HDFS store.";
- public static final String CREATE_HDFS_STORE__PURGEINTERVAL = "purge-interval";
- public static final String CREATE_HDFS_STORE__PURGEINTERVAL__HELP = "PurgeInterval defines the amount of time old files remain available for MapReduce jobs. After this interval has passed, old files are deleted.";
- public static final String CREATE_HDFS_STORE__WRITEONLYFILESIZE = "max-write-only-file-size";
- public static final String CREATE_HDFS_STORE__WRITEONLYFILESIZE__HELP = "For HDFS write-only regions, this defines the maximum size (in megabytes) that an HDFS log file can reach before HDFSStore closes the file and begins writing to a new file.";
- public static final String CREATE_HDFS_STORE__FILEROLLOVERINTERVAL = "write-only-file-rollover-interval";
- public static final String CREATE_HDFS_STORE__FILEROLLOVERINTERVAL__HELP = "For HDFS write-only regions, this defines the maximum time that can elapse before HDFSStore closes an HDFS file and begins writing to a new file.";
- public static final String CREATE_HDFS_STORE__CLIENTCONFIGFILE = "client-config-files";
- public static final String CREATE_HDFS_STORE__CLIENTCONFIGFILE__HELP = "The full path to the HDFS client configuration files that the store uses, e.g. hdfs-site.xml and core-site.xml. These files must be accessible to any node where an instance of this HDFSStore will be created.";
- public static final String CREATE_HDFS_STORE__ERROR_WHILE_CREATING_REASON_0 = "An error occurred while creating the HDFS store: \"{0}\"";
- public static final String CREATE_HDFS_STORE__GROUP = "group";
- public static final String CREATE_HDFS_STORE__GROUP__HELP = "Group(s) of members on which the HDFS store will be created. If no group is specified the HDFS store will be created on all members.";
-
- /*HDFS describe command*/
- public static final String DESCRIBE_HDFS_STORE = "describe hdfs-store";
- public static final String DESCRIBE_HDFS_STORE__HELP = "Display information about an HDFS store.";
- public static final String DESCRIBE_HDFS_STORE__NAME = "name";
- public static final String DESCRIBE_HDFS_STORE__NAME__HELP = "Name of the HDFS store.";
- public static final String DESCRIBE_HDFS_STORE__MEMBER = "member";
- public static final String DESCRIBE_HDFS_STORE__MEMBER__HELP = "Name/Id of the member with the HDFS store to be described.";
- public static final String DESCRIBE_HDFS_STORE__ERROR_MESSAGE = "An error occurred while getting information about the HDFS store: \"{0}\"";
-
- /*HDFS list command*/
- public static final String LIST_HDFS_STORE = "list hdfs-stores";
- public static final String LIST_HDFS_STORE__HELP = "Display HDFS stores for all members.";
- public static final String LIST_HDFS_STORE__NAME__HELP = "Name of the HDFS store.";
- public static final String LIST_HDFS_STORE__ERROR_MESSAGE = "An error occurred while collecting HDFS store information for all members across the GemFire cluster: %1$s";
- public static final String LIST_HDFS_STORE__HDFS_STORES_NOT_FOUND_MESSAGE = "No HDFS Stores Found";
-
-
- /* 'destroy hdfs-store' command */
- public static final String DESTROY_HDFS_STORE = "destroy hdfs-store";
- public static final String DESTROY_HDFS_STORE__HELP = "Destroy an HDFS store.";
- public static final String DESTROY_HDFS_STORE__NAME = "name";
- public static final String DESTROY_HDFS_STORE__NAME__HELP = "Name of the HDFS store that will be destroyed.";
- public static final String DESTROY_HDFS_STORE__GROUP = "group";
- public static final String DESTROY_HDFS_STORE__GROUP__HELP = "Group(s) of members on which the HDFS store will be destroyed. If no group is specified the HDFS store will be destroyed on all members.";
- public static final String DESTROY_HDFS_STORE__ERROR_WHILE_DESTROYING_REASON_0 = "An error occurred while destroying the HDFS store: \"{0}\"";
-
- /* 'alter hdfs-store' command */
- public static final String ALTER_HDFS_STORE = "alter hdfs-store";
- public static final String ALTER_HDFS_STORE__HELP = "Alter an HDFS store.";
- public static final String ALTER_HDFS_STORE__NAME = "name";
- public static final String ALTER_HDFS_STORE__NAME__HELP = "Name of the HDFS store that will be altered.";
- public static final String ALTER_HDFS_STORE__GROUP = "group";
- public static final String ALTER_HDFS_STORE__GROUP__HELP = "Group(s) of members on which the HDFS store will be altered. If no group is specified the HDFS store will be altered on all members.";
- public static final String ALTER_HDFS_STORE__ERROR_WHILE_ALTERING_REASON_0 = "An error occurred while altering the HDFS store: \"{0}\"";
- public static final String ALTER_HDFS_STORE__BATCHSIZE = "batch-size";
- public static final String ALTER_HDFS_STORE__BATCHSIZE__HELP = "HDFSStore buffer data is persisted on HDFS in batches; the batch size defines the maximum size (in megabytes) of each batch that is written to HDFS.";
- public static final String ALTER_HDFS_STORE__BATCHINTERVAL = "batch-interval";
- public static final String ALTER_HDFS_STORE__BATCHINTERVAL__HELP = "The maximum time that can elapse between writing batches to HDFS.";
- public static final String ALTER_HDFS_STORE__MINORCOMPACT = "minor-compact";
- public static final String ALTER_HDFS_STORE__MINORCOMPACT__HELP = "Minor compaction reorganizes data in files to optimize read performance and reduce the number of files created on HDFS.";
- public static final String ALTER_HDFS_STORE__MINORCOMPACTIONTHREADS = "minor-compaction-threads";
- public static final String ALTER_HDFS_STORE__MINORCOMPACTIONTHREADS__HELP = "The maximum number of threads that GemFire uses to perform minor compaction in this HDFS store.";
- public static final String ALTER_HDFS_STORE__MAJORCOMPACT = "major-compact";
- public static final String ALTER_HDFS_STORE__MAJORCOMPACT__HELP = "Major compaction removes old values of a key and deleted records from the HDFS files.";
- public static final String ALTER_HDFS_STORE__MAJORCOMPACTINTERVAL = "major-compaction-interval";
- public static final String ALTER_HDFS_STORE__MAJORCOMPACTINTERVAL__HELP = "The interval between two major compactions.";
- public static final String ALTER_HDFS_STORE__MAJORCOMPACTIONTHREADS = "major-compaction-threads";
- public static final String ALTER_HDFS_STORE__MAJORCOMPACTIONTHREADS__HELP = "The maximum number of threads that GemFire uses to perform major compaction in this HDFS store.";
- public static final String ALTER_HDFS_STORE__PURGEINTERVAL = "purge-interval";
- public static final String ALTER_HDFS_STORE__PURGEINTERVAL__HELP = "PurgeInterval defines the amount of time old files remain available for MapReduce jobs. After this interval has passed, old files are deleted.";
- public static final String ALTER_HDFS_STORE__FILEROLLOVERINTERVAL = "write-only-file-rollover-interval";
- public static final String ALTER_HDFS_STORE__FILEROLLOVERINTERVAL__HELP = "For HDFS write-only regions, this defines the maximum time that can elapse before HDFSStore closes an HDFS file and begins writing to a new file.";
- public static final String ALTER_HDFS_STORE__WRITEONLYFILESIZE = "max-write-only-file-size";
- public static final String ALTER_HDFS_STORE__WRITEONLYFILESIZE__HELP = "For HDFS write-only regions, this defines the maximum size (in megabytes) that an HDFS log file can reach before HDFSStore closes the file and begins writing to a new file.";
-
/* debug command */
public static final String DEBUG = "debug";
public static final String DEBUG__HELP = "Enable/Disable debugging output in GFSH.";
[08/15] incubator-geode git commit: GEODE-429: Remove
Cache.createHdfsStoreFactory method
Posted by as...@apache.org.
GEODE-429: Remove Cache.createHdfsStoreFactory method
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/f2390a1a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/f2390a1a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/f2390a1a
Branch: refs/heads/feature/GEODE-409
Commit: f2390a1ada2acbcabac28dd4226a67f7baf924ae
Parents: 74c3156
Author: Ashvin Agrawal <as...@apache.org>
Authored: Mon Oct 19 15:05:36 2015 -0700
Committer: Ashvin Agrawal <as...@apache.org>
Committed: Wed Oct 21 08:55:23 2015 -0700
----------------------------------------------------------------------
.../gemstone/gemfire/cache/GemFireCache.java | 8 -
.../internal/cache/GemFireCacheImpl.java | 6 -
.../internal/cache/xmlcache/CacheCreation.java | 5 -
.../HDFSRegionMBeanAttributeJUnitTest.java | 169 -------------------
4 files changed, 188 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f2390a1a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java
index d81d25d..b948c5d 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java
@@ -267,12 +267,4 @@ public interface GemFireCache extends RegionService {
* @param name the name of the HDFSStore to find.
*/
public HDFSStore findHDFSStore(String name);
-
- /**
- * Creates a {@link HDFSStoreFactory} for creating a {@link HDFSStore}
- *
- * @return the HDFS store factory
- */
- public HDFSStoreFactory createHDFSStoreFactory();
-
}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f2390a1a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
index 0d4961b..78ea6be 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
@@ -5309,12 +5309,6 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
}
}
- @Override
- public HDFSStoreFactory createHDFSStoreFactory() {
- // TODO Auto-generated method stub
- return new HDFSStoreFactoryImpl(this);
- }
-
public HDFSStoreFactory createHDFSStoreFactory(HDFSStoreCreation creation) {
return new HDFSStoreFactoryImpl(this, creation);
}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f2390a1a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
index 0347d67..e4bea7f 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
@@ -1378,11 +1378,6 @@ public class CacheCreation implements InternalCache, Extensible<Cache> {
}
@Override
- public HDFSStoreFactory createHDFSStoreFactory() {
- // TODO Auto-generated method stub
- return new HDFSStoreFactoryImpl(this);
- }
- @Override
public HDFSStore findHDFSStore(String storeName) {
return (HDFSStore)this.hdfsStores.get(storeName);
}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f2390a1a/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
deleted file mode 100644
index 14b61e6..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.management.bean.stats;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Set;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.Operation;
-import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionFactory;
-import com.gemstone.gemfire.cache.RegionShortcut;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.SortedHDFSQueuePersistedEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer;
-import com.gemstone.gemfire.internal.cache.BucketRegion;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-import com.gemstone.gemfire.internal.cache.execute.BucketMovedException;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.HFileStoreStatistics;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
-import com.gemstone.gemfire.internal.cache.versions.DiskVersionTag;
-import com.gemstone.gemfire.internal.util.BlobHelper;
-import com.gemstone.gemfire.management.ManagementService;
-import com.gemstone.gemfire.management.RegionMXBean;
-import com.gemstone.gemfire.management.internal.ManagementConstants;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-/**
- * Test for verifying HDFS related MBean attributes
- * @author rishim
- *
- */
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSRegionMBeanAttributeJUnitTest extends TestCase {
-
- public static final String HDFS_STORE_NAME = "HDFSMBeanJUnitTestStore";
- public static final String REGION_NAME = "HDFSMBeanJUnitTest_Region";
- protected Path testDataDir;
- protected Cache cache;
-
- protected HDFSStoreFactory hsf;
- protected HDFSStoreImpl hdfsStore;
- protected Region<Object, Object> region;
- SortedOplogStatistics stats;
- HFileStoreStatistics storeStats;
- BlockCache blockCache;
-
- @Override
- protected void setUp() throws Exception {
- super.setUp();
-
- System.setProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP, "true");
- testDataDir = new Path("test-case");
-
- cache = createCache();
-
- configureHdfsStoreFactory();
- hdfsStore = (HDFSStoreImpl) hsf.create(HDFS_STORE_NAME);
-
- RegionFactory<Object, Object> regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION);
-// regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
-
- // regionfactory.setCompressionCodec("Some");
- PartitionAttributesFactory fac = new PartitionAttributesFactory();
- fac.setTotalNumBuckets(10);
-
- regionfactory.setPartitionAttributes(fac.create());
- region = regionfactory.create(REGION_NAME);
-
- }
-
- protected void configureHdfsStoreFactory() throws Exception {
- hsf = this.cache.createHDFSStoreFactory();
- hsf.setHomeDir(testDataDir.toString());
- }
-
- protected Cache createCache() {
- CacheFactory cf = new CacheFactory().set("mcast-port", "0").set("log-level", "info");
- cache = cf.create();
- return cache;
- }
-
- @Override
- protected void tearDown() throws Exception {
- hdfsStore.getFileSystem().delete(testDataDir, true);
- cache.close();
- super.tearDown();
- }
-
- public void testStoreUsageStats() throws Exception {
-
- PartitionedRegion parRegion = (PartitionedRegion)region;
-
-
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- for (int i = 0; i < 100; i++) {
- String key = ("key-" + (i * 100 + i));
- String value = ("value-" + System.nanoTime());
- parRegion.put(key, value);
-
- items.add(new TestEvent(key, value));
- }
-
- // Don't want to create
- Set<BucketRegion> localPrimaryBucketRegions = parRegion.getDataStore().getAllLocalPrimaryBucketRegions();
- BucketRegion flushingBucket= localPrimaryBucketRegions.iterator().next();
- HoplogOrganizer hoplogOrganizer = getOrganizer(parRegion,flushingBucket.getId());
- hoplogOrganizer.flush(items.iterator(), 100);
-
- GemFireCacheImpl cache = GemFireCacheImpl.getExisting();
- ManagementService service = ManagementService.getExistingManagementService(cache);
- RegionMXBean bean = service.getLocalRegionMBean(region.getFullPath());
-
-
- //assertTrue(bean.getEntryCount() == ManagementConstants.ZERO);
- assertTrue(bean.getEntrySize() == ManagementConstants.NOT_AVAILABLE_LONG);
- assertTrue(0 < bean.getDiskUsage());
-
- }
-
-
- private HoplogOrganizer getOrganizer(PartitionedRegion region, int bucketId) {
- BucketRegion br = region.getDataStore().getLocalBucketById(bucketId);
- if (br == null) {
- // got rebalanced or something
- throw new BucketMovedException("Bucket region is no longer available. BucketId: " +
- bucketId + " RegionPath: " + region.getFullPath());
- }
-
- return br.getHoplogOrganizer();
- }
-
-
- public static class TestEvent extends SortedHDFSQueuePersistedEvent implements Serializable {
- private static final long serialVersionUID = 1L;
-
- Object key;
-
- public TestEvent(String k, String v) throws Exception {
- this(k, v, Operation.PUT_IF_ABSENT);
- }
-
- public TestEvent(String k, String v, Operation op) throws Exception {
- super(v, op, (byte) 0x02, false, new DiskVersionTag(), BlobHelper.serializeToBlob(k), 0);
- this.key = k;
- }
- }
-
-
-}
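The deleted test above verified HDFS-related MBean attributes. For reference, a minimal sketch of the MBean lookup pattern it used (every call below appears verbatim in the test; an existing cache and region are assumed):

  GemFireCacheImpl cache = GemFireCacheImpl.getExisting();
  ManagementService service = ManagementService.getExistingManagementService(cache);
  RegionMXBean bean = service.getLocalRegionMBean(region.getFullPath());
  long entrySize = bean.getEntrySize(); // NOT_AVAILABLE_LONG for HDFS regions, per the test
  long diskUsage = bean.getDiskUsage(); // asserted to be greater than zero after a flush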
[13/15] incubator-geode git commit: GEODE-429: Remove HdfsStore Junit and Dunits
Posted by as...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizerJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizerJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizerJUnitTest.java
deleted file mode 100644
index e6a1229..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizerJUnitTest.java
+++ /dev/null
@@ -1,1045 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.TreeSet;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.regex.Matcher;
-
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.SortedHoplogPersistedEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer.HoplogComparator;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer.Compactor;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.TieredCompactionJUnitTest.TestHoplog;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.HoplogUtil;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.TrackedReference;
-import com.gemstone.gemfire.internal.util.BlobHelper;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-import dunit.DistributedTestCase;
-import dunit.DistributedTestCase.ExpectedException;
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HdfsSortedOplogOrganizerJUnitTest extends BaseHoplogTestCase {
- /**
- * Tests flush operation
- */
- public void testFlush() throws Exception {
- int count = 10;
- int bucketId = (int) System.nanoTime();
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
-
- // flush and create hoplog
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- for (int i = 0; i < count; i++) {
- items.add(new TestEvent(("key-" + i), ("value-" + System.nanoTime())));
- }
- organizer.flush(items.iterator(), count);
-
- // check file existence in bucket directory
- FileStatus[] hoplogs = getBucketHoplogs(getName() + "/" + bucketId,
- HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-
- // only one hoplog should exist
- assertEquals(1, hoplogs.length);
-
- assertEquals(count, organizer.sizeEstimate());
- assertEquals(0, stats.getActiveReaderCount());
- }
-
- /**
- * Tests that a hoplog can be reopened and read after its reader has been closed
- */
- public void testReopen() throws Exception {
- int bucketId = (int) System.nanoTime();
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
-
- // flush and create hoplog
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- for (int i = 0; i < 100; i++) {
- items.add(new TestEvent("" + i, ("1-1")));
- }
- organizer.flush(items.iterator(), items.size());
-
- Hoplog hoplog = organizer.getSortedOplogs().iterator().next().get();
- byte[] keyBytes1 = BlobHelper.serializeToBlob("1");
- hoplog.close();
-
- for (int i = 0; i < 10; i++) {
- Path path = new Path(testDataDir, getName() + "/" + bucketId + "/" + hoplog.getFileName());
- HFileSortedOplog oplog = new HFileSortedOplog(hdfsStore, path, blockCache, stats, storeStats);
- oplog.getReader().read(keyBytes1);
- oplog.close(false);
- }
- }
-
- /**
- * Tests reads from a set of hoplogs containing both valid and stale KVs
- */
- public void testRead() throws Exception {
- doRead(regionManager);
- }
-
-// public void testNewReaderWithNameNodeHA() throws Exception {
-// deleteMiniClusterDir();
-// int nn1port = AvailablePortHelper.getRandomAvailableTCPPort();
-// int nn2port = AvailablePortHelper.getRandomAvailableTCPPort();
-//
-// MiniDFSCluster cluster = initMiniHACluster(nn1port, nn2port);
-// initClientHAConf(nn1port, nn2port);
-//
-// HDFSStoreImpl store1 = (HDFSStoreImpl) hsf.create("Store-1");
-// regionfactory.setHDFSStoreName(store1.getName());
-// Region<Object, Object> region1 = regionfactory.create("region-1");
-// HdfsRegionManager regionManager1 = ((LocalRegion)region1).getHdfsRegionManager();
-//
-// HoplogOrganizer<SortedHoplogPersistedEvent> organizer = doRead(regionManager1);
-// organizer.close();
-//
-// dunit.DistributedTestCase.ExpectedException ex = DistributedTestCase.addExpectedException("java.io.EOFException");
-// NameNode nnode2 = cluster.getNameNode(1);
-// assertTrue(nnode2.isStandbyState());
-// cluster.shutdownNameNode(0);
-// cluster.transitionToActive(1);
-// assertFalse(nnode2.isStandbyState());
-//
-// organizer = new HdfsSortedOplogOrganizer(regionManager1, 0);
-// byte[] keyBytes1 = BlobHelper.serializeToBlob("1");
-// byte[] keyBytes3 = BlobHelper.serializeToBlob("3");
-// byte[] keyBytes4 = BlobHelper.serializeToBlob("4");
-// assertEquals("2-1", organizer.read(keyBytes1).getValue());
-// assertEquals("3-3", organizer.read(keyBytes3).getValue());
-// assertEquals("1-4", organizer.read(keyBytes4).getValue());
-// ex.remove();
-//
-// region1.destroyRegion();
-// store1.destroy();
-// cluster.shutdown();
-// FileUtils.deleteDirectory(new File("hdfs-test-cluster"));
-// }
-
-// public void testActiveReaderWithNameNodeHA() throws Exception {
-// deleteMiniClusterDir();
-// int nn1port = AvailablePortHelper.getRandomAvailableTCPPort();
-// int nn2port = AvailablePortHelper.getRandomAvailableTCPPort();
-//
-// MiniDFSCluster cluster = initMiniHACluster(nn1port, nn2port);
-// initClientHAConf(nn1port, nn2port);
-//
-// HDFSStoreImpl store1 = (HDFSStoreImpl) hsf.create("Store-1");
-// regionfactory.setHDFSStoreName(store1.getName());
-// Region<Object, Object> region1 = regionfactory.create("region-1");
-// HdfsRegionManager regionManager1 = ((LocalRegion)region1).getHdfsRegionManager();
-//
-// HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager1, 0);
-// ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-// for (int i = 100000; i < 101000; i++) {
-// items.add(new TestEvent(("" + i), (i + " some string " + i)));
-// }
-// organizer.flush(items.iterator(), items.size());
-// organizer.getSortedOplogs().get(0).get().getReader();
-//
-// dunit.DistributedTestCase.ExpectedException ex = DistributedTestCase.addExpectedException("java.io.EOFException");
-// NameNode nnode2 = cluster.getNameNode(1);
-// assertTrue(nnode2.isStandbyState());
-// cluster.shutdownNameNode(0);
-// cluster.transitionToActive(1);
-// assertFalse(nnode2.isStandbyState());
-//
-// for (int i = 100000; i < 100500; i++) {
-// byte[] keyBytes1 = BlobHelper.serializeToBlob("" + i);
-// assertEquals(i + " some string " + i, organizer.read(keyBytes1).getValue());
-// }
-// ex.remove();
-// region1.destroyRegion();
-// store1.destroy();
-// cluster.shutdown();
-// FileUtils.deleteDirectory(new File("hdfs-test-cluster"));
-// }
-
-// public void testFlushWithNameNodeHA() throws Exception {
-// deleteMiniClusterDir();
-// int nn1port = AvailablePortHelper.getRandomAvailableTCPPort();
-// int nn2port = AvailablePortHelper.getRandomAvailableTCPPort();
-//
-// MiniDFSCluster cluster = initMiniHACluster(nn1port, nn2port);
-//
-// initClientHAConf(nn1port, nn2port);
-// HDFSStoreImpl store1 = (HDFSStoreImpl) hsf.create("Store-1");
-//
-// regionfactory.setHDFSStoreName(store1.getName());
-// Region<Object, Object> region1 = regionfactory.create("region-1");
-// HdfsRegionManager regionManager1 = ((LocalRegion)region1).getHdfsRegionManager();
-//
-// HoplogOrganizer<SortedHoplogPersistedEvent> organizer = new HdfsSortedOplogOrganizer(regionManager1, 0);
-// ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-// items.add(new TestEvent(("1"), ("1-1")));
-// organizer.flush(items.iterator(), items.size());
-//
-// dunit.DistributedTestCase.ExpectedException ex = DistributedTestCase.addExpectedException("java.io.EOFException");
-// NameNode nnode2 = cluster.getNameNode(1);
-// assertTrue(nnode2.isStandbyState());
-// cluster.shutdownNameNode(0);
-// cluster.transitionToActive(1);
-// assertFalse(nnode2.isStandbyState());
-//
-// items.add(new TestEvent(("4"), ("1-4")));
-// organizer.flush(items.iterator(), items.size());
-// byte[] keyBytes1 = BlobHelper.serializeToBlob("1");
-// byte[] keyBytes4 = BlobHelper.serializeToBlob("4");
-// assertEquals("1-1", organizer.read(keyBytes1).getValue());
-// assertEquals("1-4", organizer.read(keyBytes4).getValue());
-// ex.remove();
-//
-// region1.destroyRegion();
-// store1.destroy();
-// cluster.shutdown();
-// FileUtils.deleteDirectory(new File("hdfs-test-cluster"));
-// }
-
- public HoplogOrganizer<SortedHoplogPersistedEvent> doRead(HdfsRegionManager rm) throws Exception {
- HoplogOrganizer<SortedHoplogPersistedEvent> organizer = new HdfsSortedOplogOrganizer(rm, 0);
-
- // flush and create hoplog
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- items.add(new TestEvent(("1"), ("1-1")));
- items.add(new TestEvent(("4"), ("1-4")));
- organizer.flush(items.iterator(), items.size());
-
- items.clear();
- items.add(new TestEvent(("1"), ("2-1")));
- items.add(new TestEvent(("3"), ("2-3")));
- organizer.flush(items.iterator(), items.size());
-
- items.clear();
- items.add(new TestEvent(("3"), ("3-3")));
- items.add(new TestEvent(("5"), ("3-5")));
- organizer.flush(items.iterator(), items.size());
-
- // check file existence in bucket directory
- FileStatus[] hoplogs = getBucketHoplogs(rm.getStore().getFileSystem(),
- rm.getRegionFolder() + "/" + 0,
- HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-
- // expect 3 files for 3 flushes
- assertEquals(3, hoplogs.length);
- byte[] keyBytes1 = BlobHelper.serializeToBlob("1");
- byte[] keyBytes3 = BlobHelper.serializeToBlob("3");
- byte[] keyBytes4 = BlobHelper.serializeToBlob("4");
- // expect key 1 from hoplog 2
- assertEquals("2-1", organizer.read(keyBytes1).getValue());
- // expect key 3 from hoplog 3
- assertEquals("3-3", organizer.read(keyBytes3).getValue());
- // expect key 4 from hoplog 1
- assertEquals("1-4", organizer.read(keyBytes4).getValue());
- return organizer;
- }
-
- /**
- * Tests bucket organizer initialization during startup. Existing hoplogs should be identified
- * and returned
- */
- public void testHoplogIdentification() throws Exception {
- // create one empty file and one directory in the bucket directory
- Path bucketPath = new Path(testDataDir, getName() + "/0");
- FileSystem fs = hdfsStore.getFileSystem();
- fs.createNewFile(new Path(bucketPath, "temp_file"));
- fs.mkdirs(new Path(bucketPath, "temp_dir"));
-
- // create 2 hoplog files each of type flush, minor and major
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
- String[] extensions = { HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION,
- HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION,
- HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION,
- HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION,
- HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION,
- HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION};
- for (String string : extensions) {
- Hoplog oplog = organizer.getTmpSortedOplog(null, string);
- createHoplog(0, oplog);
- organizer.makeLegitimate(oplog);
- }
-
- // create a temp hoplog
- Hoplog oplog = organizer.getTmpSortedOplog(null, HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION);
- createHoplog(0, oplog);
-
- // bucket directory should have 6 hoplogs, 1 temp log, 1 misc file and 1 directory
- FileStatus[] results = fs.listStatus(bucketPath);
- assertEquals(9, results.length);
-
- // only the six hoplogs created above should be identified
- List<Hoplog> list = organizer.identifyAndLoadSortedOplogs(true);
- assertEquals(6, list.size());
- }
-
- public void testExpiryMarkerIdentification() throws Exception {
- // expired hoplogs from the list below should be deleted
- String[] files = {
- "0-1-1231" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION,
- "0-2-1232" + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION,
- "0-3-1233" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
- "0-4-1234" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
- "0-5-1235" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION };
-
- Path bucketPath = new Path(testDataDir, getName() + "/0");
- FileSystem fs = hdfsStore.getFileSystem();
- for (String file : files) {
- Hoplog oplog = new HFileSortedOplog(hdfsStore, new Path(bucketPath, file),
- blockCache, stats, storeStats);
- createHoplog(10, oplog);
- }
-
- String marker1 = "0-4-1234"
- + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION
- + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION;
- fs.createNewFile(new Path(bucketPath, marker1));
- String marker2 = "0-5-1235"
- + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION
- + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION;
- fs.createNewFile(new Path(bucketPath, marker2));
-
- FileStatus[] hoplogs = getBucketHoplogs(getName() + "/0", "");
- assertEquals(7, hoplogs.length);
-
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(
- regionManager, 0);
-
- FileStatus[] markers = organizer.getExpiryMarkers();
- // the two expiry markers created above should be found
- assertEquals(2, markers.length);
- for (FileStatus marker : markers) {
- String name = marker.getPath().getName();
- assertTrue(name.equals(marker1) || name.equals(marker2));
- }
- organizer.close();
- }
-
- public void testExpiredHoplogCleanup() throws Exception {
- // expired hoplogs from the list below should be deleted
- String[] files = {
- "0-1-0000" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION,
- "0-1-1111" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION,
- "0-1-1111" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION
- + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION,
-
- "0-2-0000" + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION,
- "0-2-2222" + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION,
-
- "0-3-0000" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
- "0-3-3333" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
- "0-3-3333" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION
- + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION,
-
- "0-4-4444" + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION };
-
- Path bucketPath = new Path(testDataDir, getName() + "/0");
- FileSystem fs = hdfsStore.getFileSystem();
- for (String file : files) {
- if (file.endsWith(AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION)) {
- fs.createNewFile(new Path(bucketPath, file));
- continue;
- }
- Hoplog oplog = new HFileSortedOplog(hdfsStore, new Path(bucketPath, file),
- blockCache, stats, storeStats);
- createHoplog(10, oplog);
- }
-
- FileStatus[] hoplogs = getBucketHoplogs(getName() + "/0", "");
- assertEquals(9, hoplogs.length);
-
- long target = System.currentTimeMillis();
- TimeUnit.SECONDS.sleep(1);
-
- // except for the minor compacted files, the files created below will not be
- // deleted, as they are created after the target delete time
- files = new String[] {
- "0-4-4444" + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION
- + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION,
-
- "0-5-5555" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION
- + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION,
- "0-5-5555" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
-
- "0-6-6666" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION
- };
- for (String file : files) {
- if (file.endsWith(AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION)) {
- fs.createNewFile(new Path(bucketPath, file));
- continue;
- }
- Hoplog oplog = new HFileSortedOplog(hdfsStore, new Path(bucketPath, file),
- blockCache, stats, storeStats);
- createHoplog(10, oplog);
- }
-
- hoplogs = getBucketHoplogs(getName() + "/0", "");
- assertEquals(13, hoplogs.length);
- int hopSize = 0;
- for (FileStatus file : hoplogs) {
- if(file.getLen() > hopSize) {
- hopSize = (int) file.getLen();
- }
- }
-
- final AtomicInteger behavior = new AtomicInteger(0);
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0) {
- @Override
- protected FileStatus[] getExpiryMarkers() throws IOException {
- if (behavior.get() == 1) {
- ArrayList<FileStatus> markers = new ArrayList<FileStatus>();
- for (FileStatus marker : super.getExpiryMarkers()) {
- markers.add(marker);
- }
- // inject a dummy old expiry marker for major compacted file
- long age = 2 * HDFSStore.DEFAULT_MAJOR_COMPACTION_INTERVAL_MINS * 60 * 1000;
- String markerName = "0-2-2222" + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION + EXPIRED_HOPLOG_EXTENSION;
- FileStatus marker = new FileStatus(0, false, 1, 1024, System.currentTimeMillis() - age, new Path(bucketPath, markerName));
- markers.add(marker);
- return markers.toArray(new FileStatus[markers.size()]);
- }
- return super.getExpiryMarkers();
- }
- };
-
- List<FileStatus> list = organizer.getOptimizationTargets(target);
- assertEquals(6, list.size());
-
- behavior.set(1);
- list = organizer.getOptimizationTargets(target);
- assertEquals(8, list.size());
-
- assertEquals(9 * hopSize, stats.getStoreUsageBytes());
- int count = organizer.deleteExpiredFiles(list);
- assertEquals(8, count);
- assertEquals(5 * hopSize, stats.getStoreUsageBytes());
-
- List<FileStatus> tmp = new ArrayList<FileStatus>(Arrays.asList(hoplogs));
- for (Iterator<FileStatus> iter = tmp.iterator(); iter.hasNext();) {
- hoplogs = getBucketHoplogs(getName() + "/0", "");
- FileStatus file = iter.next();
- for (FileStatus hoplog : hoplogs) {
- if(hoplog.getPath().getName().startsWith("0-5-5555")) {
- fail("this file should have been deleted" + hoplog.getPath().getName());
- }
-
- if (hoplog.getPath().getName().equals(file.getPath().getName())) {
- iter.remove();
- break;
- }
- }
- }
-
- assertEquals(7, tmp.size());
- organizer.close();
- }
-
- public void testAlterPurgeInterval() throws Exception {
- // expired hoplogs from the list below should be deleted
- String[] files = {
- "0-1-0000" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION,
- "0-1-1111" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION,
- "0-2-2222" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION,
- "0-4-4444" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION };
-
- Path bucketPath = new Path(testDataDir, getName() + "/0");
- hdfsStore.getFileSystem();
- for (String file : files) {
- Hoplog oplog = new HFileSortedOplog(hdfsStore, new Path(bucketPath, file),
- blockCache, stats, storeStats);
- createHoplog(10, oplog);
- }
-
- FileStatus[] hoplogs = getBucketHoplogs(getName() + "/0", "");
- int hopSize = 0;
- for (FileStatus file : hoplogs) {
- if(file.getLen() > hopSize) {
- hopSize = (int) file.getLen();
- }
- }
-
- final AtomicInteger behavior = new AtomicInteger(0);
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0) {
- @Override
- protected FileStatus[] getExpiryMarkers() throws IOException {
- if (behavior.get() == 1) {
- ArrayList<FileStatus> markers = new ArrayList<FileStatus>();
- // inject dummy old expiry markers
- long age = 120 * 1000; // 120 seconds old
- String markerName = "0-2-2222" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION + EXPIRED_HOPLOG_EXTENSION;
- FileStatus marker = new FileStatus(0, false, 1, 1024, System.currentTimeMillis() - age, new Path(bucketPath, markerName));
- markers.add(marker);
- markerName = "0-4-4444" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION + EXPIRED_HOPLOG_EXTENSION;
- marker = new FileStatus(0, false, 1, 1024, System.currentTimeMillis() - age, new Path(bucketPath, markerName));
- markers.add(marker);
- return markers.toArray(new FileStatus[markers.size()]);
- }
- return super.getExpiryMarkers();
- }
- };
-
- behavior.set(1);
- int count = organizer.initiateCleanup();
- assertEquals(0, count);
-
- HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
- mutator.setPurgeInterval(1);
- hdfsStore.alter(mutator);
- count = organizer.initiateCleanup();
- assertEquals(4, count);
- }
-
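Worth noting from testAlterPurgeInterval above: the purge interval is alterable on a live store through the mutator API. A minimal sketch of that pattern (all three calls appear in the test; the value 1 is the interval the test sets):

  HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
  mutator.setPurgeInterval(1);  // shrink the purge interval
  hdfsStore.alter(mutator);     // apply the change to the running store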
- public void testInUseExpiredHoplogCleanup() throws Exception {
- Path bucketPath = new Path(testDataDir, getName() + "/0");
- FileSystem fs = hdfsStore.getFileSystem();
-
- String[] files = new String[] {
- "0-1-1231" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION,
- "0-2-1232" + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION,
- "0-3-1233" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
- "0-4-1234" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
- "0-5-1235" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION };
-
- for (String file : files) {
- Hoplog oplog = new HFileSortedOplog(hdfsStore, new Path(bucketPath, file),
- blockCache, stats, storeStats);
- createHoplog(10, oplog);
- }
-
- final HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(
- regionManager, 0);
- List<TrackedReference<Hoplog>> hopRefs = organizer.getSortedOplogs();
- assertEquals(files.length, hopRefs.size());
-
- // this is the expiry marker for one of the files that will be compacted below.
- // While compaction is in progress, file deletion should not happen
- files = new String[] { "0-5-1235"
- + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION
- + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION };
-
- for (String file : files) {
- fs.createNewFile(new Path(bucketPath, file));
- }
- FileStatus[] hoplogs = getBucketHoplogs(getName() + "/0", "");
- assertEquals(hopRefs.size() + files.length, hoplogs.length);
-
- TimeUnit.MILLISECONDS.sleep(200);
- long target = System.currentTimeMillis();
- List<FileStatus> list = organizer.getOptimizationTargets(target);
- assertEquals(2, list.size());
-
- for (TrackedReference<Hoplog> ref : hopRefs) {
- ref.increment("test");
- }
-
- fs.delete(new Path(bucketPath, files[0]), false);
-
- TimeUnit.MILLISECONDS.sleep(50);
- organizer.markSortedOplogForDeletion(hopRefs, false);
-
- list = organizer.getOptimizationTargets(target);
- assertEquals(0, list.size());
- organizer.close();
- }
-
- /**
- * Tests max sequence initialization when files already exist and the server starts
- */
- public void testSeqInitialization() throws Exception {
- // create many hoplogs files
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
- String[] extensions = { HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION,
- HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION,
- HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION,
- HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION,
- HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION};
- for (String string : extensions) {
- Hoplog oplog = organizer.getTmpSortedOplog(null, string);
- createHoplog(1, oplog);
- organizer.makeLegitimate(oplog);
- }
-
- // an organizer should start creating files at sequence 6, as five files already exist
- organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
- Hoplog oplog = organizer.getTmpSortedOplog(null, HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION);
- createHoplog(1, oplog);
- organizer.makeLegitimate(oplog);
- assertEquals(6, HdfsSortedOplogOrganizer.getSequenceNumber(oplog));
- organizer.close();
- }
-
- /**
- * Tests temp file creation and making file legitimate
- */
- public void testMakeLegitimate() throws Exception {
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-
- // create empty tmp hoplog
- Hoplog oplog = organizer.getTmpSortedOplog(null, HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
- createHoplog(0, oplog);
-
- Path hoplogPath = new Path(testDataDir, getName() + "/0/" + oplog.getFileName());
- FileSystem fs = hdfsStore.getFileSystem();
- FileStatus hoplogStatus = fs.getFileStatus(hoplogPath);
- assertNotNull(hoplogStatus);
-
- organizer.makeLegitimate(oplog);
-
- try {
- hoplogStatus = fs.getFileStatus(hoplogPath);
- assertNull(hoplogStatus);
- } catch (FileNotFoundException e) {
- // tmp file is renamed hence should not exist, exception expected
- }
-
- assertTrue(oplog.getFileName().endsWith(HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION));
- hoplogPath = new Path(testDataDir, getName() + "/0/" + oplog.getFileName());
- hoplogStatus = fs.getFileStatus(hoplogPath);
- assertNotNull(hoplogStatus);
- }
-
- /**
- * Tests hoplog file name comparator
- */
- public void testHoplogFileComparator() throws IOException {
- String name1 = "bucket1-10-3.hop";
- String name2 = "bucket1-1-20.hop";
- String name3 = "bucket1-30-201.hop";
- String name4 = "bucket1-100-201.hop";
-
- TreeSet<TrackedReference<Hoplog>> list = new TreeSet<TrackedReference<Hoplog>>(new HoplogComparator());
- // insert soplogs into the list out of the expected order
- hdfsStore.getFileSystem();
- list.add(new TrackedReference<Hoplog>(new HFileSortedOplog(hdfsStore, new Path(testDataDir, name2), blockCache, stats, storeStats)));
- list.add(new TrackedReference<Hoplog>(new HFileSortedOplog(hdfsStore, new Path(testDataDir, name4), blockCache, stats, storeStats)));
- list.add(new TrackedReference<Hoplog>(new HFileSortedOplog(hdfsStore, new Path(testDataDir, name1), blockCache, stats, storeStats)));
- list.add(new TrackedReference<Hoplog>(new HFileSortedOplog(hdfsStore, new Path(testDataDir, name3), blockCache, stats, storeStats)));
-
- Iterator<TrackedReference<Hoplog>> iter = list.iterator();
- assertEquals(name4, iter.next().get().getFileName());
- assertEquals(name3, iter.next().get().getFileName());
- assertEquals(name2, iter.next().get().getFileName());
- assertEquals(name1, iter.next().get().getFileName());
- }
-
- /**
- * Tests clear on a set of hoplogs.
- */
- public void testClear() throws Exception {
- int bucketId = (int) System.nanoTime();
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
-
- // flush and create hoplog
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- items.add(new TestEvent(("1"), ("1-1")));
- items.add(new TestEvent(("4"), ("1-4")));
- organizer.flush(items.iterator(), items.size());
-
- items.clear();
- items.add(new TestEvent(("1"), ("2-1")));
- items.add(new TestEvent(("3"), ("2-3")));
- organizer.flush(items.iterator(), items.size());
-
- items.clear();
- items.add(new TestEvent(("3"), ("3-3")));
- items.add(new TestEvent(("5"), ("3-5")));
- organizer.flush(items.iterator(), items.size());
-
- // check file existence in bucket directory
- FileStatus[] hoplogs = getBucketHoplogs(getName() + "/" + bucketId, HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-
- // expect 3 files for 3 flushes
- assertEquals(3, hoplogs.length);
-
- organizer.clear();
-
- // check that all files are now expired
- hoplogs = getBucketHoplogs(getName() + "/" + bucketId, HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
- FileStatus[] exs = getBucketHoplogs(getName() + "/" + bucketId, HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
- FileStatus[] valids = HdfsSortedOplogOrganizer.filterValidHoplogs(hoplogs, exs);
- assertEquals(Collections.EMPTY_LIST, Arrays.asList(valids));
-
- assertEquals(0, stats.getActiveFileCount());
- assertEquals(0, stats.getInactiveFileCount());
- }
-
- public void testFixedIntervalMajorCompaction() throws Exception {
- final AtomicInteger majorCReqCount = new AtomicInteger(0);
-
- final Compactor compactor = new AbstractCompactor() {
- @Override
- public boolean compact(boolean isMajor, boolean isForced) throws IOException {
- majorCReqCount.incrementAndGet();
- return true;
- }
- };
-
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0) {
- @Override
- public synchronized Compactor getCompactor() {
- return compactor;
- }
- };
-
- regionManager.addOrganizer(0, organizer);
-
- System.setProperty(HoplogConfig.JANITOR_INTERVAL_SECS, "1");
- HDFSRegionDirector.resetJanitor();
-
- alterMajorCompaction(hdfsStore, true);
-
- // create hoplog in the past, 90 seconds before current time
- organizer.hoplogCreated(getName(), 0, new TestHoplog(hdfsStore, 100, System.currentTimeMillis() - 90000));
- TimeUnit.MILLISECONDS.sleep(50);
- organizer.hoplogCreated(getName(), 0, new TestHoplog(hdfsStore, 100, System.currentTimeMillis() - 90000));
-
- List<TrackedReference<Hoplog>> hoplogs = organizer.getSortedOplogs();
- assertEquals(2, hoplogs.size());
-
- for (int i = 0; i < 3; i++) {
- TimeUnit.SECONDS.sleep(1);
- assertEquals(0, majorCReqCount.get());
- }
- HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
- mutator.setMajorCompactionInterval(1);
- hdfsStore.alter(mutator);
- TimeUnit.SECONDS.sleep(5);
- assertTrue(3 < majorCReqCount.get());
- }
-
-
- public void testCorruptHfileBucketFail() throws Exception {
- // create a corrupt file
- FileSystem fs = hdfsStore.getFileSystem();
- for (int i = 0; i < 113; i++) {
- FSDataOutputStream opStream = fs.create(new Path(testDataDir.getName() + "/region-1/" + i + "/1-1-1.hop"));
- opStream.writeBytes("Some random corrupt file");
- opStream.close();
- }
-
- // create region with store
-// regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
- Region<Object, Object> region1 = regionfactory.create("region-1");
- ExpectedException ex = DistributedTestCase.addExpectedException("CorruptHFileException");
- try {
- region1.get("key");
- fail("get should have failed with corrupt file error");
- } catch (HDFSIOException e) {
- // expected
- } finally {
- ex.remove();
- }
-
- region1.destroyRegion();
- }
-
- public void testMaxOpenReaders() throws Exception {
- System.setProperty("hoplog.bucket.max.open.files", "5");
- HoplogOrganizer<? extends PersistedEventImpl> organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- for (int i = 0; i < 10; i++) {
- items.clear();
- items.add(new TestEvent("" + i, "" + i));
- organizer.flush(items.iterator(), items.size());
- }
-
- HdfsSortedOplogOrganizer bucket = (HdfsSortedOplogOrganizer) organizer;
- List<TrackedReference<Hoplog>> hoplogs = bucket.getSortedOplogs();
- int closedCount = 0;
- for (TrackedReference<Hoplog> hoplog : hoplogs) {
- HFileSortedOplog hfile = (HFileSortedOplog) hoplog.get();
- if (hfile.isClosed()) {
- closedCount++;
- }
- }
- assertEquals(10, closedCount);
- assertEquals(10, stats.getActiveFileCount());
- assertEquals(0, stats.getActiveReaderCount());
-
- byte[] keyBytes1 = BlobHelper.serializeToBlob("1");
- organizer.read(keyBytes1).getValue();
-
- closedCount = 0;
- for (TrackedReference<Hoplog> hoplog : hoplogs) {
- HFileSortedOplog hfile = (HFileSortedOplog) hoplog.get();
- if (hfile.isClosed()) {
- closedCount++;
- }
- }
- assertEquals(5, closedCount);
- assertEquals(10, stats.getActiveFileCount());
- assertEquals(0, stats.getInactiveFileCount());
- assertEquals(5, stats.getActiveReaderCount());
-
- organizer.getCompactor().compact(false, false);
- assertEquals(1, stats.getActiveFileCount());
- assertEquals(0, stats.getActiveReaderCount());
- assertEquals(0, stats.getInactiveFileCount());
- }
-
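Also visible in testMaxOpenReaders above: the per-bucket cap on open hoplog readers is driven by a system property. A minimal sketch (the property name is taken verbatim from the test; it must be set before the organizer opens files):

  // cap the number of concurrently open hoplog readers per bucket at 5
  System.setProperty("hoplog.bucket.max.open.files", "5");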
- public void testConcurrentReadInactiveClose() throws Exception {
- final HoplogOrganizer<? extends PersistedEventImpl> organizer = regionManager.create(0);
- alterMinorCompaction(hdfsStore, true);
-
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- for (int i = 0; i < 4; i++) {
- items.clear();
- items.add(new TestEvent("" + i, "" + i));
- organizer.flush(items.iterator(), items.size());
- }
-
- final byte[] keyBytes1 = BlobHelper.serializeToBlob("1");
- class ReadTask implements Runnable {
- public void run() {
- try {
- organizer.read(keyBytes1);
- } catch (IOException e) {
- e.printStackTrace();
- }
- }
- }
- ScheduledExecutorService[] readers = new ScheduledExecutorService[10];
- for (int i = 0; i < readers.length; i++) {
- readers[i] = Executors.newSingleThreadScheduledExecutor();
- readers[i].scheduleWithFixedDelay(new ReadTask(), 0, 1, TimeUnit.MILLISECONDS);
- }
-
- for (int i = 0; i < 100; i++) {
- items.clear();
- items.add(new TestEvent("" + i, "" + i));
- organizer.flush(items.iterator(), items.size());
- }
-
- for (int i = 0; i < readers.length; i++) {
- readers[i].shutdown();
- readers[i].awaitTermination(1, TimeUnit.SECONDS);
- TimeUnit.MILLISECONDS.sleep(50);
- }
-
- for (int i = 0; i < 20; i++) {
- if (stats.getActiveFileCount() < 4) {
- break;
- }
- organizer.getCompactor().compact(false, false);
- }
-
- organizer.performMaintenance();
- TimeUnit.SECONDS.sleep(1);
-
- assertTrue("" + stats.getActiveFileCount(), stats.getActiveFileCount() <= 4);
- assertEquals(stats.getActiveReaderCount(), stats.getActiveReaderCount());
- assertEquals(0, stats.getInactiveFileCount());
- }
-
- public void testEmptyBucketCleanup() throws Exception {
- HdfsSortedOplogOrganizer o = new HdfsSortedOplogOrganizer(regionManager, 0);
- long target = System.currentTimeMillis();
- o.getOptimizationTargets(target);
- // making sure an empty bucket does not cause IO errors; no assertion needed
- // for this test case.
- }
-
- public void testExpiredFilterAtStartup() throws Exception {
- HdfsSortedOplogOrganizer bucket = new HdfsSortedOplogOrganizer(regionManager, 0);
-
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- items.add(new TestEvent(("1"), ("1-1")));
- items.add(new TestEvent(("4"), ("1-4")));
- bucket.flush(items.iterator(), items.size());
-
- items.clear();
- items.add(new TestEvent(("1"), ("2-1")));
- items.add(new TestEvent(("3"), ("2-3")));
- bucket.flush(items.iterator(), items.size());
-
- FileStatus[] files = getBucketHoplogs(getName() + "/" + 0,
- HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
- assertEquals(2, files.length);
-
- files = getBucketHoplogs(getName() + "/" + 0,
- HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
- assertEquals(0, files.length);
-
- HdfsSortedOplogOrganizer bucket2 = new HdfsSortedOplogOrganizer(regionManager, 0);
- List<TrackedReference<Hoplog>> hoplogs = bucket2.getSortedOplogs();
- assertEquals(2, hoplogs.size());
-
- bucket.clear();
-
- files = getBucketHoplogs(getName() + "/" + 0,
- HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
- assertEquals(2, files.length);
-
- files = getBucketHoplogs(getName() + "/" + 0,
- HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
- assertEquals(2, files.length);
-
- bucket2 = new HdfsSortedOplogOrganizer(regionManager, 0);
- hoplogs = bucket2.getSortedOplogs();
- assertEquals(0, hoplogs.size());
-
- items.clear();
- items.add(new TestEvent(("1"), ("2-1")));
- items.add(new TestEvent(("3"), ("2-3")));
- bucket.flush(items.iterator(), items.size());
-
- bucket2 = new HdfsSortedOplogOrganizer(regionManager, 0);
- hoplogs = bucket2.getSortedOplogs();
- assertEquals(1, hoplogs.size());
- bucket.close();
- bucket2.close();
- }
-
- public void testExpireFilterRetartAfterClear() throws Exception {
- HdfsSortedOplogOrganizer bucket = new HdfsSortedOplogOrganizer(regionManager, 0);
-
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- items.add(new TestEvent(("1"), ("1-1")));
- items.add(new TestEvent(("4"), ("1-4")));
- bucket.flush(items.iterator(), items.size());
-
- items.clear();
- items.add(new TestEvent(("1"), ("2-1")));
- items.add(new TestEvent(("3"), ("2-3")));
- bucket.flush(items.iterator(), items.size());
-
- FileStatus[] files = getBucketHoplogs(getName() + "/" + 0,
- HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
- assertEquals(2, files.length);
-
- files = getBucketHoplogs(getName() + "/" + 0,
- HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
- assertEquals(0, files.length);
-
- HdfsSortedOplogOrganizer bucket2 = new HdfsSortedOplogOrganizer(regionManager, 0);
- List<TrackedReference<Hoplog>> hoplogs = bucket2.getSortedOplogs();
- assertEquals(2, hoplogs.size());
-
- bucket.clear();
-
- files = getBucketHoplogs(getName() + "/" + 0,
- HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
- assertEquals(2, files.length);
-
- files = getBucketHoplogs(getName() + "/" + 0,
- HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
- assertEquals(2, files.length);
-
- bucket2 = new HdfsSortedOplogOrganizer(regionManager, 0);
- hoplogs = bucket2.getSortedOplogs();
- assertEquals(0, hoplogs.size());
- bucket.close();
- bucket2.close();
- }
-
- /**
- * Tests that maintenance does not fail even if there are no hoplogs
- */
- public void testNoFileJanitor() throws Exception {
- HoplogOrganizer<? extends PersistedEventImpl> organizer;
- organizer = regionManager.create(0);
- organizer.performMaintenance();
- }
-
- public void testValidHoplogRegex() {
- String[] valid = {"1-1-1.hop", "1-1-1.ihop", "1-1-1.chop"};
- String[] invalid = {"1-1-1.khop", "1-1-1.hop.tmphop", "1-1-1.hop.ehop", "1-1-.hop", "-1-1.hop"};
-
- for (String string : valid) {
- Matcher matcher = HdfsSortedOplogOrganizer.SORTED_HOPLOG_PATTERN.matcher(string);
- assertTrue(matcher.matches());
- }
-
- for (String string : invalid) {
- Matcher matcher = HdfsSortedOplogOrganizer.SORTED_HOPLOG_PATTERN.matcher(string);
- assertFalse(matcher.matches());
- }
- }
-
- public void testOneHoplogMajorCompaction() throws Exception {
- HoplogOrganizer<? extends PersistedEventImpl> organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
- alterMajorCompaction(hdfsStore, true);
-
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- items.add(new TestEvent(("1"), ("1-1")));
- organizer.flush(items.iterator(),items.size());
-
-
- FileStatus[] files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
- assertEquals(1, files.length);
-
- // Minor compaction will not run on a single .hop file
- organizer.getCompactor().compact(false, false);
- files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION);
- assertEquals(0, files.length);
-
- // Major compaction will run on a single .hop file
- organizer.getCompactor().compact(true, false);
- files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION);
- assertEquals(1, files.length);
- String hoplogName = files[0].getPath().getName();
- files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION);
- assertEquals(0, files.length);
-
- organizer.getCompactor().compact(true, false);
- files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION);
- assertEquals(1, files.length);
- assertEquals(hoplogName, files[0].getPath().getName());
-
- // Minor compaction does not convert a major compacted file
- organizer.getCompactor().compact(false, false);
- files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION);
- assertEquals(0, files.length);
-
- files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION);
- assertEquals(1, files.length);
- assertEquals(hoplogName, files[0].getPath().getName());
-
- files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
- assertEquals(1, files.length);
- assertNotSame(hoplogName + HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION, files[0].getPath().getName() );
- }
-
- public void testExposeCleanupInterval() throws Exception {
- FileSystem fs = hdfsStore.getFileSystem();
- Path cleanUpIntervalPath = new Path(hdfsStore.getHomeDir(), HoplogConfig.CLEAN_UP_INTERVAL_FILE_NAME);
- assertTrue(fs.exists(cleanUpIntervalPath));
- long interval = HDFSStore.DEFAULT_OLD_FILE_CLEANUP_INTERVAL_MINS
- *60 * 1000;
- assertEquals(interval, HoplogUtil.readCleanUpIntervalMillis(fs,cleanUpIntervalPath));
- }
-
- @Override
- protected void setUp() throws Exception {
- System.setProperty(HoplogConfig.JANITOR_INTERVAL_SECS, "" + HoplogConfig.JANITOR_INTERVAL_SECS_DEFAULT);
- super.setUp();
- }
-}
-
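One behavior worth preserving from the deleted tests: testHoplogFileComparator expects hoplog names of the form bucket-sequence-timestamp.extension to sort newest-first, by timestamp and then by sequence number, both descending. A standalone sketch of that ordering (this parsing helper is illustrative only, not the deleted HoplogComparator implementation):

  import java.util.Comparator;

  // Orders names like "bucket1-10-3.hop" newest-first, matching the order
  // the deleted test asserts: bucket1-100-201 before bucket1-30-201,
  // before bucket1-1-20, before bucket1-10-3.
  class HoplogNameComparator implements Comparator<String> {
    private long[] seqAndTs(String name) {
      String[] parts = name.substring(0, name.lastIndexOf('.')).split("-");
      return new long[] { Long.parseLong(parts[1]), Long.parseLong(parts[2]) };
    }
    @Override
    public int compare(String a, String b) {
      long[] pa = seqAndTs(a), pb = seqAndTs(b);
      if (pa[1] != pb[1]) return Long.compare(pb[1], pa[1]); // timestamp, descending
      return Long.compare(pb[0], pa[0]);                     // sequence, descending
    }
  }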
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HfileSortedOplogJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HfileSortedOplogJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HfileSortedOplogJUnitTest.java
deleted file mode 100644
index 7420437..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HfileSortedOplogJUnitTest.java
+++ /dev/null
@@ -1,540 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.NoSuchElementException;
-import java.util.TreeMap;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog.HoplogReader;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog.HoplogWriter;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetReader.HoplogIterator;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
-import org.junit.experimental.categories.Category;
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HfileSortedOplogJUnitTest extends BaseHoplogTestCase {
- ArrayList<Object> toBeCleaned = new ArrayList<>();
-
- /**
- * Tests hoplog creation using a writer. If this test fails, all the tests will fail, as hoplog
- * creation is the first step
- */
- public void testHoplogWriter() throws Exception {
- String hoplogName = getRandomHoplogName();
- createHoplog(hoplogName, 1);
- FileStatus hoplogStatus = hdfsStore.getFileSystem().getFileStatus(new Path(testDataDir, hoplogName));
- assertNotNull(hoplogStatus);
- }
-
- /**
- * Tests hoplog deletion.
- */
- public void testDeletion() throws Exception {
- String hoplogName = getRandomHoplogName();
- createHoplog(hoplogName, 1);
- HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
-
- testHoplog.delete();
-
- try {
- FileStatus hoplogStatus = hdfsStore.getFileSystem().getFileStatus(new Path(testDataDir, hoplogName));
- // hoplog should not exist; fail if it does
- assertNull("File deletion failed", hoplogStatus);
- } catch (FileNotFoundException e) {
- // exception expected after deletion
- }
- }
-
- /**
- * Tests hoplog reader creation and key based gets
- */
- public void testHoplogReader() throws Exception {
- String hop1 = getRandomHoplogName();
- Map<String, String> map = createHoplog(hop1, 10);
-
- HFileSortedOplog testHoplog1 = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hop1), blockCache, stats, storeStats);
- HoplogReader reader = testHoplog1.getReader();
- // verify that each entry put in the hoplog is returned by reader
- for (Entry<String, String> entry : map.entrySet()) {
- byte[] value = reader.read(entry.getKey().getBytes());
- assertNotNull(value);
- }
- }
-
- /**
- * Tests full iteration on a hoplog. Ensures all inserted keys are returned and no key is missing
- */
- public void testIterator() throws IOException {
- int count = 10;
- ByteArrayComparator bac = new ByteArrayComparator();
-
- String hoplogName = getRandomHoplogName();
- TreeMap<String, String> sortedMap = createHoplog(hoplogName, count);
-
- HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
- HoplogReader reader = testHoplog.getReader();
-
- Iterator<Entry<String, String>> mapIter = sortedMap.entrySet().iterator();
- HoplogIterator<byte[], byte[]> iter = reader.scan();
- for (; iter.hasNext();) {
- byte[] key = iter.next();
- Entry<String, String> entry = mapIter.next();
- assertEquals(0, bac.compare(key, iter.getKey()));
- assertEquals(0, bac.compare(key, entry.getKey().getBytes()));
- assertEquals(0, bac.compare(iter.getValue(), entry.getValue().getBytes()));
- count--;
- }
- assertEquals(0, count);
- }
-
- /**
- * Tests the hoplog iterator. After returning the first key, hasNext should return false and
- * all subsequent next calls should fail
- */
- public void testSingleKVIterator() throws Exception {
- String hoplogName = getRandomHoplogName();
- TreeMap<String, String> map = createHoplog(hoplogName, 1);
- HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
- HoplogReader reader = testHoplog.getReader();
-
- HoplogIterator<byte[], byte[]> iter = reader.scan();
- assertNull(iter.getKey());
- assertNull(iter.getValue());
- assertTrue(iter.hasNext());
- assertNull(iter.getKey());
- assertNull(iter.getValue());
-
- Entry<String, String> entry = map.firstEntry();
- iter.next();
- assertNotNull(iter.getKey());
- assertEquals(entry.getKey(), new String(iter.getKey()));
- assertNotNull(iter.getValue());
- assertEquals(entry.getValue(), new String(iter.getValue()));
-
- assertFalse(iter.hasNext());
- try {
- iter.next();
- fail();
- } catch (NoSuchElementException e) {
- }
- }
-
- /**
- * Tests iteration on a hoplog with no keys, using a scanner. Scanner should not return any value
- * and hasNext should return false every time
- */
- public void testEmptyFileIterator() throws Exception {
- String hoplogName = getRandomHoplogName();
- createHoplog(hoplogName, 0);
- HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
- HoplogReader reader = testHoplog.getReader();
- HoplogIterator<byte[], byte[]> iter = reader.scan();
- assertNull(iter.getKey());
- assertNull(iter.getValue());
- assertFalse(iter.hasNext());
- assertNull(iter.getKey());
- assertNull(iter.getValue());
- try {
- iter.next();
- fail();
- } catch (NoSuchElementException e) {
- }
- }
-
- /**
- * Tests from exclusive iterator
- */
- public void testFromExclusiveIterator() throws Exception {
- fromIterator(false);
- }
-
- /**
- * Tests from inclusive iterator
- */
- public void testFromInclusiveIterator() throws Exception {
- fromIterator(true);
- }
-
- /**
- * Tests from-condition based iteration. Creates a hoplog with 10 KVs, then creates a scanner
- * starting at a middle key and verifies the count of KVs iterated over
- */
- public void fromIterator(boolean includeFrom) throws Exception {
- int count = 10;
- ByteArrayComparator bac = new ByteArrayComparator();
-
- String hoplogName = getRandomHoplogName();
- // sorted map contains the keys inserted in the hoplog for testing
- TreeMap<String, String> sortedMap = createHoplog(hoplogName, count);
-
- HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
- HoplogReader reader = testHoplog.getReader();
-
- int middleKey = 4;
- // remove top keys from the sorted map as the hoplog scanner should not
- // return those
- Iterator<Entry<String, String>> mapIter = sortedMap.entrySet().iterator();
- for (int i = 0; i < middleKey; i++) {
- mapIter.next();
- count--;
- }
- if (!includeFrom) {
- mapIter.next();
- count--;
- }
-
- // keys are like key-X, for X=0 to X=9. Start the iterator at the fifth key,
- // key-4. If excluding the from key, start at the sixth key, key-5.
- HoplogIterator<byte[], byte[]> iter = reader.scan(("key-" + middleKey).getBytes(), includeFrom,
- null, true);
-
- for (; iter.hasNext();) {
- byte[] key = iter.next();
- Entry<String, String> entry = mapIter.next();
- // make sure the KV returned by iterator match the inserted KV
- assertEquals(0, bac.compare(key, iter.getKey()));
- assertEquals(0, bac.compare(key, entry.getKey().getBytes()));
- assertEquals(0, bac.compare(iter.getValue(), entry.getValue().getBytes()));
- count--;
- }
- assertEquals(0, count);
- }
-
- /**
- * Tests to exclusive iterator
- */
- public void testToExclusiveIterator() throws Exception {
- toIterator(false);
- }
-
- /**
- * Tests to inclusive iterator
- */
- public void testToInclusiveIterator() throws Exception {
- toIterator(true);
- }
-
- /**
- * Tests "to"-condition-based iteration. Creates a hoplog with 10 KVs, creates a scanner
- * ending at a middle key, and verifies the count of KVs iterated over
- */
- public void toIterator(boolean includeTo) throws Exception {
- int count = 10;
- ByteArrayComparator bac = new ByteArrayComparator();
-
- String hoplogName = getRandomHoplogName();
- // sorted map contains the keys inserted in the hoplog for testing
- TreeMap<String, String> sortedMap = createHoplog(hoplogName, count);
- Iterator<Entry<String, String>> mapIter = sortedMap.entrySet().iterator();
-
- HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
- HoplogReader reader = testHoplog.getReader();
-
- int middleKey = 4;
- // keys are like key-X, for X=0 to X=9. End the iterator at the fifth key,
- // key-4. If excluding the to key, end at the fourth key, key-3.
- HoplogIterator<byte[], byte[]> iter = reader.scan(null, true, ("key-" + middleKey).getBytes(), includeTo);
-
- for (; iter.hasNext();) {
- byte[] key = iter.next();
- Entry<String, String> entry = mapIter.next();
- // make sure the KV returned by iterator match the inserted KV
- assertEquals(0, bac.compare(key, iter.getKey()));
- assertEquals(0, bac.compare(key, entry.getKey().getBytes()));
- assertEquals(0, bac.compare(iter.getValue(), entry.getValue().getBytes()));
-
- count --;
- }
-
- if (includeTo) {
- count++;
- }
-
- assertEquals(10, count + middleKey);
- }
-
- /**
- * Tests a scan bounded by both from and to keys; the scanner must return only
- * the keys that fall within the range
- */
- public void testFromToIterator() throws IOException {
- ByteArrayComparator bac = new ByteArrayComparator();
- String hoplogName = getRandomHoplogName();
- HFileSortedOplog hoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
-
- int count = 5;
- HoplogWriter writer = hoplog.createWriter(5);
- for (int i = 0; i < count; i++) {
- String value = "value-" + (i * 2);
- // even keys key-[0 2 4 6 8]
- writer.append(("key-" + (i * 2)).getBytes(), value.getBytes());
- }
- writer.close();
-
- HoplogReader reader = hoplog.getReader();
- HoplogIterator<byte[], byte[]> iter = reader.scan("key-1".getBytes(), true, "key-7".getBytes(), true);
-
- for (int i = 2; i < 7; i += 2) {
- assertTrue(iter.hasNext());
- iter.next();
- assertEquals(0, bac.compare(("key-" + i).getBytes(), iter.getKey()));
- assertEquals(0, bac.compare(("value-" + i).getBytes(), iter.getValue()));
- System.out.println(new String(iter.getKey()));
- }
- assertFalse(iter.hasNext());
- }
-
- /**
- * Tests whether sortedoplog supports duplicate keys, required when conflation is disabled
- */
- public void testDuplicateKeys() throws IOException {
- String hoplogName = getRandomHoplogName();
- HFileSortedOplog hoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
-
- // write duplicate keys
- int count = 2;
- HoplogWriter writer = hoplog.createWriter(2);
- List<String> values = new ArrayList<String>();
- for(int i = 1; i <= count; i++) {
- String value = "value" + i;
- writer.append("key-1".getBytes(), value.getBytes());
- values.add(value);
- }
- writer.close();
-
- HoplogReader reader = hoplog.getReader();
- HoplogIterator<byte[], byte[]> scanner = reader.scan();
- for (byte[] key = null; scanner.hasNext();) {
- key = scanner.next();
- count--;
- assertEquals(0, Bytes.compareTo(key, "key-1".getBytes()));
- values.remove(new String(scanner.getValue()));
- }
- assertEquals(0, count);
- assertEquals(0, values.size());
- }
-
- public void testOffsetBasedScan() throws Exception {
- // Each record is 43 bytes and each block is 256 bytes, so each block will
- // hold 6 records
-
- int blocksize = 1 << 8;
- System.setProperty(HoplogConfig.HFILE_BLOCK_SIZE_CONF,
- String.valueOf(blocksize));
-
- int count = 50;
- String hoplogName = getRandomHoplogName();
- createHoplog(hoplogName, count);
-
- HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(
- testDataDir, hoplogName), blockCache, stats, storeStats);
-
- HoplogReader reader = testHoplog.getReader();
-
- HoplogIterator<byte[], byte[]> scanner = reader.scan(blocksize * 1, blocksize * 2);
- int range1Count = 0;
- String range1EndKey = null;
- for (byte[] key = null; scanner.hasNext();) {
- key = scanner.next();
- range1Count++;
- range1EndKey = new String(key);
- }
- int range1EndKeyNum = Integer.valueOf(range1EndKey.substring("Key-".length()));
-
- scanner = reader.scan(blocksize * 2, blocksize * 1);
- int range2Count = 0;
- String range2EndKey = null;
- for (byte[] key = null; scanner.hasNext();) {
- key = scanner.next();
- range2Count++;
- range2EndKey = new String(key);
- }
-
- assertEquals(range2EndKey, range1EndKey);
- assertEquals(2, range1Count/range2Count);
-
- scanner = reader.scan(blocksize * 3, blocksize * 1);
- String range3FirstKey = new String(scanner.next());
-
- int range3FirstKeyNum = Integer.valueOf(range3FirstKey.substring("Key-"
- .length()));
-
- // range 3 starts at the end of range 1, so the two keys must be consecutive
- assertEquals(range1EndKeyNum + 1, range3FirstKeyNum);
-
- testHoplog.close();
- }
-
- public void testOffsetScanBeyondFileSize() throws Exception {
- // Each record is 43 bytes and each block is 256 bytes, so each block will
- // hold 6 records
-
- int blocksize = 1 << 8;
- System.setProperty(HoplogConfig.HFILE_BLOCK_SIZE_CONF,
- String.valueOf(blocksize));
-
- int count = 20;
- String hoplogName = getRandomHoplogName();
- createHoplog(hoplogName, count);
-
- HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(
- testDataDir, hoplogName), blockCache, stats, storeStats);
-
- HoplogReader reader = testHoplog.getReader();
-
- HoplogIterator<byte[], byte[]> scanner = reader.scan(blocksize * 5, blocksize * 2);
- assertFalse(scanner.hasNext());
-
- testHoplog.close();
- }
-
- public void testZeroValueOffsetScan() throws Exception {
- // Each record is 43 bytes and each block is 256 bytes, so each block will
- // hold 6 records
-
- int blocksize = 1 << 8;
- System.setProperty(HoplogConfig.HFILE_BLOCK_SIZE_CONF,
- String.valueOf(blocksize));
-
- int count = 20;
- String hoplogName = getRandomHoplogName();
- createHoplog(hoplogName, count);
-
- HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(
- testDataDir, hoplogName), blockCache, stats, storeStats);
-
- HoplogReader reader = testHoplog.getReader();
-
- HoplogIterator<byte[], byte[]> scanner = reader.scan(0, blocksize * 2);
- assertTrue(scanner.hasNext());
- int keyNum = Integer.valueOf(new String(scanner.next()).substring("Key-"
- .length()));
- assertEquals(100000, keyNum);
-
- testHoplog.close();
- }
-
- /*
- * Tests that the reader still reads data successfully even if the FS client is
- * recycled without this reader knowing
- */
- public void testReaderDetectAndUseRecycledFs() throws Exception {
- HDFSStoreFactoryImpl storeFactory = getCloseableLocalHdfsStoreFactory();
- HDFSStoreImpl store = (HDFSStoreImpl) storeFactory.create("Store-1");
- toBeCleaned.add(store);
-
- HFileSortedOplog hop = new HFileSortedOplog(store, new Path(getName() + "-1-1.hop"), blockCache, stats, storeStats);
- toBeCleaned.add(hop);
- TreeMap<String, String> map = createHoplog(10, hop);
-
- HoplogReader reader = hop.getReader();
- // verify that each entry put in the hoplog is returned by reader
- for (Entry<String, String> entry : map.entrySet()) {
- byte[] value = reader.read(entry.getKey().getBytes());
- assertNotNull(value);
- }
-
- cache.getLogger().info("<ExpectedException action=add>java.io.IOException</ExpectedException>");
- try {
- store.getFileSystem().close();
- store.checkAndClearFileSystem();
-
- for (Entry<String, String> entry : map.entrySet()) {
- reader = hop.getReader();
- byte[] value = reader.read(entry.getKey().getBytes());
- assertNotNull(value);
- }
- } finally {
- cache.getLogger().info("<ExpectedException action=remove>java.io.IOException</ExpectedException>");
- }
- }
-
- public void testNewScannerDetectAndUseRecycledFs() throws Exception {
- HDFSStoreFactoryImpl storeFactory = getCloseableLocalHdfsStoreFactory();
- HDFSStoreImpl store = (HDFSStoreImpl) storeFactory.create("Store-1");
- toBeCleaned.add(store);
-
- HFileSortedOplog hop = new HFileSortedOplog(store, new Path(getName() + "-1-1.hop"), blockCache, stats, storeStats);
- createHoplog(10, hop);
-
- HoplogIterator<byte[], byte[]> scanner = hop.getReader().scan();
- // verify that each entry put in the hoplog is returned by reader
- int i = 0;
- while (scanner.hasNext()) {
- byte[] key = scanner.next();
- assertNotNull(key);
- i++;
- }
- assertEquals(10, i);
- // flush block cache
- hop.close(true);
- hop.delete();
-
- hop = new HFileSortedOplog(store, new Path(getName()+"-1-1.hop"), blockCache, stats, storeStats);
- createHoplog(10, hop);
- toBeCleaned.add(hop);
- hop.getReader();
-
- cache.getLogger().info("<ExpectedException action=add>java.io.IOException</ExpectedException>");
- try {
- store.getFileSystem().close();
- store.checkAndClearFileSystem();
-
- scanner = hop.getReader().scan();
- // verify that each entry put in the hoplog is returned by reader
- i = 0;
- while (scanner.hasNext()) {
- byte[] key = scanner.next();
- assertNotNull(key);
- i++;
- }
- assertEquals(10, i);
- } finally {
- cache.getLogger().info("<ExpectedException action=remove>java.io.IOException</ExpectedException>");
- }
- }
-
- @Override
- protected void tearDown() throws Exception {
- for (Object obj : toBeCleaned) {
- try {
- if (HDFSStoreImpl.class.isInstance(obj)) {
- ((HDFSStoreImpl) obj).clearFolder();
- } else if (AbstractHoplog.class.isInstance(obj)) {
- ((AbstractHoplog) obj).close();
- ((AbstractHoplog) obj).delete();
- }
- } catch (Exception e) {
- System.out.println(e);
- }
- }
- super.tearDown();
- }
-
- private TreeMap<String, String> createHoplog(String hoplogName, int numKeys) throws IOException {
- HFileSortedOplog hoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
- TreeMap<String, String> map = createHoplog(numKeys, hoplog);
- return map;
- }
-}
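
The bounded-scan tests above all reduce to one counting question: how many keys
survive an inclusive or exclusive from/to bound. Below is a minimal stand-alone
sketch of that arithmetic, using a plain java.util.TreeMap in place of a hoplog
reader; the key layout key-0 .. key-9 matches the tests, but everything else
here is illustrative, not Geode API:

    import java.util.NavigableMap;
    import java.util.TreeMap;

    public class BoundedScanSketch {
        public static void main(String[] args) {
            // ten keys, key-0 .. key-9, as written by createHoplog(name, 10)
            NavigableMap<String, String> map = new TreeMap<>();
            for (int i = 0; i < 10; i++) {
                map.put("key-" + i, "value-" + i);
            }
            // from = key-4: 6 keys when inclusive, 5 when exclusive
            System.out.println(map.tailMap("key-4", true).size());  // 6
            System.out.println(map.tailMap("key-4", false).size()); // 5
            // to = key-4: 5 keys when inclusive, 4 when exclusive
            System.out.println(map.headMap("key-4", true).size());  // 5
            System.out.println(map.headMap("key-4", false).size()); // 4
        }
    }

These are the same counts the fromIterator and toIterator assertions check.
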
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/SortedOplogListIterJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/SortedOplogListIterJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/SortedOplogListIterJUnitTest.java
deleted file mode 100644
index 13aa6a9..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/SortedOplogListIterJUnitTest.java
+++ /dev/null
@@ -1,178 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.SortedHoplogPersistedEvent;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.TrackedReference;
-import com.gemstone.gemfire.internal.util.BlobHelper;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class SortedOplogListIterJUnitTest extends BaseHoplogTestCase {
- public void testOneIterOneKey() throws Exception {
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- items.add(new TestEvent(("0"), ("0")));
- organizer.flush(items.iterator(), items.size());
-
- List<TrackedReference<Hoplog>> oplogs = organizer.getSortedOplogs();
- HoplogSetIterator iter = new HoplogSetIterator(oplogs);
- assertTrue(iter.hasNext());
- int count = 0;
- for (ByteBuffer keyBB = null; iter.hasNext();) {
- keyBB = iter.next();
- byte[] key = HFileSortedOplog.byteBufferToArray(keyBB);
- assertEquals(String.valueOf(count), BlobHelper.deserializeBlob(key));
- count++;
- }
- assertEquals(1, count);
- organizer.close();
- }
-
- public void testOneIterDuplicateKey() throws Exception {
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- items.add(new TestEvent(("0"), ("V2")));
- items.add(new TestEvent(("0"), ("V1")));
- items.add(new TestEvent(("1"), ("V2")));
- items.add(new TestEvent(("1"), ("V1")));
- organizer.flush(items.iterator(), items.size());
-
- List<TrackedReference<Hoplog>> oplogs = organizer.getSortedOplogs();
- HoplogSetIterator iter = new HoplogSetIterator(oplogs);
- assertTrue(iter.hasNext());
- int count = 0;
- for (ByteBuffer keyBB = null; iter.hasNext();) {
- keyBB = iter.next();
- byte[] key = HFileSortedOplog.byteBufferToArray(keyBB);
- byte[] value = HFileSortedOplog.byteBufferToArray(iter.getValue());
- assertEquals(String.valueOf(count), BlobHelper.deserializeBlob(key));
- assertEquals("V2", ((PersistedEventImpl) SortedHoplogPersistedEvent.fromBytes(value)).getValue());
- count++;
- }
- assertEquals(2, count);
- organizer.close();
- }
-
- public void testTwoIterSameKey() throws Exception {
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- items.add(new TestEvent(("0"), ("V1")));
- organizer.flush(items.iterator(), items.size());
- items.clear();
- items.add(new TestEvent(("0"), ("V2")));
- organizer.flush(items.iterator(), items.size());
-
- List<TrackedReference<Hoplog>> oplogs = organizer.getSortedOplogs();
- HoplogSetIterator iter = new HoplogSetIterator(oplogs);
- assertTrue(iter.hasNext());
- int count = 0;
- for (ByteBuffer keyBB = null; iter.hasNext();) {
- keyBB = iter.next();
- byte[] key = HFileSortedOplog.byteBufferToArray(keyBB);
- byte[] value = HFileSortedOplog.byteBufferToArray(iter.getValue());
- assertEquals(String.valueOf(count), BlobHelper.deserializeBlob(key));
- assertEquals("V2", ((PersistedEventImpl) SortedHoplogPersistedEvent.fromBytes(value)).getValue());
- count++;
- }
- assertEquals(1, count);
- organizer.close();
- }
-
- public void testTwoIterDiffKey() throws Exception {
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- items.add(new TestEvent(("0"), ("V1")));
- organizer.flush(items.iterator(), items.size());
- items.clear();
- items.add(new TestEvent(("1"), ("V1")));
- organizer.flush(items.iterator(), items.size());
-
- List<TrackedReference<Hoplog>> oplogs = organizer.getSortedOplogs();
- HoplogSetIterator iter = new HoplogSetIterator(oplogs);
- assertTrue(iter.hasNext());
- int count = 0;
- for (ByteBuffer keyBB = null; iter.hasNext();) {
- keyBB = iter.next();
- byte[] key = HFileSortedOplog.byteBufferToArray(keyBB);
- byte[] value = HFileSortedOplog.byteBufferToArray(iter.getValue());
- assertEquals(String.valueOf(count), BlobHelper.deserializeBlob(key));
- assertEquals("V1", ((PersistedEventImpl) SortedHoplogPersistedEvent.fromBytes(value)).getValue());
- count++;
- }
- assertEquals(2, count);
- organizer.close();
- }
-
- public void testMergedIterator() throws Exception {
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-
- // #1
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- items.add(new TestEvent(("1"), ("1")));
- items.add(new TestEvent(("2"), ("1")));
- items.add(new TestEvent(("3"), ("1")));
- items.add(new TestEvent(("4"), ("1")));
- organizer.flush(items.iterator(), items.size());
-
- // #2
- items.clear();
- items.add(new TestEvent(("2"), ("1")));
- items.add(new TestEvent(("4"), ("1")));
- items.add(new TestEvent(("6"), ("1")));
- items.add(new TestEvent(("8"), ("1")));
- organizer.flush(items.iterator(), items.size());
-
- // #3
- items.clear();
- items.add(new TestEvent(("1"), ("1")));
- items.add(new TestEvent(("3"), ("1")));
- items.add(new TestEvent(("5"), ("1")));
- items.add(new TestEvent(("7"), ("1")));
- items.add(new TestEvent(("9"), ("1")));
- organizer.flush(items.iterator(), items.size());
-
- // #4
- items.clear();
- items.add(new TestEvent(("0"), ("1")));
- items.add(new TestEvent(("1"), ("1")));
- items.add(new TestEvent(("4"), ("1")));
- items.add(new TestEvent(("5"), ("1")));
- organizer.flush(items.iterator(), items.size());
-
- List<TrackedReference<Hoplog>> oplogs = organizer.getSortedOplogs();
- HoplogSetIterator iter = new HoplogSetIterator(oplogs);
- // the iteration pattern for this test should be 0-9:
- // 0 1 4 5 oplog #4
- // 1 3 5 7 9 oplog #3
- // 2 4 6 8 oplog #2
- // 1 2 3 4 oplog #1
- int count = 0;
- for (ByteBuffer keyBB = null; iter.hasNext();) {
- keyBB = iter.next();
- byte[] key = HFileSortedOplog.byteBufferToArray(keyBB);
- assertEquals(String.valueOf(count), BlobHelper.deserializeBlob(key));
- count++;
- }
- assertEquals(10, count);
- organizer.close();
- }
-}
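
The behavior these tests pin down for HoplogSetIterator is that each key
surfaces exactly once, with the value from the newest oplog winning on a
duplicate. A minimal sketch of that rule, assuming the input list is ordered
newest-first (an illustration only; the real iterator streams a k-way merge
over sorted files rather than materializing maps):

    import java.util.List;
    import java.util.Map;
    import java.util.SortedMap;
    import java.util.TreeMap;

    public class MergeSketch {
        // oplogs.get(0) is the newest; iterate oldest-first so newer
        // entries overwrite older ones, leaving one value per key
        static SortedMap<String, String> merge(List<? extends Map<String, String>> oplogs) {
            SortedMap<String, String> merged = new TreeMap<>();
            for (int i = oplogs.size() - 1; i >= 0; i--) {
                merged.putAll(oplogs.get(i));
            }
            return merged;
        }

        public static void main(String[] args) {
            Map<String, String> newer = Map.of("0", "V2");
            Map<String, String> older = Map.of("0", "V1", "1", "V1");
            // prints {0=V2, 1=V1}: one entry per key, newest value kept
            System.out.println(merge(List.of(newer, older)));
        }
    }
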
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/TieredCompactionJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/TieredCompactionJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/TieredCompactionJUnitTest.java
deleted file mode 100644
index 7b45952..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/TieredCompactionJUnitTest.java
+++ /dev/null
@@ -1,904 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Operation;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.QueuedPersistentEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HdfsSortedOplogOrganizer.HoplogCompactor;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer.Compactor;
-import com.gemstone.gemfire.internal.cache.ForceReattemptException;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.TrackedReference;
-import com.gemstone.gemfire.internal.util.BlobHelper;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class TieredCompactionJUnitTest extends BaseHoplogTestCase {
- static long ONE_MB = 1024 * 1024;
- static long TEN_MB = 10 * ONE_MB;
-
- @Override
- protected void configureHdfsStoreFactory() throws Exception {
- super.configureHdfsStoreFactory();
-
- hsf.setInputFileCountMin(3);
- hsf.setMinorCompaction(false);
- hsf.setMajorCompaction(false);
- }
-
- public void testMinorCompaction() throws Exception {
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-
- // #1
- ArrayList<QueuedPersistentEvent> items = new ArrayList<QueuedPersistentEvent>();
- items.add(new TestEvent("1", "1"));
- items.add(new TestEvent("2", "1"));
- items.add(new TestEvent("3", "1"));
- items.add(new TestEvent("4", "1"));
- organizer.flush(items.iterator(), items.size());
-
- // #2
- items.clear();
- items.add(new TestEvent("2", "1"));
- items.add(new TestEvent("4", "1"));
- items.add(new TestEvent("6", "1"));
- items.add(new TestEvent("8", "1"));
- organizer.flush(items.iterator(), items.size());
-
- // #3
- items.clear();
- items.add(new TestEvent("1", "1"));
- items.add(new TestEvent("3", "1"));
- items.add(new TestEvent("5", "1"));
- items.add(new TestEvent("7", "1"));
- items.add(new TestEvent("9", "1"));
- organizer.flush(items.iterator(), items.size());
-
- // #4
- items.clear();
- items.add(new TestEvent("0", "1"));
- items.add(new TestEvent("1", "1"));
- items.add(new TestEvent("4", "1"));
- items.add(new TestEvent("5", "1"));
- organizer.flush(items.iterator(), items.size());
-
- // check file existence in the bucket directory, expect 4 hoplogs
- FileStatus[] hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
- assertEquals(4, hoplogs.length);
-
- // After compaction expect 1 hoplog only. It should have the same sequence number as that of the
- // youngest file compacted, which should be 4 in this case
- organizer.getCompactor().compact(false, false);
- hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION);
- assertEquals(1, hoplogs.length);
- assertEquals(1, organizer.getSortedOplogs().size());
- Hoplog hoplog = new HFileSortedOplog(hdfsStore, hoplogs[0].getPath(), blockCache, stats, storeStats);
- assertEquals(4, HdfsSortedOplogOrganizer.getSequenceNumber(hoplog));
-
- // iterate on oplogs to validate data in files
- HoplogSetIterator iter = new HoplogSetIterator(organizer.getSortedOplogs());
- // the iteration pattern for this test should be 0-9:
- // 0 1 4 5 oplog #4
- // 1 3 5 7 9 oplog #3
- // 2 4 6 8 oplog #2
- // 1 2 3 4 oplog #1
- int count = 0;
- for (ByteBuffer keyBB = null; iter.hasNext();) {
- keyBB = iter.next();
- byte[] key = HFileSortedOplog.byteBufferToArray(keyBB);
- assertEquals(String.valueOf(count), BlobHelper.deserializeBlob(key));
- count++;
- }
- assertEquals(10, count);
-
- // there must be 4 expired hoplogs now
- hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
- assertEquals(4, hoplogs.length);
- organizer.close();
- }
-
- public void testIterativeMinorCompaction() throws Exception {
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-
- // #1
- ArrayList<QueuedPersistentEvent> items = new ArrayList<QueuedPersistentEvent>();
- items.add(new TestEvent("1", "1"));
- items.add(new TestEvent("2", "1"));
- organizer.flush(items.iterator(), items.size());
-
- items.clear();
- items.add(new TestEvent("1", "2"));
- items.add(new TestEvent("3", "2"));
- organizer.flush(items.iterator(), items.size());
-
- items.clear();
- items.add(new TestEvent("4", "3"));
- items.add(new TestEvent("5", "3"));
- organizer.flush(items.iterator(), items.size());
-
- // check file existence in bucket directory
- FileStatus[] hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
- assertEquals(3, hoplogs.length);
-
- organizer.getCompactor().compact(false, false);
-
- FileStatus[] expired = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
- assertEquals(3, expired.length);
- FileStatus[] valids = HdfsSortedOplogOrganizer.filterValidHoplogs(hoplogs, expired);
- assertEquals(0, valids.length);
- // After compaction expect 1 hoplog only.
- hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION);
- assertEquals(1, hoplogs.length);
-
- items.clear();
- items.add(new TestEvent("4", "4"));
- items.add(new TestEvent("6", "4"));
- organizer.flush(items.iterator(), items.size());
-
- items.clear();
- items.add(new TestEvent("7", "5"));
- items.add(new TestEvent("8", "5"));
- organizer.flush(items.iterator(), items.size());
-
- hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
- assertEquals(5, hoplogs.length);
-
- organizer.getCompactor().compact(false, false);
- expired = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
- assertEquals(6, expired.length);
- valids = HdfsSortedOplogOrganizer.filterValidHoplogs(hoplogs, expired);
- assertEquals(0, valids.length);
- hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION);
- assertEquals(2, hoplogs.length);
- valids = HdfsSortedOplogOrganizer.filterValidHoplogs(hoplogs, expired);
- assertEquals(1, valids.length);
-
- assertEquals("2", organizer.read(BlobHelper.serializeToBlob("1")).getValue());
- assertEquals("1", organizer.read(BlobHelper.serializeToBlob("2")).getValue());
- assertEquals("2", organizer.read(BlobHelper.serializeToBlob("3")).getValue());
- assertEquals("4", organizer.read(BlobHelper.serializeToBlob("4")).getValue());
- assertEquals("3", organizer.read(BlobHelper.serializeToBlob("5")).getValue());
- assertEquals("4", organizer.read(BlobHelper.serializeToBlob("6")).getValue());
- assertEquals("5", organizer.read(BlobHelper.serializeToBlob("7")).getValue());
- organizer.close();
- }
-
- public void testMajorCompactionWithDelete() throws Exception {
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-
- // #1
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- items.add(new TestEvent("1", "1"));
- items.add(new TestEvent("2", "1"));
- items.add(new TestEvent("3", "1"));
- items.add(new TestEvent("4", "1"));
- items.add(new TestEvent("4", "10", Operation.DESTROY));
- organizer.flush(items.iterator(), items.size());
-
- // #2
- items.clear();
- items.add(new TestEvent("2", "1", Operation.DESTROY));
- items.add(new TestEvent("4", "1", Operation.DESTROY));
- items.add(new TestEvent("6", "1", Operation.INVALIDATE));
- items.add(new TestEvent("8", "1"));
- organizer.flush(items.iterator(), items.size());
-
- // #3
- items.clear();
- items.add(new TestEvent("1", "1"));
- items.add(new TestEvent("3", "1"));
- items.add(new TestEvent("5", "1"));
- items.add(new TestEvent("7", "1"));
- items.add(new TestEvent("9", "1", Operation.DESTROY));
- organizer.flush(items.iterator(), items.size());
-
- // #4
- items.clear();
- items.add(new TestEvent("0", "1", Operation.DESTROY));
- items.add(new TestEvent("1", "1"));
- items.add(new TestEvent("4", "1"));
- items.add(new TestEvent("5", "1"));
- organizer.flush(items.iterator(), items.size());
-
- // check file existence in the bucket directory, expect 4 hoplogs
- FileStatus[] hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
- assertEquals(4, hoplogs.length);
-
- // After compaction expect 1 hoplog only. It should have the same sequence number as that of the
- // youngest file compacted, which should be 4 in this case
- organizer.getCompactor().compact(true, false);
- hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION);
- assertEquals(1, hoplogs.length);
- assertEquals(1, organizer.getSortedOplogs().size());
- Hoplog hoplog = new HFileSortedOplog(hdfsStore, hoplogs[0].getPath(), blockCache, stats, storeStats);
- assertEquals(4, HdfsSortedOplogOrganizer.getSequenceNumber(hoplog));
-
- // iterate on oplogs to validate data in files
- HoplogSetIterator iter = new HoplogSetIterator(organizer.getSortedOplogs());
- int count = 0;
-
- // entries in () are destroyed or invalidated
- // 1, 2, 3, 4, (11)
- // (2), (4), (6), 8
- // 1, 3, 5, 7, (9)
- // (0), 1, 4, 5
- String[] expectedValues = { "1", "3", "4", "5", "7", "8" };
- for (ByteBuffer keyBB = null; iter.hasNext();) {
- keyBB = iter.next();
- byte[] key = HFileSortedOplog.byteBufferToArray(keyBB);
- assertEquals(expectedValues[count], BlobHelper.deserializeBlob(key));
- count++;
- }
- assertEquals(6, count);
-
- // there must be 4 expired hoplogs now
- hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
- assertEquals(4, hoplogs.length);
- organizer.close();
- }
-
- public void testGainComputation() throws Exception {
- HoplogOrganizer<? extends PersistedEventImpl> organizer = regionManager.create(0);
- HdfsSortedOplogOrganizer bucket = (HdfsSortedOplogOrganizer) organizer;
- ArrayList<TrackedReference<Hoplog>> targets = new ArrayList<TrackedReference<Hoplog>>();
- for (int i = 0; i < 10; i++) {
- targets.add(new TrackedReference<Hoplog>(new TestHoplog(hdfsStore, i * TEN_MB)));
- }
-
- // each file read costs 3, so reading four files costs 3 * 4. Subtract the
- // read cost of the single file left after compaction
- float expect = (float) ((3 * 4.0 - 3) / (20 + 30 + 40 + 50));
- float result = bucket.computeGain(2, 5, targets);
- assertTrue(Math.abs(expect - result) < (expect/1000));
-
- // each read costs 3 except the 10MB file, whose read costs 2, so reading
- // nine files costs 3 * 9 - 1. Subtract the read cost of the compacted file.
- expect = (float) ((3 * 9 - 3 - 1.0) / (10 + 20 + 30 + 40 + 50 + 60 + 70 + 80 + 90));
- result = bucket.computeGain(0, 9, targets);
- assertTrue(Math.abs(expect - result) < (expect/1000));
- }
-
- public void testGainComputeSmallFile() throws Exception {
- HoplogOrganizer<? extends PersistedEventImpl> organizer = regionManager.create(0);
- HdfsSortedOplogOrganizer bucket = (HdfsSortedOplogOrganizer) organizer;
-
- ArrayList<TrackedReference<Hoplog>> targets = new ArrayList<TrackedReference<Hoplog>>();
- for (int i = 0; i < 10; i++) {
- targets.add(new TrackedReference<Hoplog>(new TestHoplog(hdfsStore, i * TEN_MB / 1024)));
- }
-
- float result = bucket.computeGain(2, 5, targets);
- assertTrue(Math.abs(8.0 - result) < (1.0/1000));
- }
-
- public void testGainComputeMixedFiles() throws Exception {
- HoplogOrganizer<? extends PersistedEventImpl> organizer = regionManager.create(0);
- HdfsSortedOplogOrganizer bucket = (HdfsSortedOplogOrganizer) organizer;
-
- ArrayList<TrackedReference<Hoplog>> targets = new ArrayList<TrackedReference<Hoplog>>();
- for (int i = 0; i < 10; i++) {
- targets.add(new TrackedReference<Hoplog>(new TestHoplog(hdfsStore, i * TEN_MB / 1024)));
- }
- TestHoplog midHop = (TestHoplog) targets.get(4).get();
- // one more than other files
- midHop.size = 5 * TEN_MB;
-
- float expect = (float) ((4 * 2 - 3 + 1.0) / 50);
- float result = bucket.computeGain(2, 5, targets);
- System.out.println(expect);
- System.out.println(result);
- assertTrue(Math.abs(expect - result) < (expect/1000));
- }
-
- public void testGainComputeBadRatio() throws Exception {
- HoplogOrganizer<? extends PersistedEventImpl> organizer = regionManager.create(0);
- HdfsSortedOplogOrganizer bucket = (HdfsSortedOplogOrganizer) organizer;
- ArrayList<TrackedReference<Hoplog>> targets = new ArrayList<TrackedReference<Hoplog>>();
- for (int i = 0; i < 10; i++) {
- targets.add(new TrackedReference<Hoplog>(new TestHoplog(hdfsStore, i * TEN_MB)));
- }
-
- TestHoplog firstHop = (TestHoplog) targets.get(2).get();
- // one more than other files
- firstHop.size = (1 + 30 + 40 + 50) * TEN_MB;
- Float result = bucket.computeGain(2, 5, targets);
- assertNull(result);
- }
-
- public void testMinorCompactionTargetMaxSize() throws Exception {
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
- HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
-
- ArrayList<TrackedReference<TestHoplog>> targets = new ArrayList<TrackedReference<TestHoplog>>();
- for (int i = 0; i < 5; i++) {
- TrackedReference<TestHoplog> hop = new TrackedReference<TestHoplog>(new TestHoplog(hdfsStore, TEN_MB + i));
- hop.increment();
- targets.add(hop);
- }
- TrackedReference<TestHoplog> oldestHop = targets.get(targets.size() - 1);
- TestHoplog thirdHop = (TestHoplog) targets.get(2).get();
-
- // the oldest file exceeds the max input file size, so it is ignored
- oldestHop.get().size = HDFSStore.DEFAULT_INPUT_FILE_SIZE_MAX_MB * ONE_MB + 100;
- List<TrackedReference<Hoplog>> list = (List<TrackedReference<Hoplog>>) targets.clone();
- compactor.getMinorCompactionTargets(list, -1);
- assertEquals(4, list.size());
- for (TrackedReference<Hoplog> ref : list) {
- assertTrue(((TestHoplog)ref.get()).size - TEN_MB < 5 );
- }
-
- // the third file exceeds the max input file size but is not ignored
- thirdHop.size = HDFSStore.DEFAULT_INPUT_FILE_SIZE_MAX_MB * ONE_MB + 100;
- oldestHop.increment();
- list = (List<TrackedReference<Hoplog>>) targets.clone();
- compactor.getMinorCompactionTargets(list, -1);
- assertEquals(4, list.size());
- int i = 0;
- for (TrackedReference<Hoplog> ref : list) {
- if (i != 2) {
- assertTrue(((TestHoplog) ref.get()).size - TEN_MB < 5);
- } else {
- assertTrue(((TestHoplog) ref.get()).size > HDFSStore.DEFAULT_INPUT_FILE_SIZE_MAX_MB * ONE_MB);
- }
- i++;
- }
- }
-
- public void testAlterMaxInputFileSize() throws Exception {
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
- HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
-
- assertTrue(TEN_MB * 2 < hdfsStore.getInputFileSizeMax() * ONE_MB);
-
- ArrayList<TrackedReference<TestHoplog>> targets = new ArrayList<TrackedReference<TestHoplog>>();
- for (int i = 0; i < 5; i++) {
- TrackedReference<TestHoplog> hop = new TrackedReference<TestHoplog>(new TestHoplog(hdfsStore, TEN_MB + i));
- hop.increment();
- targets.add(hop);
- }
-
- List<TrackedReference<Hoplog>> list = (List<TrackedReference<Hoplog>>) targets.clone();
- compactor.getMinorCompactionTargets(list, -1);
- assertEquals(targets.size(), list.size());
-
- HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
- mutator.setInputFileSizeMax(1);
- hdfsStore.alter(mutator);
-
- compactor.getMinorCompactionTargets(list, -1);
- assertEquals(0, list.size());
- }
-
- public void testAlterInputFileCount() throws Exception {
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
- HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
-
- assertTrue(2 < hdfsStore.getInputFileCountMax());
-
- ArrayList<TrackedReference<TestHoplog>> targets = new ArrayList<TrackedReference<TestHoplog>>();
- for (int i = 0; i < 5; i++) {
- TrackedReference<TestHoplog> hop = new TrackedReference<TestHoplog>(new TestHoplog(hdfsStore, TEN_MB + i));
- hop.increment();
- targets.add(hop);
- }
-
- List<TrackedReference<Hoplog>> list = (List<TrackedReference<Hoplog>>) targets.clone();
- compactor.getMinorCompactionTargets(list, -1);
- assertEquals(targets.size(), list.size());
-
- HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
- mutator.setInputFileCountMax(2);
- mutator.setInputFileCountMin(2);
- hdfsStore.alter(mutator);
-
- compactor.getMinorCompactionTargets(list, -1);
- assertEquals(2, list.size());
- }
-
- public void testAlterMajorCompactionInterval() throws Exception {
- final AtomicInteger majorCReqCount = new AtomicInteger(0);
-
- final Compactor compactor = new AbstractCompactor() {
- @Override
- public boolean compact(boolean isMajor, boolean isForced) throws IOException {
- majorCReqCount.incrementAndGet();
- return true;
- }
- };
-
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0) {
- @Override
- public synchronized Compactor getCompactor() {
- return compactor;
- }
- };
-
- // create hoplog in the past, 90 seconds before current time
- organizer.hoplogCreated(getName(), 0, new TestHoplog(hdfsStore, ONE_MB, System.currentTimeMillis() - 90000));
- TimeUnit.MILLISECONDS.sleep(50);
- organizer.hoplogCreated(getName(), 0, new TestHoplog(hdfsStore, ONE_MB, System.currentTimeMillis() - 90000));
-
- alterMajorCompaction(hdfsStore, true);
-
- List<TrackedReference<Hoplog>> hoplogs = organizer.getSortedOplogs();
- assertEquals(2, hoplogs.size());
-
- organizer.performMaintenance();
- TimeUnit.MILLISECONDS.sleep(100);
- assertEquals(0, majorCReqCount.get());
-
- HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
- mutator.setMajorCompactionInterval(1);
- hdfsStore.alter(mutator);
-
- organizer.performMaintenance();
- TimeUnit.MILLISECONDS.sleep(100);
- assertEquals(1, majorCReqCount.get());
- }
-
- public void testMinorCompactionTargetMinCount() throws Exception {
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
- HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
-
- ArrayList<TrackedReference<Hoplog>> targets = new ArrayList<TrackedReference<Hoplog>>();
- for (int i = 0; i < 2; i++) {
- TrackedReference<Hoplog> hop = new TrackedReference<Hoplog>(new TestHoplog(hdfsStore, TEN_MB + i));
- hop.increment();
- targets.add(hop);
- }
- compactor.getMinorCompactionTargets(targets, -1);
- assertEquals(0, targets.size());
- }
-
- public void testMinorCompactionLessTargetsStatsUpdate() throws Exception {
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- items.add(new TestEvent("1", "1"));
- organizer.flush(items.iterator(), items.size());
-
- items.clear();
- items.add(new TestEvent("2", "2", Operation.DESTROY));
- organizer.flush(items.iterator(), items.size());
-
- TimeUnit.SECONDS.sleep(1);
- List<TrackedReference<Hoplog>> hoplogs = organizer.getSortedOplogs();
- assertEquals(2, hoplogs.size());
-
- organizer.performMaintenance();
- hoplogs = organizer.getSortedOplogs();
- assertEquals(2, hoplogs.size());
- }
-
- public void testMinorCompactionTargetsOptimizer() throws Exception {
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
- HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
-
- ArrayList<TrackedReference<Hoplog>> targets = new ArrayList<TrackedReference<Hoplog>>();
- for (int i = 0; i < 6; i++) {
- TrackedReference<Hoplog> hop = new TrackedReference<Hoplog>(new TestHoplog(hdfsStore, TEN_MB + i));
- hop.increment();
- targets.add(hop);
- }
- List<TrackedReference<Hoplog>> list = (List<TrackedReference<Hoplog>>) targets.clone();
- compactor.getMinorCompactionTargets(list, -1);
- assertEquals(6, list.size());
-
- TestHoplog fifthHop = (TestHoplog) targets.get(4).get();
- // the fifth hoplog needs an additional index block read as it holds more
- // keys than fit in one index block
- fifthHop.size = (HdfsSortedOplogOrganizer.AVG_NUM_KEYS_PER_INDEX_BLOCK * 5 + 1) * 64 * 1024;
- list = (List<TrackedReference<Hoplog>>) targets.clone();
- compactor.getMinorCompactionTargets(list, -1);
- assertEquals(4, list.size());
- for (TrackedReference<Hoplog> ref : list) {
- assertTrue(((TestHoplog)ref.get()).size - TEN_MB < 4 );
- }
- }
-
- public void testTargetsReleasedBadRatio() throws Exception {
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
- HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
-
- ArrayList<TrackedReference<Hoplog>> targets = new ArrayList<TrackedReference<Hoplog>>();
- for (int i = 0; i < 3; i++) {
- TrackedReference<Hoplog> hop = new TrackedReference<Hoplog>(new TestHoplog(hdfsStore, TEN_MB + i));
- hop.increment();
- targets.add(hop);
- }
- TestHoplog oldestHop = (TestHoplog) targets.get(2).get();
- oldestHop.size = (1 + 30) * TEN_MB;
-
- List<TrackedReference<Hoplog>> list = (List<TrackedReference<Hoplog>>) targets.clone();
- compactor.getMinorCompactionTargets(list, -1);
- assertEquals(0, list.size());
- assertEquals(3, targets.size());
- for (TrackedReference<Hoplog> ref : targets) {
- assertEquals(0, ref.uses());
- }
- }
-
- public void testMinorCTargetsIgnoreMajorC() throws Exception {
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- for (int i = 0; i < 7; i++) {
- items.clear();
- items.add(new TestEvent("1" + i, "1" + i));
- organizer.flush(items.iterator(), items.size());
- }
-
- HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
- List<TrackedReference<Hoplog>> targets = organizer.getSortedOplogs();
- compactor.getMinorCompactionTargets(targets, -1);
- assertEquals(7, targets.size());
-
- targets = organizer.getSortedOplogs();
- for (TrackedReference<Hoplog> ref : targets) {
- ref.increment();
- }
- compactor.getMinorCompactionTargets(targets, 2);
- assertEquals((7 - 2), targets.size());
- targets = organizer.getSortedOplogs();
- for (int i = 0; i < targets.size(); i++) {
- if (i + 1 <= (7 - 2)) {
- assertEquals(1, targets.get(i).uses());
- } else {
- assertEquals(0, targets.get(i).uses());
- }
- }
-
- targets = organizer.getSortedOplogs();
- for (TrackedReference<Hoplog> ref : targets) {
- if (ref.uses() == 0) {
- ref.increment();
- }
- assertEquals(1, ref.uses());
- }
- compactor.getMinorCompactionTargets(targets, 7);
- assertEquals(0, targets.size());
-
- targets = organizer.getSortedOplogs();
- for (int i = 0; i < targets.size(); i++) {
- assertEquals(0, targets.get(i).uses());
- }
- }
-
- public void testTargetOverlap() throws Exception {
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- for (int i = 0; i < 7; i++) {
- items.clear();
- items.add(new TestEvent("1" + i, "1" + i));
- organizer.flush(items.iterator(), items.size());
- }
-
- HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
- List<TrackedReference<Hoplog>> targets = organizer.getSortedOplogs();
- assertTrue(compactor.isMinorMajorOverlap(targets, 8));
- assertTrue(compactor.isMinorMajorOverlap(targets, 7));
- assertTrue(compactor.isMinorMajorOverlap(targets, 6));
- assertTrue(compactor.isMinorMajorOverlap(targets, 1));
- assertFalse(compactor.isMinorMajorOverlap(targets, 0));
- assertFalse(compactor.isMinorMajorOverlap(targets, -1));
-
- targets.remove(targets.size() -1); // remove the last one
- targets.remove(targets.size() -1); // remove the last one again
- assertFalse(compactor.isMinorMajorOverlap(targets, 1));
- assertFalse(compactor.isMinorMajorOverlap(targets, 2));
- assertTrue(compactor.isMinorMajorOverlap(targets, 3));
-
- targets.remove(3); // remove from the middle, seq num 4
- assertTrue(compactor.isMinorMajorOverlap(targets, 4));
- assertTrue(compactor.isMinorMajorOverlap(targets, 3));
- }
-
- public void testSuspendMinorByMajor() throws Exception {
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- for (int i = 0; i < 5; i++) {
- items.clear();
- items.add(new TestEvent("1" + i, "1" + i));
- organizer.flush(items.iterator(), items.size());
- }
-
- HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
-
- Hoplog hoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir + "/"
- + getName() + "-" + System.currentTimeMillis() + "-1.ihop.tmp"), blockCache, stats, storeStats);
- compactor.fillCompactionHoplog(false, organizer.getSortedOplogs(), hoplog, -1);
-
- cache.getLogger().info("<ExpectedException action=add>java.lang.InterruptedException</ExpectedException>");
- try {
- compactor.maxMajorCSeqNum.set(3);
- compactor.fillCompactionHoplog(false, organizer.getSortedOplogs(), hoplog, -1);
- fail();
- } catch (InterruptedException e) {
- // expected
- }
- cache.getLogger().info("<ExpectedException action=remove>java.lang.InterruptedException</ExpectedException>");
- organizer.close();
- }
-
- public void testMajorCompactionSetsSeqNum() throws Exception {
- final CountDownLatch compactionStartedLatch = new CountDownLatch(1);
- final CountDownLatch waitLatch = new CountDownLatch(1);
- class MyOrganizer extends HdfsSortedOplogOrganizer {
- final HoplogCompactor compactor = new MyCompactor();
- public MyOrganizer(HdfsRegionManager region, int bucketId) throws IOException {
- super(region, bucketId);
- }
- public synchronized Compactor getCompactor() {
- return compactor;
- }
- class MyCompactor extends HoplogCompactor {
- @Override
- public long fillCompactionHoplog(boolean isMajor,
- List<TrackedReference<Hoplog>> targets, Hoplog output,
- int majorCSeqNum) throws IOException, InterruptedException {
- compactionStartedLatch.countDown();
- waitLatch.await();
- long byteCount = 0;
- try {
- byteCount = super.fillCompactionHoplog(isMajor, targets, output, majorCSeqNum);
- } catch (ForceReattemptException e) {
- // we do not expect this in a unit test.
- }
- return byteCount;
- }
- }
- }
-
- final HdfsSortedOplogOrganizer organizer = new MyOrganizer(regionManager, 0);
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- for (int i = 0; i < 3; i++) {
- items.clear();
- items.add(new TestEvent("1" + i, "1" + i));
- organizer.flush(items.iterator(), items.size());
- }
-
- Thread t = new Thread(new Runnable() {
- public void run() {
- try {
- organizer.getCompactor().compact(true, false);
- } catch (IOException e) {
- e.printStackTrace();
- }
- }
- });
- t.start();
- compactionStartedLatch.await();
- assertEquals(3, ((HoplogCompactor)organizer.getCompactor()).maxMajorCSeqNum.get());
- waitLatch.countDown();
- t.join();
- }
-
- public void testMinorWatchesMajorsSeqNum() throws Exception {
- final CountDownLatch majorCStartedLatch = new CountDownLatch(1);
- final CountDownLatch majorCWaitLatch = new CountDownLatch(1);
-
- final CountDownLatch minorCStartedLatch = new CountDownLatch(1);
- final List<TrackedReference<Hoplog>> minorTargets = new ArrayList<TrackedReference<Hoplog>>();
-
- class MyOrganizer extends HdfsSortedOplogOrganizer {
- final HoplogCompactor compactor = new MyCompactor();
- public MyOrganizer(HdfsRegionManager region, int bucketId) throws IOException {
- super(region, bucketId);
- }
- public synchronized Compactor getCompactor() {
- return compactor;
- }
- class MyCompactor extends HoplogCompactor {
- @Override
- public long fillCompactionHoplog(boolean isMajor,
- List<TrackedReference<Hoplog>> targets, Hoplog output,
- int majorCSeqNum) throws IOException, InterruptedException {
- if (isMajor) {
- majorCStartedLatch.countDown();
- majorCWaitLatch.await();
- } else {
- minorCStartedLatch.countDown();
- minorTargets.addAll(targets);
- }
- long byteCount =0;
- try {
- byteCount = super.fillCompactionHoplog(isMajor, targets, output, majorCSeqNum);
- } catch (ForceReattemptException e) {
- // we do not expect this in a unit test.
- }
- return byteCount;
- }
- }
- }
-
- final HdfsSortedOplogOrganizer organizer = new MyOrganizer(regionManager, 0);
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- for (int i = 0; i < 3; i++) {
- items.clear();
- items.add(new TestEvent("1" + i, "1" + i));
- organizer.flush(items.iterator(), items.size());
- }
-
- Thread majorCThread = new Thread(new Runnable() {
- public void run() {
- try {
- organizer.getCompactor().compact(true, false);
- } catch (IOException e) {
- e.printStackTrace();
- }
- }
- });
- majorCThread.start();
- majorCStartedLatch.await();
- assertEquals(3, ((HoplogCompactor)organizer.getCompactor()).maxMajorCSeqNum.get());
-
- // create more files for minor C
- for (int i = 0; i < 4; i++) {
- items.clear();
- items.add(new TestEvent("1" + i, "1" + i));
- organizer.flush(items.iterator(), items.size());
- }
-
- Thread minorCThread = new Thread(new Runnable() {
- public void run() {
- try {
- organizer.getCompactor().compact(false, false);
- } catch (IOException e) {
- e.printStackTrace();
- }
- }
- });
- minorCThread.start();
- minorCThread.join();
- assertEquals(4, minorTargets.size());
- for (TrackedReference<Hoplog> ref : minorTargets) {
- assertTrue(organizer.getSequenceNumber(ref.get()) >= 4);
- }
-
- majorCWaitLatch.countDown();
- majorCThread.join();
- }
-
- public void testTimeBoundedSuspend() throws Exception {
- final AtomicBoolean barrier = new AtomicBoolean(true);
-
- class MyOrganizer extends HdfsSortedOplogOrganizer {
- public MyOrganizer(HdfsRegionManager region, int bucketId) throws IOException {
- super(region, bucketId);
- }
- public synchronized Compactor getCompactor() {
- return new MyCompactor();
- }
- class MyCompactor extends HoplogCompactor {
- public long fillCompactionHoplog(boolean isMajor, List<TrackedReference<Hoplog>> targets, Hoplog output)
- throws IOException, InterruptedException {
- barrier.set(false);
- TimeUnit.SECONDS.sleep(5 * HoplogConfig.SUSPEND_MAX_WAIT_MS_DEFAULT);
- long byteCount =0;
- try {
- byteCount = super.fillCompactionHoplog(isMajor, targets, output, -1);
- } catch (ForceReattemptException e) {
- // we do not expect this in a unit test.
- }
- return byteCount;
- }
- }
- }
-
- HdfsSortedOplogOrganizer organizer = new MyOrganizer(regionManager, 0);
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- for (int i = 0; i < 4; i++) {
- items.clear();
- items.add(new TestEvent("1" + i, "1" + i));
- organizer.flush(items.iterator(), items.size());
- }
-
- final HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
- ExecutorService service = Executors.newCachedThreadPool();
- service.execute(new Runnable() {
- public void run() {
- try {
- compactor.compact(false, false);
- } catch (Exception e) {
- }
- }
- });
-
- final AtomicLong start = new AtomicLong(0);
- final AtomicLong end = new AtomicLong(0);
- service.execute(new Runnable() {
- public void run() {
- while (barrier.get()) {
- try {
- TimeUnit.MILLISECONDS.sleep(50);
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- }
-
- start.set(System.currentTimeMillis());
- compactor.suspend();
- end.set(System.currentTimeMillis());
- }
- });
-
- for (long i = 0; i < 5; i++) {
- if (end.get() == 0) {
- TimeUnit.MILLISECONDS.sleep(HoplogConfig.SUSPEND_MAX_WAIT_MS_DEFAULT / 2);
- } else {
- break;
- }
- }
-
- assertTrue(end.get() - start.get() < 100 + HoplogConfig.SUSPEND_MAX_WAIT_MS_DEFAULT);
- }
-
- public static class TestHoplog extends AbstractHoplog {
- long size;
- long creationTime;
- TestHoplog(HDFSStoreImpl store, long size) throws IOException {
- this(store, size, 0);
- }
-
- TestHoplog(HDFSStoreImpl store, long size, long creationTime) throws IOException {
- super(store, new Path("1-" + creationTime + "-1.hop"), null);
- this.size = size;
- this.creationTime = creationTime;
- }
-
- @Override
- public long getSize() {
- return size;
- }
- @Override
- public long getModificationTimeStamp() {
- if (creationTime > 0) {
- return creationTime;
- }
- return super.getModificationTimeStamp();
- }
- @Override
- public String toString() {
- long name = size - TEN_MB;
- if (name < 0) name = size - (TEN_MB / 1024);
- return name + "";
- }
- public boolean isClosed() {
- return false;
- }
- public void close() throws IOException {
- }
- public HoplogReader getReader() throws IOException {
- return null;
- }
- public HoplogWriter createWriter(int keys) throws IOException {
- return null;
- }
- public void close(boolean clearCache) throws IOException {
- }
- }
-}
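
The assertions in testGainComputation encode a simple ratio: read cost saved by
compacting divided by the megabytes that must be rewritten. A minimal sketch of
that arithmetic follows (the per-file read costs are the constants the test
hard-codes; how the real computeGain derives them from file and index block
sizes is an internal detail not shown here):

    public class GainSketch {
        // gain = (read cost of candidate files - read cost of the single
        //         compacted output file) / total MB rewritten
        static float gain(float[] perFileReadCost, float outputReadCost, float totalMb) {
            float before = 0;
            for (float c : perFileReadCost) {
                before += c;
            }
            return (before - outputReadCost) / totalMb;
        }

        public static void main(String[] args) {
            // four files of 20, 30, 40, 50 MB, each costing 3 reads, compacted
            // into one file also costing 3: (3 * 4 - 3) / 140, as in the test
            System.out.println(gain(new float[] {3, 3, 3, 3}, 3, 20 + 30 + 40 + 50));
        }
    }

When the rewrite cost dwarfs the saving, for example one candidate larger than
all the others combined as in testGainComputeBadRatio, computeGain signals a
bad ratio by returning null rather than a number.
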
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFKeyJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFKeyJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFKeyJUnitTest.java
deleted file mode 100644
index fe15305..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFKeyJUnitTest.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-
-import junit.framework.TestCase;
-
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class GFKeyJUnitTest extends TestCase {
- public void testSerde() throws Exception {
- String str = "str";
- GFKey key = new GFKey();
- key.setKey(str);
-
- ByteArrayOutputStream baos = new ByteArrayOutputStream();
- DataOutputStream dos = new DataOutputStream(baos);
- key.write(dos);
-
- ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
- DataInputStream dis = new DataInputStream(bais);
- key.readFields(dis);
-
- assertEquals(str, key.getKey());
- }
-
- public void testCompare() {
- GFKey keya = new GFKey();
- keya.setKey("a");
-
- GFKey keyb = new GFKey();
- keyb.setKey("b");
-
- assertEquals(-1, keya.compareTo(keyb));
- assertEquals(1, keyb.compareTo(keya));
- }
-}
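
The serde round trip above matches Hadoop's WritableComparable contract:
write(DataOutput) followed by readFields(DataInput) must reproduce the key, and
compareTo drives MapReduce sort order. A minimal stand-in with the same shape
(the class name and UTF-string field are illustrative assumptions; GFKey itself
serializes arbitrary region keys, not just strings):

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    import org.apache.hadoop.io.WritableComparable;

    public class StringKey implements WritableComparable<StringKey> {
        private String key = "";

        public void setKey(String key) { this.key = key; }
        public String getKey() { return key; }

        @Override
        public void write(DataOutput out) throws IOException {
            out.writeUTF(key); // serialize the key
        }

        @Override
        public void readFields(DataInput in) throws IOException {
            key = in.readUTF(); // deserialize, replacing current state
        }

        @Override
        public int compareTo(StringKey other) {
            return key.compareTo(other.key); // lexicographic sort order
        }
    }
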
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HDFSSplitIteratorJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HDFSSplitIteratorJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HDFSSplitIteratorJUnitTest.java
deleted file mode 100644
index 5ebb00e..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HDFSSplitIteratorJUnitTest.java
+++ /dev/null
@@ -1,265 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
-import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.BaseHoplogTestCase;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HFileSortedOplog;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSSplitIteratorJUnitTest extends BaseHoplogTestCase {
- public void test1Hop1BlockIter() throws Exception {
- Path path = new Path(testDataDir, "region/0/1-1-1.hop");
- Hoplog oplog = new HFileSortedOplog(hdfsStore, path, blockCache, stats,
- storeStats);
- createHoplog(10, oplog);
-
- Path[] paths = {path};
- long[] starts = {0};
- long[] lengths = {oplog.getSize()};
- HDFSSplitIterator iter = HDFSSplitIterator.newInstance(
- hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
-
- int count = 0;
- while (iter.hasNext()) {
- boolean success = iter.next();
- assertTrue(success);
- assertEquals("key-" + count, new String((byte[])iter.getKey()));
- count++;
- }
- assertEquals(10, count);
- }
-
- public void test1HopNBlockIter() throws Exception {
- Path path = new Path(testDataDir, "region/0/1-1-1.hop");
- Hoplog oplog = new HFileSortedOplog(hdfsStore, path,
- blockCache, stats, storeStats);
- createHoplog(2000, oplog);
-
- FileSystem fs = hdfsStore.getFileSystem();
- Reader reader = HFile.createReader(fs, path, new CacheConfig(fs.getConf()));
- BlockIndexReader bir = reader.getDataBlockIndexReader();
- int blockCount = bir.getRootBlockCount();
- reader.close();
-
- // make sure there is more than one HFile block in the hoplog
- assertTrue(1 < blockCount);
-
- Path[] paths = {path};
- long half = oplog.getSize()/2;
- long[] starts = {0};
- long[] lengths = {half};
- HDFSSplitIterator iter = HDFSSplitIterator.newInstance(
- hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
-
- int count = 0;
- while (iter.hasNext()) {
- boolean success = iter.next();
- assertTrue(success);
- assertEquals("key-" + (count + 100000), new String((byte[])iter.getKey()));
- count++;
- }
- // the number of iterations should be less than the number of keys
- // inserted in the hoplog
- assertTrue(count < 2000 && count > 0);
-
- paths = new Path[] {path};
- starts = new long[] {half + 1};
- lengths = new long[] {oplog.getSize()};
- iter = HDFSSplitIterator.newInstance(
- hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
-
- while (iter.hasNext()) {
- boolean success = iter.next();
- assertTrue(success);
- assertEquals("key-" + (count + 100000), new String((byte[])iter.getKey()));
- count++;
- }
- assertEquals(2000, count);
-
- paths = new Path[] {path, path};
- starts = new long[] {0, half + 1};
- lengths = new long[] {half, oplog.getSize()};
- iter = HDFSSplitIterator.newInstance(
- hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
-
- count = 0;
- while (iter.hasNext()) {
- boolean success = iter.next();
- assertTrue(success);
- assertEquals("key-" + (count + 100000), new String((byte[])iter.getKey()));
- count++;
- }
- assertEquals(2000, count);
- }
-
- /*
- * This test iterates over 3 hoplog files. The three hoplog files have the
- * same content; duplicate keys should not be discarded.
- */
- public void testNHoplogNBlockIter() throws Exception {
- Path path1 = new Path(testDataDir, "region/0/1-1-1.hop");
- Hoplog oplog = new HFileSortedOplog(hdfsStore, path1,
- blockCache, stats, storeStats);
- createHoplog(2000, oplog);
-
- FileSystem fs = hdfsStore.getFileSystem();
- Reader reader = HFile.createReader(fs, path1, new CacheConfig(fs.getConf()));
- BlockIndexReader bir = reader.getDataBlockIndexReader();
- int blockCount = bir.getRootBlockCount();
- reader.close();
-
- // make sure there is more than one HFile block in the hoplog
- assertTrue(1 < blockCount);
-
- Path path2 = new Path(testDataDir, "region/0/1-2-1.hop");
- oplog = new HFileSortedOplog(hdfsStore, path2,
- blockCache, stats, storeStats);
- createHoplog(2000, oplog);
-
- Path path3 = new Path(testDataDir, "region/0/1-3-1.hop");
- oplog = new HFileSortedOplog(hdfsStore, path3,
- blockCache, stats, storeStats);
- createHoplog(2000, oplog);
-
- Path[] paths = {path1, path2, path3, path1, path2, path3};
- long half = oplog.getSize()/2;
- long[] starts = {0, 0, 0, half + 1, half + 1, half + 1};
- long[] lengths = {half, half, half, oplog.getSize(), oplog.getSize(), oplog.getSize()};
- HDFSSplitIterator iter = HDFSSplitIterator.newInstance(
- hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
-
- int[] keyCounts = new int[2000];
- while (iter.hasNext()) {
- boolean success = iter.next();
- assertTrue(success);
- String key = new String((byte[])iter.getKey()).substring("key-".length());
- keyCounts[Integer.valueOf(key) - 100000] ++;
- }
-
- for (int i : keyCounts) {
- assertEquals(3, i);
- }
- }
-
- public void testMRLikeNHopIter() throws Exception {
- Path path1 = new Path(testDataDir, "region/0/1-1-1.hop");
- Hoplog oplog = new HFileSortedOplog(hdfsStore, path1,
- blockCache, stats, storeStats);
- createHoplog(10, oplog);
-
- Path path2 = new Path(testDataDir, "region/0/1-2-1.hop");
- oplog = new HFileSortedOplog(hdfsStore, path2,
- blockCache, stats, storeStats);
- createHoplog(10, oplog);
-
- Path path3 = new Path(testDataDir, "region/0/1-3-1.hop");
- oplog = new HFileSortedOplog(hdfsStore, path3,
- blockCache, stats, storeStats);
- createHoplog(10, oplog);
-
- Path[] paths = {path1, path2, path3};
- long[] starts = {0, 0, 0};
- long[] lengths = {oplog.getSize(), oplog.getSize(), oplog.getSize()};
- HDFSSplitIterator iter = HDFSSplitIterator.newInstance(
- hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
-
- int[] keyCounts = new int[10];
- while (iter.hasNext()) {
- boolean success = iter.next();
- assertTrue(success);
- // extra hasNext() call before the key is read
- iter.hasNext();
- String key = new String((byte[])iter.getKey()).substring("key-".length());
- System.out.println(key);
- keyCounts[Integer.valueOf(key)] ++;
- }
-
- for (int i : keyCounts) {
- assertEquals(3, i);
- }
- }
-
- public void test1Hop1BlockIterSkipDeletedHoplogs() throws Exception {
- FileSystem fs = hdfsStore.getFileSystem();
- Path path = new Path(testDataDir, "region/0/1-1-1.hop");
- Hoplog oplog = new HFileSortedOplog(hdfsStore, path,
- blockCache, stats, storeStats);
- createHoplog(10, oplog);
-
- Path[] paths = {path};
- long[] starts = {0};
- long[] lengths = {oplog.getSize()};
-
- //Delete the Hoplog file
- fs.delete(path, true);
-
- HDFSSplitIterator iter = HDFSSplitIterator.newInstance(
- hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
- assertFalse(iter.hasNext());
-
- }
-
- public void testMRLikeNHopIterSkipDeletedHoplogs() throws Exception {
- FileSystem fs = hdfsStore.getFileSystem();
- //Create Hoplogs
- Path path1 = new Path(testDataDir, "region/0/1-1-1.hop");
- Hoplog oplog = new HFileSortedOplog(hdfsStore, path1,
- blockCache, stats, storeStats);
- createHoplog(10, oplog);
-
- Path path2 = new Path(testDataDir, "region/0/1-2-1.hop");
- oplog = new HFileSortedOplog(hdfsStore, path2,
- blockCache, stats, storeStats);
- createHoplog(10, oplog);
-
- Path path3 = new Path(testDataDir, "region/0/1-3-1.hop");
- oplog = new HFileSortedOplog(hdfsStore, path3,
- blockCache, stats, storeStats);
- createHoplog(10, oplog);
-
- Path[] paths = {path1, path2, path3};
- long[] starts = {0, 0, 0};
- long[] lengths = {oplog.getSize(), oplog.getSize(), oplog.getSize()};
- HDFSSplitIterator iter = HDFSSplitIterator.newInstance(
- hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
- int count = 0;
- while (iter.hasNext()) {
- boolean success = iter.next();
- assertTrue(success);
- count++;
- }
- assertEquals(30, count);
-
- for(int i = 0; i < 3; ++i){
- fs.delete(paths[i], true);
- iter = HDFSSplitIterator.newInstance(
- hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
- count = 0;
- while (iter.hasNext()) {
- boolean success = iter.next();
- assertTrue(success);
- count++;
- }
- assertEquals(20, count);
- oplog = new HFileSortedOplog(hdfsStore, paths[i],
- blockCache, stats, storeStats);
- createHoplog(10, oplog);
- }
- }
-}
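
The iterator deleted above consumes splits in the MapReduce style: parallel arrays of paths, start offsets, and lengths, where a split may begin mid-file and the iterator resolves it to whole HFile blocks. As a rough sketch of how byte-range splits can be laid out over a file of known size; Split and buildSplits are illustrative helpers, not part of the removed API.

import java.util.ArrayList;
import java.util.List;

public class SplitBuilder {
  // Illustrative value class; the deleted tests passed parallel arrays instead.
  static final class Split {
    final String path;
    final long start;
    final long length;
    Split(String path, long start, long length) {
      this.path = path; this.start = start; this.length = length;
    }
  }

  // Cover [0, fileSize) with consecutive splits of at most splitSize bytes.
  static List<Split> buildSplits(String path, long fileSize, long splitSize) {
    List<Split> splits = new ArrayList<>();
    for (long start = 0; start < fileSize; start += splitSize) {
      splits.add(new Split(path, start, Math.min(splitSize, fileSize - start)));
    }
    return splits;
  }

  public static void main(String[] args) {
    // Two halves of one hoplog, mirroring the half/half pattern in the tests.
    for (Split s : buildSplits("region/0/1-1-1.hop", 1000, 500)) {
      System.out.println(s.path + " [" + s.start + ", " + (s.start + s.length) + ")");
    }
  }
}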
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtilJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtilJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtilJUnitTest.java
deleted file mode 100644
index a209b6e..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtilJUnitTest.java
+++ /dev/null
@@ -1,305 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.BaseHoplogTestCase;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HdfsSortedOplogOrganizer;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.TrackedReference;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HoplogUtilJUnitTest extends BaseHoplogTestCase {
- Path regionPath = null;
-
- @Override
- protected void configureHdfsStoreFactory() throws Exception {
- super.configureHdfsStoreFactory();
-
- hsf.setInputFileCountMin(3);
- hsf.setMinorCompaction(false);
- hsf.setMajorCompaction(false);
- }
-
- public void testHoplogListingMultiBucket() throws Exception {
- createHoplogs();
-
- Collection<FileStatus> hoplogs = HoplogUtil.getAllRegionHoplogs(
- regionPath, hdfsStore.getFileSystem(),
- AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-
- assertEquals(5, hdfsStore.getFileSystem().listStatus(regionPath).length);
- assertEquals(15, hoplogs.size());
- }
-
- public void testHoplogListingMixFileTypes() throws Exception {
- createHoplogs();
-
- HoplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
- organizer.getCompactor().compact(false, false);
-
- Collection<FileStatus> hoplogs = HoplogUtil.getAllRegionHoplogs(
- regionPath, hdfsStore.getFileSystem(),
- AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-
- assertEquals(7,
- hdfsStore.getFileSystem().listStatus(new Path(regionPath, "0")).length);
- assertEquals(15, hoplogs.size());
- }
-
- public void testHoplogListingEmptyBucket() throws Exception {
- createHoplogs();
-
- hdfsStore.getFileSystem().mkdirs(new Path(regionPath, "100"));
-
- Collection<FileStatus> hoplogs = HoplogUtil.getAllRegionHoplogs(
- regionPath, hdfsStore.getFileSystem(),
- AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-
- assertEquals(6, hdfsStore.getFileSystem().listStatus(regionPath).length);
- assertEquals(15, hoplogs.size());
- }
-
- public void testHoplogListingInvalidBucket() throws Exception {
- createHoplogs();
-
- hdfsStore.getFileSystem().rename(new Path(regionPath, "0"),
- new Path(regionPath, "not_a_bucket"));
-
- Collection<FileStatus> hoplogs = HoplogUtil.getAllRegionHoplogs(
- regionPath, hdfsStore.getFileSystem(),
- AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-
- assertEquals(5, hdfsStore.getFileSystem().listStatus(regionPath).length);
- assertEquals(12, hoplogs.size());
- }
-
- public void testHoplogListingInvalidFiles() throws Exception {
- createHoplogs();
-
- Path bucketPath = new Path(regionPath, "0");
- FSDataOutputStream stream = hdfsStore.getFileSystem().create(
- new Path(bucketPath, "not_a_hoplog"));
- stream.close();
-
- Collection<FileStatus> hoplogs = HoplogUtil.getAllRegionHoplogs(
- regionPath, hdfsStore.getFileSystem(),
- AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-
- assertEquals(4, hdfsStore.getFileSystem().listStatus(bucketPath).length);
- assertEquals(15, hoplogs.size());
- }
-
- public void testTimeRange() throws Exception {
- createHoplogs();
- // rename hoplogs for testing purposes
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(
- regionManager, 0);
- List<TrackedReference<Hoplog>> hoplogs = organizer.getSortedOplogs();
- assertEquals(3, hoplogs.size());
- hoplogs.get(0).get().rename("0-300-1.hop");
- hoplogs.get(1).get().rename("0-310-1.hop");
- hoplogs.get(2).get().rename("0-320-1.hop");
- organizer.close();
-
- organizer = new HdfsSortedOplogOrganizer(regionManager, 3);
- hoplogs = organizer.getSortedOplogs();
- assertEquals(3, hoplogs.size());
- hoplogs.get(0).get().rename("0-600-1.hop");
- hoplogs.get(1).get().rename("0-610-1.hop");
- hoplogs.get(2).get().rename("0-620-1.hop");
- organizer.close();
-
- organizer = new HdfsSortedOplogOrganizer(regionManager, 6);
- hoplogs = organizer.getSortedOplogs();
- assertEquals(3, hoplogs.size());
- hoplogs.get(0).get().rename("0-100-1.hop");
- hoplogs.get(1).get().rename("0-110-1.hop");
- hoplogs.get(2).get().rename("0-120-1.hop");
-
- Collection<FileStatus> filtered = HoplogUtil.getRegionHoplogs(
- regionPath, hdfsStore.getFileSystem(),
- AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION, 300, 305);
- assertEquals(5, filtered.size());
- assertTrue(containsHoplogWithName(filtered, "0-300-1.hop"));
- assertTrue(containsHoplogWithName(filtered, "0-310-1.hop"));
- assertTrue(containsHoplogWithName(filtered, "0-600-1.hop"));
-
- filtered = HoplogUtil.getRegionHoplogs(regionPath,
- hdfsStore.getFileSystem(),
- AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION, 250, 310);
- assertEquals(6, filtered.size());
- assertTrue(containsHoplogWithName(filtered, "0-300-1.hop"));
- assertTrue(containsHoplogWithName(filtered, "0-310-1.hop"));
- assertTrue(containsHoplogWithName(filtered, "0-320-1.hop"));
-
- filtered = HoplogUtil.getRegionHoplogs(regionPath,
- hdfsStore.getFileSystem(),
- AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION, 301, 311);
- assertEquals(5, filtered.size());
- assertTrue(containsHoplogWithName(filtered, "0-310-1.hop"));
- assertTrue(containsHoplogWithName(filtered, "0-320-1.hop"));
-
- filtered = HoplogUtil.getRegionHoplogs(regionPath,
- hdfsStore.getFileSystem(),
- AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION, 301, 309);
- assertEquals(4, filtered.size());
- assertTrue(containsHoplogWithName(filtered, "0-310-1.hop"));
- organizer.close();
- }
-
- public void testExcludeSoonCleanedHoplogs() throws Exception {
- FileSystem fs = hdfsStore.getFileSystem();
- Path cleanUpIntervalPath = new Path(hdfsStore.getHomeDir(), HoplogConfig.CLEAN_UP_INTERVAL_FILE_NAME);
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(
- regionManager, 0);
- // delete the auto-generated clean-up interval file
- if (fs.exists(cleanUpIntervalPath)){
- fs.delete(cleanUpIntervalPath, true);
- }
-
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- int count = 10;
- for (int fileCount = 0; fileCount < 3; fileCount++) {
- items.clear();
- for (int itemCount = 0; itemCount < count; itemCount++) {
- items.add(new TestEvent(("key-" + itemCount), "value"));
- }
- organizer.flush(items.iterator(), count);
- }
- List<TrackedReference<Hoplog>> hoplogs = organizer.getSortedOplogs();
-
- for(TrackedReference<Hoplog> hoplog : hoplogs) {
- Path p = new Path(testDataDir, getName() + "/0/" +
- hoplog.get().getFileName() + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
- fs.createNewFile(p);
- }
- Collection<FileStatus> files = HoplogUtil.getAllRegionHoplogs(
- regionPath, hdfsStore.getFileSystem(),
- AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
- assertEquals(3, files.size());
-
- TimeUnit.MINUTES.sleep(2);
- // No clean-up interval file: all expired files will be included
- files = HoplogUtil.getAllRegionHoplogs(
- regionPath, hdfsStore.getFileSystem(),
- AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
- assertEquals(3, files.size());
-
-
- long interval = 1 * 60 * 1000;
- HoplogUtil.exposeCleanupIntervalMillis(fs,cleanUpIntervalPath,interval);
-
- files = HoplogUtil.getAllRegionHoplogs(
- regionPath, hdfsStore.getFileSystem(),
- AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
- assertEquals(0, files.size());
- organizer.close();
- }
-
-
- public void testCheckpointSelection() throws Exception {
- createHoplogs();
- // rename hoplogs for testing purposes
- HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(
- regionManager, 0);
- List<TrackedReference<Hoplog>> hoplogs = organizer.getSortedOplogs();
- assertEquals(3, hoplogs.size());
- hoplogs.get(0).get().rename("0-300-1.chop"); // checkpoint file
- hoplogs.get(1).get().rename("0-310-1.hop");
- hoplogs.get(2).get().rename("0-320-1.hop");
- organizer.close();
-
- organizer = new HdfsSortedOplogOrganizer(regionManager, 3);
- hoplogs = organizer.getSortedOplogs();
- assertEquals(3, hoplogs.size());
- hoplogs.get(0).get().rename("0-600-1.hop");
- hoplogs.get(1).get().rename("0-610-1.chop");
- hoplogs.get(2).get().rename("0-620-1.hop");
- organizer.close();
-
- organizer = new HdfsSortedOplogOrganizer(regionManager, 6);
- hoplogs = organizer.getSortedOplogs();
- assertEquals(3, hoplogs.size());
- hoplogs.get(0).get().rename("0-100-1.hop");
- hoplogs.get(1).get().rename("0-110-1.hop");
- hoplogs.get(2).get().rename("0-120-1.chop");
-
- Collection<FileStatus> filtered = HoplogUtil.filterHoplogs(
- hdfsStore.getFileSystem(), regionPath, 290, 305, false);
- assertEquals(4, filtered.size());
- assertTrue(containsHoplogWithName(filtered, "0-310-1.hop"));
- assertTrue(containsHoplogWithName(filtered, "0-600-1.hop"));
-
- filtered = HoplogUtil.filterHoplogs(hdfsStore.getFileSystem(),
- regionPath, 290, 305, true);
- assertEquals(3, filtered.size());
- assertTrue(containsHoplogWithName(filtered, "0-300-1.chop"));
- assertTrue(containsHoplogWithName(filtered, "0-610-1.chop"));
- assertTrue(containsHoplogWithName(filtered, "0-120-1.chop"));
- organizer.close();
- }
-
- private boolean containsHoplogWithName(Collection<FileStatus> filtered,
- String name) {
- for (FileStatus file : filtered) {
- if (file.getPath().getName().equals(name)) {
- return true;
- }
- }
- return false;
- }
-
- private void createHoplogs() throws IOException, Exception {
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- int count = 10;
- for (int bucketId = 0; bucketId < 15; bucketId += 3) {
- HoplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager,
- bucketId);
- for (int fileCount = 0; fileCount < 3; fileCount++) {
- items.clear();
- for (int itemCount = 0; itemCount < count; itemCount++) {
- items.add(new TestEvent(("key-" + itemCount), "value"));
- }
- organizer.flush(items.iterator(), count);
- }
- }
- }
-
- @Override
- protected void setUp() throws Exception {
- super.setUp();
- regionPath = new Path(testDataDir, getName());
- }
-
- @Override
- protected void tearDown() throws Exception{
- FileSystem fs = hdfsStore.getFileSystem();
- Path cleanUpIntervalPath = new Path(hdfsStore.getHomeDir(),HoplogConfig.CLEAN_UP_INTERVAL_FILE_NAME);
- if (fs.exists(cleanUpIntervalPath)){
- fs.delete(cleanUpIntervalPath, true);
- }
- super.tearDown();
- }
-}
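
The time-range and checkpoint tests above lean on a hoplog naming convention of the form <bucket>-<timestamp>-<sequence>, with extension .hop for flush files and .chop for checkpoints; this layout is inferred from the renames in the tests, not from the removed implementation. The expected counts also show the time filter was deliberately broad: a hoplog stamped just after the range can still contain events inside it, which is why files like 0-310-1.hop and 0-600-1.hop match the range [300, 305]. A parsing-only sketch of the convention (HoplogName is illustrative):

public class HoplogName {
  final int bucket;
  final long timestamp;
  final int sequence;
  final String extension; // "hop" for flush files, "chop" for checkpoints

  HoplogName(String fileName) {
    int dot = fileName.lastIndexOf('.');
    extension = fileName.substring(dot + 1);
    // tokens before the extension: bucket, timestamp, sequence
    String[] parts = fileName.substring(0, dot).split("-");
    bucket = Integer.parseInt(parts[0]);
    timestamp = Long.parseLong(parts[1]);
    sequence = Integer.parseInt(parts[2]);
  }

  public static void main(String[] args) {
    HoplogName n = new HoplogName("0-310-1.hop");
    System.out.println(n.bucket + " / " + n.timestamp + " / " + n.sequence + " / " + n.extension);
  }
}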
[15/15] incubator-geode git commit: GEODE-429: Remove HdfsStore Junit and Dunits
Posted by as...@apache.org.
GEODE-429: Remove HdfsStore Junit and Dunits
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/74c3156a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/74c3156a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/74c3156a
Branch: refs/heads/feature/GEODE-409
Commit: 74c3156aaa0d29ccc4ec0b4c9a53659d2c9eb003
Parents: 1b4fd2f
Author: Ashvin Agrawal <as...@apache.org>
Authored: Mon Oct 19 14:58:00 2015 -0700
Committer: Ashvin Agrawal <as...@apache.org>
Committed: Wed Oct 21 08:55:23 2015 -0700
----------------------------------------------------------------------
.../ColocatedRegionWithHDFSDUnitTest.java | 189 ----
.../hdfs/internal/HDFSEntriesSetJUnitTest.java | 228 ----
.../internal/HdfsStoreMutatorJUnitTest.java | 191 ----
.../hdfs/internal/RegionWithHDFSTestBase.java | 715 ------------
.../internal/hoplog/BaseHoplogTestCase.java | 389 -------
.../hoplog/CardinalityEstimatorJUnitTest.java | 188 ----
.../hoplog/HDFSCacheLoaderJUnitTest.java | 106 --
.../hoplog/HDFSCompactionManagerJUnitTest.java | 449 --------
.../hoplog/HDFSRegionDirectorJUnitTest.java | 97 --
.../internal/hoplog/HDFSStatsJUnitTest.java | 250 -----
.../HDFSUnsortedHoplogOrganizerJUnitTest.java | 297 -----
.../HdfsSortedOplogOrganizerJUnitTest.java | 1045 ------------------
.../hoplog/HfileSortedOplogJUnitTest.java | 540 ---------
.../hoplog/SortedOplogListIterJUnitTest.java | 178 ---
.../hoplog/TieredCompactionJUnitTest.java | 904 ---------------
.../hoplog/mapreduce/GFKeyJUnitTest.java | 50 -
.../mapreduce/HDFSSplitIteratorJUnitTest.java | 265 -----
.../hoplog/mapreduce/HoplogUtilJUnitTest.java | 305 -----
18 files changed, 6386 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/ColocatedRegionWithHDFSDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/ColocatedRegionWithHDFSDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/ColocatedRegionWithHDFSDUnitTest.java
deleted file mode 100644
index 44206dc..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/ColocatedRegionWithHDFSDUnitTest.java
+++ /dev/null
@@ -1,189 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Map;
-
-import com.gemstone.gemfire.cache.AttributesFactory;
-import com.gemstone.gemfire.cache.DataPolicy;
-import com.gemstone.gemfire.cache.EvictionAction;
-import com.gemstone.gemfire.cache.EvictionAttributes;
-import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
-
-import dunit.AsyncInvocation;
-import dunit.SerializableCallable;
-import dunit.VM;
-
-/**
- * A class for testing the basic HDFS functionality
- *
- * @author Hemant Bhanawat
- */
-@SuppressWarnings({"serial", "rawtypes", "unchecked", "deprecation"})
-public class ColocatedRegionWithHDFSDUnitTest extends RegionWithHDFSTestBase {
-
- public ColocatedRegionWithHDFSDUnitTest(String name) {
- super(name);
- }
-
- @Override
- protected SerializableCallable getCreateRegionCallable(
- final int totalnumOfBuckets, final int batchSizeMB,
- final int maximumEntries, final String folderPath,
- final String uniqueName, final int batchInterval,
- final boolean queuePersistent, final boolean writeonly,
- final long timeForRollover, final long maxFileSize) {
- SerializableCallable createRegion = new SerializableCallable() {
- public Object call() throws Exception {
- HDFSStoreFactory hsf = getCache().createHDFSStoreFactory();
- hsf.setBatchSize(batchSizeMB);
- hsf.setBufferPersistent(queuePersistent);
- hsf.setMaxMemory(3);
- hsf.setBatchInterval(batchInterval);
- hsf.setHomeDir(tmpDir + "/" + folderPath);
- homeDir = new File(tmpDir + "/" + folderPath).getCanonicalPath();
- hsf.setHomeDir(homeDir);
- hsf.create(uniqueName);
-
- AttributesFactory af = new AttributesFactory();
- af.setDataPolicy(DataPolicy.PARTITION);
- PartitionAttributesFactory paf = new PartitionAttributesFactory();
- paf.setTotalNumBuckets(totalnumOfBuckets);
- paf.setRedundantCopies(1);
-
- af.setHDFSStoreName(uniqueName);
- af.setPartitionAttributes(paf.create());
- af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(
- maximumEntries, EvictionAction.LOCAL_DESTROY));
-
- af.setHDFSWriteOnly(writeonly);
- Region r1 = createRootRegion(uniqueName + "-r1", af.create());
-
- paf.setColocatedWith(uniqueName + "-r1");
- af.setPartitionAttributes(paf.create());
- af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(
- maximumEntries, EvictionAction.LOCAL_DESTROY));
- Region r2 = createRootRegion(uniqueName + "-r2", af.create());
-
- ((LocalRegion) r1).setIsTest();
- ((LocalRegion) r2).setIsTest();
-
- return 0;
- }
- };
- return createRegion;
- }
-
- @Override
- protected void doPuts(String uniqueName, int start, int end) {
- Region r1 = getRootRegion(uniqueName + "-r1");
- Region r2 = getRootRegion(uniqueName + "-r2");
-
- for (int i = start; i < end; i++) {
- r1.put("K" + i, "V" + i);
- r2.put("K" + i, "V" + i);
- }
- }
-
- protected AsyncInvocation doAsyncPuts(VM vm, final String regionName,
- final int start, final int end, final String suffix) throws Exception {
- return vm.invokeAsync(new SerializableCallable() {
- public Object call() throws Exception {
- Region r1 = getRootRegion(regionName + "-r1");
- Region r2 = getRootRegion(regionName + "-r2");
-
- getCache().getLogger().info("Putting entries ");
- for (int i = start; i < end; i++) {
- r1.put("K" + i, "V" + i + suffix);
- r2.put("K" + i, "V" + i + suffix);
- }
- return null;
- }
-
- });
- }
-
- protected void doPutAll(final String uniqueName, Map map) {
- Region r1 = getRootRegion(uniqueName + "-r1");
- Region r2 = getRootRegion(uniqueName + "-r2");
- r1.putAll(map);
- r2.putAll(map);
- }
-
- @Override
- protected void doDestroys(String uniqueName, int start, int end) {
- Region r1 = getRootRegion(uniqueName + "-r1");
- Region r2 = getRootRegion(uniqueName + "-r2");
-
- for (int i = start; i < end; i++) {
- r1.destroy("K" + i);
- r2.destroy("K" + i);
- }
- }
-
- @Override
- protected void checkWithGet(String uniqueName, int start, int end,
- boolean expectValue) {
- Region r1 = getRootRegion(uniqueName + "-r1");
- Region r2 = getRootRegion(uniqueName + "-r2");
- for (int i = start; i < end; i++) {
- String expected = expectValue ? "V" + i : null;
- assertEquals("Mismatch on key " + i, expected, r1.get("K" + i));
- assertEquals("Mismatch on key " + i, expected, r2.get("K" + i));
- }
- }
-
- protected void checkWithGetAll(String uniqueName, ArrayList arrayl) {
- Region r1 = getRootRegion(uniqueName + "-r1");
- Region r2 = getRootRegion(uniqueName + "-r2");
- Map map1 = r1.getAll(arrayl);
- Map map2 = r2.getAll(arrayl);
- for (Object e : map1.keySet()) {
- String v = e.toString().replaceFirst("K", "V");
- assertTrue("Reading entries failed for key " + e + " where value = "
- + map1.get(e), v.equals(map1.get(e)));
- assertTrue("Reading entries failed for key " + e + " where value = "
- + map2.get(e), v.equals(map2.get(e)));
- }
- }
-
- @Override
- protected void verifyHDFSData(VM vm, String uniqueName) throws Exception {
- HashMap<String, HashMap<String, String>> filesToEntriesMap = createFilesAndEntriesMap(
- vm, uniqueName, uniqueName + "-r1");
- HashMap<String, String> entriesMap = new HashMap<String, String>();
- for (Map.Entry<String, HashMap<String, String>> e : filesToEntriesMap
- .entrySet()) {
- entriesMap.putAll(e.getValue());
- }
-
- verifyInEntriesMap(entriesMap, 1, 50, "vm0");
- verifyInEntriesMap(entriesMap, 40, 100, "vm1");
- verifyInEntriesMap(entriesMap, 40, 100, "vm2");
- verifyInEntriesMap(entriesMap, 90, 150, "vm3");
-
- filesToEntriesMap = createFilesAndEntriesMap(vm, uniqueName, uniqueName
- + "-r2");
- entriesMap = new HashMap<String, String>();
- for (Map.Entry<String, HashMap<String, String>> e : filesToEntriesMap
- .entrySet()) {
- entriesMap.putAll(e.getValue());
- }
-
- verifyInEntriesMap(entriesMap, 1, 50, "vm0");
- verifyInEntriesMap(entriesMap, 40, 100, "vm1");
- verifyInEntriesMap(entriesMap, 40, 100, "vm2");
- verifyInEntriesMap(entriesMap, 90, 150, "vm3");
- }
-}
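
The colocation wiring in the deleted test reduces to one pattern: both regions use the same partition attributes, and the second names the first through setColocatedWith so that matching buckets are hosted on the same members. A stripped-down sketch using the same (deprecated) AttributesFactory API that the test used; obtaining the Cache is elided, and the HDFS-specific attributes are dropped:

import com.gemstone.gemfire.cache.AttributesFactory;
import com.gemstone.gemfire.cache.Cache;
import com.gemstone.gemfire.cache.DataPolicy;
import com.gemstone.gemfire.cache.PartitionAttributesFactory;
import com.gemstone.gemfire.cache.Region;

@SuppressWarnings({"rawtypes", "unchecked", "deprecation"})
public class ColocationSketch {
  static void createColocatedPair(Cache cache, String name) {
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setTotalNumBuckets(7);
    paf.setRedundantCopies(1);

    AttributesFactory af = new AttributesFactory();
    af.setDataPolicy(DataPolicy.PARTITION);
    af.setPartitionAttributes(paf.create());
    Region r1 = cache.createRegion(name + "-r1", af.create());

    // The second region declares colocation with the first, so bucket N of
    // r2 is always hosted on the same member as bucket N of r1.
    paf.setColocatedWith(name + "-r1");
    af.setPartitionAttributes(paf.create());
    Region r2 = cache.createRegion(name + "-r2", af.create());
  }
}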
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
deleted file mode 100644
index 3085a66..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
+++ /dev/null
@@ -1,228 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.fs.Path;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.Operation;
-import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionFactory;
-import com.gemstone.gemfire.cache.RegionShortcut;
-import com.gemstone.gemfire.cache.asyncqueue.internal.ParallelAsyncEventQueueImpl;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.cache.hdfs.internal.SortedListForAsyncQueueJUnitTest.KeyValue;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.cache.BucketRegion;
-import com.gemstone.gemfire.internal.cache.CachedDeserializable;
-import com.gemstone.gemfire.internal.cache.EntryEventImpl;
-import com.gemstone.gemfire.internal.cache.EnumListenerEvent;
-import com.gemstone.gemfire.internal.cache.EventID;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.LocalRegion.IteratorType;
-import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-import com.gemstone.gemfire.internal.cache.wan.GatewaySenderAttributes;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-@SuppressWarnings("rawtypes")
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSEntriesSetJUnitTest extends TestCase {
- private GemFireCacheImpl cache;
- private HDFSStoreImpl store;
- private PartitionedRegion region;
- private BucketRegion bucket;
- private HDFSParallelGatewaySenderQueue queue;
-
- private HDFSBucketRegionQueue brq;
- private HoplogOrganizer hdfs;
-
- public void setUp() throws Exception {
- System.setProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP, "true");
- cache = (GemFireCacheImpl) new CacheFactory()
- .set("mcast-port", "0")
- .set("log-level", "info")
- .create();
-
- HDFSStoreFactory hsf = this.cache.createHDFSStoreFactory();
- hsf.setHomeDir("hoplogs");
- store = (HDFSStoreImpl) hsf.create("test");
-
- PartitionAttributesFactory paf = new PartitionAttributesFactory();
- paf.setTotalNumBuckets(1);
-
- RegionFactory rf = cache.createRegionFactory(RegionShortcut.PARTITION);
-// rf.setHDFSStoreName("test");
- region = (PartitionedRegion) rf.setPartitionAttributes(paf.create()).create("test");
-
- // prime the region so buckets get created
- region.put("test", "test");
- GatewaySenderAttributes g = new GatewaySenderAttributes();
- g.isHDFSQueue = true;
- g.id = "HDFSEntriesSetJUnitTest_Queue";
- ParallelAsyncEventQueueImpl gatewaySender = new ParallelAsyncEventQueueImpl(cache, g);
- Set<Region> set = new HashSet<Region>();
- set.add(region);
-
- queue = new HDFSParallelGatewaySenderQueue(gatewaySender, set, 0, 1);
- brq = (HDFSBucketRegionQueue)((PartitionedRegion) queue.getRegion()).getDataStore().getLocalBucketById(0);
- bucket = region.getDataStore().getLocalBucketById(0);
-
- HdfsRegionManager mgr = HDFSRegionDirector.getInstance().manageRegion(region, "test", null);
- hdfs = mgr.<SortedHoplogPersistedEvent>create(0);
- AbstractHoplogOrganizer.JUNIT_TEST_RUN = true;
- }
-
- public void tearDown() throws Exception {
- store.getFileSystem().delete(new Path("hoplogs"), true);
- hdfs.close();
-
- cache.close();
- }
-
- public void testEmptyIterator() throws Exception {
- checkIteration(Collections.<String>emptyList(), new KeyValue[] { }, new KeyValue[] { });
- }
-
- public void testQueueOnlyIterator() throws Exception {
- KeyValue[] qvals = new KeyValue[] {
- new KeyValue("K0", "0"),
- new KeyValue("K1", "1"),
- new KeyValue("K2", "2"),
- new KeyValue("K3", "3"),
- new KeyValue("K4", "4")
- };
- checkIteration(getExpected(), qvals, new KeyValue[] { });
- }
-
- public void testHdfsOnlyIterator() throws Exception {
- KeyValue[] hvals = new KeyValue[] {
- new KeyValue("K0", "0"),
- new KeyValue("K1", "1"),
- new KeyValue("K2", "2"),
- new KeyValue("K3", "3"),
- new KeyValue("K4", "4")
- };
- checkIteration(getExpected(), new KeyValue[] { }, hvals);
- }
-
- public void testUnevenIterator() throws Exception {
- KeyValue[] qvals = new KeyValue[] {
- new KeyValue("K0", "0"),
- new KeyValue("K2", "2"),
- };
-
- KeyValue[] hvals = new KeyValue[] {
- new KeyValue("K1", "1"),
- new KeyValue("K3", "3"),
- new KeyValue("K4", "4")
- };
-
- checkIteration(getExpected(), qvals, hvals);
- }
-
- public void testEitherOrIterator() throws Exception {
- KeyValue[] qvals = new KeyValue[] {
- new KeyValue("K0", "0"),
- new KeyValue("K2", "2"),
- new KeyValue("K4", "4")
- };
-
- KeyValue[] hvals = new KeyValue[] {
- new KeyValue("K1", "1"),
- new KeyValue("K3", "3")
- };
-
- checkIteration(getExpected(), qvals, hvals);
- }
-
- public void testDuplicateIterator() throws Exception {
- KeyValue[] qvals = new KeyValue[] {
- new KeyValue("K0", "0"),
- new KeyValue("K1", "1"),
- new KeyValue("K2", "2"),
- new KeyValue("K3", "3"),
- new KeyValue("K4", "4"),
- new KeyValue("K4", "4")
- };
-
- KeyValue[] hvals = new KeyValue[] {
- new KeyValue("K0", "0"),
- new KeyValue("K1", "1"),
- new KeyValue("K2", "2"),
- new KeyValue("K3", "3"),
- new KeyValue("K4", "4"),
- new KeyValue("K4", "4")
- };
-
- checkIteration(getExpected(), qvals, hvals);
- }
-
- private List<String> getExpected() {
- List<String> expected = new ArrayList<String>();
- expected.add("0");
- expected.add("1");
- expected.add("2");
- expected.add("3");
- expected.add("4");
- return expected;
- }
-
- private void checkIteration(List<String> expected, KeyValue[] qvals, KeyValue[] hvals)
- throws Exception {
- int seq = 0;
- List<PersistedEventImpl> evts = new ArrayList<PersistedEventImpl>();
- for (KeyValue kv : hvals) {
- evts.add(new SortedHDFSQueuePersistedEvent(getNewEvent(kv.key, kv.value, seq++)));
- }
- hdfs.flush(evts.iterator(), evts.size());
-
- for (KeyValue kv : qvals) {
- queue.put(getNewEvent(kv.key, kv.value, seq++));
- }
-
- List<String> actual = new ArrayList<String>();
- Iterator vals = new HDFSEntriesSet(bucket, brq, hdfs, IteratorType.VALUES, null).iterator();
- while (vals.hasNext()) {
- Object val = vals.next();
- if(val instanceof CachedDeserializable) {
- val = ((CachedDeserializable) val).getDeserializedForReading();
- }
- actual.add((String) val);
- }
-
- assertEquals(expected, actual);
- }
-
- private HDFSGatewayEventImpl getNewEvent(Object key, Object value, long seq) throws Exception {
- EntryEventImpl evt = EntryEventImpl.create(region, Operation.CREATE,
- key, value, null, false, (DistributedMember) cache.getMyId());
-
- evt.setEventId(new EventID(cache.getDistributedSystem()));
- HDFSGatewayEventImpl event = new HDFSGatewayEventImpl(EnumListenerEvent.AFTER_CREATE, evt, null, true, 0);
- event.setShadowKey(seq);
-
- return event;
- }
-}
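
The iteration checks above imply one view contract: HDFSEntriesSet merges the in-memory queue with the flushed hoplog data into a single ordered pass, and a key contributed by both sources (or contributed twice, as in testDuplicateIterator) still surfaces once. A compact sketch of that merge semantics; collapsing into a TreeMap with queue entries winning on collisions is an illustrative assumption, since the removed class implemented a streaming iterator rather than materializing a map:

import java.util.Iterator;
import java.util.Map;
import java.util.TreeMap;

public class DedupMergeSketch {
  // Merge two key->value sources into one de-duplicated, key-ordered view.
  // Queue entries override HDFS entries, on the assumption that the
  // in-memory queue holds the most recent update for a key.
  static Iterator<String> values(Map<String, String> queue, Map<String, String> hdfs) {
    TreeMap<String, String> merged = new TreeMap<>(hdfs);
    merged.putAll(queue);
    return merged.values().iterator();
  }

  public static void main(String[] args) {
    Map<String, String> q = Map.of("K0", "0", "K2", "2", "K4", "4");
    Map<String, String> h = Map.of("K1", "1", "K3", "3", "K4", "4");
    values(q, h).forEachRemaining(System.out::println); // 0, 1, 2, 3, 4
  }
}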
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HdfsStoreMutatorJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HdfsStoreMutatorJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HdfsStoreMutatorJUnitTest.java
deleted file mode 100644
index b8cbb0d..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HdfsStoreMutatorJUnitTest.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.BaseHoplogTestCase;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HdfsStoreMutatorJUnitTest extends BaseHoplogTestCase {
- public void testMutatorInitialState() {
- HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
- assertEquals(-1, mutator.getWriteOnlyFileRolloverInterval());
- assertEquals(-1, mutator.getWriteOnlyFileRolloverSize());
-
- assertEquals(-1, mutator.getInputFileCountMax());
- assertEquals(-1, mutator.getInputFileSizeMax());
- assertEquals(-1, mutator.getInputFileCountMin());
- assertEquals(-1, mutator.getMinorCompactionThreads());
- assertNull(mutator.getMinorCompaction());
-
- assertEquals(-1, mutator.getMajorCompactionInterval());
- assertEquals(-1, mutator.getMajorCompactionThreads());
- assertNull(mutator.getMajorCompaction());
-
- assertEquals(-1, mutator.getPurgeInterval());
-
- assertEquals(-1, mutator.getBatchSize());
- assertEquals(-1, mutator.getBatchInterval());
- }
-
- public void testMutatorSetInvalidValue() {
- HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
-
- try {
- mutator.setWriteOnlyFileRolloverInterval(-3);
- fail();
- } catch (IllegalArgumentException e) {
- // expected
- }
- try {
- mutator.setWriteOnlyFileRolloverSize(-5);
- fail();
- } catch (IllegalArgumentException e) {
- // expected
- }
-
- try {
- mutator.setInputFileCountMin(-1);
- fail();
- } catch (IllegalArgumentException e) {
- // expected
- }
- try {
- mutator.setInputFileCountMax(-1);
- fail();
- } catch (IllegalArgumentException e) {
- // expected
- }
- try {
- mutator.setInputFileSizeMax(-1);
- fail();
- } catch (IllegalArgumentException e) {
- // expected
- }
- try {
- mutator.setMinorCompactionThreads(-9);
- fail();
- } catch (IllegalArgumentException e) {
- // expected
- }
- try {
- mutator.setMajorCompactionInterval(-6);
- fail();
- } catch (IllegalArgumentException e) {
- // expected
- }
- try {
- mutator.setMajorCompactionThreads(-1);
- fail();
- } catch (IllegalArgumentException e) {
- // expected
- }
- try {
- mutator.setPurgeInterval(-4);
- fail();
- } catch (IllegalArgumentException e) {
- // expected
- }
-/* try {
- qMutator.setBatchSizeMB(-985);
- fail();
- } catch (IllegalArgumentException e) {
- // expected
- }
- try {
- qMutator.setBatchTimeInterval(-695);
- fail();
- } catch (IllegalArgumentException e) {
- // expected
- }
-*/
- try {
- mutator.setInputFileCountMin(10);
- mutator.setInputFileCountMax(5);
- hdfsStore.alter(mutator);
- fail();
- } catch (IllegalArgumentException e) {
- // expected
- }
- }
-
- public void testMutatorReturnsUpdatedValues() {
- HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
-
- mutator.setWriteOnlyFileRolloverInterval(121);
- mutator.setWriteOnlyFileRolloverSize(234);
-
- mutator.setInputFileCountMax(87);
- mutator.setInputFileSizeMax(45);
- mutator.setInputFileCountMin(34);
- mutator.setMinorCompactionThreads(843);
- mutator.setMinorCompaction(false);
-
- mutator.setMajorCompactionInterval(26);
- mutator.setMajorCompactionThreads(92);
- mutator.setMajorCompaction(false);
-
- mutator.setPurgeInterval(328);
-
- mutator.setBatchSize(985);
- mutator.setBatchInterval(695);
-
- assertEquals(121, mutator.getWriteOnlyFileRolloverInterval());
- assertEquals(234, mutator.getWriteOnlyFileRolloverSize());
-
- assertEquals(87, mutator.getInputFileCountMax());
- assertEquals(45, mutator.getInputFileSizeMax());
- assertEquals(34, mutator.getInputFileCountMin());
- assertEquals(843, mutator.getMinorCompactionThreads());
- assertFalse(mutator.getMinorCompaction());
-
- assertEquals(26, mutator.getMajorCompactionInterval());
- assertEquals(92, mutator.getMajorCompactionThreads());
- assertFalse(mutator.getMajorCompaction());
-
- assertEquals(328, mutator.getPurgeInterval());
-
- assertEquals(985, mutator.getBatchSize());
- assertEquals(695, mutator.getBatchInterval());
-
- // repeat the cycle once more
- mutator.setWriteOnlyFileRolloverInterval(14);
- mutator.setWriteOnlyFileRolloverSize(56);
-
- mutator.setInputFileCountMax(93);
- mutator.setInputFileSizeMax(85);
- mutator.setInputFileCountMin(64);
- mutator.setMinorCompactionThreads(59);
- mutator.setMinorCompaction(true);
-
- mutator.setMajorCompactionInterval(26);
- mutator.setMajorCompactionThreads(92);
- mutator.setMajorCompaction(false);
-
- mutator.setPurgeInterval(328);
-
- assertEquals(14, mutator.getWriteOnlyFileRolloverInterval());
- assertEquals(56, mutator.getWriteOnlyFileRolloverSize());
-
- assertEquals(93, mutator.getInputFileCountMax());
- assertEquals(85, mutator.getInputFileSizeMax());
- assertEquals(64, mutator.getInputFileCountMin());
- assertEquals(59, mutator.getMinorCompactionThreads());
- assertTrue(mutator.getMinorCompaction());
-
- assertEquals(26, mutator.getMajorCompactionInterval());
- assertEquals(92, mutator.getMajorCompactionThreads());
- assertFalse(mutator.getMajorCompaction());
-
- assertEquals(328, mutator.getPurgeInterval());
- }
-}
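
The mutator test above pins down a reusable pattern: a freshly created mutator reports the sentinel -1 (or null for the Boolean compaction flags) for every field, meaning leave unchanged; setters reject invalid values eagerly; and cross-field constraints such as input-file-count min exceeding max only fail when the mutator is applied via alter(). A minimal sketch of that pattern; SketchMutator is illustrative and not the removed HDFSStoreMutator API:

public class SketchMutator {
  private int inputFileCountMin = -1; // -1 == "leave unchanged"
  private int inputFileCountMax = -1;

  public void setInputFileCountMin(int v) {
    if (v < 0) throw new IllegalArgumentException("inputFileCountMin: " + v);
    inputFileCountMin = v;
  }

  public void setInputFileCountMax(int v) {
    if (v < 0) throw new IllegalArgumentException("inputFileCountMax: " + v);
    inputFileCountMax = v;
  }

  public int getInputFileCountMin() { return inputFileCountMin; }
  public int getInputFileCountMax() { return inputFileCountMax; }

  // What a store's alter() would call; cross-field rules are only checkable
  // here, which is why the min > max case in the test fails at alter() time.
  public void validate() {
    if (inputFileCountMin >= 0 && inputFileCountMax >= 0
        && inputFileCountMin > inputFileCountMax) {
      throw new IllegalArgumentException("input file count min exceeds max");
    }
  }

  public static void main(String[] args) {
    SketchMutator m = new SketchMutator();
    System.out.println(m.getInputFileCountMin()); // -1: nothing to change yet
    m.setInputFileCountMin(10);
    m.setInputFileCountMax(5);
    m.validate(); // throws: min exceeds max, as in testMutatorSetInvalidValue
  }
}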
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSTestBase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSTestBase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSTestBase.java
deleted file mode 100644
index 3330574..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSTestBase.java
+++ /dev/null
@@ -1,715 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueStats;
-import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetReader.HoplogIterator;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.SequenceFileHoplog;
-import com.gemstone.gemfire.cache30.CacheTestCase;
-import com.gemstone.gemfire.internal.FileUtil;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
-import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerHelper;
-
-import dunit.AsyncInvocation;
-import dunit.Host;
-import dunit.SerializableCallable;
-import dunit.SerializableRunnable;
-import dunit.VM;
-
-@SuppressWarnings({"serial", "rawtypes", "unchecked"})
-public abstract class RegionWithHDFSTestBase extends CacheTestCase {
-
- protected String tmpDir;
-
- public static String homeDir = null;
-
- protected abstract void checkWithGetAll(String uniqueName, ArrayList arrayl);
-
- protected abstract void checkWithGet(String uniqueName, int start,
- int end, boolean expectValue);
-
- protected abstract void doDestroys(final String uniqueName, int start, int end);
-
- protected abstract void doPutAll(final String uniqueName, Map map);
-
- protected abstract void doPuts(final String uniqueName, int start, int end);
-
- protected abstract SerializableCallable getCreateRegionCallable(final int totalnumOfBuckets, final int batchSizeMB,
- final int maximumEntries, final String folderPath, final String uniqueName, final int batchInterval, final boolean queuePersistent,
- final boolean writeonly, final long timeForRollover, final long maxFileSize);
-
- protected abstract void verifyHDFSData(VM vm, String uniqueName) throws Exception ;
-
- protected abstract AsyncInvocation doAsyncPuts(VM vm, final String regionName,
- final int start, final int end, final String suffix) throws Exception;
-
- public RegionWithHDFSTestBase(String name) {
- super(name);
- }
-
- @Override
- public void tearDown2() throws Exception {
- super.tearDown2();
- for (int h = 0; h < Host.getHostCount(); h++) {
- Host host = Host.getHost(h);
- SerializableCallable cleanUp = cleanUpStoresAndDisconnect();
- for (int v = 0; v < host.getVMCount(); v++) {
- VM vm = host.getVM(v);
- // This store will be deleted by the first VM itself. Invocations from
- // subsequent VMs will be no-op.
- vm.invoke(cleanUp);
- }
- }
- }
-
- public SerializableCallable cleanUpStoresAndDisconnect() throws Exception {
- SerializableCallable cleanUp = new SerializableCallable("cleanUpStoresAndDisconnect") {
- public Object call() throws Exception {
- disconnectFromDS();
- File file;
- if (homeDir != null) {
- file = new File(homeDir);
- FileUtil.delete(file);
- homeDir = null;
- }
- file = new File(tmpDir);
- FileUtil.delete(file);
- return 0;
- }
- };
- return cleanUp;
- }
-
- @Override
- public void setUp() throws Exception {
- super.setUp();
- tmpDir = /*System.getProperty("java.io.tmpdir") + "/" +*/ "RegionWithHDFSBasicDUnitTest_" + System.nanoTime();
- }
-
- int createServerRegion(VM vm, final int totalnumOfBuckets,
- final int batchSize, final int maximumEntries, final String folderPath,
- final String uniqueName, final int batchInterval) {
- return createServerRegion(vm, totalnumOfBuckets,
- batchSize, maximumEntries, folderPath,
- uniqueName, batchInterval, false, false);
- }
-
- protected int createServerRegion(VM vm, final int totalnumOfBuckets,
- final int batchSizeMB, final int maximumEntries, final String folderPath,
- final String uniqueName, final int batchInterval, final boolean writeonly,
- final boolean queuePersistent) {
- return createServerRegion(vm, totalnumOfBuckets,
- batchSizeMB, maximumEntries, folderPath,
- uniqueName, batchInterval, writeonly, queuePersistent, -1, -1);
- }
- protected int createServerRegion(VM vm, final int totalnumOfBuckets,
- final int batchSizeMB, final int maximumEntries, final String folderPath,
- final String uniqueName, final int batchInterval, final boolean writeonly,
- final boolean queuePersistent, final long timeForRollover, final long maxFileSize) {
- SerializableCallable createRegion = getCreateRegionCallable(
- totalnumOfBuckets, batchSizeMB, maximumEntries, folderPath, uniqueName,
- batchInterval, queuePersistent, writeonly, timeForRollover, maxFileSize);
-
- return (Integer) vm.invoke(createRegion);
- }
- protected AsyncInvocation createServerRegionAsync(VM vm, final int totalnumOfBuckets,
- final int batchSizeMB, final int maximumEntries, final String folderPath,
- final String uniqueName, final int batchInterval, final boolean writeonly,
- final boolean queuePersistent) {
- SerializableCallable createRegion = getCreateRegionCallable(
- totalnumOfBuckets, batchSizeMB, maximumEntries, folderPath, uniqueName,
- batchInterval, queuePersistent, writeonly, -1, -1);
-
- return vm.invokeAsync(createRegion);
- }
- protected AsyncInvocation createServerRegionAsync(VM vm, final int totalnumOfBuckets,
- final int batchSizeMB, final int maximumEntries, final String folderPath,
- final String uniqueName, final int batchInterval, final boolean writeonly,
- final boolean queuePersistent, final long timeForRollover, final long maxFileSize) {
- SerializableCallable createRegion = getCreateRegionCallable(
- totalnumOfBuckets, batchSizeMB, maximumEntries, folderPath, uniqueName,
- batchInterval, queuePersistent, writeonly, timeForRollover, maxFileSize);
-
- return vm.invokeAsync(createRegion);
- }
-
- /**
- * Does puts, gets, destroys and getAll. Since there are many updates,
- * most of the time the data is not found in memory or the queue and
- * must be fetched from HDFS.
- * @throws Throwable
- */
- public void testGetFromHDFS() throws Throwable {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- final String uniqueName = getName();
- final String homeDir = "../../testGetFromHDFS";
-
- createServerRegion(vm0, 7, 1, 50, homeDir, uniqueName, 50, false, true);
- createServerRegion(vm1, 7, 1, 50, homeDir, uniqueName, 50, false, true);
-
- // Do some puts
- vm0.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- doPuts(uniqueName, 0, 40);
- return null;
- }
- });
-
- // Do some puts and destroys
- // some order manipulation has been done because of an issue:
- // "a higher version update on a key can be batched and
- // sent to HDFS before a lower version update on the same key
- // is batched and sent to HDFS. This will place the latest
- // update on a key in an older file. Hence, a fetch from HDFS
- // will return an older update from a newer file."
-
- vm1.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- doPuts(uniqueName, 40, 50);
- doDestroys(uniqueName, 40, 50);
- doPuts(uniqueName, 50, 100);
- doPuts(uniqueName, 30, 40);
- return null;
- }
- });
-
- // do some more puts and destroys
- // some order manipulation has been done because of an issue:
- // "a higher version update on a key can be batched and
- // sent to HDFS before a lower version update on the same key
- // is batched and sent to HDFS. This will place the latest
- // update on a key in an older file. Hence, a fetch from HDFS
- // will return an older update from a newer file."
- vm1.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- doPuts(uniqueName, 80, 90);
- doDestroys(uniqueName, 80, 90);
- doPuts(uniqueName, 110, 200);
- doPuts(uniqueName, 90, 110);
- return null;
- }
-
- });
-
- // get and getall the values and compare them.
- SerializableCallable checkData = new SerializableCallable() {
- public Object call() throws Exception {
- checkWithGet(uniqueName, 0, 40, true);
- checkWithGet(uniqueName, 40, 50, false);
- checkWithGet(uniqueName, 50, 80, true);
- checkWithGet(uniqueName, 80, 90, false);
- checkWithGet(uniqueName, 90, 200, true);
- checkWithGet(uniqueName, 200, 201, false);
-
- ArrayList arrayl = new ArrayList();
- for (int i =0; i< 200; i++) {
- String k = "K" + i;
- if ( !((40 <= i && i < 50) || (80 <= i && i < 90)))
- arrayl.add(k);
- }
- checkWithGetAll(uniqueName, arrayl);
-
- return null;
- }
- };
- vm1.invoke(checkData);
-
- //Restart the members and verify that we can still get the data
- closeCache(vm0);
- closeCache(vm1);
- AsyncInvocation async0 = createServerRegionAsync(vm0, 7, 1, 50, homeDir, uniqueName, 50, false, true);
- AsyncInvocation async1 = createServerRegionAsync(vm1, 7, 1, 50, homeDir, uniqueName, 50, false, true);
-
- async0.getResult();
- async1.getResult();
-
-
- // get and getall the values and compare them.
- vm1.invoke(checkData);
-
- //TODO:HDFS we are just reading the files here. Need to verify
- // once the folder structure is finalized.
- dumpFiles(vm1, uniqueName);
-
- }
-
- /**
- * Puts a few entries (keys with multiple updates) and gets them
- * immediately. There is a high probability that the values come from the
- * async queue.
- */
- public void testGetForAsyncQueue() {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
-
- final String uniqueName = getName();
- final String homeDir = "../../testGetForAsyncQueue";
-
- createServerRegion(vm0, 2, 5, 1, homeDir, uniqueName, 10000);
- createServerRegion(vm1, 2, 5, 1, homeDir, uniqueName, 10000);
-
- vm0.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- doPuts(uniqueName, 0, 4);
- return null;
- }
- });
- vm1.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- doPuts(uniqueName, 0, 2);
- doDestroys(uniqueName, 2, 3);
- doPuts(uniqueName, 3, 7);
-
- checkWithGet(uniqueName, 0, 2, true);
- checkWithGet(uniqueName, 2, 3, false);
- checkWithGet(uniqueName, 3, 7, true);
- return null;
- }
- });
- }
-
- /**
- * Puts a few entries (keys with multiple updates) and calls getAll
- * immediately. There is a high probability that the values come from the
- * async queue.
- */
- public void testGetAllForAsyncQueue() {
-
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
-
- final String uniqueName = getName();
- createServerRegion(vm0, 2, 5, 2, uniqueName, uniqueName, 10000);
- createServerRegion(vm1, 2, 5, 2, uniqueName, uniqueName, 10000);
-
- vm0.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- doPuts(uniqueName, 0, 4);
- return null;
- }
- });
- vm1.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- doPuts(uniqueName, 1, 5);
-
- ArrayList arrayl = new ArrayList();
- for (int i =0; i< 5; i++) {
- String k = "K" + i;
- arrayl.add(k);
- }
- checkWithGetAll(uniqueName, arrayl);
- return null;
- }
- });
- }
-
- /**
- * Does putAll on a few entries (keys with multiple updates) and verifies
- * them with get immediately. There is a high probability that the values
- * come from the async queue.
- */
- public void testPutAllForAsyncQueue() {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
-
- final String uniqueName = getName();
- final String homeDir = "../../testPutAllForAsyncQueue";
- createServerRegion(vm0, 2, 5, 2, homeDir, uniqueName, 10000);
- createServerRegion(vm1, 2, 5, 2, homeDir, uniqueName, 10000);
-
- vm0.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- HashMap putAllmap = new HashMap();
- for (int i =0; i< 4; i++)
- putAllmap.put("K" + i, "V"+ i );
- doPutAll(uniqueName, putAllmap);
- return null;
- }
- });
- vm1.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- HashMap putAllmap = new HashMap();
- for (int i =1; i< 5; i++)
- putAllmap.put("K" + i, "V"+ i );
- doPutAll(uniqueName, putAllmap);
- checkWithGet(uniqueName, 0, 5, true);
- return null;
- }
- });
- }
-
- /**
- * Does putAll and get. Since there are many updates
- * most of the time the data is not found in memory and queue and
- * is fetched from HDFS
- */
- public void _testPutAllAndGetFromHDFS() {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
-
- final String uniqueName = getName();
- final String homeDir = "../../testPutAllAndGetFromHDFS";
- createServerRegion(vm0, 7, 1, 500, homeDir, uniqueName, 500);
- createServerRegion(vm1, 7, 1, 500, homeDir, uniqueName, 500);
-
- // Do some puts
- vm0.invoke(new SerializableCallable() {
- public Object call() throws Exception {
-
- HashMap putAllmap = new HashMap();
-
- for (int i =0; i< 500; i++)
- putAllmap.put("K" + i, "V"+ i );
- doPutAll(uniqueName, putAllmap);
- return null;
- }
- });
-
- // Do putAll and some destroys
- vm1.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- HashMap putAllmap = new HashMap();
- for (int i = 500; i< 1000; i++)
- putAllmap.put("K" + i, "V"+ i );
- doPutAll(uniqueName, putAllmap);
- return null;
- }
- });
-
- // do some more puts
- // The put order below is deliberately manipulated because of a known
- // issue: "a higher version update on a key can be batched and sent to
- // HDFS before a lower version update on the same key is batched and
- // sent to HDFS. This will cause the latest update on a key to land in
- // an older file. Hence, a fetch from HDFS will return an older update
- // from a newer file." (See the sketch after this method.)
- vm1.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- HashMap putAllmap = new HashMap();
- for (int i =1100; i< 2000; i++)
- putAllmap.put("K" + i, "V"+ i );
- doPutAll(uniqueName, putAllmap);
- putAllmap = new HashMap();
- for (int i = 900; i< 1100; i++)
- putAllmap.put("K" + i, "V"+ i );
- doPutAll(uniqueName, putAllmap);
- return null;
- }
-
- });
-
- // get and getall the values and compare them.
- vm1.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- checkWithGet(uniqueName, 0, 2000, true);
- checkWithGet(uniqueName, 2000, 2001, false);
-
- ArrayList arrayl = new ArrayList();
- for (int i =0; i< 2000; i++) {
- String k = "K" + i;
- arrayl.add(k);
- }
- checkWithGetAll(uniqueName, arrayl);
- return null;
- }
- });
-
- }
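-
The flush-ordering hazard quoted in the comment above is easier to see in
miniature. The following toy model is illustrative only (class and variable
names are invented, and a higher file id stands for "newer file"); it shows
how a newer file can end up holding an older version of a key:

    import java.util.HashMap;
    import java.util.Map;

    public class FlushOrderSketch {
      public static void main(String[] args) {
        // file id -> (key -> versioned value); higher file id == newer file
        Map<Integer, Map<String, String>> files =
            new HashMap<Integer, Map<String, String>>();
        files.put(1, new HashMap<String, String>());
        files.put(2, new HashMap<String, String>());

        // The batch carrying version 2 of K900 happens to flush first...
        files.get(1).put("K900", "V900@version2");
        // ...then the batch carrying version 1 flushes into the newer file.
        files.get(2).put("K900", "V900@version1");

        // A reader that trusts "newest file wins" returns the older update.
        System.out.println("fetched " + files.get(2).get("K900") + " (stale)");
      }
    }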
-
- public void _testWObasicClose() throws Throwable{
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- VM vm2 = host.getVM(2);
- VM vm3 = host.getVM(3);
-
- String homeDir = "../../testWObasicClose";
- final String uniqueName = getName();
-
- createServerRegion(vm0, 11, 1, 500, homeDir, uniqueName, 500, true, false);
- createServerRegion(vm1, 11, 1, 500, homeDir, uniqueName, 500, true, false);
- createServerRegion(vm2, 11, 1, 500, homeDir, uniqueName, 500, true, false);
- createServerRegion(vm3, 11, 1, 500, homeDir, uniqueName, 500, true, false);
-
- AsyncInvocation a1 = doAsyncPuts(vm0, uniqueName, 1, 50, "vm0");
- AsyncInvocation a2 = doAsyncPuts(vm1, uniqueName, 40, 100, "vm1");
- AsyncInvocation a3 = doAsyncPuts(vm2, uniqueName, 40, 100, "vm2");
- AsyncInvocation a4 = doAsyncPuts(vm3, uniqueName, 90, 150, "vm3");
-
- a1.join();
- a2.join();
- a3.join();
- a4.join();
-
- Thread.sleep(5000);
- cacheClose (vm0, false);
- cacheClose (vm1, false);
- cacheClose (vm2, false);
- cacheClose (vm3, false);
-
- AsyncInvocation async1 = createServerRegionAsync(vm0, 11, 1, 500, homeDir, uniqueName, 500, true, false);
- AsyncInvocation async2 = createServerRegionAsync(vm1, 11, 1, 500, homeDir, uniqueName, 500, true, false);
- AsyncInvocation async3 = createServerRegionAsync(vm2, 11, 1, 500, homeDir, uniqueName, 500, true, false);
- AsyncInvocation async4 = createServerRegionAsync(vm3, 11, 1, 500, homeDir, uniqueName, 500, true, false);
- async1.getResult();
- async2.getResult();
- async3.getResult();
- async4.getResult();
-
- verifyHDFSData(vm0, uniqueName);
-
- cacheClose (vm0, false);
- cacheClose (vm1, false);
- cacheClose (vm2, false);
- cacheClose (vm3, false);
- }
-
-
- protected void cacheClose(VM vm, final boolean sleep){
- vm.invoke( new SerializableCallable() {
- public Object call() throws Exception {
- if (sleep)
- Thread.sleep(2000);
- getCache().getLogger().info("Cache close in progress ");
- getCache().close();
- getCache().getLogger().info("Cache closed");
- return null;
- }
- });
-
- }
-
- protected void verifyInEntriesMap(HashMap<String, String> entriesMap, int start, int end, String suffix) {
- for (int i = start; i < end; i++) {
- String k = "K" + i;
- String v = "V" + i + suffix;
- Object s = entriesMap.get(v);
- assertTrue("Expected key " + k + " for value " + v + " but found " + s, k.equals(s));
- }
- }
-
- /**
- * Reads all the sequence files and returns the key/value pairs persisted.
- * Each pair is returned as a <value, key> tuple because a single key can
- * have multiple persisted values (a standalone illustration follows this
- * method).
- * @throws Exception
- */
- protected HashMap<String, HashMap<String, String>> createFilesAndEntriesMap(VM vm0, final String uniqueName, final String regionName) throws Exception {
- HashMap<String, HashMap<String, String>> entriesToFileMap = (HashMap<String, HashMap<String, String>>)
- vm0.invoke( new SerializableCallable() {
- public Object call() throws Exception {
- HashMap<String, HashMap<String, String>> entriesToFileMap = new HashMap<String, HashMap<String, String>>();
- HDFSStoreImpl hdfsStore = (HDFSStoreImpl) ((GemFireCacheImpl)getCache()).findHDFSStore(uniqueName);
- FileSystem fs = hdfsStore.getFileSystem();
- System.err.println("dumping file names in HDFS directory: " + hdfsStore.getHomeDir());
- try {
- Path basePath = new Path(hdfsStore.getHomeDir());
- Path regionPath = new Path(basePath, regionName);
- RemoteIterator<LocatedFileStatus> files = fs.listFiles(regionPath, true);
-
- while(files.hasNext()) {
- HashMap<String, String> entriesMap = new HashMap<String, String>();
- LocatedFileStatus next = files.next();
- /* MergeGemXDHDFSToGFE - Disabled as I am not pulling in DunitEnv */
- // System.err.println(DUnitEnv.get().getPid() + " - " + next.getPath());
- System.err.println(" - " + next.getPath());
- readSequenceFile(fs, next.getPath(), entriesMap);
- entriesToFileMap.put(next.getPath().getName(), entriesMap);
- }
- } catch (FileNotFoundException e) {
- // print and continue; a missing region path will surface as assertion failures
- e.printStackTrace();
- } catch (IOException e) {
- // print and continue; read errors will surface as assertion failures
- e.printStackTrace();
- }
-
- return entriesToFileMap;
- }
- @SuppressWarnings("deprecation")
- public void readSequenceFile(FileSystem inputFS, Path sequenceFileName,
- HashMap<String, String> entriesMap) throws IOException {
- SequenceFileHoplog hoplog = new SequenceFileHoplog(inputFS, sequenceFileName, null);
- HoplogIterator<byte[], byte[]> iter = hoplog.getReader().scan();
- try {
- while (iter.hasNext()) {
- iter.next();
- PersistedEventImpl te = UnsortedHoplogPersistedEvent.fromBytes(iter.getValue());
- String stringkey = ((String)CacheServerHelper.deserialize(iter.getKey()));
- String value = (String) te.getDeserializedValue();
- entriesMap.put(value, stringkey);
- if (getCache().getLoggerI18n().fineEnabled())
- getCache().getLoggerI18n().fine("Key: " + stringkey + " value: " + value + " path " + sequenceFileName.getName());
- }
- } catch (Exception e) {
- fail(e.toString());
- }
- iter.close();
- hoplog.close();
- }
- });
- return entriesToFileMap;
- }
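-
The <value, key> inversion called out in the javadoc above deserves a small
standalone illustration: keying the map by value lets two persisted updates of
the same region key coexist, where a key-to-value map would silently collapse
them. The values below are hypothetical:

    import java.util.HashMap;

    public class InvertedMapSketch {
      public static void main(String[] args) {
        HashMap<String, String> entries = new HashMap<String, String>(); // value -> key
        entries.put("V1", "K1");         // first persisted update of K1
        entries.put("V1-updated", "K1"); // second update: both survive
        // Keyed by K1 instead, the second put would have replaced the first.
        System.out.println(entries); // {V1=K1, V1-updated=K1}
      }
    }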
- protected SerializableCallable validateEmpty(VM vm0, final int numEntries, final String uniqueName) {
- SerializableCallable validateEmpty = new SerializableCallable("validateEmpty") {
- public Object call() throws Exception {
- Region r = getRootRegion(uniqueName);
-
- assertTrue(r.isEmpty());
-
- //validate region is empty on peer as well
- assertFalse(r.entrySet().iterator().hasNext());
- //Make sure the region is empty
- for (int i =0; i< numEntries; i++) {
- assertEquals("failure on key K" + i , null, r.get("K" + i));
- }
-
- return null;
- }
- };
-
- vm0.invoke(validateEmpty);
- return validateEmpty;
- }
-
- protected void closeCache(VM vm0) {
- //Restart and validate still empty.
- SerializableRunnable closeCache = new SerializableRunnable("close cache") {
- @Override
- public void run() {
- getCache().close();
- disconnectFromDS();
- }
- };
-
- vm0.invoke(closeCache);
- }
-
- protected void verifyDataInHDFS(VM vm0, final String uniqueName, final boolean shouldHaveData,
- final boolean wait, final boolean waitForQueueToDrain, final int numEntries) {
- vm0.invoke(new SerializableCallable("check for data in hdfs") {
- @Override
- public Object call() throws Exception {
-
- HDFSRegionDirector director = HDFSRegionDirector.getInstance();
- final SortedOplogStatistics stats = director.getHdfsRegionStats("/" + uniqueName);
- waitForCriterion(new WaitCriterion() {
- @Override
- public boolean done() {
- return stats.getActiveFileCount() > 0 == shouldHaveData;
- }
-
- @Override
- public String description() {
- return "Waiting for active file count to be " + (shouldHaveData ? "greater than 0" : "0") + ": " + stats.getActiveFileCount() + " stats=" + System.identityHashCode(stats);
- }
- }, 30000, 100, true);
-
- if(waitForQueueToDrain) {
- PartitionedRegion region = (PartitionedRegion) getCache().getRegion(uniqueName);
- final AsyncEventQueueStats queueStats = region.getHDFSEventQueueStats();
- waitForCriterion(new WaitCriterion() {
- @Override
- public boolean done() {
- return queueStats.getEventQueueSize() <= 0;
- }
-
- @Override
- public String description() {
- return "Waiting for queue stats to reach 0: " + queueStats.getEventQueueSize();
- }
- }, 30000, 100, true);
- }
- return null;
- }
- });
- }
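-
For readers without the dunit sources at hand, waitForCriterion above is a
poll-until-timeout idiom. A minimal re-implementation under that assumption
(names invented, not the dunit API):

    public class PollUntil {
      interface Criterion {
        boolean done();
        String description();
      }

      // Re-check the condition every intervalMs until it holds or timeoutMs
      // elapses, then fail with the criterion's description.
      static void waitFor(Criterion c, long timeoutMs, long intervalMs)
          throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!c.done()) {
          if (System.currentTimeMillis() > deadline) {
            throw new AssertionError("timed out: " + c.description());
          }
          Thread.sleep(intervalMs);
        }
      }
    }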
-
- protected void doPuts(VM vm0, final String uniqueName, final int numEntries) {
- // Do some puts
- vm0.invoke(new SerializableCallable("do puts") {
- public Object call() throws Exception {
- Region r = getRootRegion(uniqueName);
- for (int i =0; i< numEntries; i++)
- r.put("K" + i, "V"+ i );
- return null;
- }
- });
- }
-
- protected void validate(VM vm1, final String uniqueName, final int numEntries) {
- SerializableCallable validate = new SerializableCallable("validate") {
- public Object call() throws Exception {
- Region r = getRootRegion(uniqueName);
-
- for (int i =0; i< numEntries; i++) {
- assertEquals("failure on key K" + i , "V"+ i, r.get("K" + i));
- }
-
- return null;
- }
- };
- vm1.invoke(validate);
- }
-
- protected void dumpFiles(VM vm0, final String uniqueName) {
- vm0.invoke(new SerializableRunnable() {
-
- @Override
- public void run() {
- HDFSStoreImpl hdfsStore = (HDFSStoreImpl) ((GemFireCacheImpl)getCache()).findHDFSStore(uniqueName);
- FileSystem fs;
- try {
- fs = hdfsStore.getFileSystem();
- } catch (IOException e1) {
- throw new HDFSIOException(e1.getMessage(), e1);
- }
- System.err.println("dumping file names in HDFS directory: " + hdfsStore.getHomeDir());
- try {
- RemoteIterator<LocatedFileStatus> files = fs.listFiles(new Path(hdfsStore.getHomeDir()), true);
-
- while(files.hasNext()) {
- LocatedFileStatus next = files.next();
- /* MergeGemXDHDFSToGFE - Disabled as I am not pulling in DunitEnv */
- // System.err.println(DUnitEnv.get().getPid() + " - " + next.getPath());
- System.err.println(" - " + next.getPath());
- }
- } catch (FileNotFoundException e) {
- // print and continue; the dump is best-effort diagnostics
- e.printStackTrace();
- } catch (IOException e) {
- // print and continue; the dump is best-effort diagnostics
- e.printStackTrace();
- }
-
- }
-
- });
- }
-
-}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
deleted file mode 100644
index 07d9f77..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
+++ /dev/null
@@ -1,389 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.net.URI;
-import java.util.HashSet;
-import java.util.Random;
-import java.util.Set;
-import java.util.TreeMap;
-
-import junit.framework.TestCase;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalFileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.Operation;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionFactory;
-import com.gemstone.gemfire.cache.RegionShortcut;
-import com.gemstone.gemfire.cache.SerializedCacheValue;
-import com.gemstone.gemfire.cache.TransactionId;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.SortedHDFSQueuePersistedEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl.FileSystemFactory;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog.HoplogWriter;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer.Compactor;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.HFileStoreStatistics;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
-import com.gemstone.gemfire.internal.cache.versions.DiskVersionTag;
-import com.gemstone.gemfire.internal.util.BlobHelper;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
-
-import dunit.DistributedTestCase;
-import dunit.DistributedTestCase.ExpectedException;
-
-public abstract class BaseHoplogTestCase extends TestCase {
- public static final String HDFS_STORE_NAME = "hdfs";
- public static final Random rand = new Random(System.currentTimeMillis());
- protected Path testDataDir;
- protected Cache cache;
-
- protected HDFSRegionDirector director;
- protected HdfsRegionManager regionManager;
- protected HDFSStoreFactory hsf;
- protected HDFSStoreImpl hdfsStore;
- protected RegionFactory<Object, Object> regionfactory;
- protected Region<Object, Object> region;
- protected SortedOplogStatistics stats;
- protected HFileStoreStatistics storeStats;
- protected BlockCache blockCache;
-
- Set<ExpectedException> exceptions = new HashSet<ExpectedException>();
- @Override
- protected void setUp() throws Exception {
- super.setUp();
- System.setProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP, "true");
-
- //This is logged by HDFS when it is stopped.
- exceptions.add(DistributedTestCase.addExpectedException("sleep interrupted"));
- exceptions.add(DistributedTestCase.addExpectedException("java.io.InterruptedIOException"));
-
- testDataDir = new Path("test-case");
-
- cache = createCache();
-
- configureHdfsStoreFactory();
- hdfsStore = (HDFSStoreImpl) hsf.create(HDFS_STORE_NAME);
-
- regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION);
-// regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
- region = regionfactory.create(getName());
-
- // disable compaction by default and clear existing queues
- HDFSCompactionManager compactionManager = HDFSCompactionManager.getInstance(hdfsStore);
- compactionManager.reset();
-
- director = HDFSRegionDirector.getInstance();
- director.setCache(cache);
- regionManager = ((LocalRegion)region).getHdfsRegionManager();
- stats = director.getHdfsRegionStats("/" + getName());
- storeStats = hdfsStore.getStats();
- blockCache = hdfsStore.getBlockCache();
- AbstractHoplogOrganizer.JUNIT_TEST_RUN = true;
- }
-
- protected void configureHdfsStoreFactory() throws Exception {
- hsf = this.cache.createHDFSStoreFactory();
- hsf.setHomeDir(testDataDir.toString());
- hsf.setMinorCompaction(false);
- hsf.setMajorCompaction(false);
- }
-
- protected Cache createCache() {
- CacheFactory cf = new CacheFactory().set("mcast-port", "0")
- .set("log-level", "info")
- ;
- cache = cf.create();
- return cache;
- }
-
- @Override
- protected void tearDown() throws Exception {
- if (region != null) {
- region.destroyRegion();
- }
-
- if (hdfsStore != null) {
- hdfsStore.getFileSystem().delete(testDataDir, true);
- hdfsStore.destroy();
- }
-
- if (cache != null) {
- cache.close();
- }
- super.tearDown();
- for (ExpectedException ex: exceptions) {
- ex.remove();
- }
- }
-
- /**
- * Creates a hoplog file with numKeys records. Keys follow the key-X pattern,
- * where X ranges over numKeys consecutive values; values are derived from
- * System.nanoTime().
- *
- * @return the sorted map of inserted KVs
- */
- protected TreeMap<String, String> createHoplog(int numKeys, Hoplog oplog) throws IOException {
- int offset = (numKeys > 10 ? 100000 : 0);
-
- HoplogWriter writer = oplog.createWriter(numKeys);
- TreeMap<String, String> map = new TreeMap<String, String>();
- for (int i = offset; i < (numKeys + offset); i++) {
- String key = ("key-" + i);
- String value = ("value-" + System.nanoTime());
- writer.append(key.getBytes(), value.getBytes());
- map.put(key, value);
- }
- writer.close();
- return map;
- }
-
- protected FileStatus[] getBucketHoplogs(String regionAndBucket, final String type)
- throws IOException {
- return getBucketHoplogs(hdfsStore.getFileSystem(), regionAndBucket, type);
- }
-
- protected FileStatus[] getBucketHoplogs(FileSystem fs, String regionAndBucket, final String type)
- throws IOException {
- FileStatus[] hoplogs = fs.listStatus(
- new Path(testDataDir, regionAndBucket), new PathFilter() {
- @Override
- public boolean accept(Path file) {
- return file.getName().endsWith(type);
- }
- });
- return hoplogs;
- }
-
- protected String getRandomHoplogName() {
- String hoplogName = "hoplog-" + System.nanoTime() + "-" + rand.nextInt(10000) + ".hop";
- return hoplogName;
- }
-
-// public static MiniDFSCluster initMiniCluster(int port, int numDN) throws Exception {
-// HashMap<String, String> map = new HashMap<String, String>();
-// map.put(DFSConfigKeys.DFS_REPLICATION_KEY, "1");
-// return initMiniCluster(port, numDN, map);
-// }
-//
-// public static MiniDFSCluster initMiniCluster(int port, int numDN, HashMap<String, String> map) throws Exception {
-// System.setProperty("test.build.data", "hdfs-test-cluster");
-// Configuration hconf = new HdfsConfiguration();
-// for (Entry<String, String> entry : map.entrySet()) {
-// hconf.set(entry.getKey(), entry.getValue());
-// }
-//
-// hconf.set("dfs.namenode.fs-limits.min-block-size", "1024");
-//
-// Builder builder = new MiniDFSCluster.Builder(hconf);
-// builder.numDataNodes(numDN);
-// builder.nameNodePort(port);
-// MiniDFSCluster cluster = builder.build();
-// return cluster;
-// }
-
- public static void setConfigFile(HDFSStoreFactory factory, File configFile, String config)
- throws Exception {
- BufferedWriter bw = new BufferedWriter(new FileWriter(configFile));
- try {
- bw.write(config);
- } finally {
- // close even if the write fails, so the file handle is not leaked
- bw.close();
- }
- factory.setHDFSClientConfigFile(configFile.getName());
- }
-
- public static void alterMajorCompaction(HDFSStoreImpl store, boolean enable) {
- HDFSStoreMutator mutator = store.createHdfsStoreMutator();
- mutator.setMajorCompaction(enable);
- store.alter(mutator);
- }
-
- public static void alterMinorCompaction(HDFSStoreImpl store, boolean enable) {
- HDFSStoreMutator mutator = store.createHdfsStoreMutator();
- mutator.setMinorCompaction(enable);
- store.alter(mutator);
- }
-
- public void deleteMiniClusterDir() throws Exception {
- File clusterDir = new File("hdfs-test-cluster");
- if (clusterDir.exists()) {
- FileUtils.deleteDirectory(clusterDir);
- }
- }
-
- public static class TestEvent extends SortedHDFSQueuePersistedEvent {
- Object key;
-
- public TestEvent(String k, String v) throws Exception {
- this(k, v, Operation.PUT_IF_ABSENT);
- }
-
- public TestEvent(String k, String v, Operation op) throws Exception {
- super(v, op, (byte) 0x02, false, new DiskVersionTag(), BlobHelper.serializeToBlob(k), 0);
- this.key = k;
- }
-
- public Object getKey() {
- return key;
- }
-
- public Object getNewValue() {
- return valueObject;
- }
-
- public Operation getOperation() {
- return op;
- }
-
- public Region<Object, Object> getRegion() {
- return null;
- }
-
- public Object getCallbackArgument() {
- return null;
- }
-
- public boolean isCallbackArgumentAvailable() {
- return false;
- }
-
- public boolean isOriginRemote() {
- return false;
- }
-
- public DistributedMember getDistributedMember() {
- return null;
- }
-
- public boolean isExpiration() {
- return false;
- }
-
- public boolean isDistributed() {
- return false;
- }
-
- public Object getOldValue() {
- return null;
- }
-
- public SerializedCacheValue<Object> getSerializedOldValue() {
- return null;
- }
-
- public SerializedCacheValue<Object> getSerializedNewValue() {
- return null;
- }
-
- public boolean isLocalLoad() {
- return false;
- }
-
- public boolean isNetLoad() {
- return false;
- }
-
- public boolean isLoad() {
- return false;
- }
-
- public boolean isNetSearch() {
- return false;
- }
-
- public TransactionId getTransactionId() {
- return null;
- }
-
- public boolean isBridgeEvent() {
- return false;
- }
-
- public boolean hasClientOrigin() {
- return false;
- }
-
- public boolean isOldValueAvailable() {
- return false;
- }
- }
-
- public abstract class AbstractCompactor implements Compactor {
- @Override
- public HDFSStore getHdfsStore() {
- return hdfsStore;
- }
-
- public void suspend() {
- }
-
- public void resume() {
- }
-
- public boolean isBusy(boolean isMajor) {
- return false;
- }
- }
-
- public HDFSStoreFactoryImpl getCloseableLocalHdfsStoreFactory() {
- final FileSystemFactory fsFactory = new FileSystemFactory() {
- // By default a local FS instance is not disabled by close(); hence this
- // customization. (A standalone sketch of the pattern follows the class.)
- class CustomFileSystem extends LocalFileSystem {
- boolean isClosed = false;
-
- public void close() throws IOException {
- isClosed = true;
- super.close();
- }
-
- public FileStatus getFileStatus(Path f) throws IOException {
- if (isClosed) {
- throw new IOException();
- }
- return super.getFileStatus(f);
- }
- }
-
- public FileSystem create(URI namenode, Configuration conf, boolean forceNew) throws IOException {
- CustomFileSystem fs = new CustomFileSystem();
- fs.initialize(namenode, conf);
- return fs;
- }
- };
-
- HDFSStoreFactoryImpl storeFactory = new HDFSStoreFactoryImpl(cache) {
- public HDFSStore create(String name) {
- return new HDFSStoreImpl(name, this.configHolder) {
- public FileSystemFactory getFileSystemFactory() {
- return fsFactory;
- }
- };
- }
- };
- return storeFactory;
- }
-}
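-
The CustomFileSystem wrapper in getCloseableLocalHdfsStoreFactory above,
recording close() so that later use fails fast, is a general testing pattern.
A stripped-down, self-contained sketch of it with illustrative names:

    import java.io.Closeable;
    import java.io.IOException;

    class CloseAwareResource implements Closeable {
      private boolean closed = false;

      @Override
      public void close() {
        closed = true; // record the close instead of only releasing
      }

      String read() throws IOException {
        if (closed) {
          throw new IOException("resource already closed");
        }
        return "data";
      }
    }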
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CardinalityEstimatorJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CardinalityEstimatorJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CardinalityEstimatorJUnitTest.java
deleted file mode 100644
index db050b3..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CardinalityEstimatorJUnitTest.java
+++ /dev/null
@@ -1,188 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Operation;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class CardinalityEstimatorJUnitTest extends BaseHoplogTestCase {
-
- public void testSingleHoplogCardinality() throws Exception {
- int count = 10;
- int bucketId = (int) System.nanoTime();
- HoplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
-
- // flush and create hoplog
- ArrayList<TestEvent> items = new ArrayList<TestEvent>();
- for (int i = 0; i < count; i++) {
- items.add(new TestEvent(("key-" + i), ("value-" + System.nanoTime())));
- }
- // assert that size is 0 before flush begins
- assertEquals(0, organizer.sizeEstimate());
- organizer.flush(items.iterator(), count);
-
- assertEquals(count, organizer.sizeEstimate());
- assertEquals(0, stats.getActiveReaderCount());
-
- organizer.close();
- organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
- assertEquals(count, organizer.sizeEstimate());
- assertEquals(1, stats.getActiveReaderCount());
- }
-
- public void testSingleHoplogCardinalityWithDuplicates() throws Exception {
- int bucketId = (int) System.nanoTime();
- HoplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
-
- List<TestEvent> items = new ArrayList<TestEvent>();
- items.add(new TestEvent("key-0", "value-0"));
- items.add(new TestEvent("key-0", "value-0"));
- items.add(new TestEvent("key-1", "value-1"));
- items.add(new TestEvent("key-2", "value-2"));
- items.add(new TestEvent("key-3", "value-3"));
- items.add(new TestEvent("key-3", "value-3"));
- items.add(new TestEvent("key-4", "value-4"));
-
- organizer.flush(items.iterator(), 7);
- assertEquals(5, organizer.sizeEstimate());
- }
-
- public void testMultipleHoplogCardinality() throws Exception {
- int bucketId = (int) System.nanoTime();
- HoplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
-
- List<TestEvent> items = new ArrayList<TestEvent>();
- items.add(new TestEvent("key-0", "value-0"));
- items.add(new TestEvent("key-1", "value-1"));
- items.add(new TestEvent("key-2", "value-2"));
- items.add(new TestEvent("key-3", "value-3"));
- items.add(new TestEvent("key-4", "value-4"));
-
- organizer.flush(items.iterator(), 5);
- assertEquals(5, organizer.sizeEstimate());
-
- items.clear();
- items.add(new TestEvent("key-1", "value-0"));
- items.add(new TestEvent("key-5", "value-5"));
- items.add(new TestEvent("key-6", "value-6"));
- items.add(new TestEvent("key-7", "value-7"));
- items.add(new TestEvent("key-8", "value-8"));
- items.add(new TestEvent("key-9", "value-9"));
-
- organizer.flush(items.iterator(), 6);
- assertEquals(10, organizer.sizeEstimate());
- }
-
- public void testCardinalityAfterRestart() throws Exception {
- int bucketId = (int) System.nanoTime();
- HoplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
-
- List<TestEvent> items = new ArrayList<TestEvent>();
- items.add(new TestEvent("key-0", "value-0"));
- items.add(new TestEvent("key-1", "value-1"));
- items.add(new TestEvent("key-2", "value-2"));
- items.add(new TestEvent("key-3", "value-3"));
- items.add(new TestEvent("key-4", "value-4"));
-
- assertEquals(0, organizer.sizeEstimate());
- organizer.flush(items.iterator(), 5);
- assertEquals(5, organizer.sizeEstimate());
-
- // restart
- organizer.close();
- organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
- assertEquals(5, organizer.sizeEstimate());
-
- items.clear();
- items.add(new TestEvent("key-1", "value-0"));
- items.add(new TestEvent("key-5", "value-5"));
- items.add(new TestEvent("key-6", "value-6"));
- items.add(new TestEvent("key-7", "value-7"));
- items.add(new TestEvent("key-8", "value-8"));
- items.add(new TestEvent("key-9", "value-9"));
-
- organizer.flush(items.iterator(), 6);
- assertEquals(10, organizer.sizeEstimate());
-
- // restart - make sure that HLL from the youngest file is read
- organizer.close();
- organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
- assertEquals(10, organizer.sizeEstimate());
-
- items.clear();
- items.add(new TestEvent("key-1", "value-1"));
- items.add(new TestEvent("key-5", "value-5"));
- items.add(new TestEvent("key-10", "value-10"));
- items.add(new TestEvent("key-11", "value-11"));
- items.add(new TestEvent("key-12", "value-12"));
- items.add(new TestEvent("key-13", "value-13"));
- items.add(new TestEvent("key-14", "value-14"));
-
- organizer.flush(items.iterator(), 7);
- assertEquals(15, organizer.sizeEstimate());
- }
-
- public void testCardinalityAfterMajorCompaction() throws Exception {
- doCardinalityAfterCompactionWork(true);
- }
-
- public void testCardinalityAfterMinorCompaction() throws Exception {
- doCardinalityAfterCompactionWork(false);
- }
-
- private void doCardinalityAfterCompactionWork(boolean isMajor) throws Exception {
- int bucketId = (int) System.nanoTime();
- HoplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
-
- List<TestEvent> items = new ArrayList<TestEvent>();
- items.add(new TestEvent("key-0", "value-0"));
- items.add(new TestEvent("key-1", "value-1"));
- items.add(new TestEvent("key-2", "value-2"));
- items.add(new TestEvent("key-3", "value-3"));
- items.add(new TestEvent("key-4", "value-4"));
-
- organizer.flush(items.iterator(), 5);
- assertEquals(5, organizer.sizeEstimate());
-
- items.clear();
- items.add(new TestEvent("key-0", "value-0"));
- items.add(new TestEvent("key-1", "value-5", Operation.DESTROY));
- items.add(new TestEvent("key-2", "value-6", Operation.INVALIDATE));
- items.add(new TestEvent("key-5", "value-5"));
-
- organizer.flush(items.iterator(), 4);
- assertEquals(6, organizer.sizeEstimate());
-
- items.clear();
- items.add(new TestEvent("key-3", "value-5", Operation.DESTROY));
- items.add(new TestEvent("key-4", "value-6", Operation.INVALIDATE));
- items.add(new TestEvent("key-5", "value-0"));
- items.add(new TestEvent("key-6", "value-5"));
-
- organizer.flush(items.iterator(), 4);
-
- items.add(new TestEvent("key-5", "value-0"));
- items.add(new TestEvent("key-6", "value-5"));
-
- items.clear();
- organizer.flush(items.iterator(), items.size());
- assertEquals(7, organizer.sizeEstimate());
-
- organizer.getCompactor().compact(isMajor, false);
- assertEquals(3, organizer.sizeEstimate());
- }
-}
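-
A note on the assertions in this test: sizeEstimate() approximates the number
of distinct keys across flushes (per the "HLL from the youngest file" comment,
it is backed by an HLL sketch), not the number of events. That is why
re-flushing key-1 moves the estimate from 5 to 10 rather than 11. A toy model
using an exact set in place of the approximate sketch:

    import java.util.HashSet;
    import java.util.Set;

    public class DistinctKeySketch {
      public static void main(String[] args) {
        Set<String> distinct = new HashSet<String>();
        for (int i = 0; i < 5; i++) distinct.add("key-" + i);  // first flush
        distinct.add("key-1");                                 // overlap
        for (int i = 5; i < 10; i++) distinct.add("key-" + i); // second flush
        System.out.println(distinct.size()); // 10, not 11
      }
    }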
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCacheLoaderJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCacheLoaderJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCacheLoaderJUnitTest.java
deleted file mode 100644
index 67dcddf..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCacheLoaderJUnitTest.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.util.List;
-
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.AttributesMutator;
-import com.gemstone.gemfire.cache.CacheLoader;
-import com.gemstone.gemfire.cache.CacheLoaderException;
-import com.gemstone.gemfire.cache.LoaderHelper;
-import com.gemstone.gemfire.cache.asyncqueue.AsyncEventListener;
-import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueFactoryImpl;
-import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueImpl;
-import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueStats;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-/**
- * Tests that entries loaded from a cache loader are inserted in the HDFS queue
- *
- * @author hemantb
- */
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSCacheLoaderJUnitTest extends BaseHoplogTestCase {
-
- private static int totalEventsReceived = 0;
- protected void configureHdfsStoreFactory() throws Exception {
- hsf = this.cache.createHDFSStoreFactory();
- hsf.setHomeDir(testDataDir.toString());
- hsf.setBatchInterval(100000000);
- hsf.setBatchSize(10000);
- }
-
- /**
- * Tests that entries loaded from a cache loader are inserted in the HDFS queue
- * but are not inserted in async queues.
- * @throws Exception
- */
- public void testCacheLoaderForAsyncQAndHDFS() throws Exception {
-
- final AsyncEventQueueStats hdfsQueuestatistics = ((AsyncEventQueueImpl)cache.
- getAsyncEventQueues().toArray()[0]).getStatistics();
-
- AttributesMutator am = this.region.getAttributesMutator();
- am.setCacheLoader(new CacheLoader() {
- private int i = 0;
- public Object load(LoaderHelper helper)
- throws CacheLoaderException {
- return new Integer(i++);
- }
-
- public void close() { }
- });
-
-
-
- String asyncQueueName = "myQueue";
- new AsyncEventQueueFactoryImpl(cache).setBatchTimeInterval(1).
- create(asyncQueueName, new AsyncEventListener() {
-
- @Override
- public void close() {
- // no-op: this test listener holds no resources
- }
-
- @Override
- public boolean processEvents(List events) {
- totalEventsReceived += events.size();
- return true;
- }
- });
- am.addAsyncEventQueueId(asyncQueueName);
-
- region.put(1, new Integer(100));
- region.destroy(1);
- region.get(1);
- region.destroy(1);
-
- assertTrue("HDFS queue should have received four events. But it received " +
- hdfsQueuestatistics.getEventQueueSize(), 4 == hdfsQueuestatistics.getEventQueueSize());
- assertTrue("HDFS queue should have received four events. But it received " +
- hdfsQueuestatistics.getEventsReceived(), 4 == hdfsQueuestatistics.getEventsReceived());
-
- region.get(1);
- Thread.sleep(2000);
-
- assertTrue("Async queue should have received only 5 events. But it received " +
- totalEventsReceived, totalEventsReceived == 5);
- assertTrue("HDFS queue should have received 5 events. But it received " +
- hdfsQueuestatistics.getEventQueueSize(), 5 == hdfsQueuestatistics.getEventQueueSize());
- assertTrue("HDFS queue should have received 5 events. But it received " +
- hdfsQueuestatistics.getEventsReceived(), 5 == hdfsQueuestatistics.getEventsReceived());
-
-
- }
-
-}
[07/15] incubator-geode git commit: GEODE-429: Remove api for setting HdfsStore in Attributes
Posted by as...@apache.org.
GEODE-429: Remove api for setting HdfsStore in Attributes
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/07d55bda
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/07d55bda
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/07d55bda
Branch: refs/heads/feature/GEODE-409
Commit: 07d55bda1c1c9d641ca16b3b6804994ecb53bf9d
Parents: 8fb5edd
Author: Ashvin Agrawal <as...@apache.org>
Authored: Tue Oct 20 09:28:06 2015 -0700
Committer: Ashvin Agrawal <as...@apache.org>
Committed: Wed Oct 21 08:55:23 2015 -0700
----------------------------------------------------------------------
.../gemfire/cache/AttributesFactory.java | 31 --------------------
.../java/com/gemstone/gemfire/cache/Cache.java | 2 --
.../gemstone/gemfire/cache/GemFireCache.java | 10 -------
.../admin/remote/RemoteRegionAttributes.java | 2 +-
.../internal/cache/xmlcache/CacheCreation.java | 6 ----
5 files changed, 1 insertion(+), 50 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/07d55bda/gemfire-core/src/main/java/com/gemstone/gemfire/cache/AttributesFactory.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/AttributesFactory.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/AttributesFactory.java
index 7acd72a..406e596 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/AttributesFactory.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/AttributesFactory.java
@@ -20,7 +20,6 @@ import com.gemstone.gemfire.GemFireIOException;
import com.gemstone.gemfire.cache.client.ClientCache;
import com.gemstone.gemfire.cache.client.ClientRegionShortcut;
import com.gemstone.gemfire.cache.client.PoolManager;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
import com.gemstone.gemfire.compression.Compressor;
import com.gemstone.gemfire.internal.cache.AbstractRegion;
import com.gemstone.gemfire.internal.cache.CustomEvictionAttributesImpl;
@@ -456,7 +455,6 @@ public class AttributesFactory<K,V> {
this.regionAttributes.multicastEnabled = regionAttributes.getMulticastEnabled();
this.regionAttributes.gatewaySenderIds = new CopyOnWriteArraySet<String>(regionAttributes.getGatewaySenderIds());
this.regionAttributes.asyncEventQueueIds = new CopyOnWriteArraySet<String>(regionAttributes.getAsyncEventQueueIds());
- this.regionAttributes.hdfsStoreName = regionAttributes.getHDFSStoreName();
this.regionAttributes.isLockGrantor = regionAttributes.isLockGrantor(); // fix for bug 47067
if (regionAttributes instanceof UserSpecifiedRegionAttributes) {
this.regionAttributes.setIndexes(((UserSpecifiedRegionAttributes<K,V>) regionAttributes).getIndexes());
@@ -483,10 +481,6 @@ public class AttributesFactory<K,V> {
}
this.regionAttributes.compressor = regionAttributes.getCompressor();
- this.regionAttributes.hdfsWriteOnly = regionAttributes.getHDFSWriteOnly();
- if (regionAttributes instanceof UserSpecifiedRegionAttributes) {
- this.regionAttributes.setHasHDFSWriteOnly(((UserSpecifiedRegionAttributes<K,V>) regionAttributes).hasHDFSWriteOnly());
- }
this.regionAttributes.offHeap = regionAttributes.getOffHeap();
}
@@ -1288,31 +1282,6 @@ public class AttributesFactory<K,V> {
}
/**
- * Sets the HDFSStore name attribute.
- * This causes the region to use the {@link HDFSStore}.
- * @param name the name of the HDFSstore
- */
- public void setHDFSStoreName(String name) {
- //TODO:HDFS throw an exception if the region is already configured for a disk store and
- // vice versa
- this.regionAttributes.hdfsStoreName = name;
- this.regionAttributes.setHasHDFSStoreName(true);
- }
-
- /**
- * Sets the HDFS write only attribute. if the region
- * is configured to be write only to HDFS, events that have
- * been evicted from memory cannot be read back from HDFS.
- * Events are written to HDFS in the order in which they occurred.
- */
- public void setHDFSWriteOnly(boolean writeOnly) {
- //TODO:HDFS throw an exception if the region is already configured for a disk store and
- // vice versa
- this.regionAttributes.hdfsWriteOnly = writeOnly;
- this.regionAttributes.setHasHDFSWriteOnly(true);
- }
-
- /**
* Sets this region's compressor for compressing entry values.
* @since 8.0
* @param compressor a compressor.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/07d55bda/gemfire-core/src/main/java/com/gemstone/gemfire/cache/Cache.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/Cache.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/Cache.java
index c6495d0..63e8041 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/Cache.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/Cache.java
@@ -16,7 +16,6 @@ import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueue;
import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueueFactory;
import com.gemstone.gemfire.cache.client.ClientCache;
import com.gemstone.gemfire.cache.client.Pool;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
import com.gemstone.gemfire.cache.server.CacheServer;
import com.gemstone.gemfire.cache.snapshot.CacheSnapshotService;
import com.gemstone.gemfire.cache.util.GatewayConflictResolver;
@@ -27,7 +26,6 @@ import com.gemstone.gemfire.cache.wan.GatewaySenderFactory;
import com.gemstone.gemfire.distributed.DistributedMember;
import com.gemstone.gemfire.distributed.DistributedSystem;
import com.gemstone.gemfire.i18n.LogWriterI18n;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
/**
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/07d55bda/gemfire-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java
index b948c5d..18455c7 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java
@@ -18,8 +18,6 @@ import com.gemstone.gemfire.LogWriter;
import com.gemstone.gemfire.cache.client.ClientCache;
import com.gemstone.gemfire.cache.client.ClientCacheFactory;
import com.gemstone.gemfire.cache.control.ResourceManager;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
import com.gemstone.gemfire.cache.lucene.LuceneService;
import com.gemstone.gemfire.cache.wan.GatewaySenderFactory;
import com.gemstone.gemfire.distributed.DistributedSystem;
@@ -259,12 +257,4 @@ public interface GemFireCache extends RegionService {
* @since 8.5
*/
public LuceneService getLuceneService();
-
- /**
- * Returns the HDFSStore by name or <code>null</code> if no HDFSStore is
- * found.
- *
- * @param name the name of the HDFSStore to find.
- */
- public HDFSStore findHDFSStore(String name);
}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/07d55bda/gemfire-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes.java
index 0aa40a7..5c08516 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes.java
@@ -152,7 +152,7 @@ public class RemoteRegionAttributes implements RegionAttributes,
this.isDiskSynchronous = attr.isDiskSynchronous();
this.gatewaySendersDescs = getDescs(attr.getGatewaySenderIds().toArray());
this.asyncEventQueueDescs = getDescs(attr.getAsyncEventQueueIds().toArray());
- this.hdfsStoreName = attr.getHDFSStoreName();
+ this.hdfsStoreName = attr.getHDFSStoreName();
this.hdfsWriteOnly = attr.getHDFSWriteOnly();
this.compressorDesc = getDesc(attr.getCompressor());
this.offHeap = attr.getOffHeap();
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/07d55bda/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
index e4bea7f..13eea93 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
@@ -83,7 +83,6 @@ import com.gemstone.gemfire.distributed.DistributedMember;
import com.gemstone.gemfire.distributed.DistributedSystem;
import com.gemstone.gemfire.i18n.LogWriterI18n;
import com.gemstone.gemfire.internal.Assert;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
import com.gemstone.gemfire.cache.hdfs.internal.HDFSIntegrationUtil;
import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreCreation;
@@ -1378,11 +1377,6 @@ public class CacheCreation implements InternalCache, Extensible<Cache> {
}
@Override
- public HDFSStore findHDFSStore(String storeName) {
- return (HDFSStore)this.hdfsStores.get(storeName);
- }
-
- @Override
public Collection<HDFSStoreImpl> getHDFSStores() {
return this.hdfsStores.values();
}
[11/15] incubator-geode git commit: GEODE-429: Remove HDFS persistence DataPolicy
Posted by as...@apache.org.
GEODE-429: Remove HDFS persistence DataPolicy
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/1b4fd2fe
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/1b4fd2fe
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/1b4fd2fe
Branch: refs/heads/feature/GEODE-409
Commit: 1b4fd2fe872af1520027b8e0a84ffe84b9613f27
Parents: 12318e9
Author: Ashvin Agrawal <as...@apache.org>
Authored: Mon Oct 19 14:49:31 2015 -0700
Committer: Ashvin Agrawal <as...@apache.org>
Committed: Wed Oct 21 08:55:23 2015 -0700
----------------------------------------------------------------------
.../com/gemstone/gemfire/cache/DataPolicy.java | 19 +-
.../internal/cache/PartitionedRegionHelper.java | 2 -
.../cache/xmlcache/CacheXmlGenerator.java | 4 -
.../internal/cache/xmlcache/CacheXmlParser.java | 6 -
.../ColocatedRegionWithHDFSDUnitTest.java | 2 +-
.../hdfs/internal/RegionRecoveryDUnitTest.java | 415 -----
.../internal/RegionWithHDFSBasicDUnitTest.java | 1594 ------------------
.../RegionWithHDFSOffHeapBasicDUnitTest.java | 114 --
...RegionWithHDFSPersistenceBasicDUnitTest.java | 77 -
.../HDFSQueueRegionOperationsJUnitTest.java | 33 -
...FSQueueRegionOperationsOffHeapJUnitTest.java | 54 -
.../cache/HDFSRegionOperationsJUnitTest.java | 542 ------
.../HDFSRegionOperationsOffHeapJUnitTest.java | 78 -
13 files changed, 5 insertions(+), 2935 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/main/java/com/gemstone/gemfire/cache/DataPolicy.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/DataPolicy.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/DataPolicy.java
index 4ffeaba..9223aa4 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/DataPolicy.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/DataPolicy.java
@@ -88,18 +88,6 @@ public class DataPolicy implements java.io.Serializable {
*/
public static final DataPolicy PERSISTENT_PARTITION = new DataPolicy(6, "PERSISTENT_PARTITION");
- /**
- * In addition to <code>PARTITION</code> also causes data to be stored to
- * HDFS. The region initialization may use the data stored on HDFS.
- */
- public static final DataPolicy HDFS_PARTITION = new DataPolicy(7, "HDFS_PARTITION");
-
- /**
- * In addition to <code>HDFS_PARTITION</code> also causes data to be stored on local
- * disk. The data can be evicted from the local disk and still be read
- * from HDFS.
- */
- public static final DataPolicy HDFS_PERSISTENT_PARTITION = new DataPolicy(10, "HDFS_PERSISTENT_PARTITION");
/**
* The data policy used by default; it is {@link #NORMAL}.
*/
@@ -169,7 +157,7 @@ public class DataPolicy implements java.io.Serializable {
* @since 6.5
*/
public boolean withPersistence() {
- return this == PERSISTENT_PARTITION || this == PERSISTENT_REPLICATE || this == HDFS_PERSISTENT_PARTITION;
+ return this == PERSISTENT_PARTITION || this == PERSISTENT_REPLICATE;
}
/** Return whether this policy does partitioning.
@@ -179,7 +167,7 @@ public class DataPolicy implements java.io.Serializable {
* @since 6.5
*/
public boolean withPartitioning() {
- return this == PARTITION || this == PERSISTENT_PARTITION || this == HDFS_PARTITION || this==HDFS_PERSISTENT_PARTITION;
+ return this == PARTITION || this == PERSISTENT_PARTITION;
}
/** Return whether this policy does preloaded.
@@ -254,7 +242,8 @@ public class DataPolicy implements java.io.Serializable {
* @see #HDFS_PARTITION
*/
public boolean withHDFS() {
- return this == HDFS_PARTITION || this == HDFS_PERSISTENT_PARTITION;
+// return this == HDFS_PARTITION || this == HDFS_PERSISTENT_PARTITION;
+ return false;
}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHelper.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHelper.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHelper.java
index 965f96c..10dc256 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHelper.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHelper.java
@@ -115,8 +115,6 @@ public class PartitionedRegionHelper
Set policies = new HashSet();
policies.add(DEFAULT_DATA_POLICY);
policies.add(DataPolicy.PERSISTENT_PARTITION);
- policies.add(DataPolicy.HDFS_PARTITION);
- policies.add(DataPolicy.HDFS_PERSISTENT_PARTITION);
// policies.add(DataPolicy.NORMAL);
ALLOWED_DATA_POLICIES = Collections.unmodifiableSet(policies);
}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlGenerator.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlGenerator.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlGenerator.java
index ee4e0ae..3b587b3 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlGenerator.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlGenerator.java
@@ -1904,10 +1904,6 @@ public class CacheXmlGenerator extends CacheXml implements XMLReader {
dpString = PERSISTENT_REPLICATE_DP;
} else if (dp == DataPolicy.PERSISTENT_PARTITION) {
dpString = PERSISTENT_PARTITION_DP;
- } else if (dp == DataPolicy.HDFS_PARTITION) {
- dpString = HDFS_PARTITION_DP;
- } else if (dp == DataPolicy.HDFS_PERSISTENT_PARTITION) {
- dpString = HDFS_PERSISTENT_PARTITION_DP;
} else if (dp.isPartition()) {
if (this.version.compareTo(CacheXmlVersion.VERSION_5_1) >= 0) {
dpString = PARTITION_DP;
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java
index f0b3612..2e77d3c 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java
@@ -1261,12 +1261,6 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
else if (dp.equals(PERSISTENT_PARTITION_DP)) {
attrs.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
}
- else if (dp.equals(HDFS_PARTITION_DP)) {
- attrs.setDataPolicy(DataPolicy.HDFS_PARTITION);
- }
- else if (dp.equals(HDFS_PERSISTENT_PARTITION_DP)) {
- attrs.setDataPolicy(DataPolicy.HDFS_PERSISTENT_PARTITION);
- }
else {
throw new InternalGemFireException(LocalizedStrings.CacheXmlParser_UNKNOWN_DATA_POLICY_0.toLocalizedString(dp));
}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/ColocatedRegionWithHDFSDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/ColocatedRegionWithHDFSDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/ColocatedRegionWithHDFSDUnitTest.java
index 3b0be6b..44206dc 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/ColocatedRegionWithHDFSDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/ColocatedRegionWithHDFSDUnitTest.java
@@ -57,7 +57,7 @@ public class ColocatedRegionWithHDFSDUnitTest extends RegionWithHDFSTestBase {
hsf.create(uniqueName);
AttributesFactory af = new AttributesFactory();
- af.setDataPolicy(DataPolicy.HDFS_PARTITION);
+ af.setDataPolicy(DataPolicy.PARTITION);
PartitionAttributesFactory paf = new PartitionAttributesFactory();
paf.setTotalNumBuckets(totalnumOfBuckets);
paf.setRedundantCopies(1);
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionRecoveryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionRecoveryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionRecoveryDUnitTest.java
deleted file mode 100644
index 61ff18d..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionRecoveryDUnitTest.java
+++ /dev/null
@@ -1,415 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.File;
-import java.io.IOException;
-
-import com.gemstone.gemfire.cache.AttributesFactory;
-import com.gemstone.gemfire.cache.DataPolicy;
-import com.gemstone.gemfire.cache.EvictionAction;
-import com.gemstone.gemfire.cache.EvictionAttributes;
-import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.cache30.CacheTestCase;
-import com.gemstone.gemfire.internal.FileUtil;
-
-import dunit.AsyncInvocation;
-import dunit.Host;
-import dunit.SerializableCallable;
-import dunit.VM;
-
-/**
- * A class for testing the recovery after restart for GemFire cluster that has
- * HDFS regions
- *
- * @author Hemant Bhanawat
- */
-@SuppressWarnings({ "serial", "deprecation", "rawtypes" })
-public class RegionRecoveryDUnitTest extends CacheTestCase {
- public RegionRecoveryDUnitTest(String name) {
- super(name);
- }
-
- private static String homeDir = null;
-
- public void tearDown2() throws Exception {
- for (int h = 0; h < Host.getHostCount(); h++) {
- Host host = Host.getHost(h);
- SerializableCallable cleanUp = cleanUpStores();
- for (int v = 0; v < host.getVMCount(); v++) {
- VM vm = host.getVM(v);
- vm.invoke(cleanUp);
- }
- }
- super.tearDown2();
- }
-
- public SerializableCallable cleanUpStores() throws Exception {
- SerializableCallable cleanUp = new SerializableCallable() {
- public Object call() throws Exception {
- if (homeDir != null) {
- // Each VM will try to delete the same directory. But that's okay as
- // the subsequent invocations will be no-ops.
- FileUtil.delete(new File(homeDir));
- homeDir = null;
- }
- return 0;
- }
- };
- return cleanUp;
- }
-
- /**
- * Tests a basic restart of the system. Events that are already in HDFS should
- * be read back. The async queue is not persisted, so we wait until the queue
- * has persisted the items to HDFS.
- *
- * @throws Exception
- */
- public void testBasicRestart() throws Exception {
- disconnectFromDS();
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- VM vm2 = host.getVM(2);
- VM vm3 = host.getVM(3);
-
- // Going two levels up to avoid home directories getting created in the
- // VM-specific directory. This avoids failures in those tests where
- // datastores are restarted and bucket ownership changes between VMs.
- homeDir = "../../testBasicRestart";
- String uniqueName = "testBasicRestart";
-
- createServerRegion(vm0, 11, 1, 500, 500, homeDir, uniqueName);
- createServerRegion(vm1, 11, 1, 500, 500, homeDir, uniqueName);
- createServerRegion(vm2, 11, 1, 500, 500, homeDir, uniqueName);
- createServerRegion(vm3, 11, 1, 500, 500, homeDir, uniqueName);
-
- doPuts(vm0, uniqueName, 1, 50);
- doPuts(vm1, uniqueName, 40, 100);
- doPuts(vm2, uniqueName, 40, 100);
- doPuts(vm3, uniqueName, 90, 150);
-
- cacheClose(vm0, true);
- cacheClose(vm1, true);
- cacheClose(vm2, true);
- cacheClose(vm3, true);
-
- createServerRegion(vm0, 11, 1, 500, 500, homeDir, uniqueName);
- createServerRegion(vm1, 11, 1, 500, 500, homeDir, uniqueName);
- createServerRegion(vm2, 11, 1, 500, 500, homeDir, uniqueName);
- createServerRegion(vm3, 11, 1, 500, 500, homeDir, uniqueName);
-
- verifyGetsForValue(vm0, uniqueName, 1, 50, false);
- verifyGetsForValue(vm1, uniqueName, 40, 100, false);
- verifyGetsForValue(vm2, uniqueName, 40, 100, false);
- verifyGetsForValue(vm3, uniqueName, 90, 150, false);
-
- cacheClose(vm0, false);
- cacheClose(vm1, false);
- cacheClose(vm2, false);
- cacheClose(vm3, false);
-
- disconnectFromDS();
-
- }
-
- /**
- * Servers are stopped and restarted. Disabled due to bug 48067.
- */
- public void testPersistedAsyncQueue_Restart() throws Exception {
- disconnectFromDS();
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- VM vm2 = host.getVM(2);
- VM vm3 = host.getVM(3);
-
- // Going two levels up to avoid home directories getting created in the
- // VM-specific directory. This avoids failures in those tests where
- // datastores are restarted and bucket ownership changes between VMs.
- homeDir = "../../testPersistedAsyncQueue_Restart";
- String uniqueName = "testPersistedAsyncQueue_Restart";
-
- // create cache and region
- createPersistedServerRegion(vm0, 11, 1, 2000, 5, homeDir, uniqueName);
- createPersistedServerRegion(vm1, 11, 1, 2000, 5, homeDir, uniqueName);
- createPersistedServerRegion(vm2, 11, 1, 2000, 5, homeDir, uniqueName);
- createPersistedServerRegion(vm3, 11, 1, 2000, 5, homeDir, uniqueName);
-
- // do some puts
- AsyncInvocation a0 = doAsyncPuts(vm0, uniqueName, 1, 50);
- AsyncInvocation a1 = doAsyncPuts(vm1, uniqueName, 40, 100);
- AsyncInvocation a2 = doAsyncPuts(vm2, uniqueName, 40, 100);
- AsyncInvocation a3 = doAsyncPuts(vm3, uniqueName, 90, 150);
-
- a3.join();
- a2.join();
- a1.join();
- a0.join();
-
- // close the cache
- cacheClose(vm0, true);
- cacheClose(vm1, true);
- cacheClose(vm2, true);
- cacheClose(vm3, true);
-
- // recreate the cache and regions
- a3 = createAsyncPersistedServerRegion(vm3, 11, 1, 2000, 5, homeDir, uniqueName);
- a2 = createAsyncPersistedServerRegion(vm2, 11, 1, 2000, 5, homeDir, uniqueName);
- a1 = createAsyncPersistedServerRegion(vm1, 11, 1, 2000, 5, homeDir, uniqueName);
- a0 = createAsyncPersistedServerRegion(vm0, 11, 1, 2000, 5, homeDir, uniqueName);
-
- a3.join();
- a2.join();
- a1.join();
- a0.join();
-
- // these gets should fetch the data from the async queue
- verifyGetsForValue(vm0, uniqueName, 1, 50, false);
- verifyGetsForValue(vm1, uniqueName, 40, 100, false);
- verifyGetsForValue(vm2, uniqueName, 40, 100, false);
- verifyGetsForValue(vm3, uniqueName, 90, 150, false);
-
- // these gets wait for some time before fetching the data. This ensures
- // that the reads are done from HDFS
- verifyGetsForValue(vm0, uniqueName, 1, 50, true);
- verifyGetsForValue(vm1, uniqueName, 40, 100, true);
- verifyGetsForValue(vm2, uniqueName, 40, 100, true);
- verifyGetsForValue(vm3, uniqueName, 90, 150, true);
-
- cacheClose(vm0, false);
- cacheClose(vm1, false);
- cacheClose(vm2, false);
- cacheClose(vm3, false);
-
- disconnectFromDS();
- }
-
- /**
- * Stops a single server. A different node becomes primary for the buckets on
- * the stopped node. Everything should continue to work. Disabled due to bug 48067.
- *
- */
- public void testPersistedAsyncQueue_ServerRestart() throws Exception {
- disconnectFromDS();
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- VM vm2 = host.getVM(2);
- VM vm3 = host.getVM(3);
-
- // Going two levels up to avoid home directories getting created in the
- // VM-specific directory. This avoids failures in those tests where
- // datastores are restarted and bucket ownership changes between VMs.
- homeDir = "../../testPAQ_ServerRestart";
- String uniqueName = "testPAQ_ServerRestart";
-
- createPersistedServerRegion(vm0, 11, 1, 2000, 5, homeDir, uniqueName);
- createPersistedServerRegion(vm1, 11, 1, 2000, 5, homeDir, uniqueName);
- createPersistedServerRegion(vm2, 11, 1, 2000, 5, homeDir, uniqueName);
- createPersistedServerRegion(vm3, 11, 1, 2000, 5, homeDir, uniqueName);
-
- AsyncInvocation a0 = doAsyncPuts(vm0, uniqueName, 1, 50);
- AsyncInvocation a1 = doAsyncPuts(vm1, uniqueName, 50, 75);
- AsyncInvocation a2 = doAsyncPuts(vm2, uniqueName, 75, 100);
- AsyncInvocation a3 = doAsyncPuts(vm3, uniqueName, 100, 150);
-
- a3.join();
- a2.join();
- a1.join();
- a0.join();
-
- cacheClose(vm0, false);
-
- // these gets should fetch the data from the async queue
- verifyGetsForValue(vm1, uniqueName, 1, 50, false);
- verifyGetsForValue(vm2, uniqueName, 40, 100, false);
- verifyGetsForValue(vm3, uniqueName, 70, 150, false);
-
- // these gets wait for some time before fetching the data. This ensures
- // that the reads are done from HDFS
- verifyGetsForValue(vm2, uniqueName, 1, 100, true);
- verifyGetsForValue(vm3, uniqueName, 40, 150, true);
-
- cacheClose(vm1, false);
- cacheClose(vm2, false);
- cacheClose(vm3, false);
-
- disconnectFromDS();
- }
-
- private int createPersistedServerRegion(final VM vm, final int totalnumOfBuckets,
- final int batchSize, final int batchInterval, final int maximumEntries,
- final String folderPath, final String uniqueName) throws IOException {
-
- return (Integer) vm.invoke(new PersistedRegionCreation(vm, totalnumOfBuckets,
- batchSize, batchInterval, maximumEntries, folderPath, uniqueName));
- }
- private AsyncInvocation createAsyncPersistedServerRegion(final VM vm, final int totalnumOfBuckets,
- final int batchSize, final int batchInterval, final int maximumEntries, final String folderPath,
- final String uniqueName) throws IOException {
-
- return (AsyncInvocation) vm.invokeAsync(new PersistedRegionCreation(vm, totalnumOfBuckets,
- batchSize, batchInterval, maximumEntries, folderPath, uniqueName));
- }
-
- class PersistedRegionCreation extends SerializableCallable {
- private VM vm;
- private int totalnumOfBuckets;
- private int batchSize;
- private int maximumEntries;
- private String folderPath;
- private String uniqueName;
- private int batchInterval;
-
- PersistedRegionCreation(final VM vm, final int totalnumOfBuckets,
- final int batchSize, final int batchInterval, final int maximumEntries,
- final String folderPath, final String uniqueName) throws IOException {
- this.vm = vm;
- this.totalnumOfBuckets = totalnumOfBuckets;
- this.batchSize = batchSize;
- this.maximumEntries = maximumEntries;
- this.folderPath = new File(folderPath).getCanonicalPath();
- this.uniqueName = uniqueName;
- this.batchInterval = batchInterval;
- }
-
- public Object call() throws Exception {
-
- AttributesFactory af = new AttributesFactory();
- af.setDataPolicy(DataPolicy.HDFS_PARTITION);
- PartitionAttributesFactory paf = new PartitionAttributesFactory();
- paf.setTotalNumBuckets(totalnumOfBuckets);
- paf.setRedundantCopies(1);
-
- af.setPartitionAttributes(paf.create());
-
- HDFSStoreFactory hsf = getCache().createHDFSStoreFactory();
- hsf.setHomeDir(folderPath);
- homeDir = folderPath; // for clean-up in tearDown2()
- hsf.setBatchSize(batchSize);
- hsf.setBatchInterval(batchInterval);
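- // A persistent HDFS buffer requires a named disk store (created below).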
- hsf.setBufferPersistent(true);
- hsf.setDiskStoreName(uniqueName + vm.getPid());
-
- getCache().createDiskStoreFactory().create(uniqueName + vm.getPid());
-
- af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(maximumEntries, EvictionAction.LOCAL_DESTROY));
- af.setHDFSStoreName(uniqueName);
- af.setHDFSWriteOnly(false);
-
- hsf.create(uniqueName);
-
- createRootRegion(uniqueName, af.create());
-
- return 0;
- }
- };
-
- private int createServerRegion(final VM vm, final int totalnumOfBuckets,
- final int batchSize, final int batchInterval, final int maximumEntries,
- final String folderPath, final String uniqueName) {
- SerializableCallable createRegion = new SerializableCallable() {
- public Object call() throws Exception {
- AttributesFactory af = new AttributesFactory();
- af.setDataPolicy(DataPolicy.HDFS_PARTITION);
- PartitionAttributesFactory paf = new PartitionAttributesFactory();
- paf.setTotalNumBuckets(totalnumOfBuckets);
- paf.setRedundantCopies(1);
- af.setPartitionAttributes(paf.create());
-
- HDFSStoreFactory hsf = getCache().createHDFSStoreFactory();
- homeDir = new File(folderPath).getCanonicalPath();
- hsf.setHomeDir(homeDir);
- hsf.setBatchSize(batchSize);
- hsf.setBatchInterval(batchInterval);
- hsf.setBufferPersistent(false);
- hsf.setMaxMemory(1);
- hsf.create(uniqueName);
- af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(maximumEntries, EvictionAction.LOCAL_DESTROY));
-
- af.setHDFSWriteOnly(false);
- af.setHDFSStoreName(uniqueName);
- createRootRegion(uniqueName, af.create());
-
- return 0;
- }
- };
-
- return (Integer) vm.invoke(createRegion);
- }
-
- private void cacheClose(VM vm, final boolean sleep) {
- vm.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- if (sleep)
- Thread.sleep(2000);
- getCache().getLogger().info("Cache close in progress ");
- getCache().close();
- getCache().getDistributedSystem().disconnect();
- getCache().getLogger().info("Cache closed");
- return null;
- }
- });
-
- }
-
- private void doPuts(VM vm, final String regionName, final int start, final int end) throws Exception {
- vm.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- Region r = getRootRegion(regionName);
- getCache().getLogger().info("Putting entries ");
- for (int i = start; i < end; i++) {
- r.put("K" + i, "V" + i);
- }
- return null;
- }
-
- });
- }
-
- private AsyncInvocation doAsyncPuts(VM vm, final String regionName,
- final int start, final int end) throws Exception {
- return vm.invokeAsync(new SerializableCallable() {
- public Object call() throws Exception {
- Region r = getRootRegion(regionName);
- getCache().getLogger().info("Putting entries ");
- for (int i = start; i < end; i++) {
- r.put("K" + i, "V" + i);
- }
- return null;
- }
-
- });
- }
-
- private void verifyGetsForValue(VM vm, final String regionName, final int start, final int end, final boolean sleep) throws Exception {
- vm.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- if (sleep) {
- Thread.sleep(2000);
- }
- getCache().getLogger().info("Getting entries ");
- Region r = getRootRegion(regionName);
- for (int i = start; i < end; i++) {
- String k = "K" + i;
- Object s = r.get(k);
- String v = "V" + i;
- assertTrue("The expected key " + v+ " didn't match the received value " + s, v.equals(s));
- }
- return null;
- }
-
- });
-
- }
-}
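The PersistedRegionCreation callable deleted above carries the interesting configuration: the HDFS buffer is made persistent, which in turn requires a named disk store. Condensed to just that wiring (a sketch under the same assumptions as the sketch above; cache, folderPath, batchSize, batchInterval, diskStoreName and storeName stand in for the test's parameters):

// Sketch only: the persistent-buffer configuration used by the deleted test.
HDFSStoreFactory hsf = cache.createHDFSStoreFactory();
hsf.setHomeDir(folderPath);           // where hoplog files land in HDFS
hsf.setBatchSize(batchSize);          // size of each flushed batch
hsf.setBatchInterval(batchInterval);  // how often the queue flushes
hsf.setBufferPersistent(true);        // the buffer survives restarts...
hsf.setDiskStoreName(diskStoreName);  // ...backed by this disk store
cache.createDiskStoreFactory().create(diskStoreName);
hsf.create(storeName);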
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSBasicDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSBasicDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSBasicDUnitTest.java
deleted file mode 100644
index 5a58dc5..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSBasicDUnitTest.java
+++ /dev/null
@@ -1,1594 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.File;
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.logging.log4j.Logger;
-
-import com.gemstone.gemfire.Delta;
-import com.gemstone.gemfire.InvalidDeltaException;
-import com.gemstone.gemfire.cache.AttributesFactory;
-import com.gemstone.gemfire.cache.DataPolicy;
-import com.gemstone.gemfire.cache.EvictionAction;
-import com.gemstone.gemfire.cache.EvictionAttributes;
-import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector;
-import com.gemstone.gemfire.internal.cache.DistributedPutAllOperation;
-import com.gemstone.gemfire.internal.cache.EntryEventImpl;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
-import com.gemstone.gemfire.internal.cache.wan.parallel.ConcurrentParallelGatewaySenderQueue;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.internal.cache.wan.AbstractGatewaySender;
-import dunit.AsyncInvocation;
-import dunit.DistributedTestCase;
-import dunit.Host;
-import dunit.SerializableCallable;
-import dunit.SerializableRunnable;
-import dunit.VM;
-
-/**
- * A class for testing the basic HDFS functionality
- *
- * @author Hemant Bhanawat
- */
-@SuppressWarnings({ "serial", "rawtypes", "deprecation", "unchecked", "unused" })
-public class RegionWithHDFSBasicDUnitTest extends RegionWithHDFSTestBase {
-
- private static final Logger logger = LogService.getLogger();
-
- private ExpectedException ee0;
- private ExpectedException ee1;
-
- public RegionWithHDFSBasicDUnitTest(String name) {
- super(name);
- }
-
- public void setUp() throws Exception {
- super.setUp();
- ee0 = DistributedTestCase.addExpectedException("com.gemstone.gemfire.cache.RegionDestroyedException");
- ee1 = DistributedTestCase.addExpectedException("com.gemstone.gemfire.cache.RegionDestroyedException");
- }
-
- public void tearDown2() throws Exception {
- ee0.remove();
- ee1.remove();
- super.tearDown2();
- }
-
- @Override
- protected SerializableCallable getCreateRegionCallable(
- final int totalnumOfBuckets, final int batchSizeMB,
- final int maximumEntries, final String folderPath,
- final String uniqueName, final int batchInterval,
- final boolean queuePersistent, final boolean writeonly,
- final long timeForRollover, final long maxFileSize) {
- SerializableCallable createRegion = new SerializableCallable("Create HDFS region") {
- public Object call() throws Exception {
- AttributesFactory af = new AttributesFactory();
- af.setDataPolicy(DataPolicy.HDFS_PARTITION);
- PartitionAttributesFactory paf = new PartitionAttributesFactory();
- paf.setTotalNumBuckets(totalnumOfBuckets);
- paf.setRedundantCopies(1);
-
- af.setHDFSStoreName(uniqueName);
- af.setPartitionAttributes(paf.create());
-
- HDFSStoreFactory hsf = getCache().createHDFSStoreFactory();
- // Going two levels up to avoid home directories getting created in the
- // VM-specific directory. This avoids failures in those tests where
- // datastores are restarted and bucket ownership changes between VMs.
- homeDir = new File(tmpDir + "/../../" + folderPath).getCanonicalPath();
- logger.info("Setting homeDir to {}", homeDir);
- hsf.setHomeDir(homeDir);
- hsf.setBatchSize(batchSizeMB);
- hsf.setBufferPersistent(queuePersistent);
- hsf.setMaxMemory(3);
- hsf.setBatchInterval(batchInterval);
- if (timeForRollover != -1) {
- hsf.setWriteOnlyFileRolloverInterval((int) timeForRollover);
- System.setProperty("gemfire.HDFSRegionDirector.FILE_ROLLOVER_TASK_INTERVAL_SECONDS", "1");
- }
- if (maxFileSize != -1) {
- hsf.setWriteOnlyFileRolloverSize((int) maxFileSize);
- }
- hsf.create(uniqueName);
-
- af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(maximumEntries, EvictionAction.LOCAL_DESTROY));
-
- af.setHDFSWriteOnly(writeonly);
- Region r = createRootRegion(uniqueName, af.create());
- ((LocalRegion) r).setIsTest();
-
- return 0;
- }
- };
- return createRegion;
- }
-
- @Override
- protected void doPuts(final String uniqueName, int start, int end) {
- Region r = getRootRegion(uniqueName);
- for (int i = start; i < end; i++) {
- r.put("K" + i, "V" + i);
- }
- }
-
- @Override
- protected void doPutAll(final String uniqueName, Map map) {
- Region r = getRootRegion(uniqueName);
- r.putAll(map);
- }
-
- @Override
- protected void doDestroys(final String uniqueName, int start, int end) {
- Region r = getRootRegion(uniqueName);
- for (int i = start; i < end; i++) {
- r.destroy("K" + i);
- }
- }
-
- @Override
- protected void checkWithGet(String uniqueName, int start, int end, boolean expectValue) {
- Region r = getRootRegion(uniqueName);
- for (int i = start; i < end; i++) {
- String expected = expectValue ? "V" + i : null;
- assertEquals("Mismatch on key " + i, expected, r.get("K" + i));
- }
- }
-
- @Override
- protected void checkWithGetAll(String uniqueName, ArrayList arrayl) {
- Region r = getRootRegion(uniqueName);
- Map map = r.getAll(arrayl);
- logger.info("Read entries {}", map.size());
- for (Object e : map.keySet()) {
- String v = e.toString().replaceFirst("K", "V");
- assertTrue( "Reading entries failed for key " + e + " where value = " + map.get(e), v.equals(map.get(e)));
- }
- }
-
- /**
- * Tests if gets go to primary even if the value resides on secondary.
- */
- public void testValueFetchedFromLocal() {
- disconnectFromDS();
-
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- String homeDir = "./testValueFetchedFromLocal";
-
- createServerRegion(vm0, 7, 1, 50, homeDir, "testValueFetchedFromLocal", 1000);
- createServerRegion(vm1, 7, 1, 50, homeDir, "testValueFetchedFromLocal", 1000);
-
- vm0.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- Region r = getRootRegion("testValueFetchedFromLocal");
- for (int i = 0; i < 25; i++) {
- r.put("K" + i, "V" + i);
- }
- return null;
- }
- });
- vm1.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- Region r = getRootRegion("testValueFetchedFromLocal");
- for (int i = 0; i < 25; i++) {
- String s = null;
- String k = "K" + i;
- s = (String) r.get(k);
- String v = "V" + i;
- assertTrue( "The expected key " + v+ " didn't match the received value " + s, v.equals(s));
- }
- // with only two members and 1 redundant copy, we will have all data locally; make sure that some
- // get operations result in a remote get operation
- assertTrue( "gets should always go to primary", ((LocalRegion)r).getCountNotFoundInLocal() != 0 );
- return null;
- }
- });
-
- vm0.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- Region r = getRootRegion("testValueFetchedFromLocal");
- assertTrue( "HDFS queue or HDFS should not have been accessed. They were accessed " + ((LocalRegion)r).getCountNotFoundInLocal() + " times",
- ((LocalRegion)r).getCountNotFoundInLocal() == 0 );
- return null;
- }
- });
- }
-
- public void testHDFSQueueSizeTest() {
- disconnectFromDS();
-
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- String homeDir = "./testHDFSQueueSize";
-
- createServerRegion(vm0, 1, 10, 50, homeDir, "testHDFSQueueSize", 100000);
- createServerRegion(vm1, 1, 10, 50, homeDir, "testHDFSQueueSize", 100000);
-
- vm0.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- Region r = getRootRegion("testHDFSQueueSize");
- byte[] b = new byte[1024];
- byte[] k = new byte[1];
- for (int i = 0; i < 1; i++) {
- r.put(k, b);
- }
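- // Unwrap the region's HDFS event queue down to its bucket region to inspect the queued byte count.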
- ConcurrentParallelGatewaySenderQueue hdfsqueue = (ConcurrentParallelGatewaySenderQueue)((AbstractGatewaySender)((PartitionedRegion)r).getHDFSEventQueue().getSender()).getQueue();
- HDFSBucketRegionQueue hdfsBQ = (HDFSBucketRegionQueue)((PartitionedRegion)hdfsqueue.getRegion()).getDataStore().getLocalBucketById(0);
- if (hdfsBQ.getBucketAdvisor().isPrimary()) {
- assertTrue("size should not as expected on primary " + hdfsBQ.queueSizeInBytes.get(), hdfsBQ.queueSizeInBytes.get() > 1024 && hdfsBQ.queueSizeInBytes.get() < 1150);
- } else {
- assertTrue("size should be 0 on secondary", hdfsBQ.queueSizeInBytes.get()==0);
- }
- return null;
-
- }
- });
- vm1.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- Region r = getRootRegion("testHDFSQueueSize");
- ConcurrentParallelGatewaySenderQueue hdfsqueue = (ConcurrentParallelGatewaySenderQueue)((AbstractGatewaySender)((PartitionedRegion)r).getHDFSEventQueue().getSender()).getQueue();
- HDFSBucketRegionQueue hdfsBQ = (HDFSBucketRegionQueue)((PartitionedRegion)hdfsqueue.getRegion()).getDataStore().getLocalBucketById(0);
- if (hdfsBQ.getBucketAdvisor().isPrimary()) {
- assertTrue("size should not as expected on primary " + hdfsBQ.queueSizeInBytes.get(), hdfsBQ.queueSizeInBytes.get() > 1024 && hdfsBQ.queueSizeInBytes.get() < 1150);
- } else {
- assertTrue("size should be 0 on secondary", hdfsBQ.queueSizeInBytes.get()==0);
- }
- return null;
-
- }
- });
- }
-
- /**
- * Does puts on a write-only HDFS store
- */
- public void testBasicPutsForWriteOnlyHDFSStore() {
- disconnectFromDS();
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- String homeDir = "./testPutsForWriteOnlyHDFSStore";
-
- createServerRegion(vm0, 7, 1, 20, homeDir, "testPutsForWriteOnlyHDFSStore",
- 100, true, false);
- createServerRegion(vm1, 7, 1, 20, homeDir, "testPutsForWriteOnlyHDFSStore",
- 100, true, false);
-
- // Do some puts
- vm0.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- Region r = getRootRegion("testPutsForWriteOnlyHDFSStore");
- for (int i = 0; i < 200; i++) {
- r.put("K" + i, "V" + i);
- }
- return null;
- }
- });
-
- vm1.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- Region r = getRootRegion("testPutsForWriteOnlyHDFSStore");
-
- for (int i = 200; i < 400; i++) {
- r.put("K" + i, "V" + i);
- }
-
- return null;
- }
- });
-
- }
-
- /**
- * Tests that delta updates are applied correctly on HDFS regions
- */
- public void testDelta() {
- disconnectFromDS();
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- String homeDir = "./testDelta";
-
- // Expected from com.gemstone.gemfire.internal.cache.ServerPingMessage.send()
- ExpectedException ee1 = DistributedTestCase.addExpectedException("java.lang.InterruptedException");
- ExpectedException ee2 = DistributedTestCase.addExpectedException("java.lang.InterruptedException");
-
- createServerRegion(vm0, 7, 1, 20, homeDir, "testDelta", 100);
- createServerRegion(vm1, 7, 1, 20, homeDir, "testDelta", 100);
-
- // Do some puts
- vm0.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- Region r = getRootRegion("testDelta");
- for (int i = 0; i < 100; i++) {
- r.put("K" + i, new CustomerDelta("V" + i, "address"));
- }
- for (int i = 0; i < 50; i++) {
- CustomerDelta cd = new CustomerDelta("V" + i, "address");
- cd.setAddress("updated address");
- r.put("K" + i, cd);
- }
- return null;
- }
- });
-
- vm1.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- Region r = getRootRegion("testDelta");
-
- for (int i = 100; i < 200; i++) {
- r.put("K" + i, new CustomerDelta("V" + i, "address"));
- }
- for (int i = 100; i < 150; i++) {
- CustomerDelta cd = new CustomerDelta("V" + i, "address");
- cd.setAddress("updated address");
- r.put("K" + i, cd);
- }
-
- return null;
- }
- });
- vm1.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- Region r = getRootRegion("testDelta");
- for (int i = 0; i < 50; i++) {
- CustomerDelta custDela = new CustomerDelta ("V" + i, "updated address" );
- String k = "K" + i;
- CustomerDelta s = (CustomerDelta) r.get(k);
-
- assertTrue( "The expected value " + custDela + " didn't match the received value " + s, custDela.equals(s));
- }
- for (int i = 50; i < 100; i++) {
- CustomerDelta custDela = new CustomerDelta("V" + i, "address");
- String k = "K" + i;
- CustomerDelta s = (CustomerDelta) r.get(k);
-
- assertTrue( "The expected value " + custDela + " didn't match the received value " + s, custDela.equals(s));
- }
- for (int i = 100; i < 150; i++) {
- CustomerDelta custDela = new CustomerDelta ("V" + i, "updated address" );
- String k = "K" + i;
- CustomerDelta s = (CustomerDelta) r.get(k);
-
- assertTrue( "The expected value " + custDela + " didn't match the received value " + s, custDela.equals(s));
- }
- for (int i = 150; i < 200; i++) {
- CustomerDelta custDela = new CustomerDelta ("V" + i, "address" );
- String k = "K" + i;
- CustomerDelta s = (CustomerDelta) r.get(k);
-
- assertTrue( "The expected value " + custDela + " didn't match the received value " + s, custDela.equals(s));
- }
- return null;
- }
- });
- ee1.remove();
- ee2.remove();
-
- }
-
- /**
- * Puts byte arrays and fetches them back to ensure that byte arrays are
- * serialized properly
- *
- */
- public void testByteArrays() {
- disconnectFromDS();
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- String homeDir = "./testByteArrays";
-
- createServerRegion(vm0, 7, 1, 20, homeDir, "testByteArrays", 100);
- createServerRegion(vm1, 7, 1, 20, homeDir, "testByteArrays", 100);
-
- // Do some puts
- vm0.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- Region r = getRootRegion("testByteArrays");
- byte[] b1 = { 0x11, 0x44, 0x77 };
- byte[] b2 = { 0x22, 0x55 };
- byte[] b3 = { 0x33 };
- for (int i = 0; i < 100; i++) {
- int x = i % 3;
- if (x == 0) {
- r.put("K" + i, b1);
- } else if (x == 1) {
- r.put("K" + i, b2);
- } else {
- r.put("K" + i, b3);
- }
- }
- return null;
- }
- });
-
- vm1.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- Region r = getRootRegion("testByteArrays");
-
- byte[] b1 = { 0x11, 0x44, 0x77 };
- byte[] b2 = { 0x22, 0x55 };
- byte[] b3 = { 0x33 };
- for (int i = 100; i < 200; i++) {
- int x = i % 3;
- if (x == 0) {
- r.put("K" + i, b1);
- } else if (x == 1) {
- r.put("K" + i, b2);
- } else {
- r.put("K" + i, b3);
- }
- }
- return null;
- }
- });
- vm1.invoke(new SerializableCallable() {
- public Object call() throws Exception {
- Region r = getRootRegion("testByteArrays");
- byte[] b1 = { 0x11, 0x44, 0x77 };
- byte[] b2 = { 0x22, 0x55 };
- byte[] b3 = { 0x33 };
- for (int i = 0; i < 200; i++) {
- int x = i % 3;
- String k = "K" + i;
- byte[] s = (byte[]) r.get(k);
- if (x == 0) {
- assertTrue( "The expected value didn't match the received value of byte array" , Arrays.equals(b1, s));
- } else if (x == 1) {
- assertTrue( "The expected value didn't match the received value of byte array" , Arrays.equals(b2, s));
- } else {
- assertTrue( "The expected value didn't match the received value of byte array" , Arrays.equals(b3, s));
- }
-
- }
- return null;
- }
- });
- }
-
- private static class CustomerDelta implements Serializable, Delta {
- private String name;
- private String address;
- private boolean nameChanged;
- private boolean addressChanged;
-
- public CustomerDelta(CustomerDelta o) {
- this.address = o.address;
- this.name = o.name;
- }
-
- public CustomerDelta(String name, String address) {
- this.name = name;
- this.address = address;
- }
-
- public void fromDelta(DataInput in) throws IOException,
- InvalidDeltaException {
- boolean nameC = in.readBoolean();
- if (nameC) {
- this.name = in.readUTF();
- }
- boolean addressC = in.readBoolean();
- if (addressC) {
- this.address = in.readUTF();
- }
- }
-
- public boolean hasDelta() {
- return nameChanged || addressChanged;
- }
-
- public void toDelta(DataOutput out) throws IOException {
- out.writeBoolean(nameChanged);
- if (this.nameChanged) {
- out.writeUTF(name);
- }
- out.writeBoolean(addressChanged);
- if (this.addressChanged) {
- out.writeUTF(address);
- }
- }
-
- public void setName(String name) {
- this.nameChanged = true;
- this.name = name;
- }
-
- public String getName() {
- return name;
- }
-
- public void setAddress(String address) {
- this.addressChanged = true;
- this.address = address;
- }
-
- public String getAddress() {
- return address;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (!(obj instanceof CustomerDelta)) {
- return false;
- }
- CustomerDelta other = (CustomerDelta) obj;
- return this.name.equals(other.name) && this.address.equals(other.address);
- }
-
- @Override
- public int hashCode() {
- return this.address.hashCode() + this.name.hashCode();
- }
-
- @Override
- public String toString() {
- return "name=" + this.name + "address=" + address;
- }
- }
-
- public void testClearRegionDataInQueue() throws Throwable {
- doTestClearRegion(100000, false);
-
- }
-
- public void testClearRegionDataInHDFS() throws Throwable {
- doTestClearRegion(1, true);
- }
-
- public void doTestClearRegion(int batchInterval, boolean waitForWriteToHDFS) throws Throwable {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
-
- final int numEntries = 400;
-
- String name = getName();
- final String folderPath = "./" + name;
- // Create some regions. Note that we want a large batch interval
- // so that we will have some entries sitting in the queue when
- // we do a clear.
- final String uniqueName = name;
- createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, batchInterval,
- false, true);
- createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, batchInterval,
- false, true);
-
- doPuts(vm0, uniqueName, numEntries);
-
- // Make sure some files have been written to hdfs.
- if (waitForWriteToHDFS) {
- verifyDataInHDFS(vm0, uniqueName, true, true, waitForWriteToHDFS, numEntries);
- }
-
- // Do a clear
- simulateClear(uniqueName, vm0, vm1);
-
- validateEmpty(vm0, numEntries, uniqueName);
- validateEmpty(vm1, numEntries, uniqueName);
-
- // Double check that there is no data in hdfs now
- verifyDataInHDFS(vm0, uniqueName, false, false, waitForWriteToHDFS, numEntries);
- verifyDataInHDFS(vm1, uniqueName, false, false, waitForWriteToHDFS, numEntries);
-
- closeCache(vm0);
- closeCache(vm1);
-
- AsyncInvocation async0 = createServerRegionAsync(vm0, 7, 31, 200, folderPath,
- uniqueName, 100000, false, true);
- AsyncInvocation async1 = createServerRegionAsync(vm1, 7, 31, 200, folderPath,
- uniqueName, 100000, false, true);
- async0.getResult();
- async1.getResult();
-
- validateEmpty(vm0, numEntries, uniqueName);
- validateEmpty(vm1, numEntries, uniqueName);
- }
-
- private void simulateClear(final String name, VM... vms) throws Throwable {
- simulateClearForTests(true);
- try {
-
- // GemFire PRs don't support clear.
- // GemFireXD does a clear by taking GemFireXD DDL locks
- // and then clearing each primary bucket on the primary.
- // Simulate that by clearing all primaries on each VM.
- // See GemFireContainer.clear
-
- SerializableCallable clear = new SerializableCallable("clear") {
- public Object call() throws Exception {
- PartitionedRegion r = (PartitionedRegion) getRootRegion(name);
-
- r.clearLocalPrimaries();
-
- return null;
- }
- };
-
- // Invoke the clears concurrently
- AsyncInvocation[] async = new AsyncInvocation[vms.length];
- for (int i = 0; i < vms.length; i++) {
- async[i] = vms[i].invokeAsync(clear);
- }
-
- // Get the clear results.
- for (int i = 0; i < async.length; i++) {
- async[i].getResult();
- }
-
- } finally {
- simulateClearForTests(false);
- }
- }
-
- protected void simulateClearForTests(final boolean isGfxd) {
- SerializableRunnable setGfxd = new SerializableRunnable() {
- @Override
- public void run() {
- if (isGfxd) {
- LocalRegion.simulateClearForTests(true);
- } else {
- LocalRegion.simulateClearForTests(false);
- }
- }
- };
- setGfxd.run();
- invokeInEveryVM(setGfxd);
- }
-
- /**
- * Test that we can locally destroy a region on a member without causing
- * problems with the data in HDFS. This was disabled due to ticket 47793.
- *
- * @throws InterruptedException
- */
- public void testLocalDestroy() throws InterruptedException {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- int numEntries = 200;
-
- final String folderPath = "./testLocalDestroy";
- final String uniqueName = "testLocalDestroy";
-
- createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 1, false, true);
- createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, 1, false, true);
-
- doPuts(vm0, uniqueName, numEntries);
-
- // Make sure some files have been written to hdfs and wait for
- // the queue to drain.
- verifyDataInHDFS(vm0, uniqueName, true, true, true, numEntries);
-
- validate(vm0, uniqueName, numEntries);
-
- SerializableCallable localDestroy = new SerializableCallable("local destroy") {
- public Object call() throws Exception {
- Region r = getRootRegion(uniqueName);
- r.localDestroyRegion();
- return null;
- }
- };
-
- vm0.invoke(localDestroy);
-
- verifyNoQOrPR(vm0);
-
- validate(vm1, uniqueName, numEntries);
-
- vm1.invoke(localDestroy);
-
- verifyNoQOrPR(vm1);
-
- closeCache(vm0);
- closeCache(vm1);
-
- // Restart vm0 and see if the data is still available from HDFS
- createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 1, false, true);
-
- validate(vm0, uniqueName, numEntries);
- }
-
- /**
- * Test that doing a destroyRegion removes all data from HDFS.
- *
- * @throws InterruptedException
- */
- public void testGlobalDestroyWithHDFSData() throws InterruptedException {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
-
- final String folderPath = "./testGlobalDestroyWithHDFSData";
- final String uniqueName = "testGlobalDestroyWithHDFSData";
- int numEntries = 200;
-
- createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 1, false, true);
- createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, 1, false, true);
-
- doPuts(vm0, uniqueName, numEntries);
-
- // Make sure some files have been written to hdfs.
- verifyDataInHDFS(vm0, uniqueName, true, true, false, numEntries);
-
- SerializableCallable globalDestroy = new SerializableCallable("destroy") {
- public Object call() throws Exception {
- Region r = getRootRegion(uniqueName);
- r.destroyRegion();
- return null;
- }
- };
-
- vm0.invoke(globalDestroy);
-
- // make sure data is not in HDFS
- verifyNoQOrPR(vm0);
- verifyNoQOrPR(vm1);
- verifyNoHDFSData(vm0, uniqueName);
- verifyNoHDFSData(vm1, uniqueName);
-
- closeCache(vm0);
- closeCache(vm1);
-
- // Restart vm0 and make sure it's still empty
- createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 1, false, true);
- createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, 1, false, true);
-
- // make sure it's empty
- validateEmpty(vm0, numEntries, uniqueName);
- validateEmpty(vm1, numEntries, uniqueName);
-
- }
-
- /**
- * Test that doing a destroyRegion removes all data from HDFS while entries
- * are still sitting in the async queue.
- */
- public void _testGlobalDestroyWithQueueData() {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
-
- final String folderPath = "./testGlobalDestroyWithQueueData";
- final String uniqueName = "testGlobalDestroyWithQueueData";
- int numEntries = 200;
-
- // set a large queue timeout so that data is still in the queue
- createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 10000, false,
- true);
- createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, 10000, false,
- true);
-
- doPuts(vm0, uniqueName, numEntries);
-
- SerializableCallable globalDestroy = new SerializableCallable("destroy") {
- public Object call() throws Exception {
- Region r = getRootRegion(uniqueName);
- r.destroyRegion();
- return null;
- }
- };
-
- vm0.invoke(globalDestroy);
-
- // make sure data is not in HDFS
- verifyNoQOrPR(vm0);
- verifyNoQOrPR(vm1);
- verifyNoHDFSData(vm0, uniqueName);
- verifyNoHDFSData(vm1, uniqueName);
-
- closeCache(vm0);
- closeCache(vm1);
-
- // Restart vm0 and make sure it's still empty
- createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 1, false, true);
- createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, 1, false, true);
-
- // make sure it's empty
- validateEmpty(vm0, numEntries, uniqueName);
- validateEmpty(vm1, numEntries, uniqueName);
-
- }
-
- /**
- * Make sure all async event queues and PRs are destroyed in a member
- */
- public void verifyNoQOrPR(VM vm) {
- vm.invoke(new SerializableRunnable() {
- @Override
- public void run() {
- GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
- assertEquals(Collections.EMPTY_SET, cache.getAsyncEventQueues());
- assertEquals(Collections.EMPTY_SET, cache.getPartitionedRegions());
- }
- });
-
- }
-
- /**
- * Make sure all of the data for a region in HDFS is destroyed
- */
- public void verifyNoHDFSData(final VM vm, final String uniqueName) {
- vm.invoke(new SerializableCallable() {
- @Override
- public Object call() throws IOException {
- HDFSStoreImpl hdfsStore = (HDFSStoreImpl) ((GemFireCacheImpl)getCache()).findHDFSStore(uniqueName);
- FileSystem fs = hdfsStore.getFileSystem();
- Path path = new Path(hdfsStore.getHomeDir(), uniqueName);
- if (fs.exists(path)) {
- dumpFiles(vm, uniqueName);
- fail("Found files in " + path);
- }
- return null;
- }
- });
- }
-
- protected AsyncInvocation doAsyncPuts(VM vm, final String regionName,
- final int start, final int end, final String suffix) throws Exception {
- return doAsyncPuts(vm, regionName, start, end, suffix, "");
- }
-
- protected AsyncInvocation doAsyncPuts(VM vm, final String regionName,
- final int start, final int end, final String suffix, final String value)
- throws Exception {
- return vm.invokeAsync(new SerializableCallable("doAsyncPuts") {
- public Object call() throws Exception {
- Region r = getRootRegion(regionName);
- String v = "V";
- if (!value.equals("")) {
- v = value;
- }
- logger.info("Putting entries ");
- for (int i = start; i < end; i++) {
- r.put("K" + i, v + i + suffix);
- }
- return null;
- }
-
- });
- }
-
- public void _testGlobalDestroyFromAccessor() {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- VM vm2 = host.getVM(2);
-
- final String folderPath = "./testGlobalDestroyFromAccessor";
- final String uniqueName = "testGlobalDestroyFromAccessor";
- int numEntries = 200;
-
- createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 1, false, true);
- createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, 1, false, true);
- createServerAccessor(vm2, 7, 40, uniqueName);
-
- doPuts(vm0, uniqueName, numEntries);
-
- // Make sure some files have been written to hdfs.
- verifyDataInHDFS(vm0, uniqueName, true, true, false, numEntries);
-
- SerializableCallable globalDestroy = new SerializableCallable("destroy") {
- public Object call() throws Exception {
- Region r = getRootRegion(uniqueName);
- r.destroyRegion();
- return null;
- }
- };
-
- // Destroy the region from an accessor
- vm2.invoke(globalDestroy);
-
- // make sure data is not in HDFS
- verifyNoQOrPR(vm0);
- verifyNoQOrPR(vm1);
- verifyNoHDFSData(vm0, uniqueName);
- verifyNoHDFSData(vm1, uniqueName);
-
- closeCache(vm0);
- closeCache(vm1);
- closeCache(vm2);
-
- // Restart vm0 and make sure it's still empty
- createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 1, false, true);
- createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, 1, false, true);
-
- // make sure it's empty
- validateEmpty(vm0, numEntries, uniqueName);
- validateEmpty(vm1, numEntries, uniqueName);
- }
-
- /**
- * Create a server with the max file size set to 2 MB. Insert 4 entries of
- * 1 MB each. There should be 2 files with 2 entries each.
- *
- * @throws Throwable
- */
- public void testWOFileSizeParam() throws Throwable {
- disconnectFromDS();
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
-
- String homeDir = "./testWOFileSizeParam";
- final String uniqueName = getName();
- String value = "V";
- for (int i = 0; i < 20; i++) {
- value += value;
- }
-
- createServerRegion(vm0, 1, 1, 500, homeDir, uniqueName, 5, true, false, 2000, 2);
- createServerRegion(vm1, 1, 1, 500, homeDir, uniqueName, 5, true, false, 2000, 2);
-
- AsyncInvocation a1 = doAsyncPuts(vm0, uniqueName, 1, 3, "vm0", value);
- AsyncInvocation a2 = doAsyncPuts(vm1, uniqueName, 2, 4, "vm1", value);
-
- a1.join();
- a2.join();
-
- Thread.sleep(4000);
-
- cacheClose(vm0, false);
- cacheClose(vm1, false);
-
- // Start the VMs in parallel for the persistent version subclass
- AsyncInvocation async1 = createServerRegionAsync(vm0, 1, 1, 500, homeDir, uniqueName, 5, true, false, 2000, 2);
- AsyncInvocation async2 = createServerRegionAsync(vm1, 1, 1, 500, homeDir, uniqueName, 5, true, false, 2000, 2);
- async1.getResult();
- async2.getResult();
-
- // There should be two files in bucket 0.
- verifyTwoHDFSFilesWithTwoEntries(vm0, uniqueName, value);
-
- cacheClose(vm0, false);
- cacheClose(vm1, false);
-
- disconnectFromDS();
-
- }
-
- /**
- * Create a server with the file rollover time set to 5 seconds. Insert a few
- * entries and then sleep for 7 seconds. A file should be created. Do it
- * again. At the end, two files with the inserted entries should exist.
- *
- * @throws Throwable
- */
- public void testWOTimeForRollOverParam() throws Throwable {
- disconnectFromDS();
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
-
- String homeDir = "./testWOTimeForRollOverParam";
- final String uniqueName = getName();
-
- createServerRegion(vm0, 1, 1, 500, homeDir, uniqueName, 5, true, false, 5, 1);
- createServerRegion(vm1, 1, 1, 500, homeDir, uniqueName, 5, true, false, 5, 1);
-
- AsyncInvocation a1 = doAsyncPuts(vm0, uniqueName, 1, 8, "vm0");
- AsyncInvocation a2 = doAsyncPuts(vm1, uniqueName, 4, 10, "vm1");
-
- a1.join();
- a2.join();
-
- Thread.sleep(7000);
-
- a1 = doAsyncPuts(vm0, uniqueName, 10, 18, "vm0");
- a2 = doAsyncPuts(vm1, uniqueName, 14, 20, "vm1");
-
- a1.join();
- a2.join();
-
- Thread.sleep(7000);
-
- cacheClose(vm0, false);
- cacheClose(vm1, false);
-
- AsyncInvocation async1 = createServerRegionAsync(vm0, 1, 1, 500, homeDir, uniqueName, 5, true, false, 5, 1);
- AsyncInvocation async2 = createServerRegionAsync(vm1, 1, 1, 500, homeDir, uniqueName, 5, true, false, 5, 1);
- async1.getResult();
- async2.getResult();
-
- // There should be two files in bucket 0.
- // Each should have entry 1 to 10 and duplicate from 4 to 7
- verifyTwoHDFSFiles(vm0, uniqueName);
-
- cacheClose(vm0, false);
- cacheClose(vm1, false);
-
- disconnectFromDS();
-
- }
-
- private void createServerAccessor(VM vm, final int totalnumOfBuckets,
- final int maximumEntries, final String uniqueName) {
- SerializableCallable createRegion = new SerializableCallable() {
- public Object call() throws Exception {
- AttributesFactory af = new AttributesFactory();
- af.setDataPolicy(DataPolicy.HDFS_PARTITION);
- PartitionAttributesFactory paf = new PartitionAttributesFactory();
- paf.setTotalNumBuckets(totalnumOfBuckets);
- paf.setRedundantCopies(1);
- // make this member an accessor.
- paf.setLocalMaxMemory(0);
- af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(maximumEntries, EvictionAction.LOCAL_DESTROY));
- af.setPartitionAttributes(paf.create());
-
- Region r = createRootRegion(uniqueName, af.create());
- assertTrue(!((PartitionedRegion) r).isDataStore());
-
- return null;
- }
- };
-
- vm.invoke(createRegion);
- }
-
- @Override
- protected void verifyHDFSData(VM vm, String uniqueName) throws Exception {
-
- HashMap<String, HashMap<String, String>> filesToEntriesMap = createFilesAndEntriesMap(vm, uniqueName, uniqueName);
- HashMap<String, String> entriesMap = new HashMap<String, String>();
- for (HashMap<String, String> v : filesToEntriesMap.values()) {
- entriesMap.putAll(v);
- }
- verifyInEntriesMap(entriesMap, 1, 50, "vm0");
- verifyInEntriesMap(entriesMap, 40, 100, "vm1");
- verifyInEntriesMap(entriesMap, 40, 100, "vm2");
- verifyInEntriesMap(entriesMap, 90, 150, "vm3");
-
- }
-
- protected void verifyTwoHDFSFiles(VM vm, String uniqueName) throws Exception {
-
- HashMap<String, HashMap<String, String>> filesToEntriesMap = createFilesAndEntriesMap(vm, uniqueName, uniqueName);
-
- assertTrue("there should be exactly two files, but there are "
- + filesToEntriesMap.size(), filesToEntriesMap.size() == 2);
- long timestamp = Long.MAX_VALUE;
- String olderFile = null;
- for (Map.Entry<String, HashMap<String, String>> e : filesToEntriesMap
- .entrySet()) {
- String fileName = e.getKey().substring(
- 0,
- e.getKey().length()
- - AbstractHoplogOrganizer.SEQ_HOPLOG_EXTENSION.length());
- long newTimeStamp = Long.parseLong(fileName.substring(
- fileName.indexOf("-") + 1, fileName.lastIndexOf("-")));
- if (newTimeStamp < timestamp) {
- olderFile = e.getKey();
- timestamp = newTimeStamp;
- }
- }
- verifyInEntriesMap(filesToEntriesMap.get(olderFile), 1, 8, "vm0");
- verifyInEntriesMap(filesToEntriesMap.get(olderFile), 4, 10, "vm1");
- filesToEntriesMap.remove(olderFile);
- verifyInEntriesMap(filesToEntriesMap.values().iterator().next(), 10, 18, "vm0");
- verifyInEntriesMap(filesToEntriesMap.values().iterator().next(), 14, 20, "vm1");
- }
-
- protected void verifyTwoHDFSFilesWithTwoEntries(VM vm, String uniqueName,
- String value) throws Exception {
-
- HashMap<String, HashMap<String, String>> filesToEntriesMap = createFilesAndEntriesMap(vm, uniqueName, uniqueName);
-
- assertTrue( "there should be exactly two files, but there are " + filesToEntriesMap.size(), filesToEntriesMap.size() == 2);
- HashMap<String, String> entriesMap = new HashMap<String, String>();
- for (HashMap<String, String> v : filesToEntriesMap.values()) {
- entriesMap.putAll(v);
- }
- assertTrue( "Expected key K1 received " + entriesMap.get(value+ "1vm0"), entriesMap.get(value+ "1vm0").equals("K1"));
- assertTrue( "Expected key K2 received " + entriesMap.get(value+ "2vm0"), entriesMap.get(value+ "2vm0").equals("K2"));
- assertTrue( "Expected key K2 received " + entriesMap.get(value+ "2vm1"), entriesMap.get(value+ "2vm1").equals("K2"));
- assertTrue( "Expected key K3 received " + entriesMap.get(value+ "3vm1"), entriesMap.get(value+ "3vm1").equals("K3"));
- }
-
- /**
- * verify that a PR accessor can be started
- */
- public void testPRAccessor() {
- Host host = Host.getHost(0);
- VM accessor = host.getVM(0);
- VM datastore1 = host.getVM(1);
- VM datastore2 = host.getVM(2);
- VM accessor2 = host.getVM(3);
- final String regionName = getName();
- final String storeName = "store_" + regionName;
-
- SerializableCallable createRegion = new SerializableCallable() {
- @Override
- public Object call() throws Exception {
- HDFSStoreFactory storefactory = getCache().createHDFSStoreFactory();
- homeDir = new File("../" + regionName).getCanonicalPath();
- storefactory.setHomeDir(homeDir);
- storefactory.create(storeName);
- AttributesFactory<Integer, String> af = new AttributesFactory<Integer, String>();
- af.setDataPolicy(DataPolicy.HDFS_PARTITION);
- af.setHDFSStoreName(storeName);
- Region r = getCache().createRegionFactory(af.create()).create(regionName);
- r.put("key1", "value1");
- return null;
- }
- };
-
- SerializableCallable createAccessorRegion = new SerializableCallable() {
- @Override
- public Object call() throws Exception {
- HDFSStoreFactory storefactory = getCache().createHDFSStoreFactory();
- homeDir = new File("../" + regionName).getCanonicalPath();
- storefactory.setHomeDir(homeDir);
- storefactory.create(storeName);
- // DataPolicy PARTITION with localMaxMemory 0 cannot be created
- AttributesFactory<Integer, String> af = new AttributesFactory<Integer, String>();
- af.setDataPolicy(DataPolicy.PARTITION);
- PartitionAttributesFactory<Integer, String> paf = new PartitionAttributesFactory<Integer, String>();
- paf.setLocalMaxMemory(0);
- af.setPartitionAttributes(paf.create());
- // DataPolicy PARTITION with localMaxMemory 0 can be created if hdfsStoreName is set
- af.setHDFSStoreName(storeName);
- // No need to check with different storeNames (can never be done in GemFireXD)
- Region r = getCache().createRegionFactory(af.create()).create(regionName);
- r.localDestroyRegion();
- // DataPolicy HDFS_PARTITION with localMaxMemory 0 can be created
- af = new AttributesFactory<Integer, String>();
- af.setDataPolicy(DataPolicy.HDFS_PARTITION);
- af.setPartitionAttributes(paf.create());
- getCache().createRegionFactory(af.create()).create(regionName);
- return null;
- }
- };
-
- datastore1.invoke(createRegion);
- accessor.invoke(createAccessorRegion);
- datastore2.invoke(createRegion);
- accessor2.invoke(createAccessorRegion);
- }
-
- /**
- * verify that PUT DML does not read from HDFS
- */
- public void testPUTDMLSupport() {
- doPUTDMLWork(false);
- }
-
- public void testPUTDMLBulkSupport() {
- doPUTDMLWork(true);
- }
-
- private void doPUTDMLWork(final boolean isPutAll) {
- Host host = Host.getHost(0);
- VM vm1 = host.getVM(0);
- VM vm2 = host.getVM(1);
- final String regionName = getName();
-
- createServerRegion(vm1, 7, 1, 50, "./" + regionName, regionName, 1000);
- createServerRegion(vm2, 7, 1, 50, "./" + regionName, regionName, 1000);
-
- vm1.invoke(new SerializableCallable() {
- @Override
- public Object call() throws Exception {
- Region r = getCache().getRegion(regionName);
- LocalRegion lr = (LocalRegion) r;
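- // Baseline: no reads should have hit HDFS before any operations run.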
- SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
- long readsFromHDFS = stats.getRead().getCount();
- assertEquals(0, readsFromHDFS);
- if (isPutAll) {
- Map m = new HashMap();
- // map with only one entry
- m.put("key0", "value0");
- DistributedPutAllOperation ev = lr.newPutAllOperation(m, null);
- lr.basicPutAll(m, ev, null);
- m.clear();
- // map with multiple entries
- for (int i = 1; i < 100; i++) {
- m.put("key" + i, "value" + i);
- }
- ev = lr.newPutAllOperation(m, null);
- lr.basicPutAll(m, ev, null);
- } else {
- for (int i = 0; i < 100; i++) {
- r.put("key" + i, "value" + i);
- }
- }
- return null;
- }
- });
-
- SerializableCallable getHDFSReadCount = new SerializableCallable() {
- @Override
- public Object call() throws Exception {
- SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
- return stats.getRead().getCount();
- }
- };
-
- long vm1Count = (Long) vm1.invoke(getHDFSReadCount);
- long vm2Count = (Long) vm2.invoke(getHDFSReadCount);
- assertEquals(100, vm1Count + vm2Count);
-
- pause(10 * 1000);
-
- vm1.invoke(new SerializableCallable() {
- @Override
- public Object call() throws Exception {
- // do puts using the new api
- LocalRegion lr = (LocalRegion) getCache().getRegion(regionName);
- if (isPutAll) {
- Map m = new HashMap();
- // map with only one entry
- m.put("key0", "value0");
- DistributedPutAllOperation ev = lr.newPutAllForPUTDmlOperation(m, null);
- lr.basicPutAll(m, ev, null);
- m.clear();
- // map with multiple entries
- for (int i = 1; i < 200; i++) {
- m.put("key" + i, "value" + i);
- }
- ev = lr.newPutAllForPUTDmlOperation(m, null);
- lr.basicPutAll(m, ev, null);
- } else {
- for (int i = 0; i < 200; i++) {
- EntryEventImpl ev = lr.newPutEntryEvent("key" + i, "value" + i, null);
- lr.validatedPut(ev, System.currentTimeMillis());
- }
- }
- return null;
- }
- });
-
- // verify the stat for hdfs reads has not incremented
- vm1Count = (Long) vm1.invoke(getHDFSReadCount);
- vm2Count = (Long) vm2.invoke(getHDFSReadCount);
- assertEquals(100, vm1Count + vm2Count);
-
- vm1.invoke(new SerializableCallable() {
- @Override
- public Object call() throws Exception {
- Region r = getCache().getRegion(regionName);
- for (int i = 0; i < 200; i++) {
- assertEquals("value" + i, r.get("key" + i));
- }
- return null;
- }
- });
- }
-
- /**
- * verify that get on operational data does not read from HDFS
- */
- public void testGetOperationalData() {
- Host host = Host.getHost(0);
- VM vm1 = host.getVM(0);
- VM vm2 = host.getVM(1);
- final String regionName = getName();
-
- createServerRegion(vm1, 7, 1, 50, "./"+regionName, regionName, 1000);
- createServerRegion(vm2, 7, 1, 50, "./"+regionName, regionName, 1000);
-
- vm1.invoke(new SerializableCallable() {
- @Override
- public Object call() throws Exception {
- Region r = getCache().getRegion(regionName);
- SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
- long readsFromHDFS = stats.getRead().getCount();
- assertEquals(0, readsFromHDFS);
- for (int i = 0; i < 100; i++) {
- logger.info("SWAP:DOING PUT:key{}", i);
- r.put("key" + i, "value" + i);
- }
- return null;
- }
- });
-
- SerializableCallable getHDFSReadCount = new SerializableCallable() {
- @Override
- public Object call() throws Exception {
- SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
- return stats.getRead().getCount();
- }
- };
-
- long vm1Count = (Long) vm1.invoke(getHDFSReadCount);
- long vm2Count = (Long) vm2.invoke(getHDFSReadCount);
- assertEquals(100, vm1Count + vm2Count);
-
- pause(10 * 1000);
-
- // verify that get increments the read stat
- vm1.invoke(new SerializableCallable() {
- @Override
- public Object call() throws Exception {
- Region r = getCache().getRegion(regionName);
- for (int i = 0; i < 200; i++) {
- if (i < 100) {
- logger.info("SWAP:DOING GET:key", i);
- assertEquals("value" + i, r.get("key" + i));
- } else {
- assertNull(r.get("key" + i));
- }
- }
- return null;
- }
- });
-
- vm1Count = (Long) vm1.invoke(getHDFSReadCount);
- vm2Count = (Long) vm2.invoke(getHDFSReadCount);
- // initial 100 + 150 for get (since 50 are in memory)
- assertEquals(250, vm1Count + vm2Count);
-
- // do gets with readFromHDFS set to false
- vm1.invoke(new SerializableCallable() {
- @Override
- public Object call() throws Exception {
- Region r = getCache().getRegion(regionName);
- LocalRegion lr = (LocalRegion) r;
- int numEntries = 0;
- for (int i = 0; i < 200; i++) {
- logger.info("SWAP:DOING GET NO READ:key", i);
- Object val = lr.get("key"+i, null, true, false, false, null, null, false, false/*allowReadFromHDFS*/);
- if (val != null) {
- numEntries++;
- }
- }
- assertEquals(50, numEntries); // entries in memory
- return null;
- }
- });
-
- vm1Count = (Long) vm1.invoke(getHDFSReadCount);
- vm2Count = (Long) vm2.invoke(getHDFSReadCount);
- // get should not have incremented
- assertEquals(250, vm1Count + vm2Count);
-
- /* MergeGemXDHDFSToGFE: this API has not been merged because it is not called by any code */
- /*
- // do gets using DataView
- SerializableCallable getUsingDataView = new SerializableCallable() {
- @Override
- public Object call() throws Exception {
- Region r = getCache().getRegion(regionName);
- LocalRegion lr = (LocalRegion) r;
- PartitionedRegion pr = (PartitionedRegion) lr;
- long numEntries = 0;
- for (int i=0; i<200; i++) {
- InternalDataView idv = lr.getDataView();
- logger.debug("SWAP:DATAVIEW");
- Object val = idv.getLocally("key"+i, null, PartitionedRegionHelper.getHashKey(pr, "key"+i), lr, true, true, null, null, false, false);
- if (val != null) {
- numEntries++;
- }
- }
- return numEntries;
- }
- };
-
- vm1Count = (Long) vm1.invoke(getUsingDataView);
- vm2Count = (Long) vm2.invoke(getUsingDataView);
- assertEquals(50 * 2, vm1Count + vm2Count);// both VMs will find 50 entries*/
-
- vm1Count = (Long) vm1.invoke(getHDFSReadCount);
- vm2Count = (Long) vm2.invoke(getHDFSReadCount);
- // get should not have incremented
- assertEquals(250, vm1Count + vm2Count);
-
- }
-
- public void testSizeEstimate() {
- Host host = Host.getHost(0);
- VM vm1 = host.getVM(0);
- VM vm2 = host.getVM(1);
- VM vm3 = host.getVM(2);
- final String regionName = getName();
-
- createServerRegion(vm1, 7, 1, 50, "./"+regionName, regionName, 1000);
- createServerRegion(vm2, 7, 1, 50, "./"+regionName, regionName, 1000);
- createServerRegion(vm3, 7, 1, 50, "./"+regionName, regionName, 1000);
-
- final int size = 226;
-
- vm1.invoke(new SerializableCallable() {
- @Override
- public Object call() throws Exception {
- Region r = getCache().getRegion(regionName);
- // LocalRegion lr = (LocalRegion) r;
- for (int i = 0; i < size; i++) {
- r.put("key" + i, "value" + i);
- }
- // before flush
- // assertEquals(size, lr.sizeEstimate());
- return null;
- }
- });
-
- pause(10 * 1000);
-
- vm2.invoke(new SerializableCallable() {
- @Override
- public Object call() throws Exception {
- Region r = getCache().getRegion(regionName);
- LocalRegion lr = (LocalRegion) r;
- logger.debug("SWAP:callingsizeEstimate");
- long estimate = lr.sizeEstimate();
- double err = Math.abs(estimate - size) / (double) size;
- System.out.println("SWAP:estimate:" + estimate);
- assertTrue(err < 0.2);
- return null;
- }
- });
- }
-
- public void testForceAsyncMajorCompaction() throws Exception {
- doForceCompactionTest(true, false);
- }
-
- public void testForceSyncMajorCompaction() throws Exception {
- doForceCompactionTest(true, true);
- }
-
- private void doForceCompactionTest(final boolean isMajor, final boolean isSynchronous) throws Exception {
- Host host = Host.getHost(0);
- VM vm1 = host.getVM(0);
- VM vm2 = host.getVM(1);
- VM vm3 = host.getVM(2);
- final String regionName = getName();
-
- createServerRegion(vm1, 7, 1, 50, "./" + regionName, regionName, 1000);
- createServerRegion(vm2, 7, 1, 50, "./" + regionName, regionName, 1000);
- createServerRegion(vm3, 7, 1, 50, "./" + regionName, regionName, 1000);
-
- SerializableCallable noCompaction = new SerializableCallable() {
- @Override
- public Object call() throws Exception {
- SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
- if (isMajor) {
- assertEquals(0, stats.getMajorCompaction().getCount());
- } else {
- assertEquals(0, stats.getMinorCompaction().getCount());
- }
- return null;
- }
- };
-
- vm1.invoke(noCompaction);
- vm2.invoke(noCompaction);
- vm3.invoke(noCompaction);
-
- vm1.invoke(new SerializableCallable() {
- @Override
- public Object call() throws Exception {
- Region r = getCache().getRegion(regionName);
- for (int i = 0; i < 500; i++) {
- r.put("key" + i, "value" + i);
- if (i % 100 == 0) {
- // wait for flush
- pause(3000);
- }
- }
- pause(3000);
- PartitionedRegion pr = (PartitionedRegion) r;
- long lastCompactionTS = pr.lastMajorHDFSCompaction();
- assertEquals(0, lastCompactionTS);
- long beforeCompact = System.currentTimeMillis();
- pr.forceHDFSCompaction(true, isSynchronous ? 0 : 1);
- if (isSynchronous) {
- final SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
- assertTrue(stats.getMajorCompaction().getCount() > 0);
- assertTrue(pr.lastMajorHDFSCompaction() >= beforeCompact);
- }
- return null;
- }
- });
-
- if (!isSynchronous) {
- SerializableCallable verifyCompactionStat = new SerializableCallable() {
- @Override
- public Object call() throws Exception {
- final SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
- waitForCriterion(new WaitCriterion() {
- @Override
- public boolean done() {
- return stats.getMajorCompaction().getCount() > 0;
- }
-
- @Override
- public String description() {
- return "Major compaction stat not > 0";
- }
- }, 30 * 1000, 1000, true);
- return null;
- }
- };
-
- vm1.invoke(verifyCompactionStat);
- vm2.invoke(verifyCompactionStat);
- vm3.invoke(verifyCompactionStat);
- } else {
- SerializableCallable verifyCompactionStat = new SerializableCallable() {
- @Override
- public Object call() throws Exception {
- final SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
- assertTrue(stats.getMajorCompaction().getCount() > 0);
- return null;
- }
- };
- vm2.invoke(verifyCompactionStat);
- vm3.invoke(verifyCompactionStat);
- }
- }
-
- public void testFlushQueue() throws Exception {
- doFlushQueue(false);
- }
-
- public void testFlushQueueWO() throws Exception {
- doFlushQueue(true);
- }
-
- private void doFlushQueue(boolean wo) throws Exception {
- Host host = Host.getHost(0);
- VM vm1 = host.getVM(0);
- VM vm2 = host.getVM(1);
- VM vm3 = host.getVM(2);
- final String regionName = getName();
-
- createServerRegion(vm1, 7, 1, 50, "./"+regionName, regionName, 300000, wo, false);
- createServerRegion(vm2, 7, 1, 50, "./"+regionName, regionName, 300000, wo, false);
- createServerRegion(vm3, 7, 1, 50, "./"+regionName, regionName, 300000, wo, false);
-
- vm1.invoke(new SerializableCallable() {
- @Override
- public Object call() throws Exception {
- PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(regionName);
- for (int i = 0; i < 500; i++) {
- pr.put("key" + i, "value" + i);
- }
-
- pr.flushHDFSQueue(0);
- return null;
- }
- });
-
- SerializableCallable verify = new SerializableCallable() {
- @Override
- public Object call() throws Exception {
- PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(regionName);
- assertEquals(0, pr.getHDFSEventQueueStats().getEventQueueSize());
- return null;
- }
- };
-
- vm1.invoke(verify);
- vm2.invoke(verify);
- vm3.invoke(verify);
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSOffHeapBasicDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSOffHeapBasicDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSOffHeapBasicDUnitTest.java
deleted file mode 100644
index ee517d2..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSOffHeapBasicDUnitTest.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.File;
-import java.util.Properties;
-
-import com.gemstone.gemfire.cache.AttributesFactory;
-import com.gemstone.gemfire.cache.DataPolicy;
-import com.gemstone.gemfire.cache.EvictionAction;
-import com.gemstone.gemfire.cache.EvictionAttributes;
-import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.internal.cache.OffHeapTestUtil;
-
-import dunit.SerializableCallable;
-import dunit.SerializableRunnable;
-
-@SuppressWarnings({ "serial", "rawtypes", "deprecation" })
-public class RegionWithHDFSOffHeapBasicDUnitTest extends
- RegionWithHDFSBasicDUnitTest {
- static {
- System.setProperty("gemfire.trackOffHeapRefCounts", "true");
- }
-
- public RegionWithHDFSOffHeapBasicDUnitTest(String name) {
- super(name);
- }
-
- @Override
- public void tearDown2() throws Exception {
- SerializableRunnable checkOrphans = new SerializableRunnable() {
-
- @Override
- public void run() {
- if(hasCache()) {
- OffHeapTestUtil.checkOrphans();
- }
- }
- };
- try {
- checkOrphans.run();
- invokeInEveryVM(checkOrphans);
- } finally {
- // proceed with tearDown2 anyway.
- super.tearDown2();
- }
- }
-
- public void testDelta() {
- // Do nothing; deltas aren't supported with off-heap.
- }
-
- @Override
- protected SerializableCallable getCreateRegionCallable(final int totalnumOfBuckets,
- final int batchSizeMB, final int maximumEntries, final String folderPath,
- final String uniqueName, final int batchInterval, final boolean queuePersistent,
- final boolean writeonly, final long timeForRollover, final long maxFileSize) {
- SerializableCallable createRegion = new SerializableCallable() {
- public Object call() throws Exception {
- AttributesFactory af = new AttributesFactory();
- af.setDataPolicy(DataPolicy.HDFS_PARTITION);
- PartitionAttributesFactory paf = new PartitionAttributesFactory();
- paf.setTotalNumBuckets(totalnumOfBuckets);
- paf.setRedundantCopies(1);
-
- af.setHDFSStoreName(uniqueName);
- af.setPartitionAttributes(paf.create());
- HDFSStoreFactory hsf = getCache().createHDFSStoreFactory();
- // Going two levels up to avoid home directories getting created in
- // a VM-specific directory. This avoids failures in those tests where
- // datastores are restarted and bucket ownership changes between VMs.
- homeDir = new File(tmpDir + "/../../" + folderPath).getCanonicalPath();
- hsf.setHomeDir(homeDir);
- hsf.setBatchSize(batchSizeMB);
- hsf.setBufferPersistent(queuePersistent);
- hsf.setMaxMemory(3);
- hsf.setBatchInterval(batchInterval);
- if (timeForRollover != -1) {
- hsf.setWriteOnlyFileRolloverInterval((int)timeForRollover);
- System.setProperty("gemfire.HDFSRegionDirector.FILE_ROLLOVER_TASK_INTERVAL_SECONDS", "1");
- }
- if (maxFileSize != -1) {
- hsf.setWriteOnlyFileRolloverSize((int) maxFileSize);
- }
- hsf.create(uniqueName);
-
- af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(maximumEntries, EvictionAction.LOCAL_DESTROY));
-
- af.setHDFSWriteOnly(writeonly);
- af.setOffHeap(true);
- Region r = createRootRegion(uniqueName, af.create());
- ((LocalRegion)r).setIsTest();
-
- return 0;
- }
- };
- return createRegion;
- }
-
- @Override
- public Properties getDistributedSystemProperties() {
- Properties props = super.getDistributedSystemProperties();
- props.setProperty("off-heap-memory-size", "50m");
- return props;
- }
-}
[06/15] incubator-geode git commit: GEODE-429: Remove HDFS RegionShortcuts
Posted by as...@apache.org.
GEODE-429: Remove HDFS RegionShortcuts
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/b3f838ea
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/b3f838ea
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/b3f838ea
Branch: refs/heads/feature/GEODE-409
Commit: b3f838ea6a0b0eb150dcb92b7f6e46e5ee9db1e4
Parents: ef5d9e2
Author: Ashvin Agrawal <as...@apache.org>
Authored: Mon Oct 19 11:55:53 2015 -0700
Committer: Ashvin Agrawal <as...@apache.org>
Committed: Wed Oct 21 08:55:22 2015 -0700
----------------------------------------------------------------------
.../gemstone/gemfire/cache/RegionShortcut.java | 50 --------------------
.../internal/cache/GemFireCacheImpl.java | 42 ----------------
.../hdfs/internal/HDFSConfigJUnitTest.java | 8 ++--
.../hdfs/internal/HDFSEntriesSetJUnitTest.java | 2 +-
.../internal/hoplog/BaseHoplogTestCase.java | 2 +-
...FSQueueRegionOperationsOffHeapJUnitTest.java | 2 +-
.../cache/HDFSRegionOperationsJUnitTest.java | 2 +-
.../HDFSRegionOperationsOffHeapJUnitTest.java | 2 +-
.../HDFSRegionMBeanAttributeJUnitTest.java | 2 +-
9 files changed, 10 insertions(+), 102 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3f838ea/gemfire-core/src/main/java/com/gemstone/gemfire/cache/RegionShortcut.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/RegionShortcut.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/RegionShortcut.java
index ae3cbdb..5000032 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/RegionShortcut.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/RegionShortcut.java
@@ -226,54 +226,4 @@ public enum RegionShortcut {
* The actual RegionAttributes for a REPLICATE_PROXY region set the {@link DataPolicy} to {@link DataPolicy#EMPTY} and {@link Scope} to {@link Scope#DISTRIBUTED_ACK}.
*/
REPLICATE_PROXY,
-
- /**
- * A PARTITION_HDFS has local state that is partitioned across each peer member
- * that created the region.
- * In addition its state is written to HDFS.
- * The random access to the data in HDFS is also enabled.
- * The actual RegionAttributes for a PARTITION_HDFS region set the {@link DataPolicy} to {@link DataPolicy#HDFS_PARTITION}.
- * The HDFS event queue's property random-access is set to true.
- * The {@link EvictionAttributes} are set to {@link EvictionAlgorithm#LRU_HEAP}
- * with {@link EvictionAction#OVERFLOW_TO_DISK}.
- */
- PARTITION_HDFS,
-
- /**
- * A PARTITION_REDUNDANT_HDFS has local state that is partitioned across each peer member
- * that created the region.
- * In addition its state is written to HDFS and recovered from HDFS when the region is
- * created. The random access to the data in HDFS is also enabled.
- * In addition an extra copy of the data is kept in memory.
- * The actual RegionAttributes for a PARTITION_REDUNDANT_HDFS region set the {@link DataPolicy} to {@link DataPolicy#HDFS_PARTITION}
- * and the redundant-copies to 1. The HDFS event queue's property random-access is set to true.
- * The {@link EvictionAttributes} are set to {@link EvictionAlgorithm#LRU_HEAP}
- * with {@link EvictionAction#OVERFLOW_TO_DISK}.
- */
- PARTITION_REDUNDANT_HDFS,
-
- /**
- * A PARTITION_WRITEONLY_HDFS_STORE has local state that is partitioned across each peer member
- * that created the region.
- * In addition its state is written to HDFS and recovered from HDFS when the region is
- * created. The random access to the data in HDFS is disabled.
- * The actual RegionAttributes for a PARTITION_WRITEONLY_HDFS_STORE region set the {@link DataPolicy} to {@link DataPolicy#HDFS_PARTITION}.
- * The HDFS event queue's property write only is set as true.
- * The {@link EvictionAttributes} are set to {@link EvictionAlgorithm#LRU_HEAP}
- * with {@link EvictionAction#OVERFLOW_TO_DISK}.
- */
- PARTITION_WRITEONLY_HDFS_STORE,
-
- /**
- * A PARTITION_REDUNDANT_WRITEONLY_HDFS_STORE has local state that is partitioned across each peer member
- * that created the region.
- * In addition its state is written to HDFS and recovered from HDFS when the region is
- * created. The random access to the data in HDFS is disabled.
- * In addition an extra copy of the data is kept in memory.
- * The actual RegionAttributes for a PARTITION_REDUNDANT_WRITEONLY_HDFS_STORE region set the {@link DataPolicy} to {@link DataPolicy#HDFS_PARTITION}
- * and the redundant-copies to 1. The HDFS event queue's property write only is set as true.
- * The {@link EvictionAttributes} are set to {@link EvictionAlgorithm#LRU_HEAP}
- * with {@link EvictionAction#OVERFLOW_TO_DISK}.
- */
- PARTITION_REDUNDANT_WRITEONLY_HDFS_STORE
}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3f838ea/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
index 4bf0f42..0d4961b 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
@@ -4916,48 +4916,6 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
c.setRegionAttributes(pra.toString(), af.create());
break;
}
- case PARTITION_HDFS: {
- AttributesFactory af = new AttributesFactory();
- af.setDataPolicy(DataPolicy.HDFS_PARTITION);
- PartitionAttributesFactory paf = new PartitionAttributesFactory();
- af.setPartitionAttributes(paf.create());
- af.setEvictionAttributes(EvictionAttributes.createLRUHeapAttributes(null, EvictionAction.OVERFLOW_TO_DISK));
- af.setHDFSWriteOnly(false);
- c.setRegionAttributes(pra.toString(), af.create());
- break;
- }
- case PARTITION_REDUNDANT_HDFS: {
- AttributesFactory af = new AttributesFactory();
- af.setDataPolicy(DataPolicy.HDFS_PARTITION);
- PartitionAttributesFactory paf = new PartitionAttributesFactory();
- paf.setRedundantCopies(1);
- af.setPartitionAttributes(paf.create());
- af.setEvictionAttributes(EvictionAttributes.createLRUHeapAttributes(null, EvictionAction.OVERFLOW_TO_DISK));
- af.setHDFSWriteOnly(false);
- c.setRegionAttributes(pra.toString(), af.create());
- break;
- }
- case PARTITION_WRITEONLY_HDFS_STORE: {
- AttributesFactory af = new AttributesFactory();
- af.setDataPolicy(DataPolicy.HDFS_PARTITION);
- PartitionAttributesFactory paf = new PartitionAttributesFactory();
- af.setPartitionAttributes(paf.create());
- af.setEvictionAttributes(EvictionAttributes.createLRUHeapAttributes(null, EvictionAction.OVERFLOW_TO_DISK));
- af.setHDFSWriteOnly(true);
- c.setRegionAttributes(pra.toString(), af.create());
- break;
- }
- case PARTITION_REDUNDANT_WRITEONLY_HDFS_STORE: {
- AttributesFactory af = new AttributesFactory();
- af.setDataPolicy(DataPolicy.HDFS_PARTITION);
- PartitionAttributesFactory paf = new PartitionAttributesFactory();
- paf.setRedundantCopies(1);
- af.setPartitionAttributes(paf.create());
- af.setEvictionAttributes(EvictionAttributes.createLRUHeapAttributes(null, EvictionAction.OVERFLOW_TO_DISK));
- af.setHDFSWriteOnly(true);
- c.setRegionAttributes(pra.toString(), af.create());
- break;
- }
default:
throw new IllegalStateException("unhandled enum " + pra);
}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3f838ea/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
index a1c9eb1..b0c6520 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
@@ -71,7 +71,7 @@ public class HDFSConfigJUnitTest extends TestCase {
try {
HDFSStoreFactory hsf = this.c.createHDFSStoreFactory();
HDFSStore store = hsf.create("myHDFSStore");
- RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+ RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
Region r1 = rf1.setHDFSStoreName("myHDFSStore").create("r1");
r1.put("k1", "v1");
@@ -89,7 +89,7 @@ public class HDFSConfigJUnitTest extends TestCase {
hsf = this.c.createHDFSStoreFactory();
hsf.create("myHDFSStore");
- r1 = this.c.createRegionFactory(RegionShortcut.PARTITION_WRITEONLY_HDFS_STORE).setHDFSStoreName("myHDFSStore")
+ r1 = this.c.createRegionFactory(RegionShortcut.PARTITION).setHDFSStoreName("myHDFSStore")
.create("r1");
r1.put("k1", "v1");
@@ -126,7 +126,7 @@ public class HDFSConfigJUnitTest extends TestCase {
hsf.create("myHDFSStore");
- r1 = this.c.createRegionFactory(RegionShortcut.PARTITION_WRITEONLY_HDFS_STORE).setHDFSStoreName("myHDFSStore")
+ r1 = this.c.createRegionFactory(RegionShortcut.PARTITION).setHDFSStoreName("myHDFSStore")
.setHDFSWriteOnly(true).create("r1");
r1.put("k1", "v1");
@@ -467,7 +467,7 @@ public class HDFSConfigJUnitTest extends TestCase {
float percentage = 100 * (float) blockCacheSize / (float) heapSize;
hsf.setBlockCacheSize(percentage);
HDFSStoreImpl store = (HDFSStoreImpl) hsf.create("myHDFSStore");
- RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+ RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
//Create a region that evicts everything
LocalRegion r1 = (LocalRegion) rf1.setHDFSStoreName("myHDFSStore").setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(1)).create("r1");
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3f838ea/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
index 75dfa93..f864176 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
@@ -72,7 +72,7 @@ public class HDFSEntriesSetJUnitTest extends TestCase {
PartitionAttributesFactory paf = new PartitionAttributesFactory();
paf.setTotalNumBuckets(1);
- RegionFactory rf = cache.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+ RegionFactory rf = cache.createRegionFactory(RegionShortcut.PARTITION);
region = (PartitionedRegion) rf.setHDFSStoreName("test").setPartitionAttributes(paf.create()).create("test");
// prime the region so buckets get created
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3f838ea/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
index eb713c0..b35f756 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
@@ -89,7 +89,7 @@ public abstract class BaseHoplogTestCase extends TestCase {
configureHdfsStoreFactory();
hdfsStore = (HDFSStoreImpl) hsf.create(HDFS_STORE_NAME);
- regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+ regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION);
regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
region = regionfactory.create(getName());
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3f838ea/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
index f28c138..4565568 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
@@ -33,7 +33,7 @@ public class HDFSQueueRegionOperationsOffHeapJUnitTest extends HDFSQueueRegionOp
}
@Override
protected Region<Integer, String> createRegion(String regionName) {
- RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+ RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION);
PartitionAttributes prAttr = new PartitionAttributesFactory().setTotalNumBuckets(10).create();
rf.setPartitionAttributes(prAttr);
rf.setOffHeap(true);
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3f838ea/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
index 50b213a..b24ee5d 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
@@ -105,7 +105,7 @@ public class HDFSRegionOperationsJUnitTest extends TestCase {
}
protected Region<Integer, String> createRegion(String regionName) {
- RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+ RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION);
PartitionAttributes prAttr = new PartitionAttributesFactory().setTotalNumBuckets(10).create();
rf.setPartitionAttributes(prAttr);
rf.setHDFSStoreName(hdfsStore.getName());
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3f838ea/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
index 421cd28..f9c96a2 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
@@ -55,7 +55,7 @@ public class HDFSRegionOperationsOffHeapJUnitTest extends HDFSRegionOperationsJU
}
@Override
protected Region<Integer, String> createRegion(String regionName) {
- RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+ RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION);
PartitionAttributes prAttr = new PartitionAttributesFactory().setTotalNumBuckets(10).create();
rf.setPartitionAttributes(prAttr);
rf.setOffHeap(true);
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3f838ea/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
index 38145d1..c563d5a 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
@@ -76,7 +76,7 @@ public class HDFSRegionMBeanAttributeJUnitTest extends TestCase {
configureHdfsStoreFactory();
hdfsStore = (HDFSStoreImpl) hsf.create(HDFS_STORE_NAME);
- RegionFactory<Object, Object> regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+ RegionFactory<Object, Object> regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION);
regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
// regionfactory.setCompressionCodec("Some");
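Taken together, the hunks above make one mechanical substitution: every use of a removed HDFS region shortcut becomes the plain PARTITION shortcut with the HDFS store attached explicitly. A minimal sketch of the resulting pattern, assuming a cache and an already-created store under the hypothetical name "myHDFSStore":

    // Sketch only: PARTITION replaces the removed PARTITION_HDFS shortcut;
    // "myHDFSStore" and "r1" are hypothetical names.
    RegionFactory rf = cache.createRegionFactory(RegionShortcut.PARTITION);
    rf.setHDFSStoreName("myHDFSStore");
    rf.setHDFSWriteOnly(true); // formerly implied by PARTITION_WRITEONLY_HDFS_STORE
    Region r1 = rf.create("r1");

The setHDFSWriteOnly call follows the usage shown in the HDFSConfigJUnitTest hunk above; the shortcut itself no longer carries that flag.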
[03/15] incubator-geode git commit: GEODE-429: Remove hdfsStore gfsh commands
Posted by as...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/HDFSStoreCommandsController.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/HDFSStoreCommandsController.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/HDFSStoreCommandsController.java
deleted file mode 100644
index c182edd..0000000
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/HDFSStoreCommandsController.java
+++ /dev/null
@@ -1,229 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.management.internal.web.controllers;
-
-import org.springframework.shell.core.annotation.CliOption;
-import org.springframework.stereotype.Controller;
-import org.springframework.web.bind.annotation.PathVariable;
-import org.springframework.web.bind.annotation.RequestMapping;
-import org.springframework.web.bind.annotation.RequestMethod;
-import org.springframework.web.bind.annotation.RequestParam;
-import org.springframework.web.bind.annotation.ResponseBody;
-
-import com.gemstone.gemfire.internal.lang.StringUtils;
-import com.gemstone.gemfire.management.cli.CliMetaData;
-import com.gemstone.gemfire.management.cli.ConverterHint;
-import com.gemstone.gemfire.management.cli.Result;
-import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
-import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
-
-/**
- * The HDFSStoreCommandsController class implements GemFire Management REST API web service endpoints for the
- * gfsh HDFS store commands.
- * <p/>
- * @author Namrata Thanvi
- * @see com.gemstone.gemfire.management.internal.cli.commands.HDFSStoreCommands
- * @see com.gemstone.gemfire.management.internal.web.controllers.AbstractCommandsController
- * @see org.springframework.stereotype.Controller
- * @see org.springframework.web.bind.annotation.PathVariable
- * @see org.springframework.web.bind.annotation.RequestMapping
- * @see org.springframework.web.bind.annotation.RequestMethod
- * @see org.springframework.web.bind.annotation.RequestParam
- * @see org.springframework.web.bind.annotation.ResponseBody
- * @since 9.0
- */
-@Controller("hdfsStoreController")
-@RequestMapping(AbstractCommandsController.REST_API_VERSION)
-@SuppressWarnings("unused")
-public class HDFSStoreCommandsController extends AbstractCommandsController {
- @RequestMapping(method = RequestMethod.GET, value = "/hdfsstores")
- @ResponseBody
- public String listHDFSStores() {
- return processCommand(CliStrings.LIST_HDFS_STORE);
- }
-
- @RequestMapping(method = RequestMethod.POST, value = "/hdfsstores")
- @ResponseBody
- public String createHdfsStore(
- @RequestParam(CliStrings.CREATE_HDFS_STORE__NAME) final String storeName,
- @RequestParam(value = CliStrings.CREATE_HDFS_STORE__NAMENODE, required=false) final String namenode,
- @RequestParam(value = CliStrings.CREATE_HDFS_STORE__HOMEDIR, required=false) final String homedir,
- @RequestParam(value = CliStrings.CREATE_HDFS_STORE__BATCHSIZE,required=false) final Integer batchSize,
- @RequestParam(value = CliStrings.CREATE_HDFS_STORE__BATCHINTERVAL, required=false) final Integer batchInterval,
- @RequestParam(value = CliStrings.CREATE_HDFS_STORE__READCACHESIZE, required=false) final Float readCachesize,
- @RequestParam(value = CliStrings.CREATE_HDFS_STORE__DISPATCHERTHREADS, required=false) final Integer dispatcherThreads,
- @RequestParam(value = CliStrings.CREATE_HDFS_STORE__MAXMEMORY, required=false) final Integer maxMemory,
- @RequestParam(value = CliStrings.CREATE_HDFS_STORE__BUFFERPERSISTENT, required=false) final Boolean persistence,
- @RequestParam(value = CliStrings.CREATE_HDFS_STORE__SYNCDISKWRITE, required=false) final Boolean synchronousDiskWrite,
- @RequestParam(value = CliStrings.CREATE_HDFS_STORE__DISKSTORENAME, required=false) final String diskStoreName,
- @RequestParam(value = CliStrings.CREATE_HDFS_STORE__MINORCOMPACT, required=false) final Boolean minorCompaction,
- @RequestParam(value = CliStrings.CREATE_HDFS_STORE__MINORCOMPACTIONTHREADS, required=false) final Integer minorCompactionThreads,
- @RequestParam(value = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACT, required=false) final Boolean majorCompact,
- @RequestParam(value = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTINTERVAL, required=false) final Integer majorCompactionInterval,
- @RequestParam(value = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTIONTHREADS, required=false) final Integer majorCompactionThreads,
- @RequestParam(value = CliStrings.CREATE_HDFS_STORE__PURGEINTERVAL, required=false) final Integer purgeInterval,
- @RequestParam(value = CliStrings.CREATE_HDFS_STORE__WRITEONLYFILESIZE, required=false) final Integer writeOnlyFileSize,
- @RequestParam(value = CliStrings.CREATE_HDFS_STORE__FILEROLLOVERINTERVAL, required=false) final Integer fileRolloverInterval,
- @RequestParam(value = CliStrings.CREATE_HDFS_STORE__CLIENTCONFIGFILE, required=false) final String clientConfigFile,
- @RequestParam(value = CliStrings.CREATE_HDFS_STORE__GROUP, required = false) final String[] groups)
- {
- CommandStringBuilder command = new CommandStringBuilder(CliStrings.CREATE_HDFS_STORE);
-
- command.addOption(CliStrings.CREATE_HDFS_STORE__NAME, storeName);
-
- if (hasValue(namenode))
- command.addOption(CliStrings.CREATE_HDFS_STORE__NAMENODE, namenode);
-
- if (hasValue(homedir))
- command.addOption(CliStrings.CREATE_HDFS_STORE__HOMEDIR, homedir);
-
- if (hasValue(batchSize))
- command.addOption(CliStrings.CREATE_HDFS_STORE__BATCHSIZE, String.valueOf(batchSize));
-
- if (hasValue(batchInterval))
- command.addOption(CliStrings.CREATE_HDFS_STORE__BATCHINTERVAL, String.valueOf(batchInterval));
-
- if (hasValue(readCachesize))
- command.addOption(CliStrings.CREATE_HDFS_STORE__READCACHESIZE, String.valueOf(readCachesize));
-
- if (hasValue(dispatcherThreads))
- command.addOption(CliStrings.CREATE_HDFS_STORE__DISPATCHERTHREADS, String.valueOf(dispatcherThreads));
-
- if (hasValue(maxMemory))
- command.addOption(CliStrings.CREATE_HDFS_STORE__MAXMEMORY,String.valueOf(maxMemory));
-
- if (hasValue(persistence))
- command.addOption(CliStrings.CREATE_HDFS_STORE__BUFFERPERSISTENT,String.valueOf(Boolean.TRUE.equals(persistence)));
-
- if (hasValue(synchronousDiskWrite))
- command.addOption(CliStrings.CREATE_HDFS_STORE__SYNCDISKWRITE,String.valueOf(Boolean.TRUE.equals(synchronousDiskWrite)));
-
- if (hasValue(diskStoreName))
- command.addOption(CliStrings.CREATE_HDFS_STORE__DISKSTORENAME,String.valueOf(diskStoreName));
-
- if (hasValue(minorCompaction))
- command.addOption(CliStrings.CREATE_HDFS_STORE__MINORCOMPACT,String.valueOf(Boolean.TRUE.equals(minorCompaction)));
-
- if (hasValue(minorCompactionThreads))
- command.addOption(CliStrings.CREATE_HDFS_STORE__MINORCOMPACTIONTHREADS,String.valueOf(minorCompactionThreads));
-
- if (hasValue(majorCompact))
- command.addOption(CliStrings.CREATE_HDFS_STORE__MAJORCOMPACT,String.valueOf(Boolean.TRUE.equals(majorCompact)));
-
- if (hasValue(majorCompactionInterval))
- command.addOption(CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTINTERVAL,String.valueOf(majorCompactionInterval));
-
- if (hasValue(majorCompactionThreads))
- command.addOption(CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTIONTHREADS,String.valueOf(majorCompactionThreads));
-
- if (hasValue(purgeInterval))
- command.addOption(CliStrings.CREATE_HDFS_STORE__PURGEINTERVAL,String.valueOf(purgeInterval));
-
- if (hasValue(writeOnlyFileSize))
- command.addOption(CliStrings.CREATE_HDFS_STORE__WRITEONLYFILESIZE,String.valueOf(writeOnlyFileSize));
-
- if (hasValue(fileRolloverInterval))
- command.addOption(CliStrings.CREATE_HDFS_STORE__FILEROLLOVERINTERVAL,String.valueOf(fileRolloverInterval));
-
- if (hasValue(clientConfigFile))
- command.addOption(CliStrings.CREATE_HDFS_STORE__CLIENTCONFIGFILE,String.valueOf(clientConfigFile));
-
- if (hasValue(groups)) {
- command.addOption(CliStrings.CREATE_HDFS_STORE__GROUP,StringUtils.concat(groups, StringUtils.COMMA_DELIMITER));
- }
-
- return processCommand(command.toString());
- }
-
- @RequestMapping(method = RequestMethod.GET, value = "/hdfsstores/{name}")
- @ResponseBody
- public String describeHDFSStore(
- @PathVariable("name") final String hdfsStoreName,
- @RequestParam(CliStrings.DESCRIBE_HDFS_STORE__MEMBER) final String memberNameId)
- {
- CommandStringBuilder command = new CommandStringBuilder(CliStrings.DESCRIBE_HDFS_STORE);
- command.addOption(CliStrings.DESCRIBE_HDFS_STORE__NAME, decode(hdfsStoreName));
- command.addOption(CliStrings.DESCRIBE_HDFS_STORE__MEMBER, memberNameId);
- return processCommand(command.toString());
- }
-
- @RequestMapping(method = RequestMethod.PUT, value = "/hdfsstores/{name}")
- @ResponseBody
- public String alterHdfsStore(
- @PathVariable("name") final String hdfsStoreName,
- @RequestParam(value = CliStrings.ALTER_HDFS_STORE__BATCHSIZE, required=false) final Integer batchSize,
- @RequestParam(value = CliStrings.ALTER_HDFS_STORE__BATCHINTERVAL, required=false) final Integer batchInterval,
- @RequestParam(value = CliStrings.ALTER_HDFS_STORE__MINORCOMPACT, required=false) final Boolean minorCompaction,
- @RequestParam(value = CliStrings.ALTER_HDFS_STORE__MINORCOMPACTIONTHREADS, required=false) final Integer minorCompactionThreads,
- @RequestParam(value = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACT, required=false) final Boolean majorCompact,
- @RequestParam(value = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTINTERVAL, required=false) final Integer majorCompactionInterval,
- @RequestParam(value = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTIONTHREADS, required=false) final Integer majorCompactionThreads,
- @RequestParam(value = CliStrings.ALTER_HDFS_STORE__PURGEINTERVAL, required=false) final Integer purgeInterval,
- @RequestParam(value = CliStrings.ALTER_HDFS_STORE__WRITEONLYFILESIZE, required=false) final Integer writeOnlyFileSize,
- @RequestParam(value = CliStrings.ALTER_HDFS_STORE__FILEROLLOVERINTERVAL, required=false) final Integer fileRolloverInterval,
- @RequestParam(value = CliStrings.ALTER_HDFS_STORE__GROUP, required = false) final String[] groups)
- {
- CommandStringBuilder command = new CommandStringBuilder(CliStrings.ALTER_HDFS_STORE);
-
- command.addOption(CliStrings.ALTER_HDFS_STORE__NAME, hdfsStoreName);
-
- if (hasValue(batchSize))
- command.addOption(CliStrings.ALTER_HDFS_STORE__BATCHSIZE, String.valueOf(batchSize));
-
- if (hasValue(batchInterval))
- command.addOption(CliStrings.ALTER_HDFS_STORE__BATCHINTERVAL, String.valueOf(batchInterval));
-
- if (hasValue(minorCompaction))
- command.addOption(CliStrings.ALTER_HDFS_STORE__MINORCOMPACT,String.valueOf(Boolean.TRUE.equals(minorCompaction)));
-
- if (hasValue(minorCompactionThreads))
- command.addOption(CliStrings.ALTER_HDFS_STORE__MINORCOMPACTIONTHREADS,String.valueOf(minorCompactionThreads));
-
- if (hasValue(majorCompact))
- command.addOption(CliStrings.ALTER_HDFS_STORE__MAJORCOMPACT,String.valueOf(Boolean.TRUE.equals(majorCompact)));
-
- if (hasValue(majorCompactionInterval))
- command.addOption(CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTINTERVAL,String.valueOf(majorCompactionInterval));
-
- if (hasValue(majorCompactionThreads))
- command.addOption(CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTIONTHREADS,String.valueOf(majorCompactionThreads));
-
- if (hasValue(purgeInterval))
- command.addOption(CliStrings.ALTER_HDFS_STORE__PURGEINTERVAL,String.valueOf(purgeInterval));
-
- if (hasValue(writeOnlyFileSize))
- command.addOption(CliStrings.ALTER_HDFS_STORE__WRITEONLYFILESIZE,String.valueOf(writeOnlyFileSize));
-
- if (hasValue(fileRolloverInterval))
- command.addOption(CliStrings.ALTER_HDFS_STORE__FILEROLLOVERINTERVAL,String.valueOf(fileRolloverInterval));
-
- if (hasValue(groups)) {
- command.addOption(CliStrings.ALTER_HDFS_STORE__GROUP,StringUtils.concat(groups, StringUtils.COMMA_DELIMITER));
- }
-
- return processCommand(command.toString());
- }
-
- @RequestMapping(method = RequestMethod.DELETE, value = "/hdfsstores/{name}")
- @ResponseBody
- public String destroyHDFSStore(
- @PathVariable("name") final String hdfsStoreName,
- @RequestParam(value = CliStrings.DESTROY_HDFS_STORE__GROUP, required = false) final String[] groups)
- {
- CommandStringBuilder command = new CommandStringBuilder(CliStrings.DESTROY_HDFS_STORE);
- command.addOption(CliStrings.DESTROY_HDFS_STORE__NAME, decode(hdfsStoreName));
-
- if (hasValue(groups)) {
- command.addOption(CliStrings.DESTROY_HDFS_STORE__GROUP, StringUtils.concat(groups, StringUtils.COMMA_DELIMITER));
- }
- return processCommand(command.toString());
- }
-}
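For context, the controller deleted above translated each REST endpoint into the matching gfsh command string and delegated execution to the shared command processor. A minimal sketch of that pattern, modeled on the removed describeHDFSStore method, with hypothetical argument values:

    // Sketch of the removed translation pattern; "myHdfsStore" and "server1"
    // are hypothetical values for the path variable and request parameter.
    CommandStringBuilder command = new CommandStringBuilder(CliStrings.DESCRIBE_HDFS_STORE);
    command.addOption(CliStrings.DESCRIBE_HDFS_STORE__NAME, "myHdfsStore");
    command.addOption(CliStrings.DESCRIBE_HDFS_STORE__MEMBER, "server1");
    String result = processCommand(command.toString()); // inherited from AbstractCommandsController

Optional parameters were appended only when hasValue(...) reported a non-empty value, so omitted query parameters fell back to the gfsh defaults.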
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/ShellCommandsController.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/ShellCommandsController.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/ShellCommandsController.java
index ef4c49f..46ed1a6 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/ShellCommandsController.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/ShellCommandsController.java
@@ -9,21 +9,12 @@ package com.gemstone.gemfire.management.internal.web.controllers;
import java.io.IOException;
import java.util.Set;
+
import javax.management.AttributeNotFoundException;
import javax.management.InstanceNotFoundException;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
-import com.gemstone.gemfire.internal.GemFireVersion;
-import com.gemstone.gemfire.internal.lang.ObjectUtils;
-import com.gemstone.gemfire.internal.lang.StringUtils;
-import com.gemstone.gemfire.internal.util.IOUtils;
-import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
-import com.gemstone.gemfire.management.internal.web.domain.Link;
-import com.gemstone.gemfire.management.internal.web.domain.LinkIndex;
-import com.gemstone.gemfire.management.internal.web.domain.QueryParameterSource;
-import com.gemstone.gemfire.management.internal.web.http.HttpMethod;
-
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
@@ -34,6 +25,16 @@ import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseBody;
+import com.gemstone.gemfire.internal.GemFireVersion;
+import com.gemstone.gemfire.internal.lang.ObjectUtils;
+import com.gemstone.gemfire.internal.lang.StringUtils;
+import com.gemstone.gemfire.internal.util.IOUtils;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.web.domain.Link;
+import com.gemstone.gemfire.management.internal.web.domain.LinkIndex;
+import com.gemstone.gemfire.management.internal.web.domain.QueryParameterSource;
+import com.gemstone.gemfire.management.internal.web.http.HttpMethod;
+
/**
* The ShellCommandsController class implements GemFire REST API calls for Gfsh Shell Commands.
*
@@ -249,12 +250,7 @@ public class ShellCommandsController extends AbstractCommandsController {
.add(new Link(CliStrings.STATUS_GATEWAYSENDER, toUri("/gateways/senders/{id}")))
.add(new Link(CliStrings.STOP_GATEWAYRECEIVER, toUri("/gateways/receivers?op=stop"), HttpMethod.POST))
.add(new Link(CliStrings.STOP_GATEWAYSENDER, toUri("/gateways/senders/{id}?op=stop"), HttpMethod.POST))
- // HDFS Store Commands
- .add(new Link(CliStrings.LIST_HDFS_STORE, toUri("/hdfsstores"), HttpMethod.GET))
- .add(new Link(CliStrings.DESCRIBE_HDFS_STORE, toUri("/hdfsstores/{name}"), HttpMethod.GET))
- .add(new Link(CliStrings.CREATE_HDFS_STORE, toUri("/hdfsstores"), HttpMethod.POST))
- .add(new Link(CliStrings.DESTROY_HDFS_STORE, toUri("/hdfsstores/{name}"), HttpMethod.DELETE))
- .add(new Link(CliStrings.ALTER_HDFS_STORE, toUri("/hdfsstores/{name}"), HttpMethod.PUT));
+ ;
}
@RequestMapping(method = { RequestMethod.GET, RequestMethod.HEAD }, value = "/ping")
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommandsJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommandsJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommandsJUnitTest.java
deleted file mode 100644
index af47138..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommandsJUnitTest.java
+++ /dev/null
@@ -1,838 +0,0 @@
-/*
- * =========================================================================
- * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- * ========================================================================
- */
-
-package com.gemstone.gemfire.management.internal.cli.commands;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertSame;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import org.jmock.Expectations;
-import org.jmock.Mockery;
-import org.jmock.lib.legacy.ClassImposteriser;
-import org.json.JSONArray;
-import org.json.JSONException;
-import org.json.JSONObject;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.execute.Execution;
-import com.gemstone.gemfire.cache.execute.FunctionInvocationTargetException;
-import com.gemstone.gemfire.cache.execute.ResultCollector;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.cache.execute.AbstractExecution;
-import com.gemstone.gemfire.management.cli.Result;
-import com.gemstone.gemfire.management.cli.Result.Status;
-import com.gemstone.gemfire.management.internal.cli.functions.AlterHDFSStoreFunction;
-import com.gemstone.gemfire.management.internal.cli.functions.CliFunctionResult;
-import com.gemstone.gemfire.management.internal.cli.functions.CreateHDFSStoreFunction;
-import com.gemstone.gemfire.management.internal.cli.functions.DescribeHDFSStoreFunction;
-import com.gemstone.gemfire.management.internal.cli.functions.DestroyHDFSStoreFunction;
-import com.gemstone.gemfire.management.internal.cli.functions.ListHDFSStoresFunction;
-import com.gemstone.gemfire.management.internal.cli.functions.ListHDFSStoresFunction.HdfsStoreDetails;
-import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
-import com.gemstone.gemfire.management.internal.cli.json.GfJsonObject;
-import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
-import com.gemstone.gemfire.management.internal.cli.result.InfoResultData;
-import com.gemstone.gemfire.management.internal.cli.result.TabularResultData;
-import com.gemstone.gemfire.management.internal.cli.util.HDFSStoreNotFoundException;
-import com.gemstone.gemfire.management.internal.cli.util.MemberNotFoundException;
-import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-/**
- * The HDFSStoreCommandsJUnitTest class is a test suite exercising the contract
- * and functionality of the HDFSStoreCommands class, which implements commands
- * in the GemFire shell (gfsh) that access and modify HDFS stores in GemFire.
- * <p/>
- *
- * @author Namrata Thanvi
- * @see com.gemstone.gemfire.management.internal.cli.commands.HDFSStoreCommands
- * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder
- * @see com.gemstone.gemfire.management.internal.cli.functions.DescribeHDFSStoreFunction
- * @see org.jmock.Expectations
- * @see org.jmock.Mockery
- * @see org.jmock.lib.legacy.ClassImposteriser
- * @see org.junit.Assert
- * @see org.junit.Test
- */
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSStoreCommandsJUnitTest {
-
- private Mockery mockContext;
-
- @Before
- public void setUp() {
- mockContext = new Mockery() {
- {
- setImposteriser(ClassImposteriser.INSTANCE);
- }
- };
- }
-
- @After
- public void tearDown() {
- mockContext.assertIsSatisfied();
- mockContext = null;
- }
-
- @Test
- public void testGetHDFSStoreDescription() {
- final String hdfsStoreName = "mockHdfsStore";
- final String memberId = "mockMember";
- final Cache mockCache = mockContext.mock(Cache.class, "Cache");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
- final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
-
- final HDFSStoreConfigHolder expectedHdfsStoreConfigHolder = createMockHDFSStoreConfigHolder(mockContext, "hdfsStoreName",
- "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0, 2048, true, true, true, 40,
- 40, 40, 800);
-
- mockContext.checking(new Expectations() {
- {
- oneOf(mockMember).getName();
- will(returnValue(null));
- oneOf(mockMember).getId();
- will(returnValue(memberId));
- oneOf(mockFunctionExecutor).withArgs(with(equal(hdfsStoreName)));
- will(returnValue(mockFunctionExecutor));
- oneOf(mockFunctionExecutor).execute(with(aNonNull(DescribeHDFSStoreFunction.class)));
- will(returnValue(mockResultCollector));
- oneOf(mockResultCollector).getResult();
- will(returnValue(Arrays.asList(expectedHdfsStoreConfigHolder)));
- }
- });
-
- final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
- final HDFSStoreConfigHolder actualHdfsStoreConfigHolder = commands.getHDFSStoreDescription(memberId, hdfsStoreName);
-
- assertNotNull(actualHdfsStoreConfigHolder);
- assertEquals(expectedHdfsStoreConfigHolder, actualHdfsStoreConfigHolder);
- }
-
- @Test(expected = MemberNotFoundException.class)
- public void testGetHDFSStoreDescriptionThrowsMemberNotFoundException() {
- final String hdfsStoreName = "mockHdfsStore";
- final String memberId = "mockMember";
- final Cache mockCache = mockContext.mock(Cache.class, "Cache");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-
- mockContext.checking(new Expectations() {
- {
- oneOf(mockMember).getName();
- will(returnValue(null));
- oneOf(mockMember).getId();
- will(returnValue("testMember"));
- }
- });
-
- final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, null);
-
- try {
- commands.getHDFSStoreDescription(memberId, hdfsStoreName);
- } catch (MemberNotFoundException expected) {
- assertEquals(CliStrings.format(CliStrings.MEMBER_NOT_FOUND_ERROR_MESSAGE, memberId), expected.getMessage());
- throw expected;
- }
- }
-
- @Test(expected = HDFSStoreNotFoundException.class)
- public void testGetHDFSStoreDescriptionThrowsResourceNotFoundException() {
- final String hdfsStoreName = "mockHdfsStore";
- final String memberId = "mockMember";
-
- final Cache mockCache = mockContext.mock(Cache.class, "Cache");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
-
- mockContext.checking(new Expectations() {
- {
- oneOf(mockMember).getName();
- will(returnValue(null));
- oneOf(mockMember).getId();
- will(returnValue(memberId));
- oneOf(mockFunctionExecutor).withArgs(with(equal(hdfsStoreName)));
- will(returnValue(mockFunctionExecutor));
- oneOf(mockFunctionExecutor).execute(with(aNonNull(DescribeHDFSStoreFunction.class)));
- will(throwException(new HDFSStoreNotFoundException("expected")));
- }
- });
-
- final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
- try {
- commands.getHDFSStoreDescription(memberId, hdfsStoreName);
- } catch (HDFSStoreNotFoundException expected) {
- assertEquals("expected", expected.getMessage());
- throw expected;
- }
- }
-
- @Test(expected = RuntimeException.class)
- public void testGetHDFSStoreDescriptionThrowsRuntimeException() {
- final String hdfsStoreName = "mockHdfsStore";
- final String memberId = "mockMember";
-
- final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-
- final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
-
- mockContext.checking(new Expectations() {
- {
- oneOf(mockMember).getName();
- will(returnValue(null));
- oneOf(mockMember).getId();
- will(returnValue(memberId));
- oneOf(mockFunctionExecutor).withArgs(with(equal(hdfsStoreName)));
- will(returnValue(mockFunctionExecutor));
- oneOf(mockFunctionExecutor).execute(with(aNonNull(DescribeHDFSStoreFunction.class)));
- will(throwException(new RuntimeException("expected")));
- }
- });
-
- final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
- try {
- commands.getHDFSStoreDescription(memberId, hdfsStoreName);
- } catch (RuntimeException expected) {
- assertEquals("expected", expected.getMessage());
- throw expected;
- }
- }
-
- @Test(expected = RuntimeException.class)
- public void testGetHDFSStoreDescriptionWithInvalidFunctionResultReturnType() {
- final String hdfsStoreName = "mockHDFSStore";
- final String memberId = "mockMember";
-
- final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-
- final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
-
- final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
-
- mockContext.checking(new Expectations() {
- {
- oneOf(mockMember).getName();
- will(returnValue(null));
- oneOf(mockMember).getId();
- will(returnValue(memberId));
- oneOf(mockFunctionExecutor).withArgs(with(equal(hdfsStoreName)));
- will(returnValue(mockFunctionExecutor));
- oneOf(mockFunctionExecutor).execute(with(aNonNull(DescribeHDFSStoreFunction.class)));
- will(returnValue(mockResultCollector));
- oneOf(mockResultCollector).getResult();
- will(returnValue(Arrays.asList(new Object())));
- }
- });
-
- final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
- try {
- commands.getHDFSStoreDescription(memberId, hdfsStoreName);
- } catch (RuntimeException expected) {
- assertEquals(CliStrings.format(CliStrings.UNEXPECTED_RETURN_TYPE_EXECUTING_COMMAND_ERROR_MESSAGE, Object.class
- .getName(), CliStrings.DESCRIBE_HDFS_STORE), expected.getMessage());
- assertNull(expected.getCause());
- throw expected;
- }
- }
-
- @Test
- public void testGetHDFSStoreListing() {
- final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-
- final DistributedMember mockDistributedMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-
- final AbstractExecution mockFunctionExecutor = mockContext.mock(AbstractExecution.class, "Function Executor");
-
- final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
-
- final HDFSStoreConfigHolder expectedHdfsStoreConfigHolderOne = createMockHDFSStoreConfigHolder(mockContext, "hdfsStoreName1",
- "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0, 2048, true, true, true, 40,
- 40, 40, 800);
- final HDFSStoreConfigHolder expectedHdfsStoreConfigHolderTwo = createMockHDFSStoreConfigHolder(mockContext, "hdfsStoreName2",
- "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0, 2048, true, true, true, 40,
- 40, 40, 800);
- final HDFSStoreConfigHolder expectedHdfsStoreConfigHolderThree = createMockHDFSStoreConfigHolder(mockContext, "hdfsStoreName3",
- "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0, 2048, true, true, true, 40,
- 40, 40, 800);
-
- HdfsStoreDetails d1 = new HdfsStoreDetails(expectedHdfsStoreConfigHolderOne.getName(), "member1", "member1");
- HdfsStoreDetails d2 = new HdfsStoreDetails(expectedHdfsStoreConfigHolderTwo.getName(), "member2", "member2");
- HdfsStoreDetails d3 = new HdfsStoreDetails(expectedHdfsStoreConfigHolderThree.getName(), "member3", "member3");
-
- final Set<HdfsStoreDetails> expectedHdfsStores = new HashSet<HdfsStoreDetails>();
- expectedHdfsStores.add(d1);
- expectedHdfsStores.add(d2);
- expectedHdfsStores.add(d3);
-
- final List<Object> results = new ArrayList<Object>();
- results.add(expectedHdfsStores);
- mockContext.checking(new Expectations() {
- {
- oneOf(mockFunctionExecutor).setIgnoreDepartedMembers(with(equal(true)));
- oneOf(mockFunctionExecutor).execute(with(aNonNull(ListHDFSStoresFunction.class)));
- will(returnValue(mockResultCollector));
- oneOf(mockResultCollector).getResult();
- will(returnValue(results));
- }
- });
-
- final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockDistributedMember, mockFunctionExecutor);
-
- final List<?> actualHdfsStores = commands.getHdfsStoreListing(commands.getNormalMembers(mockCache));
-
- Assert.assertNotNull(actualHdfsStores);
- Assert.assertTrue(actualHdfsStores.contains(d1));
- Assert.assertTrue(actualHdfsStores.contains(d2));
- Assert.assertTrue(actualHdfsStores.contains(d3));
- }
-
- @Test(expected = RuntimeException.class)
- public void testGetHDFSStoreListThrowsRuntimeException() {
- final Cache mockCache = mockContext.mock(Cache.class, "Cache");
- final DistributedMember mockDistributedMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
-
- mockContext.checking(new Expectations() {
- {
- oneOf(mockFunctionExecutor).execute(with(aNonNull(ListHDFSStoresFunction.class)));
- will(throwException(new RuntimeException("expected")));
- }
- });
-
- final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockDistributedMember, mockFunctionExecutor);
-
- try {
- commands.getHdfsStoreListing(commands.getNormalMembers(mockCache));
- } catch (RuntimeException expected) {
- assertEquals("expected", expected.getMessage());
- throw expected;
- }
- }
-
- @Test
- public void testGetHDFSStoreListReturnsFunctionInvocationTargetExceptionInResults() {
- final Cache mockCache = mockContext.mock(Cache.class, "Cache");
- final DistributedMember mockDistributedMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final AbstractExecution mockFunctionExecutor = mockContext.mock(AbstractExecution.class, "Function Executor");
- final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
-
- final HDFSStoreConfigHolder expectedHdfsStoreConfigHolder = createMockHDFSStoreConfigHolder(mockContext, "hdfsStoreName",
- "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0, 2048, true, true, true, 40,
- 40, 40, 800);
-
- final List<HdfsStoreDetails> expectedHdfsStores = Arrays.asList(new HdfsStoreDetails(
- expectedHdfsStoreConfigHolder.getName(), "member1", "member1"));
-
- final List<Object> results = new ArrayList<Object>();
-
- results.add(expectedHdfsStores);
- results.add(new FunctionInvocationTargetException("expected"));
-
- mockContext.checking(new Expectations() {
- {
- oneOf(mockFunctionExecutor).setIgnoreDepartedMembers(with(equal(true)));
- oneOf(mockFunctionExecutor).execute(with(aNonNull(ListHDFSStoresFunction.class)));
- will(returnValue(mockResultCollector));
- oneOf(mockResultCollector).getResult();
- will(returnValue(results));
- }
- });
-
- final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockDistributedMember, mockFunctionExecutor);
-
- final List<HdfsStoreDetails> actualHdfsStores = commands.getHdfsStoreListing(commands
- .getNormalMembers(mockCache));
-
- // No explicit asserts here: the test passes if the FunctionInvocationTargetException
- // mixed into the results does not propagate; the mock expectations are verified in
- // tearDown().
- }
-
- @Test
- public void testGetCreatedHDFSStore() throws JSONException {
- final String hdfsStoreName = "mockHdfsStore";
- final String memberId = "mockMember";
- final Cache mockCache = mockContext.mock(Cache.class, "Cache");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
- final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
- XmlEntity xml = null;
- final CliFunctionResult cliResult = new CliFunctionResult(memberId, xml, "Success");
- // Stub the executor/collector chain to hand back the canned CliFunctionResult.
- mockContext.checking(new Expectations() {
- {
- oneOf(mockFunctionExecutor).withArgs(with(aNonNull(HDFSStoreConfigHolder.class)));
- will(returnValue(mockFunctionExecutor));
- oneOf(mockFunctionExecutor).execute(with(aNonNull(CreateHDFSStoreFunction.class)));
- will(returnValue(mockResultCollector));
- oneOf(mockResultCollector).getResult();
- will(returnValue(Arrays.asList(cliResult)));
- }
- });
-
- final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
- final Result result = commands.getCreatedHdfsStore(null, hdfsStoreName, "hdfs://localhost:9000", "test", null, 20,
- 20, true, true, 100, 10000, "testStore", true, 10, true, .23F, 10, 10, 10, 10, 10);
-
- assertNotNull(result);
- assertEquals(Status.OK, result.getStatus());
- TabularResultData resultData = (TabularResultData)((CommandResult)result).getResultData();
- GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
- assertNotNull(jsonObject.get("Member"));
- assertNotNull(jsonObject.get("Result"));
-
- assertEquals(memberId, (((JSONArray)jsonObject.get("Member")).get(0)));
- assertEquals("Success", (((JSONArray)jsonObject.get("Result")).get(0)));
- }
-
- @Test
- public void testGetCreatedHDFSStoreWithThrowable() throws JSONException {
- final String hdfsStoreName = "mockHdfsStore";
- final String memberId = "mockMember";
- final Cache mockCache = mockContext.mock(Cache.class, "Cache");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
- final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
- RuntimeException exception = new RuntimeException("Test Exception");
-
- final CliFunctionResult cliResult = new CliFunctionResult(memberId, exception, null);
- // Stub the executor/collector chain to hand back the canned CliFunctionResult.
- mockContext.checking(new Expectations() {
- {
- oneOf(mockFunctionExecutor).withArgs(with(aNonNull(HDFSStoreConfigHolder.class)));
- will(returnValue(mockFunctionExecutor));
- oneOf(mockFunctionExecutor).execute(with(aNonNull(CreateHDFSStoreFunction.class)));
- will(returnValue(mockResultCollector));
- oneOf(mockResultCollector).getResult();
- will(returnValue(Arrays.asList(cliResult)));
- }
- });
-
- final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
- final Result result = commands.getCreatedHdfsStore(null, hdfsStoreName, "hdfs://localhost:9000", "test", null, 20,
- 20, true, true, 100, 10000, "testStore", true, 10, true, .23F, 10, 10, 10, 10, 10);
-
- assertNotNull(result);
- assertEquals(Status.ERROR, result.getStatus());
-
- TabularResultData resultData = (TabularResultData)((CommandResult)result).getResultData();
- GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
- assertNotNull(jsonObject.get("Member"));
- assertNotNull(jsonObject.get("Result"));
- assertEquals(memberId, (((JSONArray)jsonObject.get("Member")).get(0)));
- assertEquals("ERROR: " + exception.getClass().getName() + ": " + exception.getMessage(), (((JSONArray)jsonObject
- .get("Result")).get(0)));
- }
-
- @Test
- public void testGetCreatedHDFSStoreWithCacheClosedException() throws JSONException {
- final String hdfsStoreName = "mockHdfsStore";
- final String memberId = "mockMember";
- final Cache mockCache = mockContext.mock(Cache.class, "Cache");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
- final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
-
- final CliFunctionResult cliResult = new CliFunctionResult(memberId, false, null);
- // Stub the executor/collector chain to hand back the canned CliFunctionResult.
- mockContext.checking(new Expectations() {
- {
- oneOf(mockFunctionExecutor).withArgs(with(aNonNull(HDFSStoreConfigHolder.class)));
- will(returnValue(mockFunctionExecutor));
- oneOf(mockFunctionExecutor).execute(with(aNonNull(CreateHDFSStoreFunction.class)));
- will(returnValue(mockResultCollector));
- oneOf(mockResultCollector).getResult();
- will(returnValue(Arrays.asList(cliResult)));
- }
- });
-
- final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
- final Result result = commands.getCreatedHdfsStore(null, hdfsStoreName, "hdfs://localhost:9000", "test", null, 20,
- 20, true, true, 100, 10000, "testStore", true, 10, true, .23F, 10, 10, 10, 10, 10);
-
- assertNotNull(result);
- InfoResultData resultData = (InfoResultData)((CommandResult)result).getResultData();
- GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
- assertNotNull(jsonObject.get("message"));
-
- assertEquals("Unable to create hdfs store:" + hdfsStoreName, (((JSONArray)jsonObject.get("message")).get(0)));
- }
-
- @Test
- public void testGetAlteredHDFSStore() throws JSONException {
- final String hdfsStoreName = "mockHdfsStore";
- final String memberId = "mockMember";
- final Cache mockCache = mockContext.mock(Cache.class, "Cache");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
- final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
- XmlEntity xml = null;
- final CliFunctionResult cliResult = new CliFunctionResult(memberId, xml, "Success");
- // Stub the executor/collector chain to hand back the canned CliFunctionResult.
- mockContext.checking(new Expectations() {
- {
- oneOf(mockFunctionExecutor).withArgs(with(aNonNull(HDFSStoreConfigHolder.class)));
- will(returnValue(mockFunctionExecutor));
- oneOf(mockFunctionExecutor).execute(with(aNonNull(AlterHDFSStoreFunction.class)));
- will(returnValue(mockResultCollector));
- oneOf(mockResultCollector).getResult();
- will(returnValue(Arrays.asList(cliResult)));
- }
- });
-
- final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
- final Result result = commands.getAlteredHDFSStore(null, hdfsStoreName, 100, 100, true, 100, true, 100, 100, 100,
- 100, 100);
-
- assertNotNull(result);
- assertEquals(Status.OK, result.getStatus());
- TabularResultData resultData = (TabularResultData)((CommandResult)result).getResultData();
- GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
- assertNotNull(jsonObject.get("Member"));
- assertNotNull(jsonObject.get("Result"));
-
- assertEquals(memberId, (((JSONArray)jsonObject.get("Member")).get(0)));
- assertEquals("Success", (((JSONArray)jsonObject.get("Result")).get(0)));
- }
-
- @Test
- public void testGetAlteredHDFSStoreWithThrowable() throws JSONException {
- final String hdfsStoreName = "mockHdfsStore";
- final String memberId = "mockMember";
- final Cache mockCache = mockContext.mock(Cache.class, "Cache");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
- final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
- RuntimeException exception = new RuntimeException("Test Exception");
- final CliFunctionResult cliResult = new CliFunctionResult(memberId, exception, "Success");
- // Stub the executor/collector chain to hand back the canned CliFunctionResult.
- mockContext.checking(new Expectations() {
- {
- oneOf(mockFunctionExecutor).withArgs(with(aNonNull(HDFSStoreConfigHolder.class)));
- will(returnValue(mockFunctionExecutor));
- oneOf(mockFunctionExecutor).execute(with(aNonNull(AlterHDFSStoreFunction.class)));
- will(returnValue(mockResultCollector));
- oneOf(mockResultCollector).getResult();
- will(returnValue(Arrays.asList(cliResult)));
- }
- });
-
- final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
- final Result result = commands.getAlteredHDFSStore(null, hdfsStoreName, 100, 100, true, 100, true, 100, 100, 100,
- 100, 100);
-
- assertNotNull(result);
- assertEquals(Status.ERROR, result.getStatus());
- TabularResultData resultData = (TabularResultData)((CommandResult)result).getResultData();
- GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
- assertNotNull(jsonObject.get("Member"));
- assertNotNull(jsonObject.get("Result"));
-
- assertEquals(memberId, (((JSONArray)jsonObject.get("Member")).get(0)));
- assertEquals("ERROR: " + exception.getClass().getName() + ": " + exception.getMessage(), (((JSONArray)jsonObject
- .get("Result")).get(0)));
- }
-
- @Test
- public void testGetAlteredHDFSStoreWithCacheClosedException() throws JSONException {
- final String hdfsStoreName = "mockHdfsStore";
- final String memberId = "mockMember";
- final Cache mockCache = mockContext.mock(Cache.class, "Cache");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
- final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
- final CliFunctionResult cliResult = new CliFunctionResult(memberId, false, null);
- // Stub the executor/collector chain to hand back the canned CliFunctionResult.
- mockContext.checking(new Expectations() {
- {
- oneOf(mockFunctionExecutor).withArgs(with(aNonNull(HDFSStoreConfigHolder.class)));
- will(returnValue(mockFunctionExecutor));
- oneOf(mockFunctionExecutor).execute(with(aNonNull(AlterHDFSStoreFunction.class)));
- will(returnValue(mockResultCollector));
- oneOf(mockResultCollector).getResult();
- will(returnValue(Arrays.asList(cliResult)));
- }
- });
-
- final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
- final Result result = commands.getAlteredHDFSStore(null, hdfsStoreName, 100, 100, true, 100, true, 100, 100, 100,
- 100, 100);
-
- assertNotNull(result);
- TabularResultData resultData = (TabularResultData)((CommandResult)result).getResultData();
- JSONObject jsonObject = (JSONObject)resultData.getGfJsonObject().get("content");
- assertEquals(0, jsonObject.length());
- }
-
- @Test
- public void testDestroyStore() throws JSONException {
- final String hdfsStoreName = "mockHdfsStore";
- final String memberId = "mockMember";
- final Cache mockCache = mockContext.mock(Cache.class, "Cache");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
- final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
- XmlEntity xml = null;
- final CliFunctionResult cliResult = new CliFunctionResult(memberId, xml, "Success");
- // Stub the executor/collector chain to hand back the canned CliFunctionResult.
- mockContext.checking(new Expectations() {
- {
- oneOf(mockFunctionExecutor).withArgs(hdfsStoreName);
- will(returnValue(mockFunctionExecutor));
- oneOf(mockFunctionExecutor).execute(with(aNonNull(DestroyHDFSStoreFunction.class)));
- will(returnValue(mockResultCollector));
- oneOf(mockResultCollector).getResult();
- will(returnValue(Arrays.asList(cliResult)));
- }
- });
-
- final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
- final Result result = commands.destroyStore(hdfsStoreName, null);
-
- assertNotNull(result);
- assertEquals(Status.OK, result.getStatus());
- TabularResultData resultData = (TabularResultData)((CommandResult)result).getResultData();
- GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
- assertNotNull(jsonObject.get("Member"));
- assertNotNull(jsonObject.get("Result"));
-
- assertEquals(memberId, (((JSONArray)jsonObject.get("Member")).get(0)));
- assertEquals("Success", (((JSONArray)jsonObject.get("Result")).get(0)));
- }
-
- @Test
- public void testDestroyStoreWithThrowable() throws JSONException {
- final String hdfsStoreName = "mockHdfsStore";
- final String memberId = "mockMember";
- final Cache mockCache = mockContext.mock(Cache.class, "Cache");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
- final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
- RuntimeException exception = new RuntimeException("Test Exception");
- final CliFunctionResult cliResult = new CliFunctionResult(memberId, exception, "Success");
- // Stub the executor/collector chain to hand back the canned CliFunctionResult.
- mockContext.checking(new Expectations() {
- {
- oneOf(mockFunctionExecutor).withArgs(hdfsStoreName);
- will(returnValue(mockFunctionExecutor));
- oneOf(mockFunctionExecutor).execute(with(aNonNull(DestroyHDFSStoreFunction.class)));
- will(returnValue(mockResultCollector));
- oneOf(mockResultCollector).getResult();
- will(returnValue(Arrays.asList(cliResult)));
- }
- });
-
- final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
- final Result result = commands.destroyHdfstore(hdfsStoreName, null);
-
- assertNotNull(result);
- assertEquals(Status.ERROR, result.getStatus());
- TabularResultData resultData = (TabularResultData)((CommandResult)result).getResultData();
- GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
- assertNotNull(jsonObject.get("Member"));
- assertNotNull(jsonObject.get("Result"));
-
- assertEquals(memberId, (((JSONArray)jsonObject.get("Member")).get(0)));
- assertEquals("ERROR: " + exception.getClass().getName() + ": " + exception.getMessage(), (((JSONArray)jsonObject
- .get("Result")).get(0)));
- }
-
- @Test
- public void testDestroyStoreWithCacheClosedException() throws JSONException {
- final String hdfsStoreName = "mockHdfsStore";
- final String memberId = "mockMember";
- final Cache mockCache = mockContext.mock(Cache.class, "Cache");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
- final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
- final CliFunctionResult cliResult = new CliFunctionResult(memberId, false, null);
- // Stub the executor/collector chain to hand back the canned CliFunctionResult.
- mockContext.checking(new Expectations() {
- {
- oneOf(mockFunctionExecutor).withArgs(hdfsStoreName);
- will(returnValue(mockFunctionExecutor));
- oneOf(mockFunctionExecutor).execute(with(aNonNull(DestroyHDFSStoreFunction.class)));
- will(returnValue(mockResultCollector));
- oneOf(mockResultCollector).getResult();
- will(returnValue(Arrays.asList(cliResult)));
- }
- });
-
- final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
- final Result result = commands.destroyHdfstore(hdfsStoreName, null);
-
- assertNotNull(result);
-
- InfoResultData resultData = (InfoResultData)((CommandResult)result).getResultData();
- GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
- assertNotNull(jsonObject.get("message"));
-
- assertEquals("No matching hdfs stores found.", (((JSONArray)jsonObject.get("message")).get(0)));
- }
-
- public static HDFSStoreConfigHolder createMockHDFSStoreConfigHolder(Mockery mockContext, final String storeName, final String namenode,
- final String homeDir, final int maxFileSize, final int fileRolloverInterval, final float blockCachesize,
- final String clientConfigFile, final int batchSize, final int batchInterval, final String diskStoreName,
- final boolean syncDiskwrite, final int dispatcherThreads, final int maxMemory, final boolean bufferPersistent,
- final boolean minorCompact, final boolean majorCompact, final int majorCompactionInterval,
- final int majorCompactionThreads, final int minorCompactionThreads, final int purgeInterval) {
-
- HDFSStoreConfigHolder mockHdfsStore = mockContext.mock(HDFSStoreConfigHolder.class, "HDFSStoreConfigHolder_"
- + storeName);
-
- createMockStore(mockContext, mockHdfsStore, storeName, namenode, homeDir, maxFileSize, fileRolloverInterval,
- minorCompact, minorCompactionThreads, majorCompact, majorCompactionThreads, majorCompactionInterval,
- purgeInterval, blockCachesize, clientConfigFile, batchSize,
- batchInterval, diskStoreName, syncDiskwrite, dispatcherThreads, maxMemory, bufferPersistent);
- return mockHdfsStore;
-
- }
-
- public static void createMockStore(Mockery mockContext, final HDFSStore mockStore, final String storeName,
- final String namenode, final String homeDir, final int maxFileSize, final int fileRolloverInterval,
- final boolean minorCompact, final int minorCompactionThreads, final boolean majorCompact,
- final int majorCompactionThreads, final int majorCompactionInterval, final int purgeInterval,
- final float blockCachesize, final String clientConfigFile, final int batchSize, final int batchInterval,
- final String diskStoreName, final boolean syncDiskwrite, final int dispatcherThreads, final int maxMemory,
- final boolean bufferPersistent) {
-
- mockContext.checking(new Expectations() {
- {
- allowing(mockStore).getName();
- will(returnValue(storeName));
- allowing(mockStore).getNameNodeURL();
- will(returnValue(namenode));
- allowing(mockStore).getHomeDir();
- will(returnValue(homeDir));
- allowing(mockStore).getWriteOnlyFileRolloverSize();
- will(returnValue(maxFileSize));
- allowing(mockStore).getWriteOnlyFileRolloverInterval();
- will(returnValue(fileRolloverInterval));
- allowing(mockStore).getMinorCompaction();
- will(returnValue(minorCompact));
- allowing(mockStore).getMajorCompaction();
- will(returnValue(majorCompact));
- allowing(mockStore).getMajorCompactionInterval();
- will(returnValue(majorCompactionInterval));
- allowing(mockStore).getMajorCompactionThreads();
- will(returnValue(majorCompactionThreads));
- allowing(mockStore).getMinorCompactionThreads();
- will(returnValue(minorCompactionThreads));
- allowing(mockStore).getPurgeInterval();
- will(returnValue(purgeInterval));
- allowing(mockStore).getInputFileCountMax();
- will(returnValue(10));
- allowing(mockStore).getInputFileSizeMax();
- will(returnValue(1024));
- allowing(mockStore).getInputFileCountMin();
- will(returnValue(2));
- allowing(mockStore).getBlockCacheSize();
- will(returnValue(blockCachesize));
- allowing(mockStore).getHDFSClientConfigFile();
- will(returnValue(clientConfigFile));
-
- allowing(mockStore).getBatchSize();
- will(returnValue(batchSize));
- allowing(mockStore).getBatchInterval();
- will(returnValue(batchInterval));
- allowing(mockStore).getDiskStoreName();
- will(returnValue(diskStoreName));
- allowing(mockStore).getSynchronousDiskWrite();
- will(returnValue(syncDiskwrite));
- allowing(mockStore).getBufferPersistent();
- will(returnValue(bufferPersistent));
- allowing(mockStore).getDispatcherThreads();
- will(returnValue(dispatcherThreads));
- allowing(mockStore).getMaxMemory();
- will(returnValue(maxMemory));
- }
- });
- }
-
- protected static class TestHDFSStoreCommands extends HDFSStoreCommands {
-
- private final Cache cache;
-
- private final DistributedMember distributedMember;
-
- private final Execution functionExecutor;
-
- public TestHDFSStoreCommands(final Cache cache, final DistributedMember distributedMember,
- final Execution functionExecutor) {
- assert cache != null: "The Cache cannot be null!";
- this.cache = cache;
- this.distributedMember = distributedMember;
- this.functionExecutor = functionExecutor;
- }
-
- @Override
- protected Cache getCache() {
- return this.cache;
- }
-
- @Override
- protected Set<DistributedMember> getMembers(final Cache cache) {
- assertSame(getCache(), cache);
- return Collections.singleton(this.distributedMember);
- }
-
- @Override
- protected Execution getMembersFunctionExecutor(final Set<DistributedMember> members) {
- Assert.assertNotNull(members);
- return this.functionExecutor;
- }
-
- @Override
- protected Set<DistributedMember> getNormalMembers(final Cache cache) {
- assertSame(getCache(), cache);
- return Collections.singleton(this.distributedMember);
- }
-
- @Override
- protected Set<DistributedMember> getGroupMembers(String[] groups) {
- Set<DistributedMember> dm = new HashSet<DistributedMember>();
- dm.add(distributedMember);
- return dm;
-
- }
- }
-
-}
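All of the deleted suites above drive the jMock 2 Expectations DSL (oneOf/allowing, will(returnValue(...)), with(aNonNull(...))). As a reference for readers skimming the deletions, here is a minimal, self-contained sketch of that idiom; the GreetingService interface and every name in it are invented for illustration and are not part of Geode:

import static org.junit.Assert.assertEquals;

import org.jmock.Expectations;
import org.jmock.Mockery;
import org.junit.Test;

public class JMockIdiomSketchJUnitTest {

  // hypothetical collaborator, standing in for Execution/ResultCollector above
  public interface GreetingService {
    String greet(String name);
  }

  @Test
  public void greetIsStubbedAndVerified() {
    Mockery context = new Mockery();
    final GreetingService service = context.mock(GreetingService.class, "GreetingService");

    context.checking(new Expectations() {
      {
        // expect exactly one call with a matching argument, and stub the return value
        oneOf(service).greet(with(equal("geode")));
        will(returnValue("hello, geode"));
      }
    });

    assertEquals("hello, geode", service.greet("geode"));

    // fails the test if any declared expectation went unmet
    context.assertIsSatisfied();
  }
}

The removed suites follow the same shape, with assertIsSatisfied() moved into an @After tearDown() method.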
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunctionJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunctionJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunctionJUnitTest.java
deleted file mode 100644
index 4a93e30..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunctionJUnitTest.java
+++ /dev/null
@@ -1,324 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-
-package com.gemstone.gemfire.management.internal.cli.functions;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.apache.logging.log4j.Logger;
-import org.jmock.Expectations;
-import org.jmock.Mockery;
-import org.jmock.lib.legacy.ClassImposteriser;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheClosedException;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.execute.ResultSender;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.InternalCache;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.management.internal.cli.commands.HDFSStoreCommandsJUnitTest;
-import com.gemstone.gemfire.management.internal.cli.functions.AlterHDFSStoreFunction.AlterHDFSStoreAttributes;
-import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-/**
- * The AlterHDFSStoreFunctionJUnitTest test suite class tests the contract and
- * functionality of the AlterHDFSStoreFunction class.
- *
- * @author Namrata Thanvi
- * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl
- * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder
- * @see com.gemstone.gemfire.management.internal.cli.functions.AlterHDFSStoreFunction
- * @see org.jmock.Expectations
- * @see org.jmock.Mockery
- * @see org.junit.Assert
- * @see org.junit.Test
- */
-@SuppressWarnings( { "unused" })
-@Category({IntegrationTest.class, HoplogTest.class})
-public class AlterHDFSStoreFunctionJUnitTest {
-
- private static final Logger logger = LogService.getLogger();
-
- private Mockery mockContext;
-
- @Before
- public void setup() {
- mockContext = new Mockery() {
- {
- setImposteriser(ClassImposteriser.INSTANCE);
- }
- };
- }
-
- @After
- public void tearDown() {
- mockContext.assertIsSatisfied();
- mockContext = null;
- }
-
- @Test
- public void testExecute() throws Throwable {
-
- final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
- final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
-
- final String memberId = "mockMemberId";
- final String memberName = "mockMemberName";
-
- final AlterHDFSStoreFunction function = createAlterHDFSStoreFunction(mockCache, mockMember, xmlEntity);
- final TestResultSender testResultSender = new TestResultSender();
- final HDFSStoreImpl mockHdfsStore = CreateHDFSStoreFunctionJUnitTest.createMockHDFSStoreImpl(mockContext,
- "hdfsStoreName", "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 20, 20, null, false, 0, 1024, false,
- false, true, 20, 20, 10, 100);
- final AlterHDFSStoreAttributes alterHDFSStoreAttributes = new AlterHDFSStoreAttributes(
- "mockStore", 100, 100, false, false, 100, 100, 100, 100, 100,
- 100);
-
- mockContext.checking(new Expectations() {
- {
- oneOf(mockMember).getId();
- will(returnValue(memberId));
- exactly(2).of(mockMember).getName();
- will(returnValue(memberName));
- oneOf(mockFunctionContext).getArguments();
- will(returnValue(alterHDFSStoreAttributes));
- oneOf(mockCache).findHDFSStore(alterHDFSStoreAttributes.getHdfsUniqueName());
- will(returnValue(mockHdfsStore));
- oneOf(mockFunctionContext).getResultSender();
- will(returnValue(testResultSender));
- }
- });
-
- function.execute(mockFunctionContext);
-
- final List<?> results = testResultSender.getResults();
-
- assertNotNull(results);
- assertEquals(1, results.size());
-
- final CliFunctionResult result = (CliFunctionResult)results.get(0);
- assertEquals(memberName, result.getMemberIdOrName());
- assertEquals("Success", result.getMessage());
-
- }
-
- @Test
- @SuppressWarnings("unchecked")
- public void testExecuteOnMemberHavingNoHDFSStore() throws Throwable {
-
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
- final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
- final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
-
- final String memberId = "mockMemberId";
- final String memberName = "mockMemberName";
-
- final TestResultSender testResultSender = new TestResultSender();
- final AlterHDFSStoreFunction function = createAlterHDFSStoreFunction(mockCache, mockMember, xmlEntity);
- final AlterHDFSStoreAttributes alterHDFSStoreAttributes = new AlterHDFSStoreAttributes(
- "mockStore", 100, 100, false, false, 100, 100, 100, 100, 100,
- 100);
-
- mockContext.checking(new Expectations() {
- {
- oneOf(mockCache).findHDFSStore(alterHDFSStoreAttributes.getHdfsUniqueName());
- will(returnValue(null));
- oneOf(mockMember).getId();
- will(returnValue(memberId));
- exactly(2).of(mockMember).getName();
- will(returnValue(memberName));
- oneOf(mockFunctionContext).getArguments();
- will(returnValue(alterHDFSStoreAttributes));
- oneOf(mockFunctionContext).getResultSender();
- will(returnValue(testResultSender));
- }
- });
-
- function.execute(mockFunctionContext);
-
- final List<?> results = testResultSender.getResults();
-
- assertNotNull(results);
- assertEquals(1, results.size());
-
- final CliFunctionResult result = (CliFunctionResult)results.get(0);
- assertEquals(memberName, result.getMemberIdOrName());
- assertEquals("Hdfs store not found on this member", result.getMessage());
- }
-
- @Test
- public void testExecuteOnMemberWithNoCache() throws Throwable {
-
- final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "MockFunctionContext");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final InternalCache mockCache = mockContext.mock(InternalCache.class, "Cache");
- final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
-
- final TestResultSender testResultSender = new TestResultSender();
- final AlterHDFSStoreAttributes alterHDFSStoreAttributes = new AlterHDFSStoreAttributes(
- "mockStore", 100, 100, false, false, 100, 100, 100, 100, 100,
- 100);
-
- final AlterHDFSStoreFunction function = new TestAlterHDFSStoreFunction(mockCache, mockMember, xmlEntity) {
- @Override
- protected Cache getCache() {
- throw new CacheClosedException("Expected");
- }
- };
-
- mockContext.checking(new Expectations() {
- {
- oneOf(mockFunctionContext).getArguments();
- will(returnValue(alterHDFSStoreAttributes));
- oneOf(mockFunctionContext).getResultSender();
- will(returnValue(testResultSender));
- }
- });
-
- function.execute(mockFunctionContext);
- final List<?> results = testResultSender.getResults();
-
- assertNotNull(results);
- assertEquals(1, results.size());
-
- final CliFunctionResult result = (CliFunctionResult)results.get(0);
- assertEquals("", result.getMemberIdOrName());
- assertNull(result.getMessage());
- }
-
- @Test
- public void testExecuteHandleRuntimeException() throws Throwable {
-
- final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
- final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
-
- final String memberId = "mockMemberId";
- final String memberName = "mockMemberName";
- final TestResultSender testResultSender = new TestResultSender();
- final AlterHDFSStoreFunction function = createAlterHDFSStoreFunction(mockCache, mockMember, xmlEntity);
-
- final AlterHDFSStoreAttributes alterHDFSStoreAttributes = new AlterHDFSStoreAttributes(
- "mockStore", 100, 100, false, false, 100, 100, 100, 100, 100,
- 100);
- mockContext.checking(new Expectations() {
- {
- oneOf(mockMember).getId();
- will(returnValue(memberId));
- exactly(2).of(mockMember).getName();
- will(returnValue(memberName));
- oneOf(mockFunctionContext).getArguments();
- will(returnValue(alterHDFSStoreAttributes));
- oneOf(mockCache).findHDFSStore(alterHDFSStoreAttributes.getHdfsUniqueName());
- will(throwException(new RuntimeException("expected")));
- oneOf(mockFunctionContext).getResultSender();
- will(returnValue(testResultSender));
- }
- });
-
- function.execute(mockFunctionContext);
- final List<?> results = testResultSender.getResults();
-
- assertNotNull(results);
- assertEquals(1, results.size());
-
- final CliFunctionResult result = (CliFunctionResult)results.get(0);
- assertEquals(memberName, result.getMemberIdOrName());
- assertEquals("expected", result.getThrowable().getMessage());
-
- }
-
- protected TestAlterHDFSStoreFunction createAlterHDFSStoreFunction(final Cache cache, DistributedMember member,
- XmlEntity xml) {
- return new TestAlterHDFSStoreFunction(cache, member, xml);
- }
-
- protected static class TestAlterHDFSStoreFunction extends AlterHDFSStoreFunction {
- private static final long serialVersionUID = 1L;
-
- private final Cache cache;
-
- private final DistributedMember member;
-
- private final XmlEntity xml;
-
- public TestAlterHDFSStoreFunction(final Cache cache, DistributedMember member, XmlEntity xml) {
- this.cache = cache;
- this.member = member;
- this.xml = xml;
- }
-
- @Override
- protected Cache getCache() {
- return this.cache;
- }
-
- @Override
- protected DistributedMember getDistributedMember(Cache cache) {
- return member;
- }
-
- @Override
- protected XmlEntity getXMLEntity(String storeName) {
- return xml;
- }
-
- @Override
- protected HDFSStore alterHdfsStore(HDFSStore hdfsStore, AlterHDFSStoreAttributes alterAttributes) {
- return hdfsStore;
- }
- }
-
- protected static class TestResultSender implements ResultSender {
-
- private final List<Object> results = new LinkedList<Object>();
-
- private Throwable t;
-
- protected List<Object> getResults() throws Throwable {
- if (t != null) {
- throw t;
- }
- return Collections.unmodifiableList(results);
- }
-
- public void lastResult(final Object lastResult) {
- results.add(lastResult);
- }
-
- public void sendResult(final Object oneResult) {
- results.add(oneResult);
- }
-
- public void sendException(final Throwable t) {
- this.t = t;
- }
- }
-
-}
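A side note on testability: TestAlterHDFSStoreFunction above works because AlterHDFSStoreFunction reaches its collaborators through overridable protected seams (getCache(), getDistributedMember(), getXMLEntity()), so the suite never needs a live cache. A minimal sketch of that seam pattern on a hypothetical function (TouchCacheFunction and its names are invented for illustration):

import com.gemstone.gemfire.cache.Cache;
import com.gemstone.gemfire.cache.CacheFactory;
import com.gemstone.gemfire.cache.execute.FunctionAdapter;
import com.gemstone.gemfire.cache.execute.FunctionContext;
import com.gemstone.gemfire.cache.execute.ResultSender;

// production class: the cache is reached only through a protected seam
public class TouchCacheFunction extends FunctionAdapter {
  private static final long serialVersionUID = 1L;

  @Override
  public void execute(FunctionContext context) {
    Cache cache = getCache(); // seam: a test subclass substitutes a mock here
    @SuppressWarnings("unchecked")
    ResultSender<Object> sender = (ResultSender<Object>) context.getResultSender();
    sender.lastResult(cache.getName());
  }

  protected Cache getCache() {
    return CacheFactory.getAnyInstance();
  }

  @Override
  public String getId() {
    return TouchCacheFunction.class.getName();
  }
}

// test subclass: injects the mock through the seam, mirroring TestAlterHDFSStoreFunction
class TestTouchCacheFunction extends TouchCacheFunction {
  private static final long serialVersionUID = 1L;
  private final Cache cache;

  TestTouchCacheFunction(Cache cache) {
    this.cache = cache;
  }

  @Override
  protected Cache getCache() {
    return this.cache;
  }
}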
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunctionJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunctionJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunctionJUnitTest.java
deleted file mode 100644
index 8a012b4..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunctionJUnitTest.java
+++ /dev/null
@@ -1,307 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-
-package com.gemstone.gemfire.management.internal.cli.functions;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Properties;
-
-import org.apache.logging.log4j.Logger;
-import org.jmock.Expectations;
-import org.jmock.Mockery;
-import org.jmock.lib.legacy.ClassImposteriser;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheClosedException;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.execute.ResultSender;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.management.internal.cli.commands.HDFSStoreCommandsJUnitTest;
-import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-/**
- * The CreateHDFSStoreFunctionJUnitTest test suite class tests the contract and
- * functionality of the CreateHDFSStoreFunction class.
- *
- * @author Namrata Thanvi
- * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl
- * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder
- * @see com.gemstone.gemfire.management.internal.cli.functions.CreateHDFSStoreFunction
- * @see org.jmock.Expectations
- * @see org.jmock.Mockery
- * @see org.junit.Assert
- * @see org.junit.Test
- */
-@SuppressWarnings( { "unused" })
-@Category({IntegrationTest.class, HoplogTest.class})
-public class CreateHDFSStoreFunctionJUnitTest {
-
- private static final Logger logger = LogService.getLogger();
-
- private Mockery mockContext;
-
- private static Properties props = new Properties();
-
- @Before
- public void setup() {
-
- mockContext = new Mockery() {
- {
- setImposteriser(ClassImposteriser.INSTANCE);
- }
- };
- }
-
- @After
- public void tearDown() {
- mockContext.assertIsSatisfied();
- mockContext = null;
- }
-
- @Test
- public void testExecute() throws Throwable {
-
- final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
- final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
-
- final String memberId = "mockMemberId";
- final String memberName = "mockMemberName";
-
- final TestResultSender testResultSender = new TestResultSender();
-
- final HDFSStoreImpl mockHdfsStore = createMockHDFSStoreImpl(mockContext, "hdfsStoreName", "hdfs://localhost:9000", "testDir",
- 1024, 20, .25f, null, 20, 20, null, false, 0, 1024, false, false, true, 20, 20, 10, 100);
-
- final HDFSStoreConfigHolder mockHdfsStoreConfigHolder = HDFSStoreCommandsJUnitTest.createMockHDFSStoreConfigHolder(
- mockContext, "hdfsStoreName", "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0,
- 2048, true, true, true, 40, 40, 40, 800);
-
- final CreateHDFSStoreFunction function = new TestCreateHDFSStoreFunction(mockCache, mockMember, xmlEntity , mockHdfsStore);
-
- mockContext.checking(new Expectations() {
- {
- oneOf(mockMember).getId();
- will(returnValue(memberId));
- exactly(2).of(mockMember).getName();
- will(returnValue(memberName));
- oneOf(mockFunctionContext).getArguments();
- will(returnValue(mockHdfsStoreConfigHolder));
- oneOf(mockFunctionContext).getResultSender();
- will(returnValue(testResultSender));
- }
- });
-
- function.execute(mockFunctionContext);
-
- final List<?> results = testResultSender.getResults();
-
- assertNotNull(results);
- assertEquals(1, results.size());
-
- final CliFunctionResult result = (CliFunctionResult)results.get(0);
- assertEquals(memberName, result.getMemberIdOrName());
- assertEquals("Success", result.getMessage());
-
- }
-
- @Test
- public void testExecuteOnMemberWithNoCache() throws Throwable {
-
- final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "MockFunctionContext");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
- final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
-
- final String memberId = "mockMemberId";
- final String memberName = "mockMemberName";
-
- final TestResultSender testResultSender = new TestResultSender();
- final HDFSStoreImpl mockHdfsStore = createMockHDFSStoreImpl(mockContext, "hdfsStoreName", "hdfs://localhost:9000", "testDir",
- 1024, 20, .25f, null, 20, 20, null, false, 0, 1024, false, false, true, 20, 20, 10, 100);
-
- final HDFSStoreConfigHolder mockHdfsStoreConfigHolder = HDFSStoreCommandsJUnitTest.createMockHDFSStoreConfigHolder(mockContext, "hdfsStoreName",
- "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0, 2048, true, true, true, 40,
- 40, 40, 800);
-
- final CreateHDFSStoreFunction function = new TestCreateHDFSStoreFunction(mockCache, mockMember, xmlEntity , mockHdfsStore) {
- @Override
- protected Cache getCache() {
- throw new CacheClosedException("Expected");
- }
- };
-
- mockContext.checking(new Expectations() {
- {
- oneOf(mockFunctionContext).getResultSender();
- will(returnValue(testResultSender));
- }
- });
-
- function.execute(mockFunctionContext);
- final List<?> results = testResultSender.getResults();
-
- assertNotNull(results);
- assertEquals(1, results.size());
-
- final CliFunctionResult result = (CliFunctionResult)results.get(0);
- assertEquals("", result.getMemberIdOrName());
- assertNull(result.getMessage());
- }
-
-
- @Test
- public void testExecuteHandleRuntimeException() throws Throwable {
-
- final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "MockFunctionContext");
- final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
- final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
- final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
-
- final String memberId = "mockMemberId";
- final String memberName = "mockMemberName";
-
- final TestResultSender testResultSender = new TestResultSender();
- final HDFSStoreImpl mockHdfsStore = createMockHDFSStoreImpl(mockContext, "hdfsStoreName", "hdfs://localhost:9000", "testDir",
- 1024, 20, .25f, null, 20, 20, null, false, 0, 1024, false, false, true, 20, 20, 10, 100);
-
- final HDFSStoreConfigHolder mockHdfsStoreConfigHolder = HDFSStoreCommandsJUnitTest.createMockHDFSStoreConfigHolder(
- mockContext, "hdfsStoreName", "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0,
- 2048, true, true, true, 40, 40, 40, 800);
-
- final CreateHDFSStoreFunction function = new TestCreateHDFSStoreFunction(mockCache, mockMember, xmlEntity , mockHdfsStore) {
- @Override
- protected Cache getCache() {
- throw new RuntimeException("expected");
- }
- };
-
- mockContext.checking(new Expectations() {
- {
- oneOf(mockFunctionContext).getResultSender();
- will(returnValue(testResultSender));
- }
- });
-
- function.execute(mockFunctionContext);
- final List<?> results = testResultSender.getResults();
-
- assertNotNull(results);
- assertEquals(1, results.size());
-
- final CliFunctionResult result = (CliFunctionResult)results.get(0);
- assertEquals("", result.getMemberIdOrName());
- assertEquals("expected", result.getThrowable().getMessage());
-
- }
-
- public static HDFSStoreImpl createMockHDFSStoreImpl(Mockery mockContext, final String storeName, final String namenode, final String homeDir,
- final int maxFileSize, final int fileRolloverInterval, final float blockCachesize, final String clientConfigFile,
- final int batchSize, final int batchInterval, final String diskStoreName, final boolean syncDiskwrite,
- final int dispatcherThreads, final int maxMemory, final boolean bufferPersistent, final boolean minorCompact,
- final boolean majorCompact, final int majorCompactionInterval, final int majorCompactionThreads,
- final int minorCompactionThreads, final int purgeInterval) {
-
- HDFSStoreImpl mockHdfsStore = mockContext.mock(HDFSStoreImpl.class, "HDFSStoreImpl");
-
- HDFSStoreCommandsJUnitTest.createMockStore(mockContext, mockHdfsStore, storeName, namenode, homeDir, maxFileSize,
- fileRolloverInterval, minorCompact, minorCompactionThreads, majorCompact, majorCompactionThreads,
- majorCompactionInterval, purgeInterval, blockCachesize, clientConfigFile, batchSize, batchInterval,
- diskStoreName, syncDiskwrite, dispatcherThreads, maxMemory, bufferPersistent);
-
- return mockHdfsStore;
- }
-
- protected static class TestCreateHDFSStoreFunction extends CreateHDFSStoreFunction {
- private static final long serialVersionUID = 1L;
-
- private final Cache cache;
-
- private final DistributedMember member;
-
- private final XmlEntity xml;
-
- private final HDFSStoreImpl hdfsStore;
-
- public TestCreateHDFSStoreFunction(Cache cache, DistributedMember member, XmlEntity xml , HDFSStoreImpl hdfsStore) {
- this.cache = cache;
- this.member = member;
- this.xml = xml;
- this.hdfsStore = hdfsStore;
- }
-
- @Override
- protected Cache getCache() {
- return this.cache;
- }
-
- @Override
- protected DistributedMember getDistributedMember(Cache cache) {
- return member;
- }
-
- @Override
- protected XmlEntity getXMLEntity(String storeName) {
- return xml;
- }
-
- @Override
- protected HDFSStoreImpl createHdfsStore(Cache cache, HDFSStoreConfigHolder configHolder){
- return hdfsStore;
- }
- }
-
- protected static class TestResultSender implements ResultSender {
-
- private final List<Object> results = new LinkedList<Object>();
-
- private Throwable t;
-
- protected List<Object> getResults() throws Throwable {
- if (t != null) {
- throw t;
- }
- return Collections.unmodifiableList(results);
- }
-
- public void lastResult(final Object lastResult) {
- results.add(lastResult);
- }
-
- public void sendResult(final Object oneResult) {
- results.add(oneResult);
- }
-
- public void sendException(final Throwable t) {
- this.t = t;
- }
- }
-
-}
[05/15] incubator-geode git commit: GEODE-429: Remove RegionFactory.setHdfsStore
Posted by as...@apache.org.
GEODE-429: Remove RegionFactory.setHdfsStore
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/7bcc1e44
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/7bcc1e44
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/7bcc1e44
Branch: refs/heads/feature/GEODE-409
Commit: 7bcc1e44cb7f0f69381c06d583b058926ca85331
Parents: b3f838e
Author: Ashvin Agrawal <as...@apache.org>
Authored: Mon Oct 19 13:41:31 2015 -0700
Committer: Ashvin Agrawal <as...@apache.org>
Committed: Wed Oct 21 08:55:22 2015 -0700
----------------------------------------------------------------------
.../gemstone/gemfire/cache/RegionFactory.java | 25 --------------------
.../cli/functions/RegionCreateFunction.java | 8 -------
.../hdfs/internal/HDFSConfigJUnitTest.java | 16 ++++++++-----
.../hdfs/internal/HDFSEntriesSetJUnitTest.java | 3 ++-
.../internal/hoplog/BaseHoplogTestCase.java | 2 +-
.../HdfsSortedOplogOrganizerJUnitTest.java | 2 +-
...FSQueueRegionOperationsOffHeapJUnitTest.java | 2 +-
.../cache/HDFSRegionOperationsJUnitTest.java | 4 ++--
.../HDFSRegionOperationsOffHeapJUnitTest.java | 2 +-
.../HDFSRegionMBeanAttributeJUnitTest.java | 2 +-
10 files changed, 19 insertions(+), 47 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7bcc1e44/gemfire-core/src/main/java/com/gemstone/gemfire/cache/RegionFactory.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/RegionFactory.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/RegionFactory.java
index 40041cb..72a0a44 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/RegionFactory.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/RegionFactory.java
@@ -902,31 +902,6 @@ public class RegionFactory<K,V>
this.attrsFactory.addAsyncEventQueueId(asyncEventQueueId);
return this;
}
- /**
- * Sets the HDFSStore name attribute.
- * This causes the region to belong to the HDFSStore.
- * @param name the name of the hdfsstore
- * @return a reference to this RegionFactory object
- *
- * @see AttributesFactory#setHDFSStoreName
- * @since 9.0
- */
- public RegionFactory<K,V> setHDFSStoreName(String name) {
- this.attrsFactory.setHDFSStoreName(name);
- return this;
- }
-
- /**
- * Sets the HDFS write only attribute. if the region
- * is configured to be write only to HDFS, events that have
- * been evicted from memory cannot be read back from HDFS.
- * Events are written to HDFS in the order in which they occurred.
- * @since 9.0
- */
- public RegionFactory<K,V> setHDFSWriteOnly(boolean writeOnly) {
- this.attrsFactory.setHDFSWriteOnly(writeOnly);
- return this;
- }
/**
* Set the compressor to be used by this region for compressing
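For application code, the upshot of this hunk is that a region can no longer be bound to an HDFS store through RegionFactory; as the test diffs below show, callers simply drop the two calls rather than replace them. A before/after sketch under that reading, assuming a Cache named cache and the usual com.gemstone.gemfire.cache imports (store and region names are illustrative):

// before GEODE-429: the factory chained the HDFS attributes
Region<String, String> before = cache
    .<String, String>createRegionFactory(RegionShortcut.PARTITION)
    .setHDFSStoreName("myHDFSStore") // removed by this commit
    .setHDFSWriteOnly(true)          // removed by this commit
    .create("r1");

// after GEODE-429: the same region is created without any HDFS binding
Region<String, String> after = cache
    .<String, String>createRegionFactory(RegionShortcut.PARTITION)
    .create("r1");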
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7bcc1e44/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionCreateFunction.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionCreateFunction.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionCreateFunction.java
index 74afc47..3bf8b3f 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionCreateFunction.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionCreateFunction.java
@@ -309,14 +309,6 @@ public class RegionCreateFunction extends FunctionAdapter implements InternalEnt
String regionName = regionPathData.getName();
- final String hdfsStoreName = regionCreateArgs.getHDFSStoreName();
- if (hdfsStoreName != null && !hdfsStoreName.isEmpty()) {
- factory.setHDFSStoreName(hdfsStoreName);
- }
- if (regionCreateArgs.isSetHDFSWriteOnly()) {
- factory.setHDFSWriteOnly(regionCreateArgs.getHDFSWriteOnly());
- }
-
if (parentRegion != null) {
createdRegion = factory.createSubregion(parentRegion, regionName);
} else {
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7bcc1e44/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
index b0c6520..26e6c73 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
@@ -72,7 +72,8 @@ public class HDFSConfigJUnitTest extends TestCase {
HDFSStoreFactory hsf = this.c.createHDFSStoreFactory();
HDFSStore store = hsf.create("myHDFSStore");
RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
- Region r1 = rf1.setHDFSStoreName("myHDFSStore").create("r1");
+// rf1.setHDFSStoreName("myHDFSStore");
+ Region r1 = rf1.create("r1");
r1.put("k1", "v1");
@@ -89,8 +90,9 @@ public class HDFSConfigJUnitTest extends TestCase {
hsf = this.c.createHDFSStoreFactory();
hsf.create("myHDFSStore");
- r1 = this.c.createRegionFactory(RegionShortcut.PARTITION).setHDFSStoreName("myHDFSStore")
- .create("r1");
+ RegionFactory<Object, Object> rf = this.c.createRegionFactory(RegionShortcut.PARTITION);
+// rf.setHDFSStoreName("myHDFSStore");
+ r1 = rf.create("r1");
r1.put("k1", "v1");
assertTrue("Mismatch in attributes, actual.batchsize: " + store.getBatchSize() + " and expected batchsize: 32", store.getBatchSize()== 32);
@@ -126,8 +128,9 @@ public class HDFSConfigJUnitTest extends TestCase {
hsf.create("myHDFSStore");
- r1 = this.c.createRegionFactory(RegionShortcut.PARTITION).setHDFSStoreName("myHDFSStore")
- .setHDFSWriteOnly(true).create("r1");
+ rf = this.c.createRegionFactory(RegionShortcut.PARTITION);
+// rf.setHDFSStoreName("myHDFSStore").setHDFSWriteOnly(true);
+ r1 = rf.create("r1");
r1.put("k1", "v1");
store = c.findHDFSStore(r1.getAttributes().getHDFSStoreName());
@@ -469,7 +472,8 @@ public class HDFSConfigJUnitTest extends TestCase {
HDFSStoreImpl store = (HDFSStoreImpl) hsf.create("myHDFSStore");
RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
//Create a region that evicts everything
- LocalRegion r1 = (LocalRegion) rf1.setHDFSStoreName("myHDFSStore").setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(1)).create("r1");
+// rf1.setHDFSStoreName("myHDFSStore");
+ LocalRegion r1 = (LocalRegion) rf1.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(1)).create("r1");
//Populate about many times our block cache size worth of data
//We want to try to cache at least 5 blocks worth of index and metadata
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7bcc1e44/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
index f864176..3085a66 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
@@ -73,7 +73,8 @@ public class HDFSEntriesSetJUnitTest extends TestCase {
paf.setTotalNumBuckets(1);
RegionFactory rf = cache.createRegionFactory(RegionShortcut.PARTITION);
- region = (PartitionedRegion) rf.setHDFSStoreName("test").setPartitionAttributes(paf.create()).create("test");
+// rf.setHDFSStoreName("test");
+ region = (PartitionedRegion) rf.setPartitionAttributes(paf.create()).create("test");
// prime the region so buckets get created
region.put("test", "test");
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7bcc1e44/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
index b35f756..07d9f77 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
@@ -90,7 +90,7 @@ public abstract class BaseHoplogTestCase extends TestCase {
hdfsStore = (HDFSStoreImpl) hsf.create(HDFS_STORE_NAME);
regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION);
- regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
+// regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
region = regionfactory.create(getName());
// disable compaction by default and clear existing queues
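Since BaseHoplogTestCase is the shared fixture for the hoplog tests, this single commented line changes every subclass: the HDFS store itself is still created, but no region is attached to it any more, so the tests exercise the store and hoplog organizer directly. A sketch of the fixture after the change, keeping the field names from the diff (the rest of setUp is assumed):

    hdfsStore = (HDFSStoreImpl) hsf.create(HDFS_STORE_NAME); // store still created
    regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION);
    // regionfactory.setHDFSStoreName(HDFS_STORE_NAME);      // no longer attached
    region = regionfactory.create(getName());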
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7bcc1e44/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizerJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizerJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizerJUnitTest.java
index 4529067..e6a1229 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizerJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizerJUnitTest.java
@@ -744,7 +744,7 @@ public class HdfsSortedOplogOrganizerJUnitTest extends BaseHoplogTestCase {
}
// create region with store
- regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
+// regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
Region<Object, Object> region1 = regionfactory.create("region-1");
ExpectedException ex = DistributedTestCase.addExpectedException("CorruptHFileException");
try {
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7bcc1e44/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
index 4565568..24cd1dc 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
@@ -37,7 +37,7 @@ public class HDFSQueueRegionOperationsOffHeapJUnitTest extends HDFSQueueRegionOp
PartitionAttributes prAttr = new PartitionAttributesFactory().setTotalNumBuckets(10).create();
rf.setPartitionAttributes(prAttr);
rf.setOffHeap(true);
- rf.setHDFSStoreName(hdfsStore.getName());
+// rf.setHDFSStoreName(hdfsStore.getName());
Region<Integer, String> r = rf.create(regionName);
// addListener(r);
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7bcc1e44/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
index b24ee5d..d96e31b 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
@@ -108,7 +108,7 @@ public class HDFSRegionOperationsJUnitTest extends TestCase {
RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION);
PartitionAttributes prAttr = new PartitionAttributesFactory().setTotalNumBuckets(10).create();
rf.setPartitionAttributes(prAttr);
- rf.setHDFSStoreName(hdfsStore.getName());
+// rf.setHDFSStoreName(hdfsStore.getName());
Region<Integer, String> r = rf.create(regionName);
((PartitionedRegion) r).setQueryHDFS(true);
@@ -265,7 +265,7 @@ public class HDFSRegionOperationsJUnitTest extends TestCase {
public void test050LRURegionAttributesForPR() {
RegionFactory<Integer, String> rf = cache.createRegionFactory();
- rf.setHDFSStoreName(hdfsStore.getName());
+// rf.setHDFSStoreName(hdfsStore.getName());
rf.setDataPolicy(DataPolicy.HDFS_PARTITION);
verifyLRURegionAttributesForPR(rf.create(getName()));
}
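Note that, unlike the purely mechanical edits above, HDFSRegionOperationsJUnitTest still carries two HDFS-specific calls after this commit: setQueryHDFS(true) on the created region and DataPolicy.HDFS_PARTITION on the factory. Only the store attachment is disabled here; presumably the remaining calls go away later in the series. A sketch of the first setup after this change, with names taken from the hunk:

    RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION);
    PartitionAttributes prAttr = new PartitionAttributesFactory().setTotalNumBuckets(10).create();
    rf.setPartitionAttributes(prAttr);
    // rf.setHDFSStoreName(hdfsStore.getName());  // disabled by this commit
    Region<Integer, String> r = rf.create(regionName);
    ((PartitionedRegion) r).setQueryHDFS(true);   // still present at this point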
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7bcc1e44/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
index f9c96a2..de2aae3 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
@@ -59,7 +59,7 @@ public class HDFSRegionOperationsOffHeapJUnitTest extends HDFSRegionOperationsJU
PartitionAttributes prAttr = new PartitionAttributesFactory().setTotalNumBuckets(10).create();
rf.setPartitionAttributes(prAttr);
rf.setOffHeap(true);
- rf.setHDFSStoreName(hdfsStore.getName());
+// rf.setHDFSStoreName(hdfsStore.getName());
Region<Integer, String> r = rf.create(regionName);
// addListener(r);
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7bcc1e44/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
index c563d5a..14b61e6 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
@@ -77,7 +77,7 @@ public class HDFSRegionMBeanAttributeJUnitTest extends TestCase {
hdfsStore = (HDFSStoreImpl) hsf.create(HDFS_STORE_NAME);
RegionFactory<Object, Object> regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION);
- regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
+// regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
// regionfactory.setCompressionCodec("Some");
PartitionAttributesFactory fac = new PartitionAttributesFactory();