Posted to commits@geode.apache.org by kl...@apache.org on 2017/05/08 23:16:07 UTC
[29/46] geode git commit: GEODE-2632: change dependencies on GemFireCacheImpl to InternalCache
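
The theme of the change is to make internal collaborators depend on the InternalCache interface rather than the concrete GemFireCacheImpl class, so the concrete type only appears where the cache is actually constructed. A minimal sketch of the before/after shape of one such signature change (the collaborator class below is hypothetical, not part of the commit):

    import org.apache.geode.internal.cache.InternalCache;

    class CacheCollaboratorSketch {
      // Before this change a collaborator would typically be declared as:
      //   void init(GemFireCacheImpl cache) { ... }
      // After it, the interface is enough:
      void init(InternalCache cache) {
        // Only methods exposed on InternalCache (and its Cache super-interface) are used,
        // so any InternalCache implementation can be supplied here.
        System.out.println("initialized against " + cache.getDistributedSystem().getName());
      }
    }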
http://git-wip-us.apache.org/repos/asf/geode/blob/557a127b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheLifecycleListener.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheLifecycleListener.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheLifecycleListener.java
index c7e6a37..e0f1d99 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheLifecycleListener.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheLifecycleListener.java
@@ -15,7 +15,7 @@
package org.apache.geode.internal.cache;
/**
- * Defines callbacks that are invoked when a <code>GemFireCache</code> is created or closed.
+ * Defines callbacks that are invoked when an {@code InternalCache} is created or closed.
*
* @see GemFireCacheImpl#addCacheLifecycleListener(CacheLifecycleListener)
* @see GemFireCacheImpl#removeCacheLifecycleListener(CacheLifecycleListener)
@@ -23,12 +23,12 @@ package org.apache.geode.internal.cache;
public interface CacheLifecycleListener {
/**
- * Invoked when a new <code>GemFireCache</code> is created
+ * Invoked when a new {@code InternalCache} is created
*/
- public void cacheCreated(GemFireCacheImpl cache);
+ void cacheCreated(InternalCache cache);
/**
- * Invoked when a <code>GemFireCache</code> is closed
+ * Invoked when an {@code InternalCache} is closed
*/
- public void cacheClosed(GemFireCacheImpl cache);
+ void cacheClosed(InternalCache cache);
}
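
For context, a listener written against the revised interface now receives an InternalCache in both callbacks. A minimal sketch, assuming registration still goes through the GemFireCacheImpl methods named in the @see tags above (the listener class itself is hypothetical):

    import org.apache.geode.internal.cache.CacheLifecycleListener;
    import org.apache.geode.internal.cache.InternalCache;

    class LoggingCacheLifecycleListener implements CacheLifecycleListener {
      @Override
      public void cacheCreated(InternalCache cache) {
        System.out.println("cache created: " + cache);
      }

      @Override
      public void cacheClosed(InternalCache cache) {
        System.out.println("cache closed: " + cache);
      }
    }

    // Registration, per the @see references in the javadoc:
    // GemFireCacheImpl.addCacheLifecycleListener(new LoggingCacheLifecycleListener());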
http://git-wip-us.apache.org/repos/asf/geode/blob/557a127b/geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java
index 67e04f1..59fa5dd 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java
@@ -1283,11 +1283,10 @@ public class CachePerfStats {
public void incDeltaFullValuesRequested() {
stats.incInt(deltaFullValuesRequestedId, 1);
}
- ////// Special Instance Methods /////
/**
- * Closes these stats so that they can not longer be used. The stats are closed when the
- * {@linkplain GemFireCacheImpl#close cache} is closed.
+ * Closes these stats so that they can no longer be used. The stats are closed when the cache is
+ * closed.
*
* @since GemFire 3.5
*/
http://git-wip-us.apache.org/repos/asf/geode/blob/557a127b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerImpl.java
index 367b6f4..670c697 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerImpl.java
@@ -14,19 +14,51 @@
*/
package org.apache.geode.internal.cache;
+import static java.lang.Integer.*;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.logging.log4j.Logger;
+
import org.apache.geode.CancelCriterion;
import org.apache.geode.GemFireIOException;
import org.apache.geode.InternalGemFireError;
import org.apache.geode.InvalidValueException;
-import org.apache.geode.cache.*;
+import org.apache.geode.cache.AttributesFactory;
+import org.apache.geode.cache.ClientSession;
+import org.apache.geode.cache.DataPolicy;
+import org.apache.geode.cache.DiskStore;
+import org.apache.geode.cache.DiskStoreFactory;
+import org.apache.geode.cache.DynamicRegionFactory;
+import org.apache.geode.cache.EvictionAction;
+import org.apache.geode.cache.EvictionAttributes;
+import org.apache.geode.cache.InterestRegistrationListener;
+import org.apache.geode.cache.RegionAttributes;
+import org.apache.geode.cache.RegionExistsException;
+import org.apache.geode.cache.Scope;
import org.apache.geode.cache.server.CacheServer;
import org.apache.geode.cache.server.ClientSubscriptionConfig;
import org.apache.geode.cache.server.ServerLoadProbe;
import org.apache.geode.cache.server.internal.LoadMonitor;
import org.apache.geode.cache.wan.GatewayTransportFilter;
import org.apache.geode.distributed.DistributedMember;
-import org.apache.geode.distributed.internal.*;
+import org.apache.geode.distributed.internal.DM;
+import org.apache.geode.distributed.internal.DistributionAdvisee;
+import org.apache.geode.distributed.internal.DistributionAdvisor;
import org.apache.geode.distributed.internal.DistributionAdvisor.Profile;
+import org.apache.geode.distributed.internal.DistributionConfig;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.internal.ResourceEvent;
+import org.apache.geode.distributed.internal.ServerLocation;
import org.apache.geode.distributed.internal.membership.MemberAttributes;
import org.apache.geode.internal.Assert;
import org.apache.geode.internal.OSProcess;
@@ -42,29 +74,19 @@ import org.apache.geode.internal.logging.LogService;
import org.apache.geode.internal.logging.log4j.LocalizedMessage;
import org.apache.geode.management.membership.ClientMembership;
import org.apache.geode.management.membership.ClientMembershipListener;
-import org.apache.logging.log4j.Logger;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.*;
-import java.util.concurrent.atomic.AtomicInteger;
/**
- * An implementation of the <code>CacheServer</code> interface that delegates most of the heavy
- * lifting to an {@link Acceptor}.
+ * An implementation of the {@code CacheServer} interface that delegates most of the heavy lifting to
+ * an {@link Acceptor}.
*
* @since GemFire 4.0
*/
@SuppressWarnings("deprecation")
public class CacheServerImpl extends AbstractCacheServer implements DistributionAdvisee {
-
private static final Logger logger = LogService.getLogger();
- private static final int FORCE_LOAD_UPDATE_FREQUENCY =
- Integer
- .getInteger(
- DistributionConfig.GEMFIRE_PREFIX + "BridgeServer.FORCE_LOAD_UPDATE_FREQUENCY", 10)
- .intValue();
+ private static final int FORCE_LOAD_UPDATE_FREQUENCY = getInteger(
+ DistributionConfig.GEMFIRE_PREFIX + "BridgeServer.FORCE_LOAD_UPDATE_FREQUENCY", 10);
/** The acceptor that does the actual serving */
private volatile AcceptorImpl acceptor;
@@ -88,7 +110,7 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
*/
private boolean isGatewayReceiver;
- private List<GatewayTransportFilter> gatewayTransportFilters = Collections.EMPTY_LIST;
+ private List<GatewayTransportFilter> gatewayTransportFilters = Collections.emptyList();
/** is this a server created by a launcher as opposed to by an application or XML? */
private boolean isDefaultServer;
@@ -107,10 +129,10 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
// ////////////////////// Constructors //////////////////////
/**
- * Creates a new <code>BridgeServerImpl</code> that serves the contents of the give
- * <code>Cache</code>. It has the default configuration.
+ * Creates a new {@code BridgeServerImpl} that serves the contents of the given {@code Cache}. It
+ * has the default configuration.
*/
- public CacheServerImpl(GemFireCacheImpl cache, boolean isGatewayReceiver) {
+ public CacheServerImpl(InternalCache cache, boolean isGatewayReceiver) {
super(cache);
this.isGatewayReceiver = isGatewayReceiver;
}
@@ -251,8 +273,8 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
}
/**
- * Sets the configuration of <b>this</b> <code>CacheServer</code> based on the configuration of
- * <b>another</b> <code>CacheServer</code>.
+ * Sets the configuration of <b>this</b> {@code CacheServer} based on the configuration of
+ * <b>another</b> {@code CacheServer}.
*/
public void configureFrom(CacheServer other) {
setPort(other.getPort());
@@ -302,8 +324,8 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
List overflowAttributesList = new LinkedList();
ClientSubscriptionConfig csc = this.getClientSubscriptionConfig();
overflowAttributesList.add(0, csc.getEvictionPolicy());
- overflowAttributesList.add(1, Integer.valueOf(csc.getCapacity()));
- overflowAttributesList.add(2, Integer.valueOf(this.port));
+ overflowAttributesList.add(1, valueOf(csc.getCapacity()));
+ overflowAttributesList.add(2, valueOf(this.port));
String diskStoreName = csc.getDiskStoreName();
if (diskStoreName != null) {
overflowAttributesList.add(3, diskStoreName);
@@ -327,9 +349,7 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
// TODO : Need to provide facility to enable/disable client health monitoring.
// Creating ClientHealthMonitoring region.
// Force initialization on current cache
- if (cache instanceof GemFireCacheImpl) {
- ClientHealthMonitoringRegion.getInstance((GemFireCacheImpl) cache);
- }
+ ClientHealthMonitoringRegion.getInstance(this.cache);
this.cache.getLoggerI18n()
.config(LocalizedStrings.CacheServerImpl_CACHESERVER_CONFIGURATION___0, getConfig());
@@ -379,7 +399,7 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
throw new IllegalStateException(s);
}
}
- if (this.hostnameForClients == null || this.hostnameForClients.equals("")) {
+ if (this.hostnameForClients == null || this.hostnameForClients.isEmpty()) {
if (this.acceptor != null) {
return this.acceptor.getExternalAddress();
} else {
@@ -518,16 +538,10 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
/**
* create client subscription
*
- * @param cache
- * @param ePolicy
- * @param capacity
- * @param port
- * @param overFlowDir
- * @param isDiskStore
* @return client subscription name
* @since GemFire 5.7
*/
- public static String clientMessagesRegion(GemFireCacheImpl cache, String ePolicy, int capacity,
+ public static String clientMessagesRegion(InternalCache cache, String ePolicy, int capacity,
int port, String overFlowDir, boolean isDiskStore) {
AttributesFactory factory =
getAttribFactoryForClientMessagesRegion(cache, ePolicy, capacity, overFlowDir, isDiskStore);
@@ -536,7 +550,7 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
return createClientMessagesRegion(attr, cache, capacity, port);
}
- public static AttributesFactory getAttribFactoryForClientMessagesRegion(GemFireCacheImpl cache,
+ public static AttributesFactory getAttribFactoryForClientMessagesRegion(InternalCache cache,
String ePolicy, int capacity, String overflowDir, boolean isDiskStore)
throws InvalidValueException, GemFireIOException {
AttributesFactory factory = new AttributesFactory();
@@ -564,9 +578,10 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
"Could not create client subscription overflow directory: " + dir.getAbsolutePath());
}
File[] dirs = {dir};
+
DiskStoreFactory dsf = cache.createDiskStoreFactory();
- DiskStore bsi = dsf.setAutoCompact(true)
- .setDiskDirsAndSizes(dirs, new int[] {Integer.MAX_VALUE}).create("bsi");
+ dsf.setAutoCompact(true).setDiskDirsAndSizes(dirs, new int[] {MAX_VALUE}).create("bsi");
+
factory.setDiskStoreName("bsi");
// backward compatibility, it was sync
factory.setDiskSynchronous(true);
@@ -576,11 +591,12 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
factory.setStatisticsEnabled(true);
/* setting LIFO related eviction attributes */
if (HARegionQueue.HA_EVICTION_POLICY_ENTRY.equals(ePolicy)) {
- factory.setEvictionAttributes(EvictionAttributesImpl.createLIFOEntryAttributes(capacity,
- EvictionAction.OVERFLOW_TO_DISK));
- } else if (HARegionQueue.HA_EVICTION_POLICY_MEMORY.equals(ePolicy)) { // condition refinement
- factory.setEvictionAttributes(EvictionAttributesImpl.createLIFOMemoryAttributes(capacity,
- EvictionAction.OVERFLOW_TO_DISK));
+ factory.setEvictionAttributes(
+ EvictionAttributes.createLIFOEntryAttributes(capacity, EvictionAction.OVERFLOW_TO_DISK));
+ } else if (HARegionQueue.HA_EVICTION_POLICY_MEMORY.equals(ePolicy)) {
+ // condition refinement
+ factory.setEvictionAttributes(
+ EvictionAttributes.createLIFOMemoryAttributes(capacity, EvictionAction.OVERFLOW_TO_DISK));
} else {
// throw invalid eviction policy exception
throw new InvalidValueException(
@@ -589,7 +605,7 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
return factory;
}
- public static String createClientMessagesRegion(RegionAttributes attr, GemFireCacheImpl cache,
+ private static String createClientMessagesRegion(RegionAttributes attr, InternalCache cache,
int capacity, int port) {
// generating unique name in VM for ClientMessagesRegion
String regionName = generateNameForClientMsgsRegion(port);
@@ -615,22 +631,9 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
return regionName;
}
- public static String createClientMessagesRegionForTesting(GemFireCacheImpl cache, String ePolicy,
- int capacity, int port, int expiryTime, String overFlowDir, boolean isDiskStore) {
- AttributesFactory factory =
- getAttribFactoryForClientMessagesRegion(cache, ePolicy, capacity, overFlowDir, isDiskStore);
- ExpirationAttributes ea =
- new ExpirationAttributes(expiryTime, ExpirationAction.LOCAL_INVALIDATE);
- factory.setEntryTimeToLive(ea);
- RegionAttributes attr = factory.create();
-
- return createClientMessagesRegion(attr, cache, capacity, port);
- }
-
/**
* Generates the name for the client subscription using the given id.
*
- * @param id
* @return String
* @since GemFire 5.7
*/
@@ -662,9 +665,9 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
}
/**
- * Returns the underlying <code>InternalDistributedSystem</code> connection.
+ * Returns the underlying {@code InternalDistributedSystem} connection.
*
- * @return the underlying <code>InternalDistributedSystem</code>
+ * @return the underlying {@code InternalDistributedSystem}
*/
public InternalDistributedSystem getSystem() {
return (InternalDistributedSystem) this.cache.getDistributedSystem();
@@ -729,10 +732,10 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
}
/**
- * Registers a new <code>InterestRegistrationListener</code> with the set of
- * <code>InterestRegistrationListener</code>s.
+ * Registers a new {@code InterestRegistrationListener} with the set of
+ * {@code InterestRegistrationListener}s.
*
- * @param listener The <code>InterestRegistrationListener</code> to register
+ * @param listener The {@code InterestRegistrationListener} to register
* @throws IllegalStateException if the BridgeServer has not been started
* @since GemFire 5.8Beta
*/
@@ -745,10 +748,10 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
}
/**
- * Unregisters an existing <code>InterestRegistrationListener</code> from the set of
- * <code>InterestRegistrationListener</code>s.
+ * Unregisters an existing {@code InterestRegistrationListener} from the set of
+ * {@code InterestRegistrationListener}s.
*
- * @param listener The <code>InterestRegistrationListener</code> to unregister
+ * @param listener The {@code InterestRegistrationListener} to unregister
*
* @since GemFire 5.8Beta
*/
@@ -757,11 +760,9 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
}
/**
- * Returns a read-only set of <code>InterestRegistrationListener</code>s registered with this
- * notifier.
+ * Returns a read-only set of {@code InterestRegistrationListener}s registered with this notifier.
*
- * @return a read-only set of <code>InterestRegistrationListener</code>s registered with this
- * notifier
+ * @return a read-only set of {@code InterestRegistrationListener}s registered with this notifier
*
* @since GemFire 5.8Beta
*/
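
One small cleanup in this file replaces the verbose Integer.getInteger(...).intValue() chain with a statically imported getInteger when reading the FORCE_LOAD_UPDATE_FREQUENCY tunable. The same idiom in isolation (the property name below is a placeholder, not a Geode property):

    import static java.lang.Integer.getInteger;

    class TunableSketch {
      // Reads -Dexample.updateFrequency=N and falls back to 10 when the property is unset
      // or unparsable; auto-unboxing removes the need for an explicit intValue() call.
      private static final int UPDATE_FREQUENCY = getInteger("example.updateFrequency", 10);
    }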
http://git-wip-us.apache.org/repos/asf/geode/blob/557a127b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerLauncher.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerLauncher.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerLauncher.java
index 9a544d2..6bfb0f3 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerLauncher.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerLauncher.java
@@ -15,6 +15,26 @@
package org.apache.geode.internal.cache;
+import static org.apache.geode.distributed.ConfigurationProperties.*;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.PrintStream;
+import java.io.Serializable;
+import java.net.URL;
+import java.util.AbstractList;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.TimeUnit;
+
import org.apache.geode.SystemFailure;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
@@ -28,26 +48,17 @@ import org.apache.geode.distributed.internal.InternalDistributedSystem;
import org.apache.geode.i18n.LogWriterI18n;
import org.apache.geode.internal.OSProcess;
import org.apache.geode.internal.PureJavaMode;
-import org.apache.geode.internal.net.SocketCreator;
import org.apache.geode.internal.cache.tier.sockets.CacheServerHelper;
import org.apache.geode.internal.i18n.LocalizedStrings;
+import org.apache.geode.internal.net.SocketCreator;
import org.apache.geode.internal.process.StartupStatus;
import org.apache.geode.internal.process.StartupStatusListener;
import org.apache.geode.internal.util.IOUtils;
import org.apache.geode.internal.util.JavaCommandBuilder;
-import java.io.*;
-import java.net.URL;
-import java.util.*;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.geode.distributed.ConfigurationProperties.LOG_FILE;
-import static org.apache.geode.distributed.ConfigurationProperties.SERVER_BIND_ADDRESS;
-
/**
* Launcher program to start a cache server.
*
- *
* @since GemFire 2.0.2
*/
public class CacheServerLauncher {
@@ -669,7 +680,7 @@ public class CacheServerLauncher {
// redirect output to the log file
OSProcess.redirectOutput(system.getConfig().getLogFile());
- Cache cache = this.createCache(system, options);
+ InternalCache cache = createCache(system, options);
cache.setIsServer(true);
startAdditionalServices(cache, options);
@@ -678,7 +689,7 @@ public class CacheServerLauncher {
clearLogListener();
if (ASSIGN_BUCKETS) {
- for (PartitionedRegion region : ((GemFireCacheImpl) cache).getPartitionedRegions()) {
+ for (PartitionedRegion region : cache.getPartitionedRegions()) {
PartitionRegionHelper.assignBucketsToPartitions(region);
}
}
@@ -823,9 +834,9 @@ public class CacheServerLauncher {
return -1.0f;
}
- protected Cache createCache(InternalDistributedSystem system, Map<String, Object> options)
+ protected InternalCache createCache(InternalDistributedSystem system, Map<String, Object> options)
throws IOException {
- Cache cache = CacheFactory.create(system);
+ InternalCache cache = (InternalCache) CacheFactory.create(system);
float threshold = getCriticalHeapPercent(options);
if (threshold > 0.0f) {
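
The launcher's createCache now returns InternalCache by downcasting the result of CacheFactory.create(system), which lets the caller use internal-only operations such as getPartitionedRegions() without further casts. A minimal sketch of the same pattern outside the launcher (the surrounding class is illustrative):

    import org.apache.geode.cache.CacheFactory;
    import org.apache.geode.distributed.internal.InternalDistributedSystem;
    import org.apache.geode.internal.cache.InternalCache;

    class LauncherSketch {
      InternalCache createCache(InternalDistributedSystem system) {
        // CacheFactory.create returns the public Cache type; internal callers that need
        // internal-only methods downcast once here rather than at every use site.
        return (InternalCache) CacheFactory.create(system);
      }
    }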
http://git-wip-us.apache.org/repos/asf/geode/blob/557a127b/geode-core/src/main/java/org/apache/geode/internal/cache/CachedDeserializableFactory.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CachedDeserializableFactory.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CachedDeserializableFactory.java
index 037e589..c332db3 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CachedDeserializableFactory.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CachedDeserializableFactory.java
@@ -84,11 +84,8 @@ public class CachedDeserializableFactory {
}
private static boolean cachePrefersPdx() {
- GemFireCacheImpl gfc = GemFireCacheImpl.getInstance();
- if (gfc != null) {
- return gfc.getPdxReadSerialized();
- }
- return false;
+ InternalCache internalCache = GemFireCacheImpl.getInstance();
+ return internalCache != null && internalCache.getPdxReadSerialized();
}
/**
http://git-wip-us.apache.org/repos/asf/geode/blob/557a127b/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java b/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java
index d0f3ec4..ed08175 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java
@@ -15,6 +15,21 @@
package org.apache.geode.internal.cache;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.logging.log4j.Logger;
+
+import org.apache.geode.cache.DiskStore;
import org.apache.geode.cache.EntryDestroyedException;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionDestroyedException;
@@ -29,23 +44,13 @@ import org.apache.geode.internal.cache.persistence.PRPersistentConfig;
import org.apache.geode.internal.cache.wan.parallel.ParallelGatewaySenderQueue;
import org.apache.geode.internal.i18n.LocalizedStrings;
import org.apache.geode.internal.logging.LogService;
-import org.apache.geode.internal.logging.log4j.LocalizedMessage;
-
-import org.apache.logging.log4j.Logger;
-
-import java.io.Serializable;
-import java.util.*;
-import java.util.Map.Entry;
/**
* An utility class to retrieve colocated regions in a colocation hierarchy in various scenarios
- *
- *
+ *
* @since GemFire 6.0
*/
public class ColocationHelper {
-
- /** Logging mechanism for debugging */
private static final Logger logger = LogService.getLogger();
/**
@@ -58,7 +63,6 @@ public class ColocationHelper {
/**
* An utility method to retrieve colocated region of a given partitioned region
*
- * @param partitionedRegion
* @return colocated PartitionedRegion
* @throws IllegalStateException for missing colocated region
* @since GemFire 5.8Beta
@@ -124,7 +128,7 @@ public class ColocationHelper {
String prName = (String) itr.next();
try {
prConf = (PartitionRegionConfig) prRoot.get(prName);
- } catch (EntryDestroyedException ede) {
+ } catch (EntryDestroyedException ignore) {
continue;
}
if (prConf == null) {
@@ -134,7 +138,8 @@ public class ColocationHelper {
}
if (prConf.getColocatedWith() != null) {
if (prConf.getColocatedWith().equals(tempToBeColocatedWith.getFullPath())
- || ("/" + prConf.getColocatedWith()).equals(tempToBeColocatedWith.getFullPath())) {
+ || (Region.SEPARATOR + prConf.getColocatedWith())
+ .equals(tempToBeColocatedWith.getFullPath())) {
colocatedRegions.add(prConf);
tempcolocatedRegions.add(prConf);
}
@@ -149,11 +154,7 @@ public class ColocationHelper {
if (colocatedWithRegionName == null)
break;
else {
- try {
- prConf = (PartitionRegionConfig) prRoot.get(getRegionIdentifier(colocatedWithRegionName));
- } catch (EntryDestroyedException ede) {
- throw ede;
- }
+ prConf = (PartitionRegionConfig) prRoot.get(getRegionIdentifier(colocatedWithRegionName));
if (prConf == null) {
break;
}
@@ -193,12 +194,13 @@ public class ColocationHelper {
boolean hasOfflineChildren = false;
int oldLevel = LocalRegion.setThreadInitLevelRequirement(LocalRegion.ANY_INIT);
try {
- GemFireCacheImpl cache = region.getCache();
- Collection<DiskStoreImpl> stores = cache.listDiskStores();
+ InternalCache cache = region.getCache();
+ Collection<DiskStore> stores = cache.listDiskStores();
// Look through all of the disk stores for offline colocated child regions
- for (DiskStoreImpl diskStore : stores) {
+ for (DiskStore diskStore : stores) {
// Look at all of the partitioned regions.
- for (Map.Entry<String, PRPersistentConfig> entry : diskStore.getAllPRs().entrySet()) {
+ for (Map.Entry<String, PRPersistentConfig> entry : ((DiskStoreImpl) diskStore).getAllPRs()
+ .entrySet()) {
PRPersistentConfig config = entry.getValue();
String childName = entry.getKey();
@@ -275,7 +277,6 @@ public class ColocationHelper {
* getAllColocationRegions(orderPR) --> List{customerPR, shipmentPR}<br>
* getAllColocationRegions(shipmentPR) --> List{customerPR, orderPR}<br>
*
- * @param partitionedRegion
* @return List of all partitioned regions (excluding self) in a colocated chain
* @since GemFire 5.8Beta
*/
@@ -309,7 +310,6 @@ public class ColocationHelper {
/**
* gets local data of colocated regions on a particular data store
*
- * @param partitionedRegion
* @return map of region name to local colocated regions
* @since GemFire 5.8Beta
*/
@@ -367,7 +367,6 @@ public class ColocationHelper {
* getColocatedChildRegions(orderPR) will return List{shipmentPR}<br>
* getColocatedChildRegions(shipmentPR) will return empty List{}<br>
*
- * @param partitionedRegion
* @return list of all child partitioned regions colocated with the region
* @since GemFire 5.8Beta
*/
@@ -387,7 +386,7 @@ public class ColocationHelper {
}
try {
prConf = (PartitionRegionConfig) prRoot.get(prName);
- } catch (EntryDestroyedException ede) {
+ } catch (EntryDestroyedException ignore) {
continue;
}
if (prConf == null) {
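
The hunk above switches from iterating DiskStoreImpl instances to the public DiskStore interface returned by listDiskStores(), downcasting only where the internal getAllPRs() map is needed. The same narrow-cast pattern in isolation (the scan method is illustrative and assumes getAllPRs() is visible to the caller, as it is for ColocationHelper):

    import java.util.Map;

    import org.apache.geode.cache.DiskStore;
    import org.apache.geode.internal.cache.DiskStoreImpl;
    import org.apache.geode.internal.cache.InternalCache;
    import org.apache.geode.internal.cache.persistence.PRPersistentConfig;

    class OfflinePrScanSketch {
      void scan(InternalCache cache) {
        // Iterate using the public interface; cast only for the internal-only call.
        for (DiskStore diskStore : cache.listDiskStores()) {
          Map<String, PRPersistentConfig> prs = ((DiskStoreImpl) diskStore).getAllPRs();
          prs.forEach((name, config) -> System.out.println("persistent PR on disk: " + name));
        }
      }
    }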
http://git-wip-us.apache.org/repos/asf/geode/blob/557a127b/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java
index b1dd003..815b526 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java
@@ -12,7 +12,6 @@
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
-
package org.apache.geode.internal.cache;
import java.io.DataInput;
@@ -29,6 +28,7 @@ import org.apache.geode.CancelException;
import org.apache.geode.DataSerializer;
import org.apache.geode.SystemFailure;
import org.apache.geode.cache.CacheFactory;
+import org.apache.geode.cache.DynamicRegionFactory;
import org.apache.geode.cache.PartitionAttributes;
import org.apache.geode.cache.RegionDestroyedException;
import org.apache.geode.cache.Scope;
@@ -48,6 +48,7 @@ import org.apache.geode.internal.Assert;
import org.apache.geode.internal.InternalDataSerializer;
import org.apache.geode.internal.cache.CacheDistributionAdvisor.CacheProfile;
import org.apache.geode.internal.cache.CacheDistributionAdvisor.InitialImageAdvice;
+import org.apache.geode.internal.cache.partitioned.Bucket;
import org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException;
import org.apache.geode.internal.cache.partitioned.RegionAdvisor;
import org.apache.geode.internal.cache.partitioned.RegionAdvisor.PartitionProfile;
@@ -96,7 +97,6 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
CreateRegionReplyProcessor replyProc = new CreateRegionReplyProcessor(recps);
-
boolean useMcast = false; // multicast is disabled for this message for now
CreateRegionMessage msg = getCreateRegionMessage(recps, replyProc, useMcast);
@@ -118,10 +118,6 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
// This isn't right. We should disable region creation in general, not just
// the remote case here...
// // Similarly, don't allow new regions to be created if the cache is closing
- // GemFireCache cache = (GemFireCache)this.newRegion.getCache();
- // if (cache.isClosing()) {
- // throw new CacheClosedException("Cannot create a region when the cache is closing");
- // }
try {
replyProc.waitForRepliesUninterruptibly();
if (!replyProc.needRetry()) {
@@ -166,15 +162,13 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
return recps;
}
-
-
public InitialImageAdvice getInitialImageAdvice(InitialImageAdvice previousAdvice) {
return newRegion.getCacheDistributionAdvisor().adviseInitialImage(previousAdvice);
}
private Set getAdvice() {
if (this.newRegion instanceof BucketRegion) {
- return ((BucketRegion) this.newRegion).getBucketAdvisor().adviseProfileExchange();
+ return ((Bucket) this.newRegion).getBucketAdvisor().adviseProfileExchange();
} else {
DistributionAdvisee rgn = this.newRegion.getParentAdvisee();
DistributionAdvisor advisor = rgn.getDistributionAdvisor();
@@ -195,7 +189,7 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
}
public void setOnline(InternalDistributedMember target) {
-
+ // nothing
}
class CreateRegionReplyProcessor extends ReplyProcessor21 {
@@ -319,6 +313,7 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
protected String regionPath;
protected CacheProfile profile;
protected int processorId;
+
private transient boolean incompatible = false;
private transient ReplyException replyException;
private transient CacheProfile replyProfile;
@@ -327,7 +322,6 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
protected transient boolean severeAlertCompatible;
private transient boolean skippedCompatibilityChecks;
-
@Override
public int getProcessorId() {
return this.processorId;
@@ -354,7 +348,7 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
// get the region from the path, but do NOT wait on initialization,
// otherwise we could have a distributed deadlock
- GemFireCacheImpl cache = (GemFireCacheImpl) CacheFactory.getInstance(dm.getSystem());
+ InternalCache cache = (InternalCache) CacheFactory.getInstance(dm.getSystem());
// Fix for bug 42051 - Discover any regions that are in the process
// of being destroyed
@@ -389,15 +383,15 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
}
}
}
- } catch (PRLocallyDestroyedException fre) {
+ } catch (PRLocallyDestroyedException ignore) {
if (logger.isDebugEnabled()) {
logger.debug("<Region Locally Destroyed> {}", this);
}
- } catch (RegionDestroyedException e) {
+ } catch (RegionDestroyedException ignore) {
if (logger.isDebugEnabled()) {
logger.debug("<RegionDestroyed> {}", this);
}
- } catch (CancelException e) {
+ } catch (CancelException ignore) {
if (logger.isDebugEnabled()) {
logger.debug("<CancelException> {}", this);
}
@@ -445,8 +439,6 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
dm.putOutgoing(replyMsg);
if (lclRgn instanceof PartitionedRegion)
((PartitionedRegion) lclRgn).sendIndexCreationMsg(this.getSender());
-
-
}
}
@@ -549,15 +541,13 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
&& this.concurrencyChecksEnabled != otherCCEnabled) {
result =
LocalizedStrings.CreateRegionProcessor_CANNOT_CREATE_REGION_0_CCENABLED_1_BECAUSE_ANOTHER_CACHE_HAS_THE_SAME_REGION_CCENABLED_2
- .toLocalizedString(
- new Object[] {regionPath, Boolean.valueOf(this.concurrencyChecksEnabled), myId,
- Boolean.valueOf(otherCCEnabled)});
+ .toLocalizedString(regionPath, this.concurrencyChecksEnabled, myId, otherCCEnabled);
}
Set<String> otherGatewaySenderIds = ((LocalRegion) rgn).getGatewaySenderIds();
Set<String> myGatewaySenderIds = profile.gatewaySenderIds;
if (!otherGatewaySenderIds.equals(myGatewaySenderIds)) {
- if (!rgn.getFullPath().contains(DynamicRegionFactoryImpl.dynamicRegionListName)) {
+ if (!rgn.getFullPath().contains(DynamicRegionFactory.dynamicRegionListName)) {
result =
LocalizedStrings.CreateRegionProcessor_CANNOT_CREATE_REGION_0_WITH_1_GATEWAY_SENDER_IDS_BECAUSE_ANOTHER_CACHE_HAS_THE_SAME_REGION_WITH_2_GATEWAY_SENDER_IDS
.toLocalizedString(this.regionPath, myGatewaySenderIds, otherGatewaySenderIds);
@@ -588,8 +578,7 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
if (profile.scope != otherScope) {
result =
LocalizedStrings.CreateRegionProcessor_CANNOT_CREATE_REGION_0_WITH_1_SCOPE_BECAUSE_ANOTHER_CACHE_HAS_SAME_REGION_WITH_2_SCOPE
- .toLocalizedString(
- new Object[] {this.regionPath, profile.scope, myId, otherScope});
+ .toLocalizedString(this.regionPath, profile.scope, myId, otherScope);
}
}
@@ -605,8 +594,7 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
&& profile.isOffHeap != otherIsOffHeap) {
result =
LocalizedStrings.CreateRegionProcessor_CANNOT_CREATE_REGION_0_WITH_OFF_HEAP_EQUALS_1_BECAUSE_ANOTHER_CACHE_2_HAS_SAME_THE_REGION_WITH_OFF_HEAP_EQUALS_3
- .toLocalizedString(
- new Object[] {this.regionPath, profile.isOffHeap, myId, otherIsOffHeap});
+ .toLocalizedString(this.regionPath, profile.isOffHeap, myId, otherIsOffHeap);
}
String cspResult = null;
@@ -652,47 +640,6 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
profile, result);
}
- // if (profile.membershipAttributes != null) {
- // // check to see if:
- // // 1. we do not have DataPolicy that will take queued msgs
- // // 2. the profile has queuing turned on
- // // 3. we are playing one of the queued roles
- // if (!rgn.getAttributes().getDataPolicy().withQueuedMessages()) {
- // if (profile.membershipAttributes.getLossAction().isAllAccessWithQueuing()) {
- // Set myRoles = rgn.getSystem().getDistributedMember().getRoles();
- // if (!myRoles.isEmpty()) {
- // Set intersection = new HashSet(myRoles);
- // intersection.retainAll(profile.membershipAttributes.getRequiredRoles());
- // if (!intersection.isEmpty()) {
- // result = "Cannot create region " + regionPath
- // + " with queuing because the region already exists"
- // + " with a data-policy " + rgn.getAttributes().getDataPolicy()
- // + " that does not allow queued messages with the roles "
- // + intersection;
- // }
- // }
- // }
- // }
- // } else {
- // // see if we are queuing on this region
- // MembershipAttributes ra = rgn.getMembershipAttributes();
- // if (ra != null && ra.hasRequiredRoles()
- // && ra.getLossAction().isAllAccessWithQueuing()) {
- // // we are queuing so make sure this other guy allows queued messages
- // // if he is playing a role we queue for.
- // if (!profile.dataPolicy.withQueuedMessages()) {
- // Set intersection = new HashSet(ra.getRequiredRoles());
- // intersection.retainAll(profile.getDistributedMember().getRoles());
- // if (!intersection.isEmpty()) {
- // result = "Cannot create region " + regionPath
- // + " with a data-policy " + profile.dataPolicy
- // + " that does not allow queued messages because the region"
- // + " already exists with queuing enabled for roles " + intersection;
- // }
- // }
- // }
- // }
-
return result;
}
@@ -808,16 +755,16 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
@Override
public String toString() {
- StringBuffer buff = new StringBuffer();
- buff.append("CreateRegionMessage (region='");
- buff.append(this.regionPath);
- buff.append("'; processorId=");
- buff.append(this.processorId);
- buff.append("; concurrencyChecksEnabled=").append(this.concurrencyChecksEnabled);
- buff.append("; profile=");
- buff.append(this.profile);
- buff.append(")");
- return buff.toString();
+ StringBuilder sb = new StringBuilder();
+ sb.append("CreateRegionMessage (region='");
+ sb.append(this.regionPath);
+ sb.append("'; processorId=");
+ sb.append(this.processorId);
+ sb.append("; concurrencyChecksEnabled=").append(this.concurrencyChecksEnabled);
+ sb.append("; profile=");
+ sb.append(this.profile);
+ sb.append(")");
+ return sb.toString();
}
}
@@ -848,8 +795,6 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
public void fromData(DataInput in) throws IOException, ClassNotFoundException {
super.fromData(in);
if (in.readBoolean()) {
- // this.profile = new CacheProfile();
- // this.profile.fromData(in);
this.profile = (CacheProfile) DataSerializer.readObject(in);
}
int size = in.readInt();
@@ -879,7 +824,6 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
super.toData(out);
out.writeBoolean(this.profile != null);
if (this.profile != null) {
- // this.profile.toData(out);
DataSerializer.writeObject(this.profile, out);
}
if (this.bucketProfiles == null) {
@@ -914,7 +858,7 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
@Override
public String toString() {
- StringBuffer buff = new StringBuffer();
+ StringBuilder buff = new StringBuilder();
buff.append("CreateRegionReplyMessage");
buff.append("(sender=").append(getSender());
buff.append("; processorId=");
http://git-wip-us.apache.org/repos/asf/geode/blob/557a127b/geode-core/src/main/java/org/apache/geode/internal/cache/DestroyPartitionedRegionMessage.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DestroyPartitionedRegionMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DestroyPartitionedRegionMessage.java
index b36cd2a..5914ab5 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DestroyPartitionedRegionMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DestroyPartitionedRegionMessage.java
@@ -169,7 +169,7 @@ public final class DestroyPartitionedRegionMessage extends PartitionMessage {
}
@Override
- protected void appendFields(StringBuffer buff) {
+ protected void appendFields(StringBuilder buff) {
super.appendFields(buff);
buff.append("; cbArg=").append(this.cbArg).append("; op=").append(this.op);
buff.append("; prSerial=" + prSerial);
http://git-wip-us.apache.org/repos/asf/geode/blob/557a127b/geode-core/src/main/java/org/apache/geode/internal/cache/DestroyRegionOperation.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DestroyRegionOperation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DestroyRegionOperation.java
index 33cfa09..3cc988f 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DestroyRegionOperation.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DestroyRegionOperation.java
@@ -140,11 +140,11 @@ public class DestroyRegionOperation extends DistributedCacheOperation {
protected HashMap subregionSerialNumbers;
protected boolean notifyOfRegionDeparture;
+
/**
* true if need to automatically recreate region, and mark destruction as a reinitialization
*/
protected transient LocalRegion lockRoot = null; // used for early destroy
- // lock acquisition
@Override
protected InternalCacheEvent createEvent(DistributedRegion rgn) throws EntryNotFoundException {
@@ -158,9 +158,8 @@ public class DestroyRegionOperation extends DistributedCacheOperation {
}
protected RegionEventImpl createRegionEvent(DistributedRegion rgn) {
- RegionEventImpl event = new RegionEventImpl(rgn, getOperation(), this.callbackArg,
- true /* originRemote */, getSender());
- return event;
+ return new RegionEventImpl(rgn, getOperation(), this.callbackArg, true /* originRemote */,
+ getSender());
}
private Runnable destroyOp(final DistributionManager dm, final LocalRegion lclRgn,
@@ -183,12 +182,12 @@ public class DestroyRegionOperation extends DistributedCacheOperation {
advisee =
PartitionedRegionHelper.getProxyBucketRegion(GemFireCacheImpl.getInstance(),
regionPath, waitForBucketInitializationToComplete);
- } catch (PRLocallyDestroyedException e) {
+ } catch (PRLocallyDestroyedException ignore) {
// region not found - it's been destroyed
- } catch (RegionDestroyedException e) {
+ } catch (RegionDestroyedException ignore) {
// ditto
} catch (PartitionedRegionException e) {
- if (e.getMessage().indexOf("destroyed") == -1) {
+ if (!e.getMessage().contains("destroyed")) {
throw e;
}
// region failed registration & is unusable
@@ -228,11 +227,11 @@ public class DestroyRegionOperation extends DistributedCacheOperation {
}
doRegionDestroy(event);
- } catch (RegionDestroyedException e) {
+ } catch (RegionDestroyedException ignore) {
logger.debug("{} Region destroyed: nothing to do", this);
- } catch (CancelException e) {
+ } catch (CancelException ignore) {
logger.debug("{} Cancelled: nothing to do", this);
- } catch (EntryNotFoundException e) {
+ } catch (EntryNotFoundException ignore) {
logger.debug("{} Entry not found, nothing to do", this);
} catch (VirtualMachineError err) {
SystemFailure.initiateFailure(err);
@@ -292,7 +291,7 @@ public class DestroyRegionOperation extends DistributedCacheOperation {
// pool, the entry
// update is allowed to complete.
dm.getWaitingThreadPool().execute(destroyOp(dm, lclRgn, sendReply));
- } catch (RejectedExecutionException e) {
+ } catch (RejectedExecutionException ignore) {
// rejected while trying to execute destroy thread
// must be shutting down, just quit
}
@@ -303,19 +302,19 @@ public class DestroyRegionOperation extends DistributedCacheOperation {
// shared region, since another cache may
// have already destroyed it in shared memory, in which our listeners
// still need to be called and java region object cleaned up.
- GemFireCacheImpl c = (GemFireCacheImpl) CacheFactory.getInstance(sys);
+ InternalCache cache = (InternalCache) CacheFactory.getInstance(sys);
// only get the region while holding the appropriate destroy lock.
// this prevents us from getting a "stale" region
if (getOperation().isDistributed()) {
String rootName = GemFireCacheImpl.parsePath(path)[0];
- this.lockRoot = (LocalRegion) c.getRegion(rootName);
+ this.lockRoot = (LocalRegion) cache.getRegion(rootName);
if (this.lockRoot == null)
return null;
this.lockRoot.acquireDestroyLock();
}
- return (LocalRegion) c.getRegion(path);
+ return (LocalRegion) cache.getRegion(path);
}
private void disableRegionDepartureNotification() {
@@ -411,15 +410,15 @@ public class DestroyRegionOperation extends DistributedCacheOperation {
rgn.basicDestroyRegion(ev, false /* cacheWrite */, false /* lock */,
true/* cacheCallbacks */);
}
- } catch (CacheWriterException e) {
+ } catch (CacheWriterException ignore) {
throw new Error(
LocalizedStrings.DestroyRegionOperation_CACHEWRITER_SHOULD_NOT_HAVE_BEEN_CALLED
.toLocalizedString());
- } catch (TimeoutException e) {
+ } catch (TimeoutException ignore) {
throw new Error(
LocalizedStrings.DestroyRegionOperation_DISTRIBUTEDLOCK_SHOULD_NOT_HAVE_BEEN_ACQUIRED
.toLocalizedString());
- } catch (RejectedExecutionException e) {
+ } catch (RejectedExecutionException ignore) {
// rejected while trying to execute recreate thread
// must be shutting down, so what we were trying to do must not be
// important anymore, so just quit
@@ -468,13 +467,13 @@ public class DestroyRegionOperation extends DistributedCacheOperation {
}
public static final class DestroyRegionWithContextMessage extends DestroyRegionMessage {
+
protected transient Object context;
@Override
final public RegionEventImpl createRegionEvent(DistributedRegion rgn) {
- ClientRegionEventImpl event = new ClientRegionEventImpl(rgn, getOperation(), this.callbackArg,
+ return new ClientRegionEventImpl(rgn, getOperation(), this.callbackArg,
true /* originRemote */, getSender(), (ClientProxyMembershipID) this.context);
- return event;
}
@Override
http://git-wip-us.apache.org/repos/asf/geode/blob/557a127b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskEntry.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskEntry.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskEntry.java
index bf7c4d2..f78a6c1 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskEntry.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskEntry.java
@@ -27,7 +27,6 @@ import org.apache.geode.internal.ByteArrayDataInput;
import org.apache.geode.internal.HeapDataOutputStream;
import org.apache.geode.internal.Version;
import org.apache.geode.internal.cache.DiskStoreImpl.AsyncDiskEntry;
-import org.apache.geode.internal.cache.Token.Tombstone;
import org.apache.geode.internal.cache.lru.EnableLRU;
import org.apache.geode.internal.cache.lru.LRUClockNode;
import org.apache.geode.internal.cache.lru.LRUEntry;
@@ -52,18 +51,14 @@ import org.apache.geode.internal.util.BlobHelper;
* provides accessor and mutator methods for a disk entry's state. This allows us to abstract all of
* the interesting behavior into a {@linkplain DiskEntry.Helper helper class} that we only need to
* implement once.
- *
- * <P>
- *
- * Each <code>DiskEntry</code> has a unique <code>id</code> that is used by the {@link DiskRegion}
- * to identify the key/value pair. Before the disk entry is written to disk, the value of the
- * <code>id</code> is {@link DiskRegion#INVALID_ID invalid}. Once the object has been written to
- * disk, the <code>id</code> is a positive number. If the value is {@linkplain Helper#update
- * updated}, then the <code>id</code> is negated to signify that the value on disk is dirty.
+ * <p>
+ * Each {@code DiskEntry} has a unique {@code id} that is used by the {@link DiskRegion} to identify
+ * the key/value pair. Before the disk entry is written to disk, the value of the {@code id} is
+ * {@link DiskRegion#INVALID_ID invalid}. Once the object has been written to disk, the {@code id}
+ * is a positive number. If the value is {@linkplain Helper#update updated}, then the {@code id} is
+ * negated to signify that the value on disk is dirty.
*
* @see DiskRegion
- *
- *
* @since GemFire 3.2
*/
public interface DiskEntry extends RegionEntry {
@@ -78,8 +73,6 @@ public interface DiskEntry extends RegionEntry {
/**
* In some cases we need to do something just before we drop the value from a DiskEntry that is
* being moved (i.e. overflowed) to disk.
- *
- * @param context
*/
public void handleValueOverflow(RegionEntryContext context);
@@ -90,12 +83,10 @@ public interface DiskEntry extends RegionEntry {
public boolean isRemovedFromDisk();
/**
- * Returns the id of this <code>DiskEntry</code>
+ * Returns the id of this {@code DiskEntry}
*/
public DiskId getDiskId();
- public void _removePhase1();
-
public int updateAsyncEntrySize(EnableLRU capacityController);
public DiskEntry getPrev();
@@ -119,10 +110,8 @@ public interface DiskEntry extends RegionEntry {
*/
public static final byte[] TOMBSTONE_BYTES = new byte[0];
- /////////////////////// Inner Classes //////////////////////
-
/**
- * A Helper class for performing functions common to all <code>DiskEntry</code>s.
+ * A Helper class for performing functions common to all {@code DiskEntry}s.
*/
public static class Helper {
private static final Logger logger = LogService.getLogger();
@@ -185,12 +174,10 @@ public interface DiskEntry extends RegionEntry {
}
}
-
/**
* Get the value of an entry that is on disk without faulting it in . It checks for the presence
* in the buffer also. This method is used for concurrent map operations and CQ processing
*
- * @throws DiskAccessException
* @since GemFire 5.1
*/
static Object getValueOnDiskOrBuffer(DiskEntry entry, DiskRegion dr,
@@ -223,8 +210,8 @@ public interface DiskEntry extends RegionEntry {
synchronized (syncObj) {
if (did != null && did.isPendingAsync()) {
@Retained
- Object v = entry._getValueRetain(context, true); // TODO:KIRK:OK Rusty had Object v =
- // entry.getValueWithContext(context);
+ Object v = entry._getValueRetain(context, true);
+
if (Token.isRemovedFromDisk(v)) {
v = null;
}
@@ -309,9 +296,11 @@ public interface DiskEntry extends RegionEntry {
entry.setLastModified(mgr, de.getLastModified());
ReferenceCountHelper.setReferenceCountOwner(entry);
- v = de._getValueRetain(context, true); // OFFHEAP copied to heap entry; todo allow entry
- // to refer to offheap since it will be copied to
- // network.
+
+ // OFFHEAP copied to heap entry;
+ // TODO: allow entry to refer to offheap since it will be copied to network.
+ v = de._getValueRetain(context, true);
+
ReferenceCountHelper.setReferenceCountOwner(null);
if (v == null) {
if (did == null) {
@@ -331,7 +320,7 @@ public interface DiskEntry extends RegionEntry {
BytesAndBits bb = null;
try {
bb = dr.getBytesAndBits(did, false);
- } catch (DiskAccessException dae) {
+ } catch (DiskAccessException ignore) {
return false;
}
if (EntryBits.isInvalid(bb.getBits())) {
@@ -367,8 +356,7 @@ public interface DiskEntry extends RegionEntry {
Object tmp = cd.getValue();
if (tmp instanceof byte[]) {
- byte[] bb = (byte[]) tmp;
- entry.value = bb;
+ entry.value = (byte[]) tmp;
entry.setSerialized(true);
} else {
try {
@@ -378,11 +366,10 @@ public interface DiskEntry extends RegionEntry {
entry.value = hdos;
entry.setSerialized(true);
} catch (IOException e) {
- RuntimeException e2 = new IllegalArgumentException(
+ throw new IllegalArgumentException(
LocalizedStrings.DiskEntry_AN_IOEXCEPTION_WAS_THROWN_WHILE_SERIALIZING
- .toLocalizedString());
- e2.initCause(e);
- throw e2;
+ .toLocalizedString(),
+ e);
}
}
}
@@ -460,7 +447,7 @@ public interface DiskEntry extends RegionEntry {
} else {
entry.setValueWithContext(drv,
entry.prepareValueForCache((RegionEntryContext) r, re.getValue(), false));
- if (!Tombstone.isInvalidOrRemoved(re.getValue())) {
+ if (!Token.isInvalidOrRemoved(re.getValue())) {
updateStats(drv, r, 1/* InVM */, 0/* OnDisk */, 0);
}
}
@@ -574,7 +561,7 @@ public interface DiskEntry extends RegionEntry {
if (this.bytes == null) {
return "null";
}
- StringBuffer sb = new StringBuffer();
+ StringBuilder sb = new StringBuilder();
int len = getLength();
for (int i = 0; i < len; i++) {
sb.append(this.bytes[i]).append(", ");
@@ -808,8 +795,6 @@ public interface DiskEntry extends RegionEntry {
/**
* Writes the key/value object stored in the given entry to disk
*
- * @throws RegionClearedException
- *
* @see DiskRegion#put
*/
private static void writeToDisk(DiskEntry entry, LocalRegion region, boolean async,
@@ -833,8 +818,6 @@ public interface DiskEntry extends RegionEntry {
/**
* Updates the value of the disk entry with a new value. This allows us to free up disk space in
* the non-backup case.
- *
- * @throws RegionClearedException
*/
public static void update(DiskEntry entry, LocalRegion region, Object newValue,
EntryEventImpl event) throws RegionClearedException {
@@ -892,7 +875,7 @@ public interface DiskEntry extends RegionEntry {
if (caughtCacheClosed) {
// 47616: not to set the value to be removedFromDisk since it failed to persist
} else {
- // Asif Ensure that the value is rightly set despite clear so
+ // Ensure that the value is rightly set despite clear so
// that it can be distributed correctly
entry.setValueWithContext(region, newValue); // OFFHEAP newValue was already
// preparedForCache
@@ -1010,12 +993,12 @@ public interface DiskEntry extends RegionEntry {
@Retained
public static Object getValueOffHeapOrDiskWithoutFaultIn(DiskEntry entry, LocalRegion region) {
@Retained
- Object v = entry._getValueRetain(region, true); // TODO:KIRK:OK Object v =
- // entry.getValueWithContext(region);
+ Object v = entry._getValueRetain(region, true);
+
if (v == null || Token.isRemovedFromDisk(v) && !region.isIndexCreationThread()) {
synchronized (entry) {
- v = entry._getValueRetain(region, true); // TODO:KIRK:OK v =
- // entry.getValueWithContext(region);
+ v = entry._getValueRetain(region, true);
+
if (v == null) {
v = Helper.getOffHeapValueOnDiskOrBuffer(entry, region.getDiskRegion(), region);
}
@@ -1024,24 +1007,10 @@ public interface DiskEntry extends RegionEntry {
if (Token.isRemovedFromDisk(v)) {
// fix for bug 31800
v = null;
- // } else if (v instanceof ByteSource) {
- // // If the ByteSource contains a Delta or ListOfDelta then we want to deserialize it
- // Object deserVal = ((CachedDeserializable)v).getDeserializedForReading();
- // if (deserVal != v) {
- // OffHeapHelper.release(v);
- // v = deserVal;
- // }
}
return v;
}
- /**
- *
- * @param entry
- * @param region
- * @return Value
- * @throws DiskAccessException
- */
public static Object faultInValue(DiskEntry entry, LocalRegion region) {
return faultInValue(entry, region, false);
}
@@ -1058,8 +1027,8 @@ public interface DiskEntry extends RegionEntry {
private static Object faultInValue(DiskEntry entry, LocalRegion region, boolean retainResult) {
DiskRegion dr = region.getDiskRegion();
@Retained
- Object v = entry._getValueRetain(region, true); // TODO:KIRK:OK Object v =
- // entry.getValueWithContext(region);
+ Object v = entry._getValueRetain(region, true);
+
boolean lruFaultedIn = false;
boolean done = false;
try {
@@ -1071,7 +1040,7 @@ public interface DiskEntry extends RegionEntry {
// See if it is pending async because of a faultOut.
// If so then if we are not a backup then we can unschedule the pending async.
// In either case we need to do the lruFaultIn logic.
- boolean evicted = ((LRUEntry) entry).testEvicted();
+ boolean evicted = ((LRUClockNode) entry).testEvicted();
if (evicted) {
if (!dr.isBackup()) {
// @todo do we also need a bit that tells us if it is in the async queue?
@@ -1086,8 +1055,8 @@ public interface DiskEntry extends RegionEntry {
}
if (!done && (v == null || Token.isRemovedFromDisk(v) && !region.isIndexCreationThread())) {
synchronized (entry) {
- v = entry._getValueRetain(region, true); // TODO:KIRK:OK v =
- // entry.getValueWithContext(region);
+ v = entry._getValueRetain(region, true);
+
if (v == null) {
v = readValueFromDisk(entry, region);
if (entry instanceof LRUEntry) {
@@ -1126,8 +1095,7 @@ public interface DiskEntry extends RegionEntry {
DiskId did = entry.getDiskId();
if (did != null) {
Object value = null;
- DiskRecoveryStore region = recoveryStore;
- DiskRegionView dr = region.getDiskRegionView();
+ DiskRegionView dr = recoveryStore.getDiskRegionView();
dr.acquireReadLock();
try {
synchronized (did) {
@@ -1135,7 +1103,7 @@ public interface DiskEntry extends RegionEntry {
if (oplogId == did.getOplogId()) {
value = getValueFromDisk(dr, did, in);
if (value != null) {
- setValueOnFaultIn(value, did, entry, dr, region);
+ setValueOnFaultIn(value, did, entry, dr, recoveryStore);
}
}
}
@@ -1194,7 +1162,7 @@ public interface DiskEntry extends RegionEntry {
try {
if (recoveryStore.getEvictionAttributes() != null
&& recoveryStore.getEvictionAttributes().getAlgorithm().isLIFO()) {
- ((VMLRURegionMap) recoveryStore.getRegionMap()).updateStats();
+ ((AbstractLRURegionMap) recoveryStore.getRegionMap()).updateStats();
return;
}
// this must be done after releasing synchronization
@@ -1314,24 +1282,18 @@ public interface DiskEntry extends RegionEntry {
}
/**
- * Writes the value of this <code>DiskEntry</code> to disk and <code>null</code> s out the
- * reference to the value to free up VM space.
+ * Writes the value of this {@code DiskEntry} to disk and {@code null}s out the reference to
+ * the value to free up VM space.
* <p>
* Note that if the value had already been written to disk, it is not written again.
* <p>
* Caller must synchronize on entry and it is assumed the entry is evicted
- *
- * see #writeToDisk
- *
- * @throws RegionClearedException
*/
public static int overflowToDisk(DiskEntry entry, LocalRegion region, EnableLRU ccHelper)
throws RegionClearedException {
DiskRegion dr = region.getDiskRegion();
- final int oldSize = region.calculateRegionEntryValueSize(entry);;
- // Asif:Get diskID . If it is null, it implies it is
- // overflow only mode.
- // long id = entry.getDiskId().getKeyId();
+ final int oldSize = region.calculateRegionEntryValueSize(entry);
+ // Get diskID. If it is null, it implies it is overflow only mode.
DiskId did = entry.getDiskId();
if (did == null) {
((LRUEntry) entry).setDelayedDiskId(region);
@@ -1348,7 +1310,7 @@ public interface DiskEntry extends RegionEntry {
return 0;
}
- // TODO:Asif: Check if we need to overflow even when id is = 0
+ // TODO: Check if we need to overflow even when id is = 0
boolean wasAlreadyPendingAsync = did.isPendingAsync();
if (did.needsToBeWritten()) {
if (dr.isSync()) {
@@ -1474,7 +1436,7 @@ public interface DiskEntry extends RegionEntry {
// Only setValue to null if this was an evict.
// We could just be a backup that is writing async.
if (!Token.isInvalid(entryVal) && (entryVal != Token.TOMBSTONE)
- && entry instanceof LRUEntry && ((LRUEntry) entry).testEvicted()) {
+ && entry instanceof LRUEntry && ((LRUClockNode) entry).testEvicted()) {
// Moved this here to fix bug 40116.
region.updateSizeOnEvict(entry.getKey(), entryValSize);
updateStats(dr, region, -1/* InVM */, 1/* OnDisk */, did.getValueLength());
@@ -1603,11 +1565,6 @@ public interface DiskEntry extends RegionEntry {
return result;
}
- /**
- * @param entry
- * @param region
- * @param tag
- */
public static void updateVersionOnly(DiskEntry entry, LocalRegion region, VersionTag tag) {
DiskRegion dr = region.getDiskRegion();
if (!dr.isBackup()) {
@@ -1709,7 +1666,6 @@ public interface DiskEntry extends RegionEntry {
}
/**
- *
* @return byte indicating the user bits. The correct value is returned only in the specific
* case of entry recovered from oplog ( & not rolled to Htree) & the RECOVER_VALUES flag
* is false . In other cases the exact value is not needed
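
The fault-in hunks above follow a double-checked read: the value is first fetched without a lock, and only if it is missing does the code synchronize on the entry, re-check, and then read from disk. A minimal, self-contained sketch of that pattern follows; CacheEntry, getRetainedValue and readValueFromDisk are simplified stand-ins, not Geode's actual DiskEntry API.

import java.util.function.Supplier;

final class FaultInSketch {
  interface CacheEntry {
    Object getRetainedValue(); // may return null if the value was evicted to disk
    void setValue(Object value);
  }

  static Object faultIn(CacheEntry entry, Supplier<Object> readValueFromDisk) {
    Object v = entry.getRetainedValue();      // fast path: no lock
    if (v == null) {
      synchronized (entry) {                  // slow path: lock, then re-check
        v = entry.getRetainedValue();
        if (v == null) {
          v = readValueFromDisk.get();        // only the winning thread reads from disk
          entry.setValue(v);
        }
      }
    }
    return v;
  }
}
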
http://git-wip-us.apache.org/repos/asf/geode/blob/557a127b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskRegion.java
index 6d4b598..f8b8289 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskRegion.java
@@ -279,12 +279,12 @@ public class DiskRegion extends AbstractDiskRegion {
private void destroyOldTomstones(final DiskRecoveryStore drs) {
// iterate over all region entries in drs
drs.foreachRegionEntry(new RegionEntryCallback() {
- public void handleRegionEntry(RegionEntry re) {
- DiskEntry de = (DiskEntry) re;
+ public void handleRegionEntry(RegionEntry regionEntry) {
+ DiskEntry de = (DiskEntry) regionEntry;
synchronized (de) {
DiskId id = de.getDiskId();
- if (id != null && re.isTombstone()) {
- VersionStamp stamp = re.getVersionStamp();
+ if (id != null && regionEntry.isTombstone()) {
+ VersionStamp stamp = regionEntry.getVersionStamp();
if (getRegionVersionVector().isTombstoneTooOld(stamp.getMemberID(),
stamp.getRegionVersion())) {
drs.destroyRecoveredEntry(de.getKey());
@@ -299,8 +299,8 @@ public class DiskRegion extends AbstractDiskRegion {
private void destroyRemainingRecoveredEntries(final DiskRecoveryStore drs) {
// iterate over all region entries in drs
drs.foreachRegionEntry(new RegionEntryCallback() {
- public void handleRegionEntry(RegionEntry re) {
- DiskEntry de = (DiskEntry) re;
+ public void handleRegionEntry(RegionEntry regionEntry) {
+ DiskEntry de = (DiskEntry) regionEntry;
synchronized (de) {
DiskId id = de.getDiskId();
if (id != null) {
@@ -320,8 +320,8 @@ public class DiskRegion extends AbstractDiskRegion {
public void resetRecoveredEntries(final DiskRecoveryStore drs) {
// iterate over all region entries in drs
drs.foreachRegionEntry(new RegionEntryCallback() {
- public void handleRegionEntry(RegionEntry re) {
- DiskEntry de = (DiskEntry) re;
+ public void handleRegionEntry(RegionEntry regionEntry) {
+ DiskEntry de = (DiskEntry) regionEntry;
synchronized (de) {
DiskId id = de.getDiskId();
if (id != null) {
@@ -770,13 +770,13 @@ public class DiskRegion extends AbstractDiskRegion {
return;
}
region.foreachRegionEntry(new RegionEntryCallback() {
- public void handleRegionEntry(RegionEntry re) {
- DiskEntry de = (DiskEntry) re;
+ public void handleRegionEntry(RegionEntry regionEntry) {
+ DiskEntry de = (DiskEntry) regionEntry;
DiskId id = de.getDiskId();
if (id != null) {
synchronized (id) {
- re.setValueToNull(); // TODO why call _setValue twice in a row?
- re.removePhase2();
+ regionEntry.setValueToNull(); // TODO why call _setValue twice in a row?
+ regionEntry.removePhase2();
id.unmarkForWriting();
if (EntryBits.isNeedsValue(id.getUserBits())) {
long oplogId = id.getOplogId();
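
The DiskRegion hunks above all share one shape: the recovery store walks every region entry through a RegionEntryCallback, and each callback synchronizes on the entry before inspecting its disk state. A compile-ready sketch of that callback iteration, assuming much simpler stand-in interfaces than Geode's real RegionEntry and RegionEntryCallback, looks like this:

import java.util.List;

final class EntryIterationSketch {
  interface RegionEntry { boolean isTombstone(); }
  interface RegionEntryCallback { void handleRegionEntry(RegionEntry regionEntry); }

  // The store drives the iteration; the callback owns the per-entry work.
  static void foreachRegionEntry(List<RegionEntry> entries, RegionEntryCallback callback) {
    for (RegionEntry regionEntry : entries) {
      callback.handleRegionEntry(regionEntry);
    }
  }

  public static void main(String[] args) {
    RegionEntry tombstone = () -> true;       // RegionEntry has a single method, so a lambda works
    foreachRegionEntry(List.of(tombstone), regionEntry -> {
      synchronized (regionEntry) {            // mirrors the per-entry synchronization above
        if (regionEntry.isTombstone()) {
          System.out.println("would destroy recovered tombstone " + regionEntry);
        }
      }
    });
  }
}
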
http://git-wip-us.apache.org/repos/asf/geode/blob/557a127b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreBackup.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreBackup.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreBackup.java
index 6f50c9f..309dea3 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreBackup.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreBackup.java
@@ -26,8 +26,6 @@ import org.apache.geode.internal.cache.persistence.BackupInspector;
* oplogs that still need to be backed up, along with the lists of oplog files that should be
* deleted when the oplog is backed up. See
* {@link DiskStoreImpl#startBackup(File, BackupInspector, org.apache.geode.internal.cache.persistence.RestoreScript)}
- *
- *
*/
public class DiskStoreBackup {
http://git-wip-us.apache.org/repos/asf/geode/blob/557a127b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreFactoryImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreFactoryImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreFactoryImpl.java
index 000bf0d..7a7044b 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreFactoryImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreFactoryImpl.java
@@ -18,7 +18,6 @@ import java.io.File;
import java.util.Arrays;
import org.apache.geode.GemFireIOException;
-import org.apache.geode.cache.Cache;
import org.apache.geode.cache.DiskStoreFactory;
import org.apache.geode.cache.DiskStore;
import org.apache.geode.distributed.internal.ResourceEvent;
@@ -35,14 +34,15 @@ import org.apache.geode.pdx.internal.TypeRegistry;
* @since GemFire prPersistSprint2
*/
public class DiskStoreFactoryImpl implements DiskStoreFactory {
- private final Cache cache;
+
+ private final InternalCache cache;
private final DiskStoreAttributes attrs = new DiskStoreAttributes();
- public DiskStoreFactoryImpl(Cache cache) {
+ public DiskStoreFactoryImpl(InternalCache cache) {
this.cache = cache;
}
- public DiskStoreFactoryImpl(Cache cache, DiskStoreAttributes attrs) {
+ public DiskStoreFactoryImpl(InternalCache cache, DiskStoreAttributes attrs) {
this.attrs.name = attrs.name;
setAutoCompact(attrs.getAutoCompact());
setAllowForceCompaction(attrs.getAllowForceCompaction());
@@ -90,13 +90,13 @@ public class DiskStoreFactoryImpl implements DiskStoreFactory {
if (compactionThreshold < 0) {
throw new IllegalArgumentException(
LocalizedStrings.DiskWriteAttributesImpl_0_HAS_TO_BE_POSITIVE_NUMBER_AND_THE_VALUE_GIVEN_1_IS_NOT_ACCEPTABLE
- .toLocalizedString(new Object[] {CacheXml.COMPACTION_THRESHOLD,
- Integer.valueOf(compactionThreshold)}));
+ .toLocalizedString(
+ new Object[] {CacheXml.COMPACTION_THRESHOLD, compactionThreshold}));
} else if (compactionThreshold > 100) {
throw new IllegalArgumentException(
LocalizedStrings.DiskWriteAttributesImpl_0_HAS_TO_BE_LESS_THAN_2_BUT_WAS_1
- .toLocalizedString(new Object[] {CacheXml.COMPACTION_THRESHOLD,
- Integer.valueOf(compactionThreshold), Integer.valueOf(100)}));
+ .toLocalizedString(
+ new Object[] {CacheXml.COMPACTION_THRESHOLD, compactionThreshold, 100}));
}
this.attrs.compactionThreshold = compactionThreshold;
return this;
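
The Integer.valueOf and Long.valueOf removals in this file rely on autoboxing: when a primitive is placed into an Object[] (or passed to an Object... parameter such as the one toLocalizedString accepts), the compiler boxes it automatically, so both spellings produce the same arguments. A small standalone demonstration, with String.format standing in for toLocalizedString:

public class AutoboxingSketch {
  public static void main(String[] args) {
    int compactionThreshold = 150;
    Object[] explicit = new Object[] {Integer.valueOf(compactionThreshold), Integer.valueOf(100)};
    Object[] autoboxed = new Object[] {compactionThreshold, 100};   // compiler boxes automatically
    System.out.println(String.format("%s has to be less than %s but was %s",
        "compaction-threshold", autoboxed[1], autoboxed[0]));
    System.out.println(explicit[0].equals(autoboxed[0]));           // prints: true
  }
}
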
@@ -106,7 +106,7 @@ public class DiskStoreFactoryImpl implements DiskStoreFactory {
if (timeInterval < 0) {
throw new IllegalArgumentException(
LocalizedStrings.DiskWriteAttributesFactory_TIME_INTERVAL_SPECIFIED_HAS_TO_BE_A_NONNEGATIVE_NUMBER_AND_THE_VALUE_GIVEN_0_IS_NOT_ACCEPTABLE
- .toLocalizedString(Long.valueOf(timeInterval)));
+ .toLocalizedString(timeInterval));
}
this.attrs.timeInterval = timeInterval;
return this;
@@ -116,14 +116,12 @@ public class DiskStoreFactoryImpl implements DiskStoreFactory {
InternalRegionArguments internalRegionArgs) {
this.attrs.name = name;
synchronized (this.cache) {
- assert this.cache instanceof GemFireCacheImpl;
- GemFireCacheImpl gfc = (GemFireCacheImpl) this.cache;
DiskStoreImpl ds =
- new DiskStoreImpl(gfc, this.attrs, true/* ownedByRegion */, internalRegionArgs);
+ new DiskStoreImpl(this.cache, this.attrs, true/* ownedByRegion */, internalRegionArgs);
if (isOwnedByPR) {
ds.doInitialRecovery();
}
- gfc.addRegionOwnedDiskStore(ds);
+ this.cache.addRegionOwnedDiskStore(ds);
return ds;
}
}
@@ -137,15 +135,14 @@ public class DiskStoreFactoryImpl implements DiskStoreFactory {
result = findExisting(name);
if (result == null) {
if (this.cache instanceof GemFireCacheImpl) {
- GemFireCacheImpl gfc = (GemFireCacheImpl) this.cache;
- TypeRegistry registry = gfc.getPdxRegistry();
- DiskStoreImpl dsi = new DiskStoreImpl(gfc, this.attrs);
+ TypeRegistry registry = this.cache.getPdxRegistry();
+ DiskStoreImpl dsi = new DiskStoreImpl(this.cache, this.attrs);
result = dsi;
- /** Added for M&M **/
- gfc.getInternalDistributedSystem().handleResourceEvent(ResourceEvent.DISKSTORE_CREATE,
- dsi);
+ // Added for M&M
+ this.cache.getInternalDistributedSystem()
+ .handleResourceEvent(ResourceEvent.DISKSTORE_CREATE, dsi);
dsi.doInitialRecovery();
- gfc.addDiskStore(dsi);
+ this.cache.addDiskStore(dsi);
if (registry != null) {
registry.creatingDiskStore(dsi);
}
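
The two hunks above illustrate the direction of this commit: the field is widened from Cache to InternalCache, so internal-only methods such as addDiskStore and getPdxRegistry can be called directly on the field instead of first casting it to GemFireCacheImpl. A minimal sketch of that refactoring, using simplified stand-in interfaces rather than the real Geode ones:

public class InterfaceFieldSketch {
  interface Cache { }
  interface InternalCache extends Cache {
    void addDiskStore(String name);            // method only the internal interface exposes
  }

  // Before: the field is typed as Cache, so internal calls needed an instanceof check and a cast.
  static void registerOld(Cache cache, String name) {
    if (cache instanceof InternalCache) {
      ((InternalCache) cache).addDiskStore(name);
    }
  }

  // After: the field is typed as InternalCache, so the call is direct and checked at compile time.
  static void registerNew(InternalCache cache, String name) {
    cache.addDiskStore(name);
  }

  public static void main(String[] args) {
    InternalCache cache = name -> System.out.println("registered disk store " + name);
    registerOld(cache, "store-a");
    registerNew(cache, "store-b");
  }
}
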
@@ -163,8 +160,7 @@ public class DiskStoreFactoryImpl implements DiskStoreFactory {
// member depends on state that goes into this disk store
// that isn't backed up.
if (this.cache instanceof GemFireCacheImpl) {
- GemFireCacheImpl gfc = (GemFireCacheImpl) this.cache;
- BackupManager backup = gfc.getBackupManager();
+ BackupManager backup = this.cache.getBackupManager();
if (backup != null) {
backup.waitForBackup();
}
@@ -175,8 +171,7 @@ public class DiskStoreFactoryImpl implements DiskStoreFactory {
private DiskStore findExisting(String name) {
DiskStore existing = null;
if (this.cache instanceof GemFireCacheImpl) {
- GemFireCacheImpl gfc = (GemFireCacheImpl) this.cache;
- existing = gfc.findDiskStore(name);
+ existing = this.cache.findDiskStore(name);
if (existing != null) {
if (((DiskStoreImpl) existing).sameAs(this.attrs)) {
return existing;
@@ -192,8 +187,7 @@ public class DiskStoreFactoryImpl implements DiskStoreFactory {
if (diskDirSizes.length != diskDirs.length) {
throw new IllegalArgumentException(
LocalizedStrings.AttributesFactory_NUMBER_OF_DISKSIZES_IS_0_WHICH_IS_NOT_EQUAL_TO_NUMBER_OF_DISK_DIRS_WHICH_IS_1
- .toLocalizedString(new Object[] {Integer.valueOf(diskDirSizes.length),
- Integer.valueOf(diskDirs.length)}));
+ .toLocalizedString(new Object[] {diskDirSizes.length, diskDirs.length}));
}
verifyNonNegativeDirSize(diskDirSizes);
checkIfDirectoriesExist(diskDirs);
@@ -207,8 +201,6 @@ public class DiskStoreFactoryImpl implements DiskStoreFactory {
/**
* Checks if directories exist, if they don't then create those directories
- *
- * @param diskDirs
*/
public static void checkIfDirectoriesExist(File[] diskDirs) {
for (int i = 0; i < diskDirs.length; i++) {
@@ -225,15 +217,13 @@ public class DiskStoreFactoryImpl implements DiskStoreFactory {
/**
* Verify all directory sizes are positive
- *
- * @param sizes
*/
public static void verifyNonNegativeDirSize(int[] sizes) {
for (int i = 0; i < sizes.length; i++) {
if (sizes[i] < 0) {
throw new IllegalArgumentException(
LocalizedStrings.AttributesFactory_DIR_SIZE_CANNOT_BE_NEGATIVE_0
- .toLocalizedString(Integer.valueOf(sizes[i])));
+ .toLocalizedString(sizes[i]));
}
}
}
@@ -254,7 +244,7 @@ public class DiskStoreFactoryImpl implements DiskStoreFactory {
} else if (maxOplogSize < 0) {
throw new IllegalArgumentException(
LocalizedStrings.DiskWriteAttributesFactory_MAXIMUM_OPLOG_SIZE_SPECIFIED_HAS_TO_BE_A_NONNEGATIVE_NUMBER_AND_THE_VALUE_GIVEN_0_IS_NOT_ACCEPTABLE
- .toLocalizedString(Long.valueOf(maxOplogSize)));
+ .toLocalizedString(maxOplogSize));
}
this.attrs.maxOplogSizeInBytes = maxOplogSize * (1024 * 1024);
return this;
@@ -267,7 +257,7 @@ public class DiskStoreFactoryImpl implements DiskStoreFactory {
if (maxOplogSizeInBytes < 0) {
throw new IllegalArgumentException(
LocalizedStrings.DiskWriteAttributesFactory_MAXIMUM_OPLOG_SIZE_SPECIFIED_HAS_TO_BE_A_NONNEGATIVE_NUMBER_AND_THE_VALUE_GIVEN_0_IS_NOT_ACCEPTABLE
- .toLocalizedString(Long.valueOf(maxOplogSizeInBytes)));
+ .toLocalizedString(maxOplogSizeInBytes));
}
this.attrs.maxOplogSizeInBytes = maxOplogSizeInBytes;
return this;
@@ -277,7 +267,7 @@ public class DiskStoreFactoryImpl implements DiskStoreFactory {
if (queueSize < 0) {
throw new IllegalArgumentException(
LocalizedStrings.DiskWriteAttributesFactory_QUEUE_SIZE_SPECIFIED_HAS_TO_BE_A_NONNEGATIVE_NUMBER_AND_THE_VALUE_GIVEN_0_IS_NOT_ACCEPTABLE
- .toLocalizedString(Integer.valueOf(queueSize)));
+ .toLocalizedString(queueSize));
}
this.attrs.queueSize = queueSize;
return this;
@@ -285,10 +275,10 @@ public class DiskStoreFactoryImpl implements DiskStoreFactory {
public DiskStoreFactory setWriteBufferSize(int writeBufferSize) {
if (writeBufferSize < 0) {
- // TODO Gester add a message for WriteBufferSize
+ // TODO add a message for WriteBufferSize
throw new IllegalArgumentException(
LocalizedStrings.DiskWriteAttributesFactory_QUEUE_SIZE_SPECIFIED_HAS_TO_BE_A_NONNEGATIVE_NUMBER_AND_THE_VALUE_GIVEN_0_IS_NOT_ACCEPTABLE
- .toLocalizedString(Integer.valueOf(writeBufferSize)));
+ .toLocalizedString(writeBufferSize));
}
this.attrs.writeBufferSize = writeBufferSize;
return this;