You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ignite.apache.org by vo...@apache.org on 2015/03/04 13:24:23 UTC
[15/15] incubator-ignite git commit: # IGNITE-386: API finalization.
# IGNITE-386: API finalization.
Project: http://git-wip-us.apache.org/repos/asf/incubator-ignite/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-ignite/commit/8e2a08b9
Tree: http://git-wip-us.apache.org/repos/asf/incubator-ignite/tree/8e2a08b9
Diff: http://git-wip-us.apache.org/repos/asf/incubator-ignite/diff/8e2a08b9
Branch: refs/heads/ignite-386
Commit: 8e2a08b991f42e94f7de20abf81a4c27319ba493
Parents: a222a32
Author: vozerov-gridgain <vo...@gridgain.com>
Authored: Wed Mar 4 15:22:43 2015 +0300
Committer: vozerov-gridgain <vo...@gridgain.com>
Committed: Wed Mar 4 15:22:43 2015 +0300
----------------------------------------------------------------------
config/hadoop/default-config.xml | 4 +-
examples/config/filesystem/example-igfs.xml | 2 +-
.../src/main/java/org/apache/ignite/Ignite.java | 7 +-
.../org/apache/ignite/IgniteFileSystem.java | 6 +-
.../configuration/FileSystemConfiguration.java | 805 ++++++++++++++++++
.../configuration/HadoopConfiguration.java | 2 +-
.../ignite/configuration/IgfsConfiguration.java | 808 -------------------
.../configuration/IgniteConfiguration.java | 12 +-
.../org/apache/ignite/igfs/IgfsMetrics.java | 2 +-
.../java/org/apache/ignite/igfs/IgfsMode.java | 8 +-
.../apache/ignite/igfs/mapreduce/IgfsTask.java | 2 +-
.../ignite/igfs/mapreduce/IgfsTaskArgs.java | 2 +-
.../igfs/secondary/IgfsSecondaryFileSystem.java | 13 +-
...fsSecondaryFileSystemPositionedReadable.java | 2 +-
.../apache/ignite/internal/IgniteKernal.java | 4 +-
.../org/apache/ignite/internal/IgnitionEx.java | 8 +-
.../processors/cache/GridCacheAdapter.java | 4 +-
.../processors/cache/GridCacheProcessor.java | 4 +-
.../processors/cache/GridCacheUtils.java | 4 +-
.../internal/processors/igfs/IgfsAsyncImpl.java | 2 +-
.../internal/processors/igfs/IgfsContext.java | 6 +-
.../processors/igfs/IgfsDataManager.java | 2 +-
.../internal/processors/igfs/IgfsFileInfo.java | 4 +-
.../internal/processors/igfs/IgfsImpl.java | 8 +-
.../processors/igfs/IgfsMetaManager.java | 2 +-
.../internal/processors/igfs/IgfsProcessor.java | 16 +-
.../processors/igfs/IgfsServerManager.java | 4 +-
.../visor/node/VisorGridConfiguration.java | 2 +-
.../visor/node/VisorIgfsConfiguration.java | 6 +-
.../internal/visor/util/VisorTaskUtils.java | 2 +-
modules/core/src/test/config/igfs-loopback.xml | 2 +-
modules/core/src/test/config/igfs-shmem.xml | 2 +-
.../ignite/igfs/IgfsEventsAbstractSelfTest.java | 8 +-
.../igfs/IgfsFragmentizerAbstractSelfTest.java | 4 +-
.../processors/igfs/IgfsAbstractSelfTest.java | 4 +-
...sCachePerBlockLruEvictionPolicySelfTest.java | 8 +-
.../processors/igfs/IgfsCacheSelfTest.java | 4 +-
.../igfs/IgfsDataManagerSelfTest.java | 4 +-
.../igfs/IgfsMetaManagerSelfTest.java | 4 +-
.../processors/igfs/IgfsMetricsSelfTest.java | 8 +-
.../processors/igfs/IgfsModesSelfTest.java | 8 +-
.../processors/igfs/IgfsProcessorSelfTest.java | 6 +-
.../igfs/IgfsProcessorValidationSelfTest.java | 48 +-
...IpcEndpointRegistrationAbstractSelfTest.java | 10 +-
...dpointRegistrationOnLinuxAndMacSelfTest.java | 4 +-
...pcEndpointRegistrationOnWindowsSelfTest.java | 2 +-
.../processors/igfs/IgfsSizeSelfTest.java | 4 +-
.../processors/igfs/IgfsStreamsSelfTest.java | 4 +-
.../processors/igfs/IgfsTaskSelfTest.java | 4 +-
.../IgfsAbstractRecordResolverSelfTest.java | 4 +-
.../ipc/shmem/IpcSharedMemoryNodeStartup.java | 4 +-
.../fs/IgniteHadoopIgfsSecondaryFileSystem.java | 8 +-
.../hadoop/fs/v1/IgniteHadoopFileSystem.java | 6 +-
.../hadoop/fs/v2/IgniteHadoopFileSystem.java | 2 +-
.../mapreduce/IgniteHadoopMapReducePlanner.java | 435 ++++++++++
.../apache/ignite/hadoop/mapreduce/package.html | 2 +-
.../processors/hadoop/HadoopProcessor.java | 4 +-
.../hadoop/fs/HadoopDistributedFileSystem.java | 2 +-
.../processors/hadoop/fs/HadoopParameters.java | 2 +-
.../hadoop/igfs/HadoopIgfsEndpoint.java | 2 +-
.../planner/HadoopDefaultMapReducePlanner.java | 434 ----------
.../HadoopIgfs20FileSystemAbstractSelfTest.java | 12 +-
.../igfs/HadoopIgfsDualAbstractSelfTest.java | 4 +-
.../apache/ignite/igfs/IgfsEventsTestSuite.java | 40 +-
.../igfs/IgfsNearOnlyMultiNodeSelfTest.java | 4 +-
.../IgniteHadoopFileSystemAbstractSelfTest.java | 12 +-
.../IgniteHadoopFileSystemClientSelfTest.java | 4 +-
...IgniteHadoopFileSystemHandshakeSelfTest.java | 4 +-
.../IgniteHadoopFileSystemIpcCacheSelfTest.java | 6 +-
...niteHadoopFileSystemLoggerStateSelfTest.java | 4 +-
...teHadoopFileSystemSecondaryModeSelfTest.java | 8 +-
.../hadoop/HadoopAbstractSelfTest.java | 6 +-
.../HadoopDefaultMapReducePlannerSelfTest.java | 6 +-
.../hadoop/HadoopTaskExecutionSelfTest.java | 4 +-
pom.xml | 12 +-
75 files changed, 1458 insertions(+), 1464 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/8e2a08b9/config/hadoop/default-config.xml
----------------------------------------------------------------------
diff --git a/config/hadoop/default-config.xml b/config/hadoop/default-config.xml
index bb35b03..0314481 100644
--- a/config/hadoop/default-config.xml
+++ b/config/hadoop/default-config.xml
@@ -52,7 +52,7 @@
<!--
Abstract IGFS file system configuration to be used as a template.
-->
- <bean id="igfsCfgBase" class="org.apache.ignite.configuration.IgfsConfiguration" abstract="true">
+ <bean id="igfsCfgBase" class="org.apache.ignite.configuration.FileSystemConfiguration" abstract="true">
<!-- Must correlate with cache affinity mapper. -->
<property name="blockSize" value="#{128 * 1024}"/>
<property name="perNodeBatchSize" value="512"/>
@@ -114,7 +114,7 @@
-->
<property name="igfsConfiguration">
<list>
- <bean class="org.apache.ignite.configuration.IgfsConfiguration" parent="igfsCfgBase">
+ <bean class="org.apache.ignite.configuration.FileSystemConfiguration" parent="igfsCfgBase">
<property name="name" value="igfs"/>
<!-- Caches with these names must be configured. -->
http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/8e2a08b9/examples/config/filesystem/example-igfs.xml
----------------------------------------------------------------------
diff --git a/examples/config/filesystem/example-igfs.xml b/examples/config/filesystem/example-igfs.xml
index 30cf51e..ffab908 100644
--- a/examples/config/filesystem/example-igfs.xml
+++ b/examples/config/filesystem/example-igfs.xml
@@ -65,7 +65,7 @@
<property name="igfsConfiguration">
<list>
- <bean class="org.apache.ignite.configuration.IgfsConfiguration">
+ <bean class="org.apache.ignite.configuration.FileSystemConfiguration">
<property name="name" value="igfs"/>
<property name="metaCacheName" value="igfs-meta"/>
<property name="dataCacheName" value="igfs-data"/>
http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/8e2a08b9/modules/core/src/main/java/org/apache/ignite/Ignite.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/Ignite.java b/modules/core/src/main/java/org/apache/ignite/Ignite.java
index 7477690..31b827e 100644
--- a/modules/core/src/main/java/org/apache/ignite/Ignite.java
+++ b/modules/core/src/main/java/org/apache/ignite/Ignite.java
@@ -213,7 +213,7 @@ public interface Ignite extends AutoCloseable {
public <K, V> IgniteDataLoader<K, V> dataLoader(@Nullable String cacheName);
/**
- * Gets an instance of IGFS - Ignite In-Memory File System, if one is not
+ * Gets an instance of IGFS (Ignite In-Memory File System). If one is not
* configured then {@link IllegalArgumentException} will be thrown.
* <p>
* IGFS is fully compliant with Hadoop {@code FileSystem} APIs and can
@@ -222,13 +222,14 @@ public interface Ignite extends AutoCloseable {
*
* @param name IGFS name.
* @return IGFS instance.
+ * @throws IllegalArgumentException If IGFS with such name is not configured.
*/
public IgniteFileSystem fileSystem(String name);
/**
- * Gets all instances of the grid file systems.
+ * Gets all instances of IGFS (Ignite In-Memory File System).
*
- * @return Collection of grid file systems instances.
+ * @return Collection of IGFS instances.
*/
public Collection<IgniteFileSystem> fileSystems();
http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/8e2a08b9/modules/core/src/main/java/org/apache/ignite/IgniteFileSystem.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteFileSystem.java b/modules/core/src/main/java/org/apache/ignite/IgniteFileSystem.java
index 7067faf..d221ae2 100644
--- a/modules/core/src/main/java/org/apache/ignite/IgniteFileSystem.java
+++ b/modules/core/src/main/java/org/apache/ignite/IgniteFileSystem.java
@@ -72,7 +72,7 @@ public interface IgniteFileSystem extends IgniteAsyncSupport {
*
* @return IGFS configuration.
*/
- public IgfsConfiguration configuration();
+ public FileSystemConfiguration configuration();
/**
* Gets summary (total number of files, total number of directories and total length)
@@ -281,7 +281,7 @@ public interface IgniteFileSystem extends IgniteAsyncSupport {
/**
* Executes IGFS task with overridden maximum range length (see
- * {@link org.apache.ignite.configuration.IgfsConfiguration#getMaximumTaskRangeLength()} for more information).
+ * {@link org.apache.ignite.configuration.FileSystemConfiguration#getMaximumTaskRangeLength()} for more information).
* <p>
* Supports asynchronous execution (see {@link IgniteAsyncSupport}).
*
@@ -319,7 +319,7 @@ public interface IgniteFileSystem extends IgniteAsyncSupport {
/**
* Executes IGFS task with overridden maximum range length (see
- * {@link org.apache.ignite.configuration.IgfsConfiguration#getMaximumTaskRangeLength()} for more information).
+ * {@link org.apache.ignite.configuration.FileSystemConfiguration#getMaximumTaskRangeLength()} for more information).
* <p>
* Supports asynchronous execution (see {@link IgniteAsyncSupport}).
*
http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/8e2a08b9/modules/core/src/main/java/org/apache/ignite/configuration/FileSystemConfiguration.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/FileSystemConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/FileSystemConfiguration.java
new file mode 100644
index 0000000..f679fc0
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/configuration/FileSystemConfiguration.java
@@ -0,0 +1,805 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.configuration;
+
+import org.apache.ignite.igfs.*;
+import org.apache.ignite.igfs.secondary.*;
+import org.apache.ignite.internal.util.typedef.internal.*;
+import org.jetbrains.annotations.*;
+
+import java.util.*;
+import java.util.concurrent.*;
+
+/**
+ * {@code IGFS} configuration. More than one file system can be configured within grid.
+ * {@code IGFS} configuration is provided via {@link IgniteConfiguration#getFileSystemConfiguration()}
+ * method.
+ */
+public class FileSystemConfiguration {
+ /** Default file system user name. */
+ public static final String DFLT_USER_NAME = System.getProperty("user.name", "anonymous");
+
+ /** Default IPC port. */
+ public static final int DFLT_IPC_PORT = 10500;
+
+ /** Default fragmentizer throttling block length. */
+ public static final long DFLT_FRAGMENTIZER_THROTTLING_BLOCK_LENGTH = 16 * 1024 * 1024;
+
+ /** Default fragmentizer throttling delay. */
+ public static final long DFLT_FRAGMENTIZER_THROTTLING_DELAY = 200;
+
+ /** Default fragmentizer concurrent files. */
+ public static final int DFLT_FRAGMENTIZER_CONCURRENT_FILES = 0;
+
+ /** Default fragmentizer local writes ratio. */
+ public static final float DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO = 0.8f;
+
+ /** Fragmentizer enabled property. */
+ public static final boolean DFLT_FRAGMENTIZER_ENABLED = true;
+
+ /** Default batch size for logging. */
+ public static final int DFLT_IGFS_LOG_BATCH_SIZE = 100;
+
+ /** Default {@code IGFS} log directory. */
+ public static final String DFLT_IGFS_LOG_DIR = "work/igfs/log";
+
+ /** Default per node buffer size. */
+ public static final int DFLT_PER_NODE_BATCH_SIZE = 100;
+
+ /** Default number of per node parallel operations. */
+ public static final int DFLT_PER_NODE_PARALLEL_BATCH_CNT = 8;
+
+ /** Default IGFS mode. */
+ public static final IgfsMode DFLT_MODE = IgfsMode.DUAL_ASYNC;
+
+ /** Default file's data block size (bytes). */
+ public static final int DFLT_BLOCK_SIZE = 1 << 16;
+
+ /** Default read/write buffers size (bytes). */
+ public static final int DFLT_BUF_SIZE = 1 << 16;
+
+ /** Default trash directory purge await timeout in case data cache oversize is detected. */
+ public static final long DFLT_TRASH_PURGE_TIMEOUT = 1000;
+
+ /** Default management port. */
+ public static final int DFLT_MGMT_PORT = 11400;
+
+ /** Default IPC endpoint enabled flag. */
+ public static final boolean DFLT_IPC_ENDPOINT_ENABLED = true;
+
+ /** IGFS instance name. */
+ private String name;
+
+ /** Cache name to store IGFS meta information. */
+ private String metaCacheName;
+
+ /** Cache name to store file's data blocks. */
+ private String dataCacheName;
+
+ /** File's data block size (bytes). */
+ private int blockSize = DFLT_BLOCK_SIZE;
+
+ /** The number of pre-fetched blocks if specific file's chunk is requested. */
+ private int prefetchBlocks;
+
+ /** Amount of sequential block reads before prefetch is triggered. */
+ private int seqReadsBeforePrefetch;
+
+ /** Read/write buffers size for stream operations (bytes). */
+ private int bufSize = DFLT_BUF_SIZE;
+
+ /** Per node buffer size. */
+ private int perNodeBatchSize = DFLT_PER_NODE_BATCH_SIZE;
+
+ /** Per node parallel operations. */
+ private int perNodeParallelBatchCnt = DFLT_PER_NODE_PARALLEL_BATCH_CNT;
+
+ /** IPC endpoint properties to publish IGFS over. */
+ private Map<String, String> ipcEndpointCfg;
+
+ /** IPC endpoint enabled flag. */
+ private boolean ipcEndpointEnabled = DFLT_IPC_ENDPOINT_ENABLED;
+
+ /** Management port. */
+ private int mgmtPort = DFLT_MGMT_PORT;
+
+ /** Secondary file system. */
+ private IgfsSecondaryFileSystem secondaryFs;
+
+ /** IGFS mode. */
+ private IgfsMode dfltMode = DFLT_MODE;
+
+ /** Fragmentizer throttling block length. */
+ private long fragmentizerThrottlingBlockLen = DFLT_FRAGMENTIZER_THROTTLING_BLOCK_LENGTH;
+
+ /** Fragmentizer throttling delay. */
+ private long fragmentizerThrottlingDelay = DFLT_FRAGMENTIZER_THROTTLING_DELAY;
+
+ /** Fragmentizer concurrent files. */
+ private int fragmentizerConcurrentFiles = DFLT_FRAGMENTIZER_CONCURRENT_FILES;
+
+ /** Fragmentizer local writes ratio. */
+ private float fragmentizerLocWritesRatio = DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO;
+
+ /** Fragmentizer enabled flag. */
+ private boolean fragmentizerEnabled = DFLT_FRAGMENTIZER_ENABLED;
+
+ /** Path modes. */
+ private Map<String, IgfsMode> pathModes;
+
+ /** Maximum space. */
+ private long maxSpace;
+
+ /** Trash purge await timeout. */
+ private long trashPurgeTimeout = DFLT_TRASH_PURGE_TIMEOUT;
+
+ /** Dual mode PUT operations executor service. */
+ private ExecutorService dualModePutExec;
+
+ /** Dual mode PUT operations executor service shutdown flag. */
+ private boolean dualModePutExecShutdown;
+
+ /** Maximum amount of data in pending puts. */
+ private long dualModeMaxPendingPutsSize;
+
+ /** Maximum range length. */
+ private long maxTaskRangeLen;
+
+ /**
+ * Constructs default configuration.
+ */
+ public FileSystemConfiguration() {
+ // No-op.
+ }
+
+ /**
+ * Constructs the copy of the configuration.
+ *
+ * @param cfg Configuration to copy.
+ */
+ public FileSystemConfiguration(FileSystemConfiguration cfg) {
+ assert cfg != null;
+
+ /*
+ * Must preserve alphabetical order!
+ */
+ blockSize = cfg.getBlockSize();
+ bufSize = cfg.getStreamBufferSize();
+ dataCacheName = cfg.getDataCacheName();
+ dfltMode = cfg.getDefaultMode();
+ dualModeMaxPendingPutsSize = cfg.getDualModeMaxPendingPutsSize();
+ dualModePutExec = cfg.getDualModePutExecutorService();
+ dualModePutExecShutdown = cfg.getDualModePutExecutorServiceShutdown();
+ fragmentizerConcurrentFiles = cfg.getFragmentizerConcurrentFiles();
+ fragmentizerLocWritesRatio = cfg.getFragmentizerLocalWritesRatio();
+ fragmentizerEnabled = cfg.isFragmentizerEnabled();
+ fragmentizerThrottlingBlockLen = cfg.getFragmentizerThrottlingBlockLength();
+ fragmentizerThrottlingDelay = cfg.getFragmentizerThrottlingDelay();
+ secondaryFs = cfg.getSecondaryFileSystem();
+ ipcEndpointCfg = cfg.getIpcEndpointConfiguration();
+ ipcEndpointEnabled = cfg.isIpcEndpointEnabled();
+ maxSpace = cfg.getMaxSpaceSize();
+ maxTaskRangeLen = cfg.getMaximumTaskRangeLength();
+ metaCacheName = cfg.getMetaCacheName();
+ mgmtPort = cfg.getManagementPort();
+ name = cfg.getName();
+ pathModes = cfg.getPathModes();
+ perNodeBatchSize = cfg.getPerNodeBatchSize();
+ perNodeParallelBatchCnt = cfg.getPerNodeParallelBatchCount();
+ prefetchBlocks = cfg.getPrefetchBlocks();
+ seqReadsBeforePrefetch = cfg.getSequentialReadsBeforePrefetch();
+ trashPurgeTimeout = cfg.getTrashPurgeTimeout();
+ }
+
+ /**
+ * Gets IGFS instance name. If {@code null}, then instance with default
+ * name will be used.
+ *
+ * @return IGFS instance name.
+ */
+ @Nullable public String getName() {
+ return name;
+ }
+
+ /**
+ * Sets IGFS instance name.
+ *
+ * @param name IGFS instance name.
+ */
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /**
+ * Cache name to store IGFS meta information. If {@code null}, then instance
+ * with default meta-cache name will be used.
+ *
+ * @return Cache name to store IGFS meta information.
+ */
+ @Nullable public String getMetaCacheName() {
+ return metaCacheName;
+ }
+
+ /**
+ * Sets cache name to store IGFS meta information.
+ *
+ * @param metaCacheName Cache name to store IGFS meta information.
+ */
+ public void setMetaCacheName(String metaCacheName) {
+ this.metaCacheName = metaCacheName;
+ }
+
+ /**
+ * Cache name to store IGFS data.
+ *
+ * @return Cache name to store IGFS data.
+ */
+ @Nullable public String getDataCacheName() {
+ return dataCacheName;
+ }
+
+ /**
+ * Sets cache name to store IGFS data.
+ *
+ * @param dataCacheName Cache name to store IGFS data.
+ */
+ public void setDataCacheName(String dataCacheName) {
+ this.dataCacheName = dataCacheName;
+ }
+
+ /**
+ * Get file's data block size.
+ *
+ * @return File's data block size.
+ */
+ public int getBlockSize() {
+ return blockSize;
+ }
+
+ /**
+ * Sets file's data block size.
+ *
+ * @param blockSize File's data block size (bytes) or {@code 0} to reset default value.
+ */
+ public void setBlockSize(int blockSize) {
+ A.ensure(blockSize >= 0, "blockSize >= 0");
+
+ this.blockSize = blockSize == 0 ? DFLT_BLOCK_SIZE : blockSize;
+ }
+
+ /**
+ * Get number of pre-fetched blocks if specific file's chunk is requested.
+ *
+ * @return The number of pre-fetched blocks.
+ */
+ public int getPrefetchBlocks() {
+ return prefetchBlocks;
+ }
+
+ /**
+ * Sets the number of pre-fetched blocks if specific file's chunk is requested.
+ *
+ * @param prefetchBlocks New number of pre-fetched blocks.
+ */
+ public void setPrefetchBlocks(int prefetchBlocks) {
+ A.ensure(prefetchBlocks >= 0, "prefetchBlocks >= 0");
+
+ this.prefetchBlocks = prefetchBlocks;
+ }
+
+ /**
+ * Get amount of sequential block reads before prefetch is triggered. The
+ * higher this value, the longer IGFS will wait before starting to prefetch
+ * values ahead of time. Depending on the use case, this can either help
+ * or hurt performance.
+ * <p>
+ * Default is {@code 0} which means that pre-fetching will start right away.
+ * <h1 class="header">Integration With Hadoop</h1>
+ * This parameter can be also overridden for individual Hadoop MapReduce tasks by passing
+ * {@code fs.igfs.[name].open.sequential_reads_before_prefetch} configuration property directly to Hadoop
+ * MapReduce task.
+ * <p>
+ * <b>NOTE:</b> Integration with Hadoop is available only in {@code In-Memory Accelerator For Hadoop} edition.
+ *
+ * @return Amount of sequential block reads.
+ */
+ public int getSequentialReadsBeforePrefetch() {
+ return seqReadsBeforePrefetch;
+ }
+
+ /**
+ * Sets amount of sequential block reads before prefetch is triggered. The
+ * higher this value, the longer IGFS will wait before starting to prefetch
+ * values ahead of time. Depending on the use case, this can either help
+ * or hurt performance.
+ * <p>
+ * Default is {@code 0} which means that pre-fetching will start right away.
+ * <h1 class="header">Integration With Hadoop</h1>
+ * This parameter can be also overridden for individual Hadoop MapReduce tasks by passing
+ * {@code fs.igfs.[name].open.sequential_reads_before_prefetch} configuration property directly to Hadoop
+ * MapReduce task.
+ * <p>
+ * <b>NOTE:</b> Integration with Hadoop is available only in {@code In-Memory Accelerator For Hadoop} edition.
+ *
+ * @param seqReadsBeforePrefetch Amount of sequential block reads before prefetch is triggered.
+ */
+ public void setSequentialReadsBeforePrefetch(int seqReadsBeforePrefetch) {
+ A.ensure(seqReadsBeforePrefetch >= 0, "seqReadsBeforePrefetch >= 0");
+
+ this.seqReadsBeforePrefetch = seqReadsBeforePrefetch;
+ }
+
+ /**
+ * Get read/write buffer size for {@code IGFS} stream operations in bytes.
+ *
+ * @return Read/write buffers size (bytes).
+ */
+ public int getStreamBufferSize() {
+ return bufSize;
+ }
+
+ /**
+ * Sets read/write buffers size for {@code IGFS} stream operations (bytes).
+ *
+ * @param bufSize Read/write buffers size for stream operations (bytes) or {@code 0} to reset default value.
+ */
+ public void setStreamBufferSize(int bufSize) {
+ A.ensure(bufSize >= 0, "bufSize >= 0");
+
+ this.bufSize = bufSize == 0 ? DFLT_BUF_SIZE : bufSize;
+ }
+
+ /**
+ * Gets number of file blocks buffered on local node before sending batch to remote node.
+ *
+ * @return Per node buffer size.
+ */
+ public int getPerNodeBatchSize() {
+ return perNodeBatchSize;
+ }
+
+ /**
+ * Sets number of file blocks collected on local node before sending batch to remote node.
+ *
+ * @param perNodeBatchSize Per node buffer size.
+ */
+ public void setPerNodeBatchSize(int perNodeBatchSize) {
+ this.perNodeBatchSize = perNodeBatchSize;
+ }
+
+ /**
+ * Gets number of batches that can be concurrently sent to remote node.
+ *
+ * @return Number of batches for each node.
+ */
+ public int getPerNodeParallelBatchCount() {
+ return perNodeParallelBatchCnt;
+ }
+
+ /**
+ * Sets number of file block batches that can be concurrently sent to remote node.
+ *
+ * @param perNodeParallelBatchCnt Per node parallel load operations.
+ */
+ public void setPerNodeParallelBatchCount(int perNodeParallelBatchCnt) {
+ this.perNodeParallelBatchCnt = perNodeParallelBatchCnt;
+ }
+
+ /**
+ * Gets map of IPC endpoint configuration properties. There are 2 different
+ * types of endpoint supported: {@code shared-memory}, and {@code TCP}.
+ * <p>
+ * The following configuration properties are supported for {@code shared-memory}
+ * endpoint:
+ * <ul>
+ * <li>{@code type} - value is {@code shmem} to specify {@code shared-memory} approach.</li>
+ * <li>{@code port} - endpoint port.</li>
+ * <li>{@code size} - memory size allocated for single endpoint communication.</li>
+ * <li>
+ * {@code tokenDirectoryPath} - path, either absolute or relative to {@code IGNITE_HOME} to
+ * store shared memory tokens.
+ * </li>
+ * </ul>
+ * <p>
+ * The following configuration properties are supported for {@code TCP} approach:
+ * <ul>
+ * <li>{@code type} - value is {@code tcp} to specify {@code TCP} approach.</li>
+ * <li>{@code port} - endpoint bind port.</li>
+ * <li>
+ * {@code host} - endpoint bind host. If omitted '127.0.0.1' will be used.
+ * </li>
+ * </ul>
+ * <p>
+ * Note that {@code shared-memory} approach is not supported on Windows environments.
+ * If IGFS fails to bind to a particular port, further bind attempts will be performed every 3 seconds.
+ *
+ * @return Map of IPC endpoint configuration properties. In case the value is not set, defaults will be used. Default
+ * type for Windows is "tcp", for all other platforms - "shmem". Default port is {@link #DFLT_IPC_PORT}.
+ */
+ @Nullable public Map<String,String> getIpcEndpointConfiguration() {
+ return ipcEndpointCfg;
+ }
+
+ /**
+ * Sets IPC endpoint configuration to publish IGFS over.
+ *
+ * @param ipcEndpointCfg Map of IPC endpoint config properties.
+ */
+ public void setIpcEndpointConfiguration(@Nullable Map<String,String> ipcEndpointCfg) {
+ this.ipcEndpointCfg = ipcEndpointCfg;
+ }
+
+ /**
+ * Get IPC endpoint enabled flag. In case it is set to {@code true} endpoint will be created and bound to specific
+ * port. Otherwise endpoint will not be created. Default value is {@link #DFLT_IPC_ENDPOINT_ENABLED}.
+ *
+ * @return {@code True} in case endpoint is enabled.
+ */
+ public boolean isIpcEndpointEnabled() {
+ return ipcEndpointEnabled;
+ }
+
+ /**
+ * Set IPC endpoint enabled flag. See {@link #isIpcEndpointEnabled()}.
+ *
+ * @param ipcEndpointEnabled IPC endpoint enabled flag.
+ */
+ public void setIpcEndpointEnabled(boolean ipcEndpointEnabled) {
+ this.ipcEndpointEnabled = ipcEndpointEnabled;
+ }
+
+ /**
+ * Gets port number for management endpoint. All IGFS nodes should have this port open
+ * for Visor Management Console to work with IGFS.
+ * <p>
+ * Default value is {@link #DFLT_MGMT_PORT}
+ *
+ * @return Port number or {@code -1} if management endpoint should be disabled.
+ */
+ public int getManagementPort() {
+ return mgmtPort;
+ }
+
+ /**
+ * Sets management endpoint port.
+ *
+ * @param mgmtPort port number or {@code -1} to disable management endpoint.
+ */
+ public void setManagementPort(int mgmtPort) {
+ this.mgmtPort = mgmtPort;
+ }
+
+ /**
+ * Gets mode to specify how {@code IGFS} interacts with Hadoop file system, like {@code HDFS}.
+ * Secondary Hadoop file system is provided for pass-through, write-through, and read-through
+ * purposes.
+ * <p>
+ * Default mode is {@link org.apache.ignite.igfs.IgfsMode#DUAL_ASYNC}. If secondary Hadoop file system is
+ * not configured, this mode will work just like {@link org.apache.ignite.igfs.IgfsMode#PRIMARY} mode.
+ *
+ * @return Mode to specify how IGFS interacts with secondary HDFS file system.
+ */
+ public IgfsMode getDefaultMode() {
+ return dfltMode;
+ }
+
+ /**
+ * Sets {@code IGFS} mode to specify how it should interact with secondary
+ * Hadoop file system, like {@code HDFS}. Secondary Hadoop file system is provided
+ * for pass-through, write-through, and read-through purposes.
+ *
+ * @param dfltMode {@code IGFS} mode.
+ */
+ public void setDefaultMode(IgfsMode dfltMode) {
+ this.dfltMode = dfltMode;
+ }
+
+ /**
+ * Gets the secondary file system. Secondary file system is provided for pass-through, write-through,
+ * and read-through purposes.
+ *
+ * @return Secondary file system.
+ */
+ public IgfsSecondaryFileSystem getSecondaryFileSystem() {
+ return secondaryFs;
+ }
+
+ /**
+ * Sets the secondary file system. Secondary file system is provided for pass-through, write-through,
+ * and read-through purposes.
+ *
+ * @param fileSystem Secondary file system.
+ */
+ public void setSecondaryFileSystem(IgfsSecondaryFileSystem fileSystem) {
+ secondaryFs = fileSystem;
+ }
+
+ /**
+ * Gets map of path prefixes to {@code IGFS} modes used for them.
+ * <p>
+ * If path doesn't correspond to any specified prefix or mappings are not provided, then
+ * {@link #getDefaultMode()} is used.
+ * <p>
+ * Several folders under {@code '/apache/ignite'} folder have predefined mappings which cannot be overridden.
+ * <li>{@code /apache/ignite/primary} and all its sub-folders will always work in {@code PRIMARY} mode.</li>
+ * <p>
+ * And in case secondary file system URI is provided:
+ * <li>{@code /apache/ignite/proxy} and all its sub-folders will always work in {@code PROXY} mode.</li>
+ * <li>{@code /apache/ignite/sync} and all its sub-folders will always work in {@code DUAL_SYNC} mode.</li>
+ * <li>{@code /apache/ignite/async} and all its sub-folders will always work in {@code DUAL_ASYNC} mode.</li>
+ *
+ * @return Map of paths to {@code IGFS} modes.
+ */
+ @Nullable public Map<String, IgfsMode> getPathModes() {
+ return pathModes;
+ }
+
+ /**
+ * Sets map of path prefixes to {@code IGFS} modes used for them.
+ * <p>
+ * If path doesn't correspond to any specified prefix or mappings are not provided, then
+ * {@link #getDefaultMode()} is used.
+ *
+ * @param pathModes Map of paths to {@code IGFS} modes.
+ */
+ public void setPathModes(Map<String, IgfsMode> pathModes) {
+ this.pathModes = pathModes;
+ }
+
+ /**
+ * Gets the length of file chunk to send before delaying the fragmentizer.
+ *
+ * @return File chunk length in bytes.
+ */
+ public long getFragmentizerThrottlingBlockLength() {
+ return fragmentizerThrottlingBlockLen;
+ }
+
+ /**
+ * Sets length of file chunk to transmit before throttling is delayed.
+ *
+ * @param fragmentizerThrottlingBlockLen Block length in bytes.
+ */
+ public void setFragmentizerThrottlingBlockLength(long fragmentizerThrottlingBlockLen) {
+ this.fragmentizerThrottlingBlockLen = fragmentizerThrottlingBlockLen;
+ }
+
+ /**
+ * Gets throttle delay for fragmentizer.
+ *
+ * @return Throttle delay in milliseconds.
+ */
+ public long getFragmentizerThrottlingDelay() {
+ return fragmentizerThrottlingDelay;
+ }
+
+ /**
+ * Sets delay in milliseconds for which fragmentizer is paused.
+ *
+ * @param fragmentizerThrottlingDelay Delay in milliseconds.
+ */
+ public void setFragmentizerThrottlingDelay(long fragmentizerThrottlingDelay) {
+ this.fragmentizerThrottlingDelay = fragmentizerThrottlingDelay;
+ }
+
+ /**
+ * Gets number of files that can be processed by fragmentizer concurrently.
+ *
+ * @return Number of files to process concurrently.
+ */
+ public int getFragmentizerConcurrentFiles() {
+ return fragmentizerConcurrentFiles;
+ }
+
+ /**
+ * Sets number of files to process concurrently by fragmentizer.
+ *
+ * @param fragmentizerConcurrentFiles Number of files to process concurrently.
+ */
+ public void setFragmentizerConcurrentFiles(int fragmentizerConcurrentFiles) {
+ this.fragmentizerConcurrentFiles = fragmentizerConcurrentFiles;
+ }
+
+ /**
+ * Gets amount of local memory (in % of local IGFS max space size) available for local writes
+ * during file creation.
+ * <p>
+ * If current IGFS space size is less than {@code fragmentizerLocalWritesRatio * maxSpaceSize},
+ * then file blocks will be written to the local node first and then asynchronously distributed
+ * among cluster nodes (fragmentized).
+ * <p>
+ * Default value is {@link #DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO}.
+ *
+ * @return Ratio for local writes space.
+ */
+ public float getFragmentizerLocalWritesRatio() {
+ return fragmentizerLocWritesRatio;
+ }
+
+ /**
+ * Sets ratio for space available for local file writes.
+ *
+ * @param fragmentizerLocWritesRatio Ratio for local file writes.
+ * @see #getFragmentizerLocalWritesRatio()
+ */
+ public void setFragmentizerLocalWritesRatio(float fragmentizerLocWritesRatio) {
+ this.fragmentizerLocWritesRatio = fragmentizerLocWritesRatio;
+ }
+
+ /**
+ * Gets flag indicating whether IGFS fragmentizer is enabled. If fragmentizer is disabled, files will be
+ * written in distributed fashion.
+ *
+ * @return Flag indicating whether fragmentizer is enabled.
+ */
+ public boolean isFragmentizerEnabled() {
+ return fragmentizerEnabled;
+ }
+
+ /**
+ * Sets property indicating whether fragmentizer is enabled.
+ *
+ * @param fragmentizerEnabled {@code True} if fragmentizer is enabled.
+ */
+ public void setFragmentizerEnabled(boolean fragmentizerEnabled) {
+ this.fragmentizerEnabled = fragmentizerEnabled;
+ }
+
+ /**
+ * Get maximum space available for data cache to store file system entries.
+ *
+ * @return Maximum space available for data cache.
+ */
+ public long getMaxSpaceSize() {
+ return maxSpace;
+ }
+
+ /**
+ * Set maximum space in bytes available in data cache.
+ *
+ * @param maxSpace Maximum space available in data cache.
+ */
+ public void setMaxSpaceSize(long maxSpace) {
+ this.maxSpace = maxSpace;
+ }
+
+ /**
+ * Gets maximum timeout awaiting for trash purging in case data cache oversize is detected.
+ *
+ * @return Maximum timeout awaiting for trash purging in case data cache oversize is detected.
+ */
+ public long getTrashPurgeTimeout() {
+ return trashPurgeTimeout;
+ }
+
+ /**
+ * Sets maximum timeout awaiting for trash purging in case data cache oversize is detected.
+ *
+ * @param trashPurgeTimeout Maximum timeout awaiting for trash purging in case data cache oversize is detected.
+ */
+ public void setTrashPurgeTimeout(long trashPurgeTimeout) {
+ this.trashPurgeTimeout = trashPurgeTimeout;
+ }
+
+ /**
+ * Get DUAL mode put operation executor service. This executor service will process cache PUT requests for
+ * data which came from the secondary file system and about to be written to IGFS data cache.
+ * In case no executor service is provided, default one will be created with maximum amount of threads equals
+ * to amount of processor cores.
+ *
+ * @return DUAL mode put operation executor service.
+ */
+ @Nullable public ExecutorService getDualModePutExecutorService() {
+ return dualModePutExec;
+ }
+
+ /**
+ * Set DUAL mode put operations executor service.
+ *
+ * @param dualModePutExec Dual mode put operations executor service.
+ */
+ public void setDualModePutExecutorService(ExecutorService dualModePutExec) {
+ this.dualModePutExec = dualModePutExec;
+ }
+
+ /**
+ * Get DUAL mode put operation executor service shutdown flag.
+ *
+ * @return DUAL mode put operation executor service shutdown flag.
+ */
+ public boolean getDualModePutExecutorServiceShutdown() {
+ return dualModePutExecShutdown;
+ }
+
+ /**
+ * Set DUAL mode put operations executor service shutdown flag.
+ *
+ * @param dualModePutExecShutdown Dual mode put operations executor service shutdown flag.
+ */
+ public void setDualModePutExecutorServiceShutdown(boolean dualModePutExecShutdown) {
+ this.dualModePutExecShutdown = dualModePutExecShutdown;
+ }
+
+ /**
+ * Get maximum amount of pending data read from the secondary file system and waiting to be written to data
+ * cache. {@code 0} or negative value stands for unlimited size.
+ * <p>
+ * By default this value is set to {@code 0}. It is recommended to set positive value in case your
+ * application performs frequent reads of large amount of data from the secondary file system in order to
+ * avoid issues with increasing GC pauses or out-of-memory error.
+ *
+ * @return Maximum amount of pending data read from the secondary file system.
+ */
+ public long getDualModeMaxPendingPutsSize() {
+ return dualModeMaxPendingPutsSize;
+ }
+
+ /**
+ * Set maximum amount of data in pending put operations.
+ *
+ * @param dualModeMaxPendingPutsSize Maximum amount of data in pending put operations.
+ */
+ public void setDualModeMaxPendingPutsSize(long dualModeMaxPendingPutsSize) {
+ this.dualModeMaxPendingPutsSize = dualModeMaxPendingPutsSize;
+ }
+
+ /**
+ * Get maximum default range size of a file being split during IGFS task execution. When IGFS task is about to
+ * be executed, it requests file block locations first. Each location is defined as {@link org.apache.ignite.igfs.mapreduce.IgfsFileRange} which
+ * has length. In case this parameter is set to positive value, then IGFS will split single file range into smaller
+ * ranges with length not greater than this parameter. The only exception to this case is when maximum task range
+ * length is smaller than file block size. In this case maximum task range size will be overridden and set to file
+ * block size.
+ * <p>
+ * Note that this parameter is applied when task is split into jobs before {@link org.apache.ignite.igfs.mapreduce.IgfsRecordResolver} is
+ * applied. Therefore, final file ranges being assigned to particular jobs could be greater than value of this
+ * parameter depending on file data layout and selected resolver type.
+ * <p>
+ * Setting this parameter might be useful when a file is highly colocated and has very long consecutive data chunks
+ * so that task execution suffers from insufficient parallelism. E.g., in case you have one IGFS node in topology
+ * and want to process 1Gb file, then only single range of length 1Gb will be returned. This will result in
+ * a single job which will be processed in one thread. But in case you provide this configuration parameter and set
+ * maximum range length to 16Mb, then 64 ranges will be returned resulting in 64 jobs which could be executed in
+ * parallel.
+ * <p>
+ * Note that some {@code IgniteFs.execute()} methods can override value of this parameter.
+ * <p>
+ * In case value of this parameter is set to {@code 0} or negative value, it is simply ignored. Default value is
+ * {@code 0}.
+ *
+ * @return Maximum range size of a file being split during IGFS task execution.
+ */
+ public long getMaximumTaskRangeLength() {
+ return maxTaskRangeLen;
+ }
+
+ /**
+ * Set maximum default range size of a file being split during IGFS task execution.
+ * See {@link #getMaximumTaskRangeLength()} for more details.
+ *
+ * @param maxTaskRangeLen Maximum default range size of a file being split during IGFS task execution.
+ */
+ public void setMaximumTaskRangeLength(long maxTaskRangeLen) {
+ this.maxTaskRangeLen = maxTaskRangeLen;
+ }
+
+ /** {@inheritDoc} */
+ @Override public String toString() {
+ return S.toString(FileSystemConfiguration.class, this);
+ }
+}
http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/8e2a08b9/modules/core/src/main/java/org/apache/ignite/configuration/HadoopConfiguration.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/HadoopConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/HadoopConfiguration.java
index 7e6183d..01ef8b0 100644
--- a/modules/core/src/main/java/org/apache/ignite/configuration/HadoopConfiguration.java
+++ b/modules/core/src/main/java/org/apache/ignite/configuration/HadoopConfiguration.java
@@ -21,7 +21,7 @@ import org.apache.ignite.internal.processors.hadoop.*;
import org.apache.ignite.internal.util.typedef.internal.*;
/**
- * Hadoop configuration.
+ * Ignite Hadoop Accelerator configuration.
*/
public class HadoopConfiguration {
/** Default finished job info time-to-live. */
http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/8e2a08b9/modules/core/src/main/java/org/apache/ignite/configuration/IgfsConfiguration.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/IgfsConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/IgfsConfiguration.java
deleted file mode 100644
index a5dbedf..0000000
--- a/modules/core/src/main/java/org/apache/ignite/configuration/IgfsConfiguration.java
+++ /dev/null
@@ -1,808 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.configuration;
-
-import org.apache.ignite.igfs.*;
-import org.apache.ignite.igfs.secondary.*;
-import org.apache.ignite.internal.util.typedef.internal.*;
-import org.jetbrains.annotations.*;
-
-import java.util.*;
-import java.util.concurrent.*;
-
-/**
- * {@code IGFS} configuration. More than one file system can be configured within grid.
- * {@code IGFS} configuration is provided via {@link org.apache.ignite.configuration.IgniteConfiguration#getIgfsConfiguration()}
- * method.
- * <p>
- * Refer to {@code config/hadoop/default-config.xml} or {@code config/hadoop/default-config-client.xml}
- * configuration files under Ignite installation to see sample {@code IGFS} configuration.
- */
-public class IgfsConfiguration {
- /** Default file system user name. */
- public static final String DFLT_USER_NAME = System.getProperty("user.name", "anonymous");
-
- /** Default IPC port. */
- public static final int DFLT_IPC_PORT = 10500;
-
- /** Default fragmentizer throttling block length. */
- public static final long DFLT_FRAGMENTIZER_THROTTLING_BLOCK_LENGTH = 16 * 1024 * 1024;
-
- /** Default fragmentizer throttling delay. */
- public static final long DFLT_FRAGMENTIZER_THROTTLING_DELAY = 200;
-
- /** Default fragmentizer concurrent files. */
- public static final int DFLT_FRAGMENTIZER_CONCURRENT_FILES = 0;
-
- /** Default fragmentizer local writes ratio. */
- public static final float DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO = 0.8f;
-
- /** Fragmentizer enabled property. */
- public static final boolean DFLT_FRAGMENTIZER_ENABLED = true;
-
- /** Default batch size for logging. */
- public static final int DFLT_IGFS_LOG_BATCH_SIZE = 100;
-
- /** Default {@code IGFS} log directory. */
- public static final String DFLT_IGFS_LOG_DIR = "work/igfs/log";
-
- /** Default per node buffer size. */
- public static final int DFLT_PER_NODE_BATCH_SIZE = 100;
-
- /** Default number of per node parallel operations. */
- public static final int DFLT_PER_NODE_PARALLEL_BATCH_CNT = 8;
-
- /** Default IGFS mode. */
- public static final IgfsMode DFLT_MODE = IgfsMode.DUAL_ASYNC;
-
- /** Default file's data block size (bytes). */
- public static final int DFLT_BLOCK_SIZE = 1 << 16;
-
- /** Default read/write buffers size (bytes). */
- public static final int DFLT_BUF_SIZE = 1 << 16;
-
- /** Default trash directory purge await timeout in case data cache oversize is detected. */
- public static final long DFLT_TRASH_PURGE_TIMEOUT = 1000;
-
- /** Default management port. */
- public static final int DFLT_MGMT_PORT = 11400;
-
- /** Default IPC endpoint enabled flag. */
- public static final boolean DFLT_IPC_ENDPOINT_ENABLED = true;
-
- /** IGFS instance name. */
- private String name;
-
- /** Cache name to store IGFS meta information. */
- private String metaCacheName;
-
- /** Cache name to store file's data blocks. */
- private String dataCacheName;
-
- /** File's data block size (bytes). */
- private int blockSize = DFLT_BLOCK_SIZE;
-
- /** The number of pre-fetched blocks if specific file's chunk is requested. */
- private int prefetchBlocks;
-
- /** Amount of sequential block reads before prefetch is triggered. */
- private int seqReadsBeforePrefetch;
-
- /** Read/write buffers size for stream operations (bytes). */
- private int bufSize = DFLT_BUF_SIZE;
-
- /** Per node buffer size. */
- private int perNodeBatchSize = DFLT_PER_NODE_BATCH_SIZE;
-
- /** Per node parallel operations. */
- private int perNodeParallelBatchCnt = DFLT_PER_NODE_PARALLEL_BATCH_CNT;
-
- /** IPC endpoint properties to publish IGFS over. */
- private Map<String, String> ipcEndpointCfg;
-
- /** IPC endpoint enabled flag. */
- private boolean ipcEndpointEnabled = DFLT_IPC_ENDPOINT_ENABLED;
-
- /** Management port. */
- private int mgmtPort = DFLT_MGMT_PORT;
-
- /** Secondary file system */
- private IgfsSecondaryFileSystem secondaryFs;
-
- /** IGFS mode. */
- private IgfsMode dfltMode = DFLT_MODE;
-
- /** Fragmentizer throttling block length. */
- private long fragmentizerThrottlingBlockLen = DFLT_FRAGMENTIZER_THROTTLING_BLOCK_LENGTH;
-
- /** Fragmentizer throttling delay. */
- private long fragmentizerThrottlingDelay = DFLT_FRAGMENTIZER_THROTTLING_DELAY;
-
- /** Fragmentizer concurrent files. */
- private int fragmentizerConcurrentFiles = DFLT_FRAGMENTIZER_CONCURRENT_FILES;
-
- /** Fragmentizer local writes ratio. */
- private float fragmentizerLocWritesRatio = DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO;
-
- /** Fragmentizer enabled flag. */
- private boolean fragmentizerEnabled = DFLT_FRAGMENTIZER_ENABLED;
-
- /** Path modes. */
- private Map<String, IgfsMode> pathModes;
-
- /** Maximum space. */
- private long maxSpace;
-
- /** Trash purge await timeout. */
- private long trashPurgeTimeout = DFLT_TRASH_PURGE_TIMEOUT;
-
- /** Dual mode PUT operations executor service. */
- private ExecutorService dualModePutExec;
-
- /** Dual mode PUT operations executor service shutdown flag. */
- private boolean dualModePutExecShutdown;
-
- /** Maximum amount of data in pending puts. */
- private long dualModeMaxPendingPutsSize;
-
- /** Maximum range length. */
- private long maxTaskRangeLen;
-
- /**
- * Constructs default configuration.
- */
- public IgfsConfiguration() {
- // No-op.
- }
-
- /**
- * Constructs the copy of the configuration.
- *
- * @param cfg Configuration to copy.
- */
- public IgfsConfiguration(IgfsConfiguration cfg) {
- assert cfg != null;
-
- /*
- * Must preserve alphabetical order!
- */
- blockSize = cfg.getBlockSize();
- bufSize = cfg.getStreamBufferSize();
- dataCacheName = cfg.getDataCacheName();
- dfltMode = cfg.getDefaultMode();
- dualModeMaxPendingPutsSize = cfg.getDualModeMaxPendingPutsSize();
- dualModePutExec = cfg.getDualModePutExecutorService();
- dualModePutExecShutdown = cfg.getDualModePutExecutorServiceShutdown();
- fragmentizerConcurrentFiles = cfg.getFragmentizerConcurrentFiles();
- fragmentizerLocWritesRatio = cfg.getFragmentizerLocalWritesRatio();
- fragmentizerEnabled = cfg.isFragmentizerEnabled();
- fragmentizerThrottlingBlockLen = cfg.getFragmentizerThrottlingBlockLength();
- fragmentizerThrottlingDelay = cfg.getFragmentizerThrottlingDelay();
- secondaryFs = cfg.getSecondaryFileSystem();
- ipcEndpointCfg = cfg.getIpcEndpointConfiguration();
- ipcEndpointEnabled = cfg.isIpcEndpointEnabled();
- maxSpace = cfg.getMaxSpaceSize();
- maxTaskRangeLen = cfg.getMaximumTaskRangeLength();
- metaCacheName = cfg.getMetaCacheName();
- mgmtPort = cfg.getManagementPort();
- name = cfg.getName();
- pathModes = cfg.getPathModes();
- perNodeBatchSize = cfg.getPerNodeBatchSize();
- perNodeParallelBatchCnt = cfg.getPerNodeParallelBatchCount();
- prefetchBlocks = cfg.getPrefetchBlocks();
- seqReadsBeforePrefetch = cfg.getSequentialReadsBeforePrefetch();
- trashPurgeTimeout = cfg.getTrashPurgeTimeout();
- }
-
- /**
- * Gets IGFS instance name. If {@code null}, then instance with default
- * name will be used.
- *
- * @return IGFS instance name.
- */
- @Nullable public String getName() {
- return name;
- }
-
- /**
- * Sets IGFS instance name.
- *
- * @param name IGFS instance name.
- */
- public void setName(String name) {
- this.name = name;
- }
-
- /**
- * Cache name to store IGFS meta information. If {@code null}, then instance
- * with default meta-cache name will be used.
- *
- * @return Cache name to store IGFS meta information.
- */
- @Nullable public String getMetaCacheName() {
- return metaCacheName;
- }
-
- /**
- * Sets cache name to store IGFS meta information.
- *
- * @param metaCacheName Cache name to store IGFS meta information.
- */
- public void setMetaCacheName(String metaCacheName) {
- this.metaCacheName = metaCacheName;
- }
-
- /**
- * Cache name to store IGFS data.
- *
- * @return Cache name to store IGFS data.
- */
- @Nullable public String getDataCacheName() {
- return dataCacheName;
- }
-
- /**
- * Sets cache name to store IGFS data.
- *
- * @param dataCacheName Cache name to store IGFS data.
- */
- public void setDataCacheName(String dataCacheName) {
- this.dataCacheName = dataCacheName;
- }
-
- /**
- * Get file's data block size.
- *
- * @return File's data block size.
- */
- public int getBlockSize() {
- return blockSize;
- }
-
- /**
- * Sets file's data block size.
- *
- * @param blockSize File's data block size (bytes) or {@code 0} to reset default value.
- */
- public void setBlockSize(int blockSize) {
- A.ensure(blockSize >= 0, "blockSize >= 0");
-
- this.blockSize = blockSize == 0 ? DFLT_BLOCK_SIZE : blockSize;
- }
-
- /**
- * Get number of pre-fetched blocks if specific file's chunk is requested.
- *
- * @return The number of pre-fetched blocks.
- */
- public int getPrefetchBlocks() {
- return prefetchBlocks;
- }
-
- /**
- * Sets the number of pre-fetched blocks if specific file's chunk is requested.
- *
- * @param prefetchBlocks New number of pre-fetched blocks.
- */
- public void setPrefetchBlocks(int prefetchBlocks) {
- A.ensure(prefetchBlocks >= 0, "prefetchBlocks >= 0");
-
- this.prefetchBlocks = prefetchBlocks;
- }
-
- /**
- * Get amount of sequential block reads before prefetch is triggered. The
- * higher this value, the longer IGFS will wait before starting to prefetch
- * values ahead of time. Depending on the use case, this can either help
- * or hurt performance.
- * <p>
- * Default is {@code 0} which means that pre-fetching will start right away.
- * <h1 class="header">Integration With Hadoop</h1>
- * This parameter can be also overridden for individual Hadoop MapReduce tasks by passing
- * {@code fs.igfs.[name].open.sequential_reads_before_prefetch} configuration property directly to Hadoop
- * MapReduce task.
- * <p>
- * <b>NOTE:</b> Integration with Hadoop is available only in {@code In-Memory Accelerator For Hadoop} edition.
- *
- * @return Amount of sequential block reads.
- */
- public int getSequentialReadsBeforePrefetch() {
- return seqReadsBeforePrefetch;
- }
-
- /**
- * Sets amount of sequential block reads before prefetch is triggered. The
- * higher this value, the longer IGFS will wait before starting to prefetch
- * values ahead of time. Depending on the use case, this can either help
- * or hurt performance.
- * <p>
- * Default is {@code 0} which means that pre-fetching will start right away.
- * <h1 class="header">Integration With Hadoop</h1>
- * This parameter can be also overridden for individual Hadoop MapReduce tasks by passing
- * {@code fs.igfs.[name].open.sequential_reads_before_prefetch} configuration property directly to Hadoop
- * MapReduce task.
- * <p>
- * <b>NOTE:</b> Integration with Hadoop is available only in {@code In-Memory Accelerator For Hadoop} edition.
- *
- * @param seqReadsBeforePrefetch Amount of sequential block reads before prefetch is triggered.
- */
- public void setSequentialReadsBeforePrefetch(int seqReadsBeforePrefetch) {
- A.ensure(seqReadsBeforePrefetch >= 0, "seqReadsBeforePrefetch >= 0");
-
- this.seqReadsBeforePrefetch = seqReadsBeforePrefetch;
- }
-
- /**
- * Get read/write buffer size for {@code IGFS} stream operations in bytes.
- *
- * @return Read/write buffers size (bytes).
- */
- public int getStreamBufferSize() {
- return bufSize;
- }
-
- /**
- * Sets read/write buffers size for {@code IGFS} stream operations (bytes).
- *
- * @param bufSize Read/write buffers size for stream operations (bytes) or {@code 0} to reset default value.
- */
- public void setStreamBufferSize(int bufSize) {
- A.ensure(bufSize >= 0, "bufSize >= 0");
-
- this.bufSize = bufSize == 0 ? DFLT_BUF_SIZE : bufSize;
- }
-
- /**
- * Gets number of file blocks buffered on local node before sending batch to remote node.
- *
- * @return Per node buffer size.
- */
- public int getPerNodeBatchSize() {
- return perNodeBatchSize;
- }
-
- /**
- * Sets number of file blocks collected on local node before sending batch to remote node.
- *
- * @param perNodeBatchSize Per node buffer size.
- */
- public void setPerNodeBatchSize(int perNodeBatchSize) {
- this.perNodeBatchSize = perNodeBatchSize;
- }
-
- /**
- * Gets number of batches that can be concurrently sent to remote node.
- *
- * @return Number of batches for each node.
- */
- public int getPerNodeParallelBatchCount() {
- return perNodeParallelBatchCnt;
- }
-
- /**
- * Sets number of file block batches that can be concurrently sent to remote node.
- *
- * @param perNodeParallelBatchCnt Per node parallel load operations.
- */
- public void setPerNodeParallelBatchCount(int perNodeParallelBatchCnt) {
- this.perNodeParallelBatchCnt = perNodeParallelBatchCnt;
- }
-
- /**
- * Gets map of IPC endpoint configuration properties. There are 2 different
- * types of endpoint supported: {@code shared-memory}, and {@code TCP}.
- * <p>
- * The following configuration properties are supported for {@code shared-memory}
- * endpoint:
- * <ul>
- * <li>{@code type} - value is {@code shmem} to specify {@code shared-memory} approach.</li>
- * <li>{@code port} - endpoint port.</li>
- * <li>{@code size} - memory size allocated for single endpoint communication.</li>
- * <li>
- * {@code tokenDirectoryPath} - path, either absolute or relative to {@code IGNITE_HOME} to
- * store shared memory tokens.
- * </li>
- * </ul>
- * <p>
- * The following configuration properties are supported for {@code TCP} approach:
- * <ul>
- * <li>{@code type} - value is {@code tcp} to specify {@code TCP} approach.</li>
- * <li>{@code port} - endpoint bind port.</li>
- * <li>
- * {@code host} - endpoint bind host. If omitted '127.0.0.1' will be used.
- * </li>
- * </ul>
- * <p>
- * Note that {@code shared-memory} approach is not supported on Windows environments.
- * In case IGFS is failed to bind to particular port, further attempts will be performed every 3 seconds.
- *
- * @return Map of IPC endpoint configuration properties. In case the value is not set, defaults will be used. Default
- * type for Windows is "tcp", for all other platforms - "shmem". Default port is {@link #DFLT_IPC_PORT}.
- */
- @Nullable public Map<String,String> getIpcEndpointConfiguration() {
- return ipcEndpointCfg;
- }
-
- /**
- * Sets IPC endpoint configuration to publish IGFS over.
- *
- * @param ipcEndpointCfg Map of IPC endpoint config properties.
- */
- public void setIpcEndpointConfiguration(@Nullable Map<String,String> ipcEndpointCfg) {
- this.ipcEndpointCfg = ipcEndpointCfg;
- }
-
- /**
- * Get IPC endpoint enabled flag. In case it is set to {@code true} endpoint will be created and bound to specific
- * port. Otherwise endpoint will not be created. Default value is {@link #DFLT_IPC_ENDPOINT_ENABLED}.
- *
- * @return {@code True} in case endpoint is enabled.
- */
- public boolean isIpcEndpointEnabled() {
- return ipcEndpointEnabled;
- }
-
- /**
- * Set IPC endpoint enabled flag. See {@link #isIpcEndpointEnabled()}.
- *
- * @param ipcEndpointEnabled IPC endpoint enabled flag.
- */
- public void setIpcEndpointEnabled(boolean ipcEndpointEnabled) {
- this.ipcEndpointEnabled = ipcEndpointEnabled;
- }
-
- /**
- * Gets port number for management endpoint. All IGFS nodes should have this port open
- * for Visor Management Console to work with IGFS.
- * <p>
- * Default value is {@link #DFLT_MGMT_PORT}
- *
- * @return Port number or {@code -1} if management endpoint should be disabled.
- */
- public int getManagementPort() {
- return mgmtPort;
- }
-
- /**
- * Sets management endpoint port.
- *
- * @param mgmtPort port number or {@code -1} to disable management endpoint.
- */
- public void setManagementPort(int mgmtPort) {
- this.mgmtPort = mgmtPort;
- }
-
- /**
- * Gets mode to specify how {@code IGFS} interacts with Hadoop file system, like {@code HDFS}.
- * Secondary Hadoop file system is provided for pass-through, write-through, and read-through
- * purposes.
- * <p>
- * Default mode is {@link org.apache.ignite.igfs.IgfsMode#DUAL_ASYNC}. If secondary Hadoop file system is
- * not configured, this mode will work just like {@link org.apache.ignite.igfs.IgfsMode#PRIMARY} mode.
- *
- * @return Mode to specify how IGFS interacts with secondary HDFS file system.
- */
- public IgfsMode getDefaultMode() {
- return dfltMode;
- }
-
- /**
- * Sets {@code IGFS} mode to specify how it should interact with secondary
- * Hadoop file system, like {@code HDFS}. Secondary Hadoop file system is provided
- * for pass-through, write-through, and read-through purposes.
- *
- * @param dfltMode {@code IGFS} mode.
- */
- public void setDefaultMode(IgfsMode dfltMode) {
- this.dfltMode = dfltMode;
- }
-
- /**
- * Gets the secondary file system. Secondary file system is provided for pass-through, write-through,
- * and read-through purposes.
- *
- * @return Secondary file system.
- */
- public IgfsSecondaryFileSystem getSecondaryFileSystem() {
- return secondaryFs;
- }
-
- /**
- * Sets the secondary file system. Secondary file system is provided for pass-through, write-through,
- * and read-through purposes.
- *
- * @param fileSystem
- */
- public void setSecondaryFileSystem(IgfsSecondaryFileSystem fileSystem) {
- secondaryFs = fileSystem;
- }
-
- /**
- * Gets map of path prefixes to {@code IGFS} modes used for them.
- * <p>
- * If path doesn't correspond to any specified prefix or mappings are not provided, then
- * {@link #getDefaultMode()} is used.
- * <p>
- * Several folders under {@code '/apache/ignite'} folder have predefined mappings which cannot be overridden.
- * <li>{@code /apache/ignite/primary} and all it's sub-folders will always work in {@code PRIMARY} mode.</li>
- * <p>
- * And in case secondary file system URI is provided:
- * <li>{@code /apache/ignite/proxy} and all it's sub-folders will always work in {@code PROXY} mode.</li>
- * <li>{@code /apache/ignite/sync} and all it's sub-folders will always work in {@code DUAL_SYNC} mode.</li>
- * <li>{@code /apache/ignite/async} and all it's sub-folders will always work in {@code DUAL_ASYNC} mode.</li>
- *
- * @return Map of paths to {@code IGFS} modes.
- */
- @Nullable public Map<String, IgfsMode> getPathModes() {
- return pathModes;
- }
-
- /**
- * Sets map of path prefixes to {@code IGFS} modes used for them.
- * <p>
- * If path doesn't correspond to any specified prefix or mappings are not provided, then
- * {@link #getDefaultMode()} is used.
- *
- * @param pathModes Map of paths to {@code IGFS} modes.
- */
- public void setPathModes(Map<String, IgfsMode> pathModes) {
- this.pathModes = pathModes;
- }
-
- /**
- * Gets the length of file chunk to send before delaying the fragmentizer.
- *
- * @return File chunk length in bytes.
- */
- public long getFragmentizerThrottlingBlockLength() {
- return fragmentizerThrottlingBlockLen;
- }
-
- /**
- * Sets length of file chunk to transmit before throttling is delayed.
- *
- * @param fragmentizerThrottlingBlockLen Block length in bytes.
- */
- public void setFragmentizerThrottlingBlockLength(long fragmentizerThrottlingBlockLen) {
- this.fragmentizerThrottlingBlockLen = fragmentizerThrottlingBlockLen;
- }
-
- /**
- * Gets throttle delay for fragmentizer.
- *
- * @return Throttle delay in milliseconds.
- */
- public long getFragmentizerThrottlingDelay() {
- return fragmentizerThrottlingDelay;
- }
-
- /**
- * Sets delay in milliseconds for which fragmentizer is paused.
- *
- * @param fragmentizerThrottlingDelay Delay in milliseconds.
- */
- public void setFragmentizerThrottlingDelay(long fragmentizerThrottlingDelay) {
- this.fragmentizerThrottlingDelay = fragmentizerThrottlingDelay;
- }
-
- /**
- * Gets number of files that can be processed by fragmentizer concurrently.
- *
- * @return Number of files to process concurrently.
- */
- public int getFragmentizerConcurrentFiles() {
- return fragmentizerConcurrentFiles;
- }
-
- /**
- * Sets number of files to process concurrently by fragmentizer.
- *
- * @param fragmentizerConcurrentFiles Number of files to process concurrently.
- */
- public void setFragmentizerConcurrentFiles(int fragmentizerConcurrentFiles) {
- this.fragmentizerConcurrentFiles = fragmentizerConcurrentFiles;
- }
-
- /**
- * Gets amount of local memory (in % of local IGFS max space size) available for local writes
- * during file creation.
- * <p>
- * If current IGFS space size is less than {@code fragmentizerLocalWritesRatio * maxSpaceSize},
- * then file blocks will be written to the local node first and then asynchronously distributed
- * among cluster nodes (fragmentized).
- * <p>
- * Default value is {@link #DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO}.
- *
- * @return Ratio for local writes space.
- */
- public float getFragmentizerLocalWritesRatio() {
- return fragmentizerLocWritesRatio;
- }
-
- /**
- * Sets ratio for space available for local file writes.
- *
- * @param fragmentizerLocWritesRatio Ratio for local file writes.
- * @see #getFragmentizerLocalWritesRatio()
- */
- public void setFragmentizerLocalWritesRatio(float fragmentizerLocWritesRatio) {
- this.fragmentizerLocWritesRatio = fragmentizerLocWritesRatio;
- }
-
- /**
- * Gets flag indicating whether IGFS fragmentizer is enabled. If fragmentizer is disabled, files will be
- * written in distributed fashion.
- *
- * @return Flag indicating whether fragmentizer is enabled.
- */
- public boolean isFragmentizerEnabled() {
- return fragmentizerEnabled;
- }
-
- /**
- * Sets property indicating whether fragmentizer is enabled.
- *
- * @param fragmentizerEnabled {@code True} if fragmentizer is enabled.
- */
- public void setFragmentizerEnabled(boolean fragmentizerEnabled) {
- this.fragmentizerEnabled = fragmentizerEnabled;
- }
-
- /**
- * Get maximum space available for data cache to store file system entries.
- *
- * @return Maximum space available for data cache.
- */
- public long getMaxSpaceSize() {
- return maxSpace;
- }
-
- /**
- * Set maximum space in bytes available in data cache.
- *
- * @param maxSpace Maximum space available in data cache.
- */
- public void setMaxSpaceSize(long maxSpace) {
- this.maxSpace = maxSpace;
- }
-
- /**
- * Gets maximum timeout awaiting for trash purging in case data cache oversize is detected.
- *
- * @return Maximum timeout awaiting for trash purging in case data cache oversize is detected.
- */
- public long getTrashPurgeTimeout() {
- return trashPurgeTimeout;
- }
-
- /**
- * Sets maximum timeout awaiting for trash purging in case data cache oversize is detected.
- *
- * @param trashPurgeTimeout Maximum timeout awaiting for trash purging in case data cache oversize is detected.
- */
- public void setTrashPurgeTimeout(long trashPurgeTimeout) {
- this.trashPurgeTimeout = trashPurgeTimeout;
- }
-
- /**
- * Get DUAL mode put operation executor service. This executor service will process cache PUT requests for
- * data which came from the secondary file system and about to be written to IGFS data cache.
- * In case no executor service is provided, default one will be created with maximum amount of threads equals
- * to amount of processor cores.
- *
- * @return Get DUAL mode put operation executor service
- */
- @Nullable public ExecutorService getDualModePutExecutorService() {
- return dualModePutExec;
- }
-
- /**
- * Set DUAL mode put operations executor service.
- *
- * @param dualModePutExec Dual mode put operations executor service.
- */
- public void setDualModePutExecutorService(ExecutorService dualModePutExec) {
- this.dualModePutExec = dualModePutExec;
- }
-
- /**
- * Get DUAL mode put operation executor service shutdown flag.
- *
- * @return DUAL mode put operation executor service shutdown flag.
- */
- public boolean getDualModePutExecutorServiceShutdown() {
- return dualModePutExecShutdown;
- }
-
- /**
- * Set DUAL mode put operations executor service shutdown flag.
- *
- * @param dualModePutExecShutdown Dual mode put operations executor service shutdown flag.
- */
- public void setDualModePutExecutorServiceShutdown(boolean dualModePutExecShutdown) {
- this.dualModePutExecShutdown = dualModePutExecShutdown;
- }
-
- /**
- * Get maximum amount of pending data read from the secondary file system and waiting to be written to data
- * cache. {@code 0} or negative value stands for unlimited size.
- * <p>
- * By default this value is set to {@code 0}. It is recommended to set positive value in case your
- * application performs frequent reads of large amount of data from the secondary file system in order to
- * avoid issues with increasing GC pauses or out-of-memory error.
- *
- * @return Maximum amount of pending data read from the secondary file system
- */
- public long getDualModeMaxPendingPutsSize() {
- return dualModeMaxPendingPutsSize;
- }
-
- /**
- * Set maximum amount of data in pending put operations.
- *
- * @param dualModeMaxPendingPutsSize Maximum amount of data in pending put operations.
- */
- public void setDualModeMaxPendingPutsSize(long dualModeMaxPendingPutsSize) {
- this.dualModeMaxPendingPutsSize = dualModeMaxPendingPutsSize;
- }
-
- /**
- * Get maximum default range size of a file being split during IGFS task execution. When IGFS task is about to
- * be executed, it requests file block locations first. Each location is defined as {@link org.apache.ignite.igfs.mapreduce.IgfsFileRange} which
- * has length. In case this parameter is set to positive value, then IGFS will split single file range into smaller
- * ranges with length not greater that this parameter. The only exception to this case is when maximum task range
- * length is smaller than file block size. In this case maximum task range size will be overridden and set to file
- * block size.
- * <p>
- * Note that this parameter is applied when task is split into jobs before {@link org.apache.ignite.igfs.mapreduce.IgfsRecordResolver} is
- * applied. Therefore, final file ranges being assigned to particular jobs could be greater than value of this
- * parameter depending on file data layout and selected resolver type.
- * <p>
- * Setting this parameter might be useful when file is highly colocated and have very long consequent data chunks
- * so that task execution suffers from insufficient parallelism. E.g., in case you have one IGFS node in topology
- * and want to process 1Gb file, then only single range of length 1Gb will be returned. This will result in
- * a single job which will be processed in one thread. But in case you provide this configuration parameter and set
- * maximum range length to 16Mb, then 64 ranges will be returned resulting in 64 jobs which could be executed in
- * parallel.
- * <p>
- * Note that some {@code IgniteFs.execute()} methods can override value of this parameter.
- * <p>
- * In case value of this parameter is set to {@code 0} or negative value, it is simply ignored. Default value is
- * {@code 0}.
- *
- * @return Maximum range size of a file being split during IGFS task execution.
- */
- public long getMaximumTaskRangeLength() {
- return maxTaskRangeLen;
- }
-
- /**
- * Set maximum default range size of a file being split during IGFS task execution.
- * See {@link #getMaximumTaskRangeLength()} for more details.
- *
- * @param maxTaskRangeLen Set maximum default range size of a file being split during IGFS task execution.
- */
- public void setMaximumTaskRangeLength(long maxTaskRangeLen) {
- this.maxTaskRangeLen = maxTaskRangeLen;
- }
-
- /** {@inheritDoc} */
- @Override public String toString() {
- return S.toString(IgfsConfiguration.class, this);
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/8e2a08b9/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
index 1036e0e..8bd2f83 100644
--- a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
+++ b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
@@ -344,7 +344,7 @@ public class IgniteConfiguration {
private Map<IgnitePredicate<? extends Event>, int[]> lsnrs;
/** IGFS configuration. */
- private IgfsConfiguration[] igfsCfg;
+ private FileSystemConfiguration[] igfsCfg;
/** Streamer configuration. */
private StreamerConfiguration[] streamerCfg;
@@ -417,7 +417,7 @@ public class IgniteConfiguration {
ggHome = cfg.getIgniteHome();
ggWork = cfg.getWorkDirectory();
gridName = cfg.getGridName();
- igfsCfg = cfg.getIgfsConfiguration();
+ igfsCfg = cfg.getFileSystemConfiguration();
igfsPoolSize = cfg.getIgfsThreadPoolSize();
hadoopCfg = cfg.getHadoopConfiguration();
inclEvtTypes = cfg.getIncludeEventTypes();
@@ -1727,20 +1727,20 @@ public class IgniteConfiguration {
}
/**
- * Gets IGFS configurations.
+ * Gets IGFS (Ignite In-Memory File System) configurations.
*
* @return IGFS configurations.
*/
- public IgfsConfiguration[] getIgfsConfiguration() {
+ public FileSystemConfiguration[] getFileSystemConfiguration() {
return igfsCfg;
}
/**
- * Sets IGFS configurations.
+ * Sets IGFS (Ignite In-Memory File System) configurations.
*
* @param igfsCfg IGFS configurations.
*/
- public void setIgfsConfiguration(IgfsConfiguration... igfsCfg) {
+ public void setFileSystemConfiguration(FileSystemConfiguration... igfsCfg) {
this.igfsCfg = igfsCfg;
}
http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/8e2a08b9/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMetrics.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMetrics.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMetrics.java
index afdae1a..50b5435 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMetrics.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMetrics.java
@@ -33,7 +33,7 @@ public interface IgfsMetrics {
/**
* Gets maximum amount of data that can be stored on local node. This metrics is either
- * equal to {@link org.apache.ignite.configuration.IgfsConfiguration#getMaxSpaceSize()}, or, if it is {@code 0}, equal to
+ * equal to {@link org.apache.ignite.configuration.FileSystemConfiguration#getMaxSpaceSize()}, or, if it is {@code 0}, equal to
* {@code 80%} of maximum heap size allocated for JVM.
*
* @return Maximum IGFS local space size.
http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/8e2a08b9/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMode.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMode.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMode.java
index 3c440ab..2c9fcdd 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMode.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMode.java
@@ -24,7 +24,7 @@ import org.jetbrains.annotations.*;
* Secondary Hadoop file system is provided for pass-through, write-through, and
* read-through purposes.
* <p>
- * This mode is configured via {@link org.apache.ignite.configuration.IgfsConfiguration#getDefaultMode()}
+ * This mode is configured via {@link org.apache.ignite.configuration.FileSystemConfiguration#getDefaultMode()}
* configuration property.
*/
public enum IgfsMode {
@@ -39,7 +39,7 @@ public enum IgfsMode {
* through to secondary Hadoop file system. If this mode is enabled, then
* secondary Hadoop file system must be configured.
*
- * @see org.apache.ignite.configuration.IgfsConfiguration#getSecondaryFileSystem()
+ * @see org.apache.ignite.configuration.FileSystemConfiguration#getSecondaryFileSystem()
*/
PROXY,
@@ -50,7 +50,7 @@ public enum IgfsMode {
* If secondary Hadoop file system is not configured, then this mode behaves like
* {@link #PRIMARY} mode.
*
- * @see org.apache.ignite.configuration.IgfsConfiguration#getSecondaryFileSystem()
+ * @see org.apache.ignite.configuration.FileSystemConfiguration#getSecondaryFileSystem()
*/
DUAL_SYNC,
@@ -61,7 +61,7 @@ public enum IgfsMode {
* If secondary Hadoop file system is not configured, then this mode behaves like
* {@link #PRIMARY} mode.
*
- * @see org.apache.ignite.configuration.IgfsConfiguration#getSecondaryFileSystem()
+ * @see org.apache.ignite.configuration.FileSystemConfiguration#getSecondaryFileSystem()
*/
DUAL_ASYNC;
http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/8e2a08b9/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTask.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTask.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTask.java
index e257c38..9936140 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTask.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTask.java
@@ -37,7 +37,7 @@ import java.util.*;
* <p>
* Each file participating in IGFS task is split into {@link IgfsFileRange}s first. Normally range is a number of
* consequent bytes located on a single node (see {@code IgfssGroupDataBlocksKeyMapper}). In case maximum range size
- * is provided (either through {@link org.apache.ignite.configuration.IgfsConfiguration#getMaximumTaskRangeLength()} or {@code IgniteFs.execute()}
+ * is provided (either through {@link org.apache.ignite.configuration.FileSystemConfiguration#getMaximumTaskRangeLength()} or {@code IgniteFs.execute()}
* argument), then ranges could be further divided into smaller chunks.
* <p>
* Once file is split into ranges, each range is passed to {@code IgfsTask.createJob()} method in order to create a
http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/8e2a08b9/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTaskArgs.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTaskArgs.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTaskArgs.java
index 7db26ec..5ef5352 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTaskArgs.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTaskArgs.java
@@ -33,7 +33,7 @@ import java.util.*;
* <li>{@link IgfsRecordResolver} for that task</li>
* <li>Flag indicating whether to skip non-existent file paths or throw an exception</li>
* <li>User-defined task argument</li>
- * <li>Maximum file range length for that task (see {@link org.apache.ignite.configuration.IgfsConfiguration#getMaximumTaskRangeLength()})</li>
+ * <li>Maximum file range length for that task (see {@link org.apache.ignite.configuration.FileSystemConfiguration#getMaximumTaskRangeLength()})</li>
* </ul>
*/
public interface IgfsTaskArgs<T> {
http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/8e2a08b9/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystem.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystem.java b/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystem.java
index abefd74..089a8e3 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystem.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystem.java
@@ -25,20 +25,13 @@ import java.io.*;
import java.util.*;
/**
- * Common file system interface. It provides a typical generalized "view" of any file system:
- * <ul>
- * <li>list directories or get information for a single path</li>
- * <li>create/move/delete files or directories</li>
- * <li>write/read data streams into/from files</li>
- * </ul>
- *
- * This is the minimum of functionality that is needed to work as secondary file system in dual modes of IGFS.
+ * Secondary file system interface.
*/
public interface IgfsSecondaryFileSystem {
/**
- * Checks if the specified path exists in the file system.
+ * Checks if the specified path exists.
*
- * @param path Path to check for existence in the file system.
+ * @param path Path to check for existence.
* @return {@code True} if such file exists, otherwise - {@code false}.
* @throws IgniteException In case of error.
*/
http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/8e2a08b9/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystemPositionedReadable.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystemPositionedReadable.java b/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystemPositionedReadable.java
index 3b43c2a..3d36236 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystemPositionedReadable.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystemPositionedReadable.java
@@ -20,7 +20,7 @@ package org.apache.ignite.igfs.secondary;
import java.io.*;
/**
- * The simplest data input interface to read from secondary file system in dual modes.
+ * The simplest data input interface to read from secondary file system.
*/
public interface IgfsSecondaryFileSystemPositionedReadable extends Closeable {
/**
http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/8e2a08b9/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java
index 62a2b26..c6ea165 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java
@@ -704,7 +704,7 @@ public class IgniteKernal implements IgniteEx, IgniteMXBean, Externalizable {
// Starts lifecycle aware components.
U.startLifecycleAware(lifecycleAwares(cfg));
- addHelper(ctx, IGFS_HELPER.create(F.isEmpty(cfg.getIgfsConfiguration())));
+ addHelper(ctx, IGFS_HELPER.create(F.isEmpty(cfg.getFileSystemConfiguration())));
startProcessor(ctx, new IgnitePluginProcessor(ctx, cfg), attrs);
@@ -756,7 +756,7 @@ public class IgniteKernal implements IgniteEx, IgniteMXBean, Externalizable {
startProcessor(ctx, new GridRestProcessor(ctx), attrs);
startProcessor(ctx, new GridDataLoaderProcessor(ctx), attrs);
startProcessor(ctx, new GridStreamProcessor(ctx), attrs);
- startProcessor(ctx, (GridProcessor) IGFS.create(ctx, F.isEmpty(cfg.getIgfsConfiguration())), attrs);
+ startProcessor(ctx, (GridProcessor) IGFS.create(ctx, F.isEmpty(cfg.getFileSystemConfiguration())), attrs);
startProcessor(ctx, new GridContinuousProcessor(ctx), attrs);
startProcessor(ctx, (GridProcessor)(cfg.isPeerClassLoadingEnabled() ?
IgniteComponentType.HADOOP.create(ctx, true): // No-op when peer class loading is enabled.
http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/8e2a08b9/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java
index cb2efbf..2e8cfc1 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java
@@ -1606,15 +1606,15 @@ public class IgnitionEx {
if (myCfg.getPeerClassLoadingLocalClassPathExclude() == null)
myCfg.setPeerClassLoadingLocalClassPathExclude(EMPTY_STR_ARR);
- IgfsConfiguration[] igfsCfgs = myCfg.getIgfsConfiguration();
+ FileSystemConfiguration[] igfsCfgs = myCfg.getFileSystemConfiguration();
if (igfsCfgs != null) {
- IgfsConfiguration[] clone = igfsCfgs.clone();
+ FileSystemConfiguration[] clone = igfsCfgs.clone();
for (int i = 0; i < igfsCfgs.length; i++)
- clone[i] = new IgfsConfiguration(igfsCfgs[i]);
+ clone[i] = new FileSystemConfiguration(igfsCfgs[i]);
- myCfg.setIgfsConfiguration(clone);
+ myCfg.setFileSystemConfiguration(clone);
}
StreamerConfiguration[] streamerCfgs = myCfg.getStreamerConfiguration();
http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/8e2a08b9/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java
index 12ea535..fe88012 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java
@@ -236,10 +236,10 @@ public abstract class GridCacheAdapter<K, V> implements GridCache<K, V>,
mxBean = new CacheMetricsMXBeanImpl(this);
- IgfsConfiguration[] igfsCfgs = gridCfg.getIgfsConfiguration();
+ FileSystemConfiguration[] igfsCfgs = gridCfg.getFileSystemConfiguration();
if (igfsCfgs != null) {
- for (IgfsConfiguration igfsCfg : igfsCfgs) {
+ for (FileSystemConfiguration igfsCfg : igfsCfgs) {
if (F.eq(ctx.name(), igfsCfg.getDataCacheName())) {
if (!ctx.isNear()) {
igfsDataCache = true;