Posted to commits@hbase.apache.org by an...@apache.org on 2015/05/28 10:13:45 UTC
[4/6] hbase git commit: HBASE-13763 Handle the rename, annotation and typo stuff in MOB. (Jingcheng)
http://git-wip-us.apache.org/repos/asf/hbase/blob/b31a6acf/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/filecompactions/PartitionedMobFileCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/filecompactions/PartitionedMobFileCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/filecompactions/PartitionedMobFileCompactor.java
deleted file mode 100644
index b3c7d83..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/filecompactions/PartitionedMobFileCompactor.java
+++ /dev/null
@@ -1,636 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.mob.filecompactions;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.client.*;
-import org.apache.hadoop.hbase.io.HFileLink;
-import org.apache.hadoop.hbase.io.crypto.Encryption;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
-import org.apache.hadoop.hbase.mob.MobConstants;
-import org.apache.hadoop.hbase.mob.MobFileName;
-import org.apache.hadoop.hbase.mob.MobUtils;
-import org.apache.hadoop.hbase.mob.filecompactions.MobFileCompactionRequest.CompactionType;
-import org.apache.hadoop.hbase.mob.filecompactions.PartitionedMobFileCompactionRequest.CompactionPartition;
-import org.apache.hadoop.hbase.mob.filecompactions.PartitionedMobFileCompactionRequest.CompactionPartitionId;
-import org.apache.hadoop.hbase.regionserver.*;
-import org.apache.hadoop.hbase.regionserver.StoreFile.Writer;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-
-/**
- * An implementation of {@link MobFileCompactor} that compacts the mob files in partitions.
- */
-@InterfaceAudience.Private
-public class PartitionedMobFileCompactor extends MobFileCompactor {
-
- private static final Log LOG = LogFactory.getLog(PartitionedMobFileCompactor.class);
- protected long mergeableSize;
- protected int delFileMaxCount;
- /** The number of files compacted in a batch */
- protected int compactionBatchSize;
- protected int compactionKVMax;
-
- private Path tempPath;
- private Path bulkloadPath;
- private CacheConfig compactionCacheConfig;
- private Tag tableNameTag;
- private Encryption.Context cryptoContext = Encryption.Context.NONE;
-
- public PartitionedMobFileCompactor(Configuration conf, FileSystem fs, TableName tableName,
- HColumnDescriptor column, ExecutorService pool) throws IOException {
- super(conf, fs, tableName, column, pool);
- mergeableSize = conf.getLong(MobConstants.MOB_FILE_COMPACTION_MERGEABLE_THRESHOLD,
- MobConstants.DEFAULT_MOB_FILE_COMPACTION_MERGEABLE_THRESHOLD);
- delFileMaxCount = conf.getInt(MobConstants.MOB_DELFILE_MAX_COUNT,
- MobConstants.DEFAULT_MOB_DELFILE_MAX_COUNT);
- // default is 100
- compactionBatchSize = conf.getInt(MobConstants.MOB_FILE_COMPACTION_BATCH_SIZE,
- MobConstants.DEFAULT_MOB_FILE_COMPACTION_BATCH_SIZE);
- tempPath = new Path(MobUtils.getMobHome(conf), MobConstants.TEMP_DIR_NAME);
- bulkloadPath = new Path(tempPath, new Path(MobConstants.BULKLOAD_DIR_NAME, new Path(
- tableName.getNamespaceAsString(), tableName.getQualifierAsString())));
- compactionKVMax = this.conf.getInt(HConstants.COMPACTION_KV_MAX,
- HConstants.COMPACTION_KV_MAX_DEFAULT);
- Configuration copyOfConf = new Configuration(conf);
- copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
- compactionCacheConfig = new CacheConfig(copyOfConf);
- tableNameTag = new Tag(TagType.MOB_TABLE_NAME_TAG_TYPE, tableName.getName());
- cryptoContext = MobUtils.createEncryptionContext(copyOfConf, column);
- }
-
- @Override
- public List<Path> compact(List<FileStatus> files, boolean isForceAllFiles) throws IOException {
- if (files == null || files.isEmpty()) {
- LOG.info("No candidate mob files");
- return null;
- }
- LOG.info("isForceAllFiles: " + isForceAllFiles);
- // find the files to compact.
- PartitionedMobFileCompactionRequest request = select(files, isForceAllFiles);
- // compact the files.
- return performCompaction(request);
- }
-
- /**
- * Selects the mob files and del files to compact.
- * Iterates over the candidates to find all the del files and small mob files.
- * @param candidates All the candidates.
- * @param isForceAllFiles Whether to include all the mob files in the compaction.
- * @return A compaction request.
- * @throws IOException
- */
- protected PartitionedMobFileCompactionRequest select(List<FileStatus> candidates,
- boolean isForceAllFiles) throws IOException {
- Collection<FileStatus> allDelFiles = new ArrayList<FileStatus>();
- Map<CompactionPartitionId, CompactionPartition> filesToCompact =
- new HashMap<CompactionPartitionId, CompactionPartition>();
- int selectedFileCount = 0;
- int irrelevantFileCount = 0;
- for (FileStatus file : candidates) {
- if (!file.isFile()) {
- irrelevantFileCount++;
- continue;
- }
- // group the del files and small files.
- FileStatus linkedFile = file;
- if (HFileLink.isHFileLink(file.getPath())) {
- HFileLink link = HFileLink.buildFromHFileLinkPattern(conf, file.getPath());
- linkedFile = getLinkedFileStatus(link);
- if (linkedFile == null) {
- // If the linked file cannot be found, regard it as an irrelevant file
- irrelevantFileCount++;
- continue;
- }
- }
- if (StoreFileInfo.isDelFile(linkedFile.getPath())) {
- allDelFiles.add(file);
- } else if (isForceAllFiles || linkedFile.getLen() < mergeableSize) {
- // add all files if isForceAllFiles is true,
- // otherwise add the small files to the merge pool
- MobFileName fileName = MobFileName.create(linkedFile.getPath().getName());
- CompactionPartitionId id = new CompactionPartitionId(fileName.getStartKey(),
- fileName.getDate());
- CompactionPartition compactionPartition = filesToCompact.get(id);
- if (compactionPartition == null) {
- compactionPartition = new CompactionPartition(id);
- compactionPartition.addFile(file);
- filesToCompact.put(id, compactionPartition);
- } else {
- compactionPartition.addFile(file);
- }
- selectedFileCount++;
- }
- }
- PartitionedMobFileCompactionRequest request = new PartitionedMobFileCompactionRequest(
- filesToCompact.values(), allDelFiles);
- if (candidates.size() == (allDelFiles.size() + selectedFileCount + irrelevantFileCount)) {
- // all the files are selected
- request.setCompactionType(CompactionType.ALL_FILES);
- }
- LOG.info("The compaction type is " + request.getCompactionType() + ", the request has "
- + allDelFiles.size() + " del files, " + selectedFileCount + " selected files, and "
- + irrelevantFileCount + " irrelevant files");
- return request;
- }
-
- /**
- * Performs the compaction on the selected files.
- * <ol>
- * <li>Compacts the del files.</li>
- * <li>Compacts the selected small mob files and all the del files.</li>
- * <li>If all the candidates are selected, delete the del files.</li>
- * </ol>
- * @param request The compaction request.
- * @return The paths of new mob files generated in the compaction.
- * @throws IOException
- */
- protected List<Path> performCompaction(PartitionedMobFileCompactionRequest request)
- throws IOException {
- // merge the del files
- List<Path> delFilePaths = new ArrayList<Path>();
- for (FileStatus delFile : request.delFiles) {
- delFilePaths.add(delFile.getPath());
- }
- List<Path> newDelPaths = compactDelFiles(request, delFilePaths);
- List<StoreFile> newDelFiles = new ArrayList<StoreFile>();
- for (Path newDelPath : newDelPaths) {
- StoreFile sf = new StoreFile(fs, newDelPath, conf, compactionCacheConfig, BloomType.NONE);
- newDelFiles.add(sf);
- }
- LOG.info("After merging, there are " + newDelFiles.size() + " del files");
- // compact the mob files by partitions.
- List<Path> paths = compactMobFiles(request, newDelFiles);
- LOG.info("After compaction, there are " + paths.size() + " mob files");
- // archive the del files if all the mob files are selected.
- if (request.type == CompactionType.ALL_FILES && !newDelPaths.isEmpty()) {
- LOG.info("After a mob file compaction with all files selected, archiving the del files "
- + newDelFiles);
- try {
- MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(), newDelFiles);
- } catch (IOException e) {
- LOG.error("Failed to archive the del files " + newDelFiles, e);
- }
- }
- return paths;
- }
-
- /**
- * Compacts the selected small mob files and all the del files.
- * @param request The compaction request.
- * @param delFiles The del files.
- * @return The paths of new mob files after compactions.
- * @throws IOException
- */
- protected List<Path> compactMobFiles(final PartitionedMobFileCompactionRequest request,
- final List<StoreFile> delFiles) throws IOException {
- Collection<CompactionPartition> partitions = request.compactionPartitions;
- if (partitions == null || partitions.isEmpty()) {
- LOG.info("No partitions of mob files");
- return Collections.emptyList();
- }
- List<Path> paths = new ArrayList<Path>();
- Connection c = ConnectionFactory.createConnection(conf);
- final Table table = c.getTable(tableName);
- try {
- Map<CompactionPartitionId, Future<List<Path>>> results =
- new HashMap<CompactionPartitionId, Future<List<Path>>>();
- // compact the mob files by partitions in parallel.
- for (final CompactionPartition partition : partitions) {
- results.put(partition.getPartitionId(), pool.submit(new Callable<List<Path>>() {
- @Override
- public List<Path> call() throws Exception {
- LOG.info("Compacting mob files for partition " + partition.getPartitionId());
- return compactMobFilePartition(request, partition, delFiles, table);
- }
- }));
- }
- // collect the compaction results of each partition.
- boolean hasFailure = false;
- for (Entry<CompactionPartitionId, Future<List<Path>>> result : results.entrySet()) {
- try {
- paths.addAll(result.getValue().get());
- } catch (Exception e) {
- // just log the error
- LOG.error("Failed to compact the partition " + result.getKey(), e);
- hasFailure = true;
- }
- }
- if (hasFailure) {
- // if any partition fails in the compaction, directly throw an exception.
- throw new IOException("Failed to compact the partitions");
- }
- } finally {
- try {
- table.close();
- } catch (IOException e) {
- LOG.error("Failed to close the HTable", e);
- }
- // close the connection opened for this compaction as well.
- try {
- c.close();
- } catch (IOException e) {
- LOG.error("Failed to close the Connection", e);
- }
- }
- return paths;
- }
-
- /**
- * Compacts a partition of selected small mob files and all the del files.
- * @param request The compaction request.
- * @param partition A compaction partition.
- * @param delFiles The del files.
- * @param table The current table.
- * @return The paths of new mob files after compactions.
- * @throws IOException
- */
- private List<Path> compactMobFilePartition(PartitionedMobFileCompactionRequest request,
- CompactionPartition partition, List<StoreFile> delFiles, Table table) throws IOException {
- List<Path> newFiles = new ArrayList<Path>();
- List<FileStatus> files = partition.listFiles();
- int offset = 0;
- Path bulkloadPathOfPartition = new Path(bulkloadPath, partition.getPartitionId().toString());
- Path bulkloadColumnPath = new Path(bulkloadPathOfPartition, column.getNameAsString());
- while (offset < files.size()) {
- int batch = compactionBatchSize;
- if (files.size() - offset < compactionBatchSize) {
- batch = files.size() - offset;
- }
- if (batch == 1 && delFiles.isEmpty()) {
- // only one file left and no del files, do not compact it,
- // and directly add it to the new files.
- newFiles.add(files.get(offset).getPath());
- offset++;
- continue;
- }
- // clean the bulkload directory to avoid loading old files.
- fs.delete(bulkloadPathOfPartition, true);
- // add the selected mob files and del files into filesToCompact
- List<StoreFile> filesToCompact = new ArrayList<StoreFile>();
- for (int i = offset; i < batch + offset; i++) {
- StoreFile sf = new StoreFile(fs, files.get(i).getPath(), conf, compactionCacheConfig,
- BloomType.NONE);
- filesToCompact.add(sf);
- }
- filesToCompact.addAll(delFiles);
- // compact the mob files in a batch.
- compactMobFilesInBatch(request, partition, table, filesToCompact, batch,
- bulkloadPathOfPartition, bulkloadColumnPath, newFiles);
- // move to the next batch.
- offset += batch;
- }
- LOG.info("Compaction is finished. The number of mob files is changed from " + files.size()
- + " to " + newFiles.size());
- return newFiles;
- }
-
- /**
- * Compacts a partition of selected small mob files and all the del files in a batch.
- * @param request The compaction request.
- * @param partition A compaction partition.
- * @param table The current table.
- * @param filesToCompact The files to be compacted.
- * @param batch The number of mob files to be compacted in a batch.
- * @param bulkloadPathOfPartition The directory where the bulkload files of the current
- * partition are saved.
- * @param bulkloadColumnPath The directory under the partition directory where the bulkload
- * files of the current column family are saved.
- * @param newFiles The paths of new mob files after compactions.
- * @throws IOException
- */
- private void compactMobFilesInBatch(PartitionedMobFileCompactionRequest request,
- CompactionPartition partition, Table table, List<StoreFile> filesToCompact, int batch,
- Path bulkloadPathOfPartition, Path bulkloadColumnPath, List<Path> newFiles)
- throws IOException {
- // open scanner to the selected mob files and del files.
- StoreScanner scanner = createScanner(filesToCompact, ScanType.COMPACT_DROP_DELETES);
- // the mob files to be compacted, not including the del files.
- List<StoreFile> mobFilesToCompact = filesToCompact.subList(0, batch);
- // Pair(maxSeqId, cellsCount)
- Pair<Long, Long> fileInfo = getFileInfo(mobFilesToCompact);
- // open writers for the mob files and new ref store files.
- Writer writer = null;
- Writer refFileWriter = null;
- Path filePath = null;
- Path refFilePath = null;
- long mobCells = 0;
- try {
- writer = MobUtils.createWriter(conf, fs, column, partition.getPartitionId().getDate(),
- tempPath, Long.MAX_VALUE, column.getCompactionCompression(), partition.getPartitionId()
- .getStartKey(), compactionCacheConfig, cryptoContext);
- filePath = writer.getPath();
- byte[] fileName = Bytes.toBytes(filePath.getName());
- // create a temp file and open a writer for it in the bulkloadPath
- refFileWriter = MobUtils.createRefFileWriter(conf, fs, column, bulkloadColumnPath, fileInfo
- .getSecond().longValue(), compactionCacheConfig, cryptoContext);
- refFilePath = refFileWriter.getPath();
- List<Cell> cells = new ArrayList<Cell>();
- boolean hasMore = false;
- ScannerContext scannerContext =
- ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
- do {
- hasMore = scanner.next(cells, scannerContext);
- for (Cell cell : cells) {
- // write the mob cell to the mob file.
- writer.append(cell);
- // write the new reference cell to the store file.
- KeyValue reference = MobUtils.createMobRefKeyValue(cell, fileName, tableNameTag);
- refFileWriter.append(reference);
- mobCells++;
- }
- cells.clear();
- } while (hasMore);
- } finally {
- // close the scanner.
- scanner.close();
- // append metadata to the mob file, and close the mob file writer.
- closeMobFileWriter(writer, fileInfo.getFirst(), mobCells);
- // append metadata and bulkload info to the ref mob file, and close the writer.
- closeRefFileWriter(refFileWriter, fileInfo.getFirst(), request.selectionTime);
- }
- if (mobCells > 0) {
- // commit mob file
- MobUtils.commitFile(conf, fs, filePath, mobFamilyDir, compactionCacheConfig);
- // bulkload the ref file
- bulkloadRefFile(table, bulkloadPathOfPartition, filePath.getName());
- newFiles.add(new Path(mobFamilyDir, filePath.getName()));
- } else {
- // remove the new files
- // the mob file is empty, delete it instead of committing.
- deletePath(filePath);
- // the ref file is empty, delete it instead of committing.
- deletePath(refFilePath);
- }
- // archive the old mob files, do not archive the del files.
- try {
- MobUtils
- .removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(), mobFilesToCompact);
- } catch (IOException e) {
- LOG.error("Failed to archive the files " + mobFilesToCompact, e);
- }
- }
-
- /**
- * Compacts the del files in batches, which avoids opening too many files at once.
- * @param request The compaction request.
- * @param delFilePaths The paths of the del files.
- * @return The paths of new del files after merging or the original files if no merging
- * is necessary.
- * @throws IOException
- */
- protected List<Path> compactDelFiles(PartitionedMobFileCompactionRequest request,
- List<Path> delFilePaths) throws IOException {
- if (delFilePaths.size() <= delFileMaxCount) {
- return delFilePaths;
- }
- // when there are more del files than the number allowed, merge them first.
- int offset = 0;
- List<Path> paths = new ArrayList<Path>();
- while (offset < delFilePaths.size()) {
- // get the batch
- int batch = compactionBatchSize;
- if (delFilePaths.size() - offset < compactionBatchSize) {
- batch = delFilePaths.size() - offset;
- }
- List<StoreFile> batchedDelFiles = new ArrayList<StoreFile>();
- if (batch == 1) {
- // only one file left, do not compact it, directly add it to the new files.
- paths.add(delFilePaths.get(offset));
- offset++;
- continue;
- }
- for (int i = offset; i < batch + offset; i++) {
- batchedDelFiles.add(new StoreFile(fs, delFilePaths.get(i), conf, compactionCacheConfig,
- BloomType.NONE));
- }
- // compact the del files in a batch.
- paths.add(compactDelFilesInBatch(request, batchedDelFiles));
- // move to the next batch.
- offset += batch;
- }
- return compactDelFiles(request, paths);
- }
-
- /**
- * Compacts the del files in a batch.
- * @param request The compaction request.
- * @param delFiles The del files.
- * @return The path of new del file after merging.
- * @throws IOException
- */
- private Path compactDelFilesInBatch(PartitionedMobFileCompactionRequest request,
- List<StoreFile> delFiles) throws IOException {
- // create a scanner for the del files.
- StoreScanner scanner = createScanner(delFiles, ScanType.COMPACT_RETAIN_DELETES);
- Writer writer = null;
- Path filePath = null;
- try {
- writer = MobUtils.createDelFileWriter(conf, fs, column,
- MobUtils.formatDate(new Date(request.selectionTime)), tempPath, Long.MAX_VALUE,
- column.getCompactionCompression(), HConstants.EMPTY_START_ROW, compactionCacheConfig,
- cryptoContext);
- filePath = writer.getPath();
- List<Cell> cells = new ArrayList<Cell>();
- boolean hasMore = false;
- ScannerContext scannerContext =
- ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
- do {
- hasMore = scanner.next(cells, scannerContext);
- for (Cell cell : cells) {
- writer.append(cell);
- }
- cells.clear();
- } while (hasMore);
- } finally {
- scanner.close();
- if (writer != null) {
- try {
- writer.close();
- } catch (IOException e) {
- LOG.error("Failed to close the writer of the file " + filePath, e);
- }
- }
- }
- // commit the new del file
- Path path = MobUtils.commitFile(conf, fs, filePath, mobFamilyDir, compactionCacheConfig);
- // archive the old del files
- try {
- MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(), delFiles);
- } catch (IOException e) {
- LOG.error("Failed to archive the old del files " + delFiles, e);
- }
- return path;
- }
-
- /**
- * Creates a store scanner.
- * @param filesToCompact The files to be compacted.
- * @param scanType The scan type.
- * @return The store scanner.
- * @throws IOException
- */
- private StoreScanner createScanner(List<StoreFile> filesToCompact, ScanType scanType)
- throws IOException {
- List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(filesToCompact,
- false, true, false, null, HConstants.LATEST_TIMESTAMP);
- Scan scan = new Scan();
- scan.setMaxVersions(column.getMaxVersions());
- long ttl = HStore.determineTTLFromFamily(column);
- ScanInfo scanInfo = new ScanInfo(column, ttl, 0, CellComparator.COMPARATOR);
- StoreScanner scanner = new StoreScanner(scan, scanInfo, scanType, null, scanners, 0L,
- HConstants.LATEST_TIMESTAMP);
- return scanner;
- }
-
- /**
- * Bulkloads the current file.
- * @param table The current table.
- * @param bulkloadDirectory The path of bulkload directory.
- * @param fileName The current file name.
- * @throws IOException
- */
- private void bulkloadRefFile(Table table, Path bulkloadDirectory, String fileName)
- throws IOException {
- // bulkload the ref file
- try {
- LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf);
- bulkload.doBulkLoad(bulkloadDirectory, (HTable)table);
- } catch (Exception e) {
- // delete the committed mob file
- deletePath(new Path(mobFamilyDir, fileName));
- throw new IOException(e);
- } finally {
- // delete the bulkload files in bulkloadPath
- deletePath(bulkloadDirectory);
- }
- }
-
- /**
- * Closes the mob file writer.
- * @param writer The mob file writer.
- * @param maxSeqId Maximum sequence id.
- * @param mobCellsCount The number of mob cells.
- * @throws IOException
- */
- private void closeMobFileWriter(Writer writer, long maxSeqId, long mobCellsCount)
- throws IOException {
- if (writer != null) {
- writer.appendMetadata(maxSeqId, false, mobCellsCount);
- try {
- writer.close();
- } catch (IOException e) {
- LOG.error("Failed to close the writer of the file " + writer.getPath(), e);
- }
- }
- }
-
- /**
- * Closes the ref file writer.
- * @param writer The ref file writer.
- * @param maxSeqId Maximum sequence id.
- * @param bulkloadTime The timestamp at which the bulk load file is created.
- * @throws IOException
- */
- private void closeRefFileWriter(Writer writer, long maxSeqId, long bulkloadTime)
- throws IOException {
- if (writer != null) {
- writer.appendMetadata(maxSeqId, false);
- writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(bulkloadTime));
- try {
- writer.close();
- } catch (IOException e) {
- LOG.error("Failed to close the writer of the ref file " + writer.getPath(), e);
- }
- }
- }
-
- /**
- * Gets the max seqId and number of cells of the store files.
- * @param storeFiles The store files.
- * @return The pair of the max seqId and number of cells of the store files.
- * @throws IOException
- */
- private Pair<Long, Long> getFileInfo(List<StoreFile> storeFiles) throws IOException {
- long maxSeqId = 0;
- long maxKeyCount = 0;
- for (StoreFile sf : storeFiles) {
- // the readers will be closed later after the merge.
- maxSeqId = Math.max(maxSeqId, sf.getMaxSequenceId());
- byte[] count = sf.createReader().loadFileInfo().get(StoreFile.MOB_CELLS_COUNT);
- if (count != null) {
- maxKeyCount += Bytes.toLong(count);
- }
- }
- return new Pair<Long, Long>(Long.valueOf(maxSeqId), Long.valueOf(maxKeyCount));
- }
-
- /**
- * Deletes a file.
- * @param path The path of the file to be deleted.
- */
- private void deletePath(Path path) {
- try {
- if (path != null) {
- fs.delete(path, true);
- }
- } catch (IOException e) {
- LOG.error("Failed to delete the file " + path, e);
- }
- }
-
- private FileStatus getLinkedFileStatus(HFileLink link) throws IOException {
- Path[] locations = link.getLocations();
- for (Path location : locations) {
- FileStatus file = getFileStatus(location);
- if (file != null) {
- return file;
- }
- }
- return null;
- }
-
- private FileStatus getFileStatus(Path path) throws IOException {
- try {
- if (path != null) {
- FileStatus file = fs.getFileStatus(path);
- return file;
- }
- } catch (FileNotFoundException e) {
- LOG.warn("The file " + path + " can not be found", e);
- }
- return null;
- }
-}
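The control flow of compactDelFiles above is the one non-obvious algorithm in this class: merge del files in batches of compactionBatchSize, then recurse on the merged outputs until no more than delFileMaxCount remain. A minimal standalone sketch of that loop, with the HBase store-file types replaced by plain strings; mergeBatch and the two constants are stand-ins for compactDelFilesInBatch and the MobConstants defaults, not the real API:

    import java.util.ArrayList;
    import java.util.List;

    public class DelFileMergeSketch {
      static final int BATCH_SIZE = 100;  // DEFAULT_MOB_FILE_COMPACTION_BATCH_SIZE
      static final int MAX_DEL_FILES = 3; // DEFAULT_MOB_DELFILE_MAX_COUNT (assumed default)

      // Stand-in for compactDelFilesInBatch: merges one batch into one new del file.
      static String mergeBatch(List<String> batch) {
        return "merged(" + batch.size() + ")";
      }

      // Mirrors compactDelFiles: merge in batches, then recurse on the merged
      // outputs until no more than MAX_DEL_FILES remain.
      static List<String> compactDelFiles(List<String> paths) {
        if (paths.size() <= MAX_DEL_FILES) {
          return paths;
        }
        List<String> merged = new ArrayList<>();
        int offset = 0;
        while (offset < paths.size()) {
          int batch = Math.min(BATCH_SIZE, paths.size() - offset);
          if (batch == 1) {
            // a single trailing file is carried over untouched, as in the original
            merged.add(paths.get(offset));
            offset++;
            continue;
          }
          merged.add(mergeBatch(paths.subList(offset, offset + batch)));
          offset += batch;
        }
        return compactDelFiles(merged);
      }

      public static void main(String[] args) {
        List<String> paths = new ArrayList<>();
        for (int i = 0; i < 250; i++) {
          paths.add("del-" + i);
        }
        // 250 files -> batches of 100, 100, 50 -> 3 merged files -> recursion stops
        System.out.println(compactDelFiles(paths));
      }
    }

With 250 inputs and a batch size of 100, one pass yields 3 merged files, which already satisfies the assumed default delFileMaxCount of 3 and ends the recursion.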
http://git-wip-us.apache.org/repos/asf/hbase/blob/b31a6acf/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MemStoreWrapper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MemStoreWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MemStoreWrapper.java
index 458e187..82d03cd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MemStoreWrapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MemStoreWrapper.java
@@ -39,8 +39,8 @@ import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionPartitionId;
import org.apache.hadoop.hbase.mob.mapreduce.SweepJob.SweepCounter;
-import org.apache.hadoop.hbase.mob.mapreduce.SweepReducer.SweepPartitionId;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.MemStore;
import org.apache.hadoop.hbase.regionserver.MemStoreSnapshot;
@@ -68,7 +68,7 @@ public class MemStoreWrapper {
private MemStore memstore;
private long flushSize;
- private SweepPartitionId partitionId;
+ private CompactionPartitionId partitionId;
private Context context;
private Configuration conf;
private BufferedMutator table;
@@ -78,8 +78,8 @@ public class MemStoreWrapper {
private CacheConfig cacheConfig;
private Encryption.Context cryptoContext = Encryption.Context.NONE;
- public MemStoreWrapper(Context context, FileSystem fs, BufferedMutator table, HColumnDescriptor hcd,
- MemStore memstore, CacheConfig cacheConfig) throws IOException {
+ public MemStoreWrapper(Context context, FileSystem fs, BufferedMutator table,
+ HColumnDescriptor hcd, MemStore memstore, CacheConfig cacheConfig) throws IOException {
this.memstore = memstore;
this.context = context;
this.fs = fs;
@@ -93,7 +93,7 @@ public class MemStoreWrapper {
cryptoContext = MobUtils.createEncryptionContext(conf, hcd);
}
- public void setPartitionId(SweepPartitionId partitionId) {
+ public void setPartitionId(CompactionPartitionId partitionId) {
this.partitionId = partitionId;
}
@@ -155,16 +155,19 @@ public class MemStoreWrapper {
scanner = snapshot.getScanner();
scanner.seek(KeyValueUtil.createFirstOnRow(HConstants.EMPTY_START_ROW));
cell = null;
- Tag tableNameTag = new Tag(TagType.MOB_TABLE_NAME_TAG_TYPE, Bytes.toBytes(this.table.getName().toString()));
+ Tag tableNameTag = new Tag(TagType.MOB_TABLE_NAME_TAG_TYPE, Bytes.toBytes(this.table.getName()
+ .toString()));
+ long updatedCount = 0;
while (null != (cell = scanner.next())) {
KeyValue reference = MobUtils.createMobRefKeyValue(cell, referenceValue, tableNameTag);
Put put =
new Put(reference.getRowArray(), reference.getRowOffset(), reference.getRowLength());
put.add(reference);
table.mutate(put);
- context.getCounter(SweepCounter.RECORDS_UPDATED).increment(1);
+ updatedCount++;
}
table.flush();
+ context.getCounter(SweepCounter.RECORDS_UPDATED).increment(updatedCount);
scanner.close();
}
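The counter change in this hunk hoists the MapReduce counter update out of the per-record loop: the reducer accumulates a local updatedCount and bumps RECORDS_UPDATED once, after table.flush() has run, so the increment is both cheaper and only applied once the mutations have actually been pushed to the table. A sketch of the pattern, with a minimal stand-in for Hadoop's Counter:

    public class CounterBatchSketch {
      // Minimal stand-in for org.apache.hadoop.mapreduce.Counter.
      static class Counter {
        private long value;
        void increment(long incr) { value += incr; }
        long getValue() { return value; }
      }

      public static void main(String[] args) {
        Counter recordsUpdated = new Counter();
        long updatedCount = 0;
        for (int i = 0; i < 1000; i++) {
          // ... building the reference Put and table.mutate(put) would go here ...
          updatedCount++; // cheap local accumulation inside the loop
        }
        // table.flush() happens here in MemStoreWrapper; only afterwards is the
        // framework counter bumped, once, by the full amount.
        recordsUpdated.increment(updatedCount);
        System.out.println(recordsUpdated.getValue()); // 1000
      }
    }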
http://git-wip-us.apache.org/repos/asf/hbase/blob/b31a6acf/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepJob.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepJob.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepJob.java
index 6e4ea98..5da220f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepJob.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepJob.java
@@ -85,12 +85,12 @@ public class SweepJob {
private final FileSystem fs;
private final Configuration conf;
private static final Log LOG = LogFactory.getLog(SweepJob.class);
- static final String SWEEP_JOB_ID = "mob.sweep.job.id";
- static final String SWEEP_JOB_SERVERNAME = "mob.sweep.job.servername";
- static final String SWEEP_JOB_TABLE_NODE = "mob.sweep.job.table.node";
- static final String WORKING_DIR_KEY = "mob.sweep.job.dir";
- static final String WORKING_ALLNAMES_FILE_KEY = "mob.sweep.job.all.file";
- static final String WORKING_VISITED_DIR_KEY = "mob.sweep.job.visited.dir";
+ static final String SWEEP_JOB_ID = "hbase.mob.sweep.job.id";
+ static final String SWEEP_JOB_SERVERNAME = "hbase.mob.sweep.job.servername";
+ static final String SWEEP_JOB_TABLE_NODE = "hbase.mob.sweep.job.table.node";
+ static final String WORKING_DIR_KEY = "hbase.mob.sweep.job.dir";
+ static final String WORKING_ALLNAMES_FILE_KEY = "hbase.mob.sweep.job.all.file";
+ static final String WORKING_VISITED_DIR_KEY = "hbase.mob.sweep.job.visited.dir";
static final String WORKING_ALLNAMES_DIR = "all";
static final String WORKING_VISITED_DIR = "visited";
public static final String WORKING_FILES_DIR_KEY = "mob.sweep.job.files.dir";
@@ -228,7 +228,7 @@ public class SweepJob {
try {
lock.release();
} catch (IOException e) {
- LOG.error("Fail to release the table lock " + tableName, e);
+ LOG.error("Failed to release the table lock " + tableName, e);
}
}
}
@@ -435,7 +435,7 @@ public class SweepJob {
FSUtils.getTableDir(MobUtils.getMobHome(conf), tn), hcd.getName(), storeFiles);
LOG.info(storeFiles.size() + " unused MOB files are removed");
} catch (Exception e) {
- LOG.error("Fail to archive the store files " + storeFiles, e);
+ LOG.error("Failed to archive the store files " + storeFiles, e);
}
}
}
@@ -452,7 +452,7 @@ public class SweepJob {
try {
fs.delete(workingPath, true);
} catch (IOException e) {
- LOG.warn("Fail to delete the working directory after sweeping store " + familyName
+ LOG.warn("Failed to delete the working directory after sweeping store " + familyName
+ " in the table " + tn.getNameAsString(), e);
}
}
@@ -480,10 +480,12 @@ public class SweepJob {
@Override
public int compareTo(IndexedResult o) {
- if (this.value == null) {
+ if (this.value == null && o.getValue() == null) {
return 0;
} else if (o.value == null) {
return 1;
+ } else if (this.value == null) {
+ return -1;
} else {
return this.value.compareTo(o.value);
}
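The compareTo fix above is worth dwelling on: the old code returned 0 whenever this.value was null, so compare(null, "x") was 0 while compare("x", null) was 1, violating the sgn(compare(x, y)) == -sgn(compare(y, x)) contract that sorted collections depend on. The patched version treats two nulls as equal and orders a lone null first. A runnable sketch of the corrected ordering:

    import java.util.Arrays;

    public class NullFirstCompareSketch {
      // Mirrors the patched IndexedResult.compareTo: two nulls are equal,
      // a lone null sorts first, otherwise delegate to String ordering.
      static int compare(String a, String b) {
        if (a == null && b == null) {
          return 0;
        } else if (b == null) {
          return 1;
        } else if (a == null) {
          return -1;
        } else {
          return a.compareTo(b);
        }
      }

      public static void main(String[] args) {
        System.out.println(compare(null, "x"));  // -1 (pre-patch: 0)
        System.out.println(compare("x", null));  // 1
        System.out.println(compare(null, null)); // 0
        String[] values = { "b", null, "a" };
        Arrays.sort(values, NullFirstCompareSketch::compare);
        System.out.println(Arrays.toString(values)); // [null, a, b]
      }
    }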
http://git-wip-us.apache.org/repos/asf/hbase/blob/b31a6acf/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepReducer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepReducer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepReducer.java
index 787b242..a2dfa29 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepReducer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepReducer.java
@@ -43,7 +43,11 @@ import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.BufferedMutator;
+import org.apache.hadoop.hbase.client.BufferedMutatorParams;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
@@ -51,6 +55,7 @@ import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.mob.MobFile;
import org.apache.hadoop.hbase.mob.MobFileName;
import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionPartitionId;
import org.apache.hadoop.hbase.mob.mapreduce.SweepJob.DummyMobAbortable;
import org.apache.hadoop.hbase.mob.mapreduce.SweepJob.SweepCounter;
import org.apache.hadoop.hbase.regionserver.BloomType;
@@ -120,7 +125,7 @@ public class SweepReducer extends Reducer<Text, KeyValue, Writable, Writable> {
try {
admin.close();
} catch (IOException e) {
- LOG.warn("Fail to close the HBaseAdmin", e);
+ LOG.warn("Failed to close the HBaseAdmin", e);
}
}
// disable the block cache.
@@ -138,7 +143,8 @@ public class SweepReducer extends Reducer<Text, KeyValue, Writable, Writable> {
mobTableDir = FSUtils.getTableDir(MobUtils.getMobHome(conf), tn);
}
- private SweepPartition createPartition(SweepPartitionId id, Context context) throws IOException {
+ private SweepPartition createPartition(CompactionPartitionId id, Context context)
+ throws IOException {
return new SweepPartition(id, context);
}
@@ -161,13 +167,13 @@ public class SweepReducer extends Reducer<Text, KeyValue, Writable, Writable> {
fout = fs.create(nameFilePath, true);
writer = SequenceFile.createWriter(context.getConfiguration(), fout, String.class,
String.class, CompressionType.NONE, null);
- SweepPartitionId id;
+ CompactionPartitionId id;
SweepPartition partition = null;
// the mob files which have the same start key and date are in the same partition.
while (context.nextKey()) {
Text key = context.getCurrentKey();
String keyString = key.toString();
- id = SweepPartitionId.create(keyString);
+ id = createPartitionId(keyString);
if (null == partition || !id.equals(partition.getId())) {
// It's the first mob file in the current partition.
if (null != partition) {
@@ -215,21 +221,21 @@ public class SweepReducer extends Reducer<Text, KeyValue, Writable, Writable> {
*/
public class SweepPartition {
- private final SweepPartitionId id;
+ private final CompactionPartitionId id;
private final Context context;
private boolean memstoreUpdated = false;
private boolean mergeSmall = false;
private final Map<String, MobFileStatus> fileStatusMap = new HashMap<String, MobFileStatus>();
private final List<Path> toBeDeleted = new ArrayList<Path>();
- public SweepPartition(SweepPartitionId id, Context context) throws IOException {
+ public SweepPartition(CompactionPartitionId id, Context context) throws IOException {
this.id = id;
this.context = context;
memstore.setPartitionId(id);
init();
}
- public SweepPartitionId getId() {
+ public CompactionPartitionId getId() {
return this.id;
}
@@ -294,7 +300,7 @@ public class SweepReducer extends Reducer<Text, KeyValue, Writable, Writable> {
storeFiles);
context.getCounter(SweepCounter.FILE_TO_BE_MERGE_OR_CLEAN).increment(storeFiles.size());
} catch (IOException e) {
- LOG.error("Fail to archive the store files " + storeFiles, e);
+ LOG.error("Failed to archive the store files " + storeFiles, e);
}
storeFiles.clear();
}
@@ -390,58 +396,13 @@ public class SweepReducer extends Reducer<Text, KeyValue, Writable, Writable> {
}
/**
- * The sweep partition id.
- * It consists of the start key and date.
- * The start key is a hex string of the checksum of a region start key.
- * The date is the latest timestamp of cells in a mob file.
+ * Creates the partition id.
+ * @param fileNameAsString The current file name, as a string.
+ * @return The partition id.
*/
- public static class SweepPartitionId {
- private String date;
- private String startKey;
-
- public SweepPartitionId(MobFileName fileName) {
- this.date = fileName.getDate();
- this.startKey = fileName.getStartKey();
- }
-
- public SweepPartitionId(String date, String startKey) {
- this.date = date;
- this.startKey = startKey;
- }
-
- public static SweepPartitionId create(String key) {
- return new SweepPartitionId(MobFileName.create(key));
- }
-
- @Override
- public boolean equals(Object anObject) {
- if (this == anObject) {
- return true;
- }
- if (anObject instanceof SweepPartitionId) {
- SweepPartitionId another = (SweepPartitionId) anObject;
- if (this.date.equals(another.getDate()) && this.startKey.equals(another.getStartKey())) {
- return true;
- }
- }
- return false;
- }
-
- public String getDate() {
- return this.date;
- }
-
- public String getStartKey() {
- return this.startKey;
- }
-
- public void setDate(String date) {
- this.date = date;
- }
-
- public void setStartKey(String startKey) {
- this.startKey = startKey;
- }
+ private CompactionPartitionId createPartitionId(String fileNameAsString) {
+ MobFileName fileName = MobFileName.create(fileNameAsString);
+ return new CompactionPartitionId(fileName.getStartKey(), fileName.getDate());
}
/**
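createPartitionId replaces the deleted SweepPartitionId by parsing the two leading fields out of a mob file name. A hedged sketch of that parse, assuming the usual HBase mob file-name layout (a 32-hex-character hash of the region start key, an 8-digit date, then a trailing UUID); the real code goes through MobFileName.create and CompactionPartitionId rather than raw substrings:

    public class PartitionIdSketch {
      // Hedged re-creation of MobFileName.create(...) feeding CompactionPartitionId:
      // assumes the mob file name is a 32-hex-char start-key hash, an 8-digit date,
      // and a trailing UUID, in that order.
      static String[] partitionId(String mobFileName) {
        String startKey = mobFileName.substring(0, 32);
        String date = mobFileName.substring(32, 40);
        return new String[] { startKey, date };
      }

      public static void main(String[] args) {
        String name = "d41d8cd98f00b204e9800998ecf8427e" // start-key hash
            + "20150528"                                 // date
            + "c56c5a0071e64e6b9b9e1a1c6f9a8f6f";        // uuid-like suffix
        String[] id = partitionId(name);
        // mob files sharing both fields fall into the same sweep/compaction partition
        System.out.println(id[0] + " / " + id[1]);
      }
    }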
http://git-wip-us.apache.org/repos/asf/hbase/blob/b31a6acf/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/Sweeper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/Sweeper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/Sweeper.java
index 9342a31..5436554 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/Sweeper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/Sweeper.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.util.Tool;
@@ -43,6 +44,7 @@ import com.google.protobuf.ServiceException;
* same column family are mutually exclusive too.
*/
@InterfaceAudience.Public
+@InterfaceStability.Evolving
public class Sweeper extends Configured implements Tool {
/**
@@ -82,7 +84,7 @@ public class Sweeper extends Configured implements Tool {
try {
admin.close();
} catch (IOException e) {
- System.out.println("Fail to close the HBaseAdmin: " + e.getMessage());
+ System.out.println("Failed to close the HBaseAdmin: " + e.getMessage());
}
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/b31a6acf/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
index dd5d895..4a782dd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
@@ -85,10 +85,10 @@ public class HMobStore extends HStore {
private MobCacheConfig mobCacheConfig;
private Path homePath;
private Path mobFamilyPath;
- private volatile long mobCompactedIntoMobCellsCount = 0;
- private volatile long mobCompactedFromMobCellsCount = 0;
- private volatile long mobCompactedIntoMobCellsSize = 0;
- private volatile long mobCompactedFromMobCellsSize = 0;
+ private volatile long cellsCountCompactedToMob = 0;
+ private volatile long cellsCountCompactedFromMob = 0;
+ private volatile long cellsSizeCompactedToMob = 0;
+ private volatile long cellsSizeCompactedFromMob = 0;
private volatile long mobFlushCount = 0;
private volatile long mobFlushedCellsCount = 0;
private volatile long mobFlushedCellsSize = 0;
@@ -490,36 +490,36 @@ public class HMobStore extends HStore {
}
}
- public void updateMobCompactedIntoMobCellsCount(long count) {
- mobCompactedIntoMobCellsCount += count;
+ public void updateCellsCountCompactedToMob(long count) {
+ cellsCountCompactedToMob += count;
}
- public long getMobCompactedIntoMobCellsCount() {
- return mobCompactedIntoMobCellsCount;
+ public long getCellsCountCompactedToMob() {
+ return cellsCountCompactedToMob;
}
- public void updateMobCompactedFromMobCellsCount(long count) {
- mobCompactedFromMobCellsCount += count;
+ public void updateCellsCountCompactedFromMob(long count) {
+ cellsCountCompactedFromMob += count;
}
- public long getMobCompactedFromMobCellsCount() {
- return mobCompactedFromMobCellsCount;
+ public long getCellsCountCompactedFromMob() {
+ return cellsCountCompactedFromMob;
}
- public void updateMobCompactedIntoMobCellsSize(long size) {
- mobCompactedIntoMobCellsSize += size;
+ public void updateCellsSizeCompactedToMob(long size) {
+ cellsSizeCompactedToMob += size;
}
- public long getMobCompactedIntoMobCellsSize() {
- return mobCompactedIntoMobCellsSize;
+ public long getCellsSizeCompactedToMob() {
+ return cellsSizeCompactedToMob;
}
- public void updateMobCompactedFromMobCellsSize(long size) {
- mobCompactedFromMobCellsSize += size;
+ public void updateCellsSizeCompactedFromMob(long size) {
+ cellsSizeCompactedFromMob += size;
}
- public long getMobCompactedFromMobCellsSize() {
- return mobCompactedFromMobCellsSize;
+ public long getCellsSizeCompactedFromMob() {
+ return cellsSizeCompactedFromMob;
}
public void updateMobFlushCount() {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b31a6acf/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
index 24790e3..cf0d3f5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
@@ -82,10 +82,10 @@ class MetricsRegionServerWrapperImpl
private volatile long flushedCellsSize = 0;
private volatile long compactedCellsSize = 0;
private volatile long majorCompactedCellsSize = 0;
- private volatile long mobCompactedIntoMobCellsCount = 0;
- private volatile long mobCompactedFromMobCellsCount = 0;
- private volatile long mobCompactedIntoMobCellsSize = 0;
- private volatile long mobCompactedFromMobCellsSize = 0;
+ private volatile long cellsCountCompactedToMob = 0;
+ private volatile long cellsCountCompactedFromMob = 0;
+ private volatile long cellsSizeCompactedToMob = 0;
+ private volatile long cellsSizeCompactedFromMob = 0;
private volatile long mobFlushCount = 0;
private volatile long mobFlushedCellsCount = 0;
private volatile long mobFlushedCellsSize = 0;
@@ -449,23 +449,23 @@ class MetricsRegionServerWrapperImpl
}
@Override
- public long getMobCompactedFromMobCellsCount() {
- return mobCompactedFromMobCellsCount;
+ public long getCellsCountCompactedFromMob() {
+ return cellsCountCompactedFromMob;
}
@Override
- public long getMobCompactedIntoMobCellsCount() {
- return mobCompactedIntoMobCellsCount;
+ public long getCellsCountCompactedToMob() {
+ return cellsCountCompactedToMob;
}
@Override
- public long getMobCompactedFromMobCellsSize() {
- return mobCompactedFromMobCellsSize;
+ public long getCellsSizeCompactedFromMob() {
+ return cellsSizeCompactedFromMob;
}
@Override
- public long getMobCompactedIntoMobCellsSize() {
- return mobCompactedIntoMobCellsSize;
+ public long getCellsSizeCompactedToMob() {
+ return cellsSizeCompactedToMob;
}
@Override
@@ -560,10 +560,10 @@ class MetricsRegionServerWrapperImpl
long tempFlushedCellsSize = 0;
long tempCompactedCellsSize = 0;
long tempMajorCompactedCellsSize = 0;
- long tempMobCompactedIntoMobCellsCount = 0;
- long tempMobCompactedFromMobCellsCount = 0;
- long tempMobCompactedIntoMobCellsSize = 0;
- long tempMobCompactedFromMobCellsSize = 0;
+ long tempCellsCountCompactedToMob = 0;
+ long tempCellsCountCompactedFromMob = 0;
+ long tempCellsSizeCompactedToMob = 0;
+ long tempCellsSizeCompactedFromMob = 0;
long tempMobFlushCount = 0;
long tempMobFlushedCellsCount = 0;
long tempMobFlushedCellsSize = 0;
@@ -596,10 +596,10 @@ class MetricsRegionServerWrapperImpl
tempMajorCompactedCellsSize += store.getMajorCompactedCellsSize();
if (store instanceof HMobStore) {
HMobStore mobStore = (HMobStore) store;
- tempMobCompactedIntoMobCellsCount += mobStore.getMobCompactedIntoMobCellsCount();
- tempMobCompactedFromMobCellsCount += mobStore.getMobCompactedFromMobCellsCount();
- tempMobCompactedIntoMobCellsSize += mobStore.getMobCompactedIntoMobCellsSize();
- tempMobCompactedFromMobCellsSize += mobStore.getMobCompactedFromMobCellsSize();
+ tempCellsCountCompactedToMob += mobStore.getCellsCountCompactedToMob();
+ tempCellsCountCompactedFromMob += mobStore.getCellsCountCompactedFromMob();
+ tempCellsSizeCompactedToMob += mobStore.getCellsSizeCompactedToMob();
+ tempCellsSizeCompactedFromMob += mobStore.getCellsSizeCompactedFromMob();
tempMobFlushCount += mobStore.getMobFlushCount();
tempMobFlushedCellsCount += mobStore.getMobFlushedCellsCount();
tempMobFlushedCellsSize += mobStore.getMobFlushedCellsSize();
@@ -666,10 +666,10 @@ class MetricsRegionServerWrapperImpl
flushedCellsSize = tempFlushedCellsSize;
compactedCellsSize = tempCompactedCellsSize;
majorCompactedCellsSize = tempMajorCompactedCellsSize;
- mobCompactedIntoMobCellsCount = tempMobCompactedIntoMobCellsCount;
- mobCompactedFromMobCellsCount = tempMobCompactedFromMobCellsCount;
- mobCompactedIntoMobCellsSize = tempMobCompactedIntoMobCellsSize;
- mobCompactedFromMobCellsSize = tempMobCompactedFromMobCellsSize;
+ cellsCountCompactedToMob = tempCellsCountCompactedToMob;
+ cellsCountCompactedFromMob = tempCellsCountCompactedFromMob;
+ cellsSizeCompactedToMob = tempCellsSizeCompactedToMob;
+ cellsSizeCompactedFromMob = tempCellsSizeCompactedFromMob;
mobFlushCount = tempMobFlushCount;
mobFlushedCellsCount = tempMobFlushedCellsCount;
mobFlushedCellsSize = tempMobFlushedCellsSize;
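Note the shape of this hunk's surroundings: the wrapper sums every metric into local temp variables and assigns the volatile fields only once per refresh, so a reader on another thread sees each metric jump from one complete snapshot to the next instead of observing a half-finished sum. A condensed sketch of that publish-once pattern using two of the renamed mob counters:

    import java.util.List;

    public class MetricsSnapshotSketch {
      // Published once per refresh; never summed in place.
      private volatile long cellsCountCompactedToMob;
      private volatile long cellsCountCompactedFromMob;

      // Mirrors the wrapper's refresh: accumulate into locals, assign the volatile
      // fields last, so concurrent readers never observe a partial sum.
      void refresh(List<long[]> perStoreCounts) {
        long tempToMob = 0;
        long tempFromMob = 0;
        for (long[] counts : perStoreCounts) {
          tempToMob += counts[0];
          tempFromMob += counts[1];
        }
        cellsCountCompactedToMob = tempToMob;
        cellsCountCompactedFromMob = tempFromMob;
      }

      public static void main(String[] args) {
        MetricsSnapshotSketch m = new MetricsSnapshotSketch();
        m.refresh(List.of(new long[] { 2, 1 }, new long[] { 5, 3 }));
        System.out.println(m.cellsCountCompactedToMob + ", "
            + m.cellsCountCompactedFromMob); // 7, 4
      }
    }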
http://git-wip-us.apache.org/repos/asf/hbase/blob/b31a6acf/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java
index d1cca98..aa36e3e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java
@@ -49,7 +49,7 @@ public class ReversedMobStoreScanner extends ReversedStoreScanner {
}
/**
- * Firstly reads the cells from the HBase. If the cell are a reference cell (which has the
+ * Firstly reads the cells from the HBase. If the cell is a reference cell (which has the
reference tag), the scanner needs to seek this cell in the mob file, and uses the cell found
in the mob file as the result.
*/
http://git-wip-us.apache.org/repos/asf/hbase/blob/b31a6acf/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
index 8f94795..619a134 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
@@ -58,7 +58,7 @@ public class StoreFileInfo {
Pattern.compile("^(" + HFILE_NAME_REGEX + ")");
/**
- * A non-capture group, for hfiles, so that this can be embedded.
+ * A non-capture group, for del files, so that this can be embedded.
* A del file has (_del) as suffix.
*/
public static final String DELFILE_NAME_REGEX = "[0-9a-f]+(?:_del)";
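The corrected comment now matches what DELFILE_NAME_REGEX actually accepts: one or more hex digits followed by a literal _del suffix. A quick standalone check; the ^...$ anchoring here is added for the test, since StoreFileInfo embeds the regex into larger patterns:

    import java.util.regex.Pattern;

    public class DelFileRegexSketch {
      static final String DELFILE_NAME_REGEX = "[0-9a-f]+(?:_del)";
      static final Pattern DELFILE_PATTERN = Pattern.compile("^(" + DELFILE_NAME_REGEX + ")$");

      public static void main(String[] args) {
        // hex name with the _del suffix: a del file
        System.out.println(DELFILE_PATTERN.matcher(
            "20e30ad52c8a4b8eb36e0f3744f19c29_del").matches()); // true
        // same name without the suffix: an ordinary mob file
        System.out.println(DELFILE_PATTERN.matcher(
            "20e30ad52c8a4b8eb36e0f3744f19c29").matches());     // false
        // non-hex characters never match
        System.out.println(DELFILE_PATTERN.matcher("XYZ_del").matches()); // false
      }
    }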
http://git-wip-us.apache.org/repos/asf/hbase/blob/b31a6acf/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index 175e8d8..e7c4dac 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -159,12 +159,12 @@ public class SnapshotManifest {
RegionVisitor visitor = createRegionVisitor(desc);
// 1. dump region meta info into the snapshot directory
- LOG.debug("Storing '" + regionInfo + "' region-info for snapshot.");
+ LOG.debug("Storing mob region '" + regionInfo + "' region-info for snapshot.");
Object regionData = visitor.regionOpen(regionInfo);
monitor.rethrowException();
// 2. iterate through all the stores in the region
- LOG.debug("Creating references for hfiles");
+ LOG.debug("Creating references for mob files");
Path mobRegionPath = MobUtils.getMobRegionPath(conf, regionInfo.getTable());
for (HColumnDescriptor hcd : hcds) {
@@ -188,7 +188,7 @@ public class SnapshotManifest {
storeFiles.add(new StoreFileInfo(conf, fs, stat));
}
if (LOG.isDebugEnabled()) {
- LOG.debug("Adding snapshot references for " + storeFiles + " hfiles");
+ LOG.debug("Adding snapshot references for " + storeFiles + " mob files");
}
// 2.2. iterate through all the mob files and create "references".
@@ -198,7 +198,7 @@ public class SnapshotManifest {
// create "reference" to this store file.
if (LOG.isDebugEnabled()) {
- LOG.debug("Adding reference for file (" + (i + 1) + "/" + sz + "): "
+ LOG.debug("Adding reference for mob file (" + (i + 1) + "/" + sz + "): "
+ storeFile.getPath());
}
visitor.storeFile(regionData, familyData, storeFile);