Posted to commits@lucene.apache.org by no...@apache.org on 2016/02/24 07:12:48 UTC

[41/53] [abbrv] lucene-solr git commit: LUCENE-7042: more cleanup for Point encodings

LUCENE-7042: more cleanup for Point encodings


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/9ca1a19b
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/9ca1a19b
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/9ca1a19b

Branch: refs/heads/apiv2
Commit: 9ca1a19b81448594bbaf046da5d5e33228e6974f
Parents: d610533
Author: Robert Muir <rm...@apache.org>
Authored: Mon Feb 22 19:32:28 2016 -0500
Committer: Robert Muir <rm...@apache.org>
Committed: Mon Feb 22 19:32:52 2016 -0500

----------------------------------------------------------------------
 .../lucene/analysis/en/PorterStemmer.java       |  3 +-
 .../miscellaneous/ASCIIFoldingFilter.java       |  4 +-
 .../miscellaneous/WordDelimiterFilter.java      |  3 +-
 .../lucene/analysis/synonym/SynonymFilter.java  |  7 +---
 .../lucene/analysis/util/RollingCharBuffer.java |  4 +-
 .../org/tartarus/snowball/SnowballProgram.java  |  4 +-
 .../lucene/analysis/ja/JapaneseTokenizer.java   |  5 +--
 .../lucene50/Lucene50DocValuesProducer.java     |  6 +--
 .../codecs/blocktreeords/FSTOrdsOutputs.java    |  2 +-
 .../bloom/BloomFilteringPostingsFormat.java     |  5 +--
 .../codecs/memory/DirectPostingsFormat.java     |  3 +-
 .../codecs/memory/MemoryPostingsFormat.java     |  5 +--
 .../tokenattributes/CharTermAttributeImpl.java  |  8 ++--
 .../lucene54/Lucene54DocValuesProducer.java     |  3 +-
 .../perfield/PerFieldDocValuesFormat.java       |  5 +--
 .../org/apache/lucene/document/DoublePoint.java | 30 +++++++-------
 .../org/apache/lucene/document/FloatPoint.java  | 28 ++++++-------
 .../org/apache/lucene/document/IntPoint.java    | 28 ++++++-------
 .../org/apache/lucene/document/LongPoint.java   | 28 ++++++-------
 .../apache/lucene/index/BufferedUpdates.java    | 16 ++++----
 .../apache/lucene/index/DocValuesUpdate.java    | 14 +++----
 .../lucene/index/DocumentsWriterPerThread.java  |  7 +---
 .../index/FreqProxTermsWriterPerField.java      | 10 ++---
 .../lucene/index/FrozenBufferedUpdates.java     |  3 +-
 .../lucene/index/ParallelPostingsArray.java     |  4 +-
 .../apache/lucene/index/PointValuesWriter.java  |  6 +--
 .../apache/lucene/index/PrefixCodedTerms.java   |  4 +-
 .../lucene/index/SortedDocValuesWriter.java     |  4 +-
 .../lucene/index/SortedSetDocValuesWriter.java  |  6 +--
 .../index/TermVectorsConsumerPerField.java      |  3 +-
 .../apache/lucene/search/CachingCollector.java  |  8 ++--
 .../apache/lucene/search/PointRangeQuery.java   |  8 ++--
 .../apache/lucene/search/ScoringRewrite.java    |  3 +-
 .../java/org/apache/lucene/util/ArrayUtil.java  | 21 +++++-----
 .../org/apache/lucene/util/BytesRefArray.java   |  6 +--
 .../org/apache/lucene/util/BytesRefHash.java    | 15 ++++---
 .../org/apache/lucene/util/DocIdSetBuilder.java |  3 +-
 .../org/apache/lucene/util/NumericUtils.java    | 31 +++++++--------
 .../apache/lucene/util/RamUsageEstimator.java   | 41 ++++++++------------
 .../lucene/util/RecyclingIntBlockAllocator.java |  6 +--
 .../org/apache/lucene/util/SentinelIntSet.java  |  2 +-
 .../apache/lucene/util/automaton/Automaton.java |  4 +-
 .../lucene/util/automaton/SortedIntSet.java     |  4 +-
 .../org/apache/lucene/util/bkd/BKDReader.java   |  4 +-
 .../org/apache/lucene/util/bkd/BKDWriter.java   |  5 +--
 .../apache/lucene/util/bkd/HeapPointWriter.java |  4 +-
 .../lucene/util/bkd/OfflinePointReader.java     |  5 +--
 .../lucene/util/bkd/OfflinePointWriter.java     |  6 +--
 .../util/packed/AbstractPagedMutable.java       |  4 +-
 .../org/apache/lucene/util/packed/Direct16.java |  2 +-
 .../org/apache/lucene/util/packed/Direct32.java |  2 +-
 .../org/apache/lucene/util/packed/Direct64.java |  2 +-
 .../org/apache/lucene/util/packed/Direct8.java  |  2 +-
 .../lucene/util/packed/GrowableWriter.java      |  4 +-
 .../lucene/util/packed/Packed16ThreeBlocks.java |  2 +-
 .../org/apache/lucene/util/packed/Packed64.java |  4 +-
 .../lucene/util/packed/Packed64SingleBlock.java |  2 +-
 .../lucene/util/packed/Packed8ThreeBlocks.java  |  2 +-
 .../apache/lucene/util/packed/PackedInts.java   |  2 +-
 .../lucene/util/packed/PagedGrowableWriter.java |  4 +-
 .../apache/lucene/index/TestIntBlockPool.java   | 11 ++----
 .../org/apache/lucene/util/bkd/TestBKD.java     | 12 +++---
 .../apache/lucene/index/memory/MemoryIndex.java | 10 ++---
 .../lucene/uninverting/FieldCacheImpl.java      |  4 +-
 .../org/apache/lucene/document/LatLonPoint.java |  4 +-
 .../lucene/search/PointInPolygonQuery.java      |  6 +--
 .../apache/lucene/search/PointInRectQuery.java  |  6 +--
 .../lucene/search/TestDocValuesRangeQuery.java  |  2 +-
 .../spatial/prefix/ContainsPrefixTreeQuery.java |  2 +-
 .../org/apache/lucene/geo3d/Geo3DPoint.java     |  6 +--
 .../lucene/geo3d/PointInGeo3DShapeQuery.java    | 12 +++---
 .../search/suggest/tst/TernaryTreeNode.java     |  2 +-
 72 files changed, 221 insertions(+), 322 deletions(-)
----------------------------------------------------------------------
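
The change is largely mechanical: Lucene's hand-rolled size constants (RamUsageEstimator.NUM_BYTES_CHAR, NUM_BYTES_INT, NUM_BYTES_LONG, NUM_BYTES_FLOAT) give way to the primitive-width constants the JDK has provided on the boxed types since Java 8 (Character.BYTES, Integer.BYTES, Long.BYTES, Float.BYTES, Double.BYTES). A minimal sketch of the recurring buffer-growth idiom after the change (illustrative only; the class and method names here are invented, not from the patch):

    import org.apache.lucene.util.ArrayUtil;

    class GrowSketch {
      /** Grows buf to hold at least `needed` chars, preserving its contents. */
      static char[] grow(char[] buf, int needed) {
        if (buf.length < needed) {
          // ArrayUtil.oversize(minSize, bytesPerElement) over-allocates to
          // amortize repeated growth; Character.BYTES now supplies the element
          // width where RamUsageEstimator.NUM_BYTES_CHAR used to.
          char[] next = new char[ArrayUtil.oversize(needed, Character.BYTES)];
          System.arraycopy(buf, 0, next, 0, buf.length);
          return next;
        }
        return buf;
      }
    }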


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/PorterStemmer.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/PorterStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/PorterStemmer.java
index 041d7b8..ef239b1 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/PorterStemmer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/PorterStemmer.java
@@ -48,7 +48,6 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.FileInputStream;
 
-import static org.apache.lucene.util.RamUsageEstimator.NUM_BYTES_CHAR;
 import org.apache.lucene.util.ArrayUtil;
 
 /**
@@ -453,7 +452,7 @@ class PorterStemmer
   public boolean stem(char[] wordBuffer, int offset, int wordLen) {
     reset();
     if (b.length < wordLen) {
-      b = new char[ArrayUtil.oversize(wordLen, NUM_BYTES_CHAR)];
+      b = new char[ArrayUtil.oversize(wordLen, Character.BYTES)];
     }
     System.arraycopy(wordBuffer, offset, b, 0, wordLen);
     i = wordLen;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.java
index bd8d571..a327d17 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.analysis.miscellaneous;
 
-
 import java.io.IOException;
 
 import org.apache.lucene.analysis.TokenFilter;
@@ -24,7 +23,6 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.RamUsageEstimator;
 
 /**
  * This class converts alphabetic, numeric, and symbolic Unicode characters
@@ -142,7 +140,7 @@ public final class ASCIIFoldingFilter extends TokenFilter {
     // Worst-case length required:
     final int maxSizeNeeded = 4 * length;
     if (output.length < maxSizeNeeded) {
-      output = new char[ArrayUtil.oversize(maxSizeNeeded, RamUsageEstimator.NUM_BYTES_CHAR)];
+      output = new char[ArrayUtil.oversize(maxSizeNeeded, Character.BYTES)];
     }
 
     outputPos = foldToASCII(input, 0, output, 0, length);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java
index e2e7074..20e013d 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java
@@ -28,7 +28,6 @@ import org.apache.lucene.analysis.util.CharArraySet;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.InPlaceMergeSorter;
-import org.apache.lucene.util.RamUsageEstimator;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -429,7 +428,7 @@ public final class WordDelimiterFilter extends TokenFilter {
     savedType = typeAttribute.type();
 
     if (savedBuffer.length < termAttribute.length()) {
-      savedBuffer = new char[ArrayUtil.oversize(termAttribute.length(), RamUsageEstimator.NUM_BYTES_CHAR)];
+      savedBuffer = new char[ArrayUtil.oversize(termAttribute.length(), Character.BYTES)];
     }
 
     System.arraycopy(termAttribute.buffer(), 0, savedBuffer, 0, termAttribute.length());

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java
index 8b298f7..6a72920 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.analysis.synonym;
 
-
 import java.io.IOException;
 import java.util.Arrays;
 
@@ -31,11 +30,9 @@ import org.apache.lucene.store.ByteArrayDataInput;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.CharsRefBuilder;
 import org.apache.lucene.util.RamUsageEstimator;
-import org.apache.lucene.util.UnicodeUtil;
 import org.apache.lucene.util.fst.FST;
 
 /**
@@ -207,12 +204,12 @@ public final class SynonymFilter extends TokenFilter {
         outputs = Arrays.copyOf(outputs, ArrayUtil.oversize(1+count, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
       }
       if (count == endOffsets.length) {
-        final int[] next = new int[ArrayUtil.oversize(1+count, RamUsageEstimator.NUM_BYTES_INT)];
+        final int[] next = new int[ArrayUtil.oversize(1+count, Integer.BYTES)];
         System.arraycopy(endOffsets, 0, next, 0, count);
         endOffsets = next;
       }
       if (count == posLengths.length) {
-        final int[] next = new int[ArrayUtil.oversize(1+count, RamUsageEstimator.NUM_BYTES_INT)];
+        final int[] next = new int[ArrayUtil.oversize(1+count, Integer.BYTES)];
         System.arraycopy(posLengths, 0, next, 0, count);
         posLengths = next;
       }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/RollingCharBuffer.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/RollingCharBuffer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/RollingCharBuffer.java
index 1ced960..2464ebd 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/RollingCharBuffer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/RollingCharBuffer.java
@@ -16,12 +16,10 @@
  */
 package org.apache.lucene.analysis.util;
 
-
 import java.io.IOException;
 import java.io.Reader;
 
 import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.RamUsageEstimator;
 
 /** Acts like a forever growing char[] as you read
  *  characters into it from the provided reader, but
@@ -71,7 +69,7 @@ public final class RollingCharBuffer {
       }
       if (count == buffer.length) {
         // Grow
-        final char[] newBuffer = new char[ArrayUtil.oversize(1+count, RamUsageEstimator.NUM_BYTES_CHAR)];
+        final char[] newBuffer = new char[ArrayUtil.oversize(1+count, Character.BYTES)];
         //System.out.println(Thread.currentThread().getName() + ": cb grow " + newBuffer.length);
         System.arraycopy(buffer, nextWrite, newBuffer, 0, buffer.length - nextWrite);
         System.arraycopy(buffer, 0, newBuffer, buffer.length - nextWrite, nextWrite);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/analysis/common/src/java/org/tartarus/snowball/SnowballProgram.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/tartarus/snowball/SnowballProgram.java b/lucene/analysis/common/src/java/org/tartarus/snowball/SnowballProgram.java
index 7032749..bfc8ec0 100644
--- a/lucene/analysis/common/src/java/org/tartarus/snowball/SnowballProgram.java
+++ b/lucene/analysis/common/src/java/org/tartarus/snowball/SnowballProgram.java
@@ -29,11 +29,9 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
  */
 
-
 package org.tartarus.snowball;
 
 import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.RamUsageEstimator;
 
 /**
  * This is the rev 502 of the Snowball SVN trunk,
@@ -397,7 +395,7 @@ public abstract class SnowballProgram {
     final int newLength = limit + adjustment;
     //resize if necessary
     if (newLength > current.length) {
-      char newBuffer[] = new char[ArrayUtil.oversize(newLength, RamUsageEstimator.NUM_BYTES_CHAR)];
+      char newBuffer[] = new char[ArrayUtil.oversize(newLength, Character.BYTES)];
       System.arraycopy(current, 0, newBuffer, 0, limit);
       current = newBuffer;
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizer.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizer.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizer.java
index 5641e6c..dfab482 100644
--- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizer.java
+++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizer.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.analysis.ja;
 
-
 import java.io.IOException;
 import java.io.StringReader;
 import java.util.ArrayList;
@@ -1053,7 +1052,7 @@ public final class JapaneseTokenizer extends Tokenizer {
       assert baseOffset <= lastOffset;
       int size = lastOffset - baseOffset + 1;
       if (rootCapacity < size) {
-        int oversize = ArrayUtil.oversize(size, RamUsageEstimator.NUM_BYTES_INT);
+        int oversize = ArrayUtil.oversize(size, Integer.BYTES);
         lRoot = new int[oversize];
         rRoot = new int[oversize];
         rootCapacity = oversize;
@@ -1067,7 +1066,7 @@ public final class JapaneseTokenizer extends Tokenizer {
     // Reserve at least N nodes.
     private void reserve(int n) {
       if (capacity < n) {
-        int oversize = ArrayUtil.oversize(n, RamUsageEstimator.NUM_BYTES_INT);
+        int oversize = ArrayUtil.oversize(n, Integer.BYTES);
         nodeDicType = new Type[oversize];
         nodeWordID = new int[oversize];
         nodeMark = new int[oversize];

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50DocValuesProducer.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50DocValuesProducer.java b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50DocValuesProducer.java
index c323d6e..62c9477 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50DocValuesProducer.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50DocValuesProducer.java
@@ -537,7 +537,7 @@ class Lucene50DocValuesProducer extends DocValuesProducer implements Closeable {
       addresses = MonotonicBlockPackedReader.of(data, bytes.packedIntsVersion, bytes.blockSize, bytes.count+1, false);
       if (!merging) {
         addressInstances.put(field.name, addresses);
-        ramBytesUsed.addAndGet(addresses.ramBytesUsed() + RamUsageEstimator.NUM_BYTES_INT);
+        ramBytesUsed.addAndGet(addresses.ramBytesUsed() + Integer.BYTES);
       }
     }
     return addresses;
@@ -577,7 +577,7 @@ class Lucene50DocValuesProducer extends DocValuesProducer implements Closeable {
       addresses = MonotonicBlockPackedReader.of(data, bytes.packedIntsVersion, bytes.blockSize, size, false);
       if (!merging) {
         addressInstances.put(field.name, addresses);
-        ramBytesUsed.addAndGet(addresses.ramBytesUsed() + RamUsageEstimator.NUM_BYTES_INT);
+        ramBytesUsed.addAndGet(addresses.ramBytesUsed() + Integer.BYTES);
       }
     }
     return addresses;
@@ -662,7 +662,7 @@ class Lucene50DocValuesProducer extends DocValuesProducer implements Closeable {
       instance = MonotonicBlockPackedReader.of(data, entry.packedIntsVersion, entry.blockSize, entry.count+1, false);
       if (!merging) {
         ordIndexInstances.put(field.name, instance);
-        ramBytesUsed.addAndGet(instance.ramBytesUsed() + RamUsageEstimator.NUM_BYTES_INT);
+        ramBytesUsed.addAndGet(instance.ramBytesUsed() + Integer.BYTES);
       }
     }
     return instance;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/FSTOrdsOutputs.java
----------------------------------------------------------------------
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/FSTOrdsOutputs.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/FSTOrdsOutputs.java
index 050ad2f..8d61e2d 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/FSTOrdsOutputs.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/FSTOrdsOutputs.java
@@ -228,6 +228,6 @@ final class FSTOrdsOutputs extends Outputs<FSTOrdsOutputs.Output> {
 
   @Override
   public long ramBytesUsed(Output output) {
-    return 2 * RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 2 * RamUsageEstimator.NUM_BYTES_LONG + 2 * RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + 2 * RamUsageEstimator.NUM_BYTES_INT + output.bytes.length;
+    return 2 * RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 2 * Long.BYTES + 2 * RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + 2 * Integer.BYTES + output.bytes.length;
   }
 }
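
For concreteness, the estimate above remains JVM-dependent through the surviving RamUsageEstimator constants. Assuming, say, a 64-bit HotSpot JVM with compressed oops (roughly a 12-byte object header, 4-byte references, and a 16-byte array header; these exact values are an assumption about the runtime, not part of the patch), the formula works out to:

    2*12 + 2*8 + 2*4 + 16 + 2*4 + output.bytes.length  =  72 + output.bytes.length bytes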

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
----------------------------------------------------------------------
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
index 6b838bb..ffe9fa1 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.codecs.bloom;
 
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -45,10 +44,8 @@ import org.apache.lucene.store.DataOutput;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.Accountables;
-import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util.automaton.CompiledAutomaton;
 
 /**
@@ -380,7 +377,7 @@ public final class BloomFilteringPostingsFormat extends PostingsFormat {
     public long ramBytesUsed() {
       long sizeInBytes =  ((delegateFieldsProducer!=null) ? delegateFieldsProducer.ramBytesUsed() : 0);
       for(Map.Entry<String,FuzzySet> entry: bloomsByFieldName.entrySet()) {
-        sizeInBytes += entry.getKey().length() * RamUsageEstimator.NUM_BYTES_CHAR;
+        sizeInBytes += entry.getKey().length() * Character.BYTES;
         sizeInBytes += entry.getValue().ramBytesUsed();
       }
       return sizeInBytes;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
----------------------------------------------------------------------
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
index a1fa24d..56b1a36 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.codecs.memory;
 
-
 import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
@@ -154,7 +153,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
     public long ramBytesUsed() {
       long sizeInBytes = 0;
       for(Map.Entry<String,DirectField> entry: fields.entrySet()) {
-        sizeInBytes += entry.getKey().length() * RamUsageEstimator.NUM_BYTES_CHAR;
+        sizeInBytes += entry.getKey().length() * Character.BYTES;
         sizeInBytes += entry.getValue().ramBytesUsed();
       }
       return sizeInBytes;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
----------------------------------------------------------------------
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
index 0fb320c..1427dec 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.codecs.memory;
 
-
 import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
@@ -50,12 +49,10 @@ import org.apache.lucene.store.RAMOutputStream;
 import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.Accountables;
 import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.IntsRefBuilder;
-import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util.fst.Builder;
 import org.apache.lucene.util.fst.ByteSequenceOutputs;
 import org.apache.lucene.util.fst.BytesRefFSTEnum;
@@ -1016,7 +1013,7 @@ public final class MemoryPostingsFormat extends PostingsFormat {
       public long ramBytesUsed() {
         long sizeInBytes = 0;
         for(Map.Entry<String,TermsReader> entry: fields.entrySet()) {
-          sizeInBytes += (entry.getKey().length() * RamUsageEstimator.NUM_BYTES_CHAR);
+          sizeInBytes += (entry.getKey().length() * Character.BYTES);
           sizeInBytes += entry.getValue().ramBytesUsed();
         }
         return sizeInBytes;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/CharTermAttributeImpl.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/CharTermAttributeImpl.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/CharTermAttributeImpl.java
index a1ac69b..cde8dd9 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/CharTermAttributeImpl.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/CharTermAttributeImpl.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.analysis.tokenattributes;
 
-
 import java.nio.CharBuffer;
 
 import org.apache.lucene.util.ArrayUtil;
@@ -24,13 +23,12 @@ import org.apache.lucene.util.AttributeImpl;
 import org.apache.lucene.util.AttributeReflector;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.RamUsageEstimator;
 
 /** Default implementation of {@link CharTermAttribute}. */
 public class CharTermAttributeImpl extends AttributeImpl implements CharTermAttribute, TermToBytesRefAttribute, Cloneable {
   private static int MIN_BUFFER_SIZE = 10;
   
-  private char[] termBuffer = new char[ArrayUtil.oversize(MIN_BUFFER_SIZE, RamUsageEstimator.NUM_BYTES_CHAR)];
+  private char[] termBuffer = new char[ArrayUtil.oversize(MIN_BUFFER_SIZE, Character.BYTES)];
   private int termLength = 0;
   
   /** May be used by subclasses to convert to different charsets / encodings for implementing {@link #getBytesRef()}. */
@@ -56,7 +54,7 @@ public class CharTermAttributeImpl extends AttributeImpl implements CharTermAttr
     if(termBuffer.length < newSize){
       // Not big enough; create a new array with slight
       // over allocation and preserve content
-      final char[] newCharBuffer = new char[ArrayUtil.oversize(newSize, RamUsageEstimator.NUM_BYTES_CHAR)];
+      final char[] newCharBuffer = new char[ArrayUtil.oversize(newSize, Character.BYTES)];
       System.arraycopy(termBuffer, 0, newCharBuffer, 0, termBuffer.length);
       termBuffer = newCharBuffer;
     }
@@ -67,7 +65,7 @@ public class CharTermAttributeImpl extends AttributeImpl implements CharTermAttr
     if(termBuffer.length < newSize){
       // Not big enough; create a new array with slight
       // over allocation:
-      termBuffer = new char[ArrayUtil.oversize(newSize, RamUsageEstimator.NUM_BYTES_CHAR)];
+      termBuffer = new char[ArrayUtil.oversize(newSize, Character.BYTES)];
     }
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/codecs/lucene54/Lucene54DocValuesProducer.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene54/Lucene54DocValuesProducer.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene54/Lucene54DocValuesProducer.java
index 4e24c7a..67027ea 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene54/Lucene54DocValuesProducer.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene54/Lucene54DocValuesProducer.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.codecs.lucene54;
 
-
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -753,7 +752,7 @@ final class Lucene54DocValuesProducer extends DocValuesProducer implements Close
       addresses = MonotonicBlockPackedReader.of(data, bytes.packedIntsVersion, bytes.blockSize, size, false);
       if (!merging) {
         addressInstances.put(field.name, addresses);
-        ramBytesUsed.addAndGet(addresses.ramBytesUsed() + RamUsageEstimator.NUM_BYTES_INT);
+        ramBytesUsed.addAndGet(addresses.ramBytesUsed() + Integer.BYTES);
       }
     }
     return addresses;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldDocValuesFormat.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldDocValuesFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldDocValuesFormat.java
index 25566e0..baadf1e 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldDocValuesFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldDocValuesFormat.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.codecs.perfield;
 
-
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.Collection;
@@ -44,7 +43,6 @@ import org.apache.lucene.util.Accountables;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.RamUsageEstimator;
 
 /**
  * Enables per field docvalues support.
@@ -324,8 +322,7 @@ public abstract class PerFieldDocValuesFormat extends DocValuesFormat {
     public long ramBytesUsed() {
       long size = 0;
       for (Map.Entry<String,DocValuesProducer> entry : formats.entrySet()) {
-        size += (entry.getKey().length() * RamUsageEstimator.NUM_BYTES_CHAR) + 
-            entry.getValue().ramBytesUsed();
+        size += (entry.getKey().length() * Character.BYTES) + entry.getValue().ramBytesUsed();
       }
       return size;
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/document/DoublePoint.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/DoublePoint.java b/lucene/core/src/java/org/apache/lucene/document/DoublePoint.java
index 1619857..9099cce 100644
--- a/lucene/core/src/java/org/apache/lucene/document/DoublePoint.java
+++ b/lucene/core/src/java/org/apache/lucene/document/DoublePoint.java
@@ -16,10 +16,8 @@
  */
 package org.apache.lucene.document;
 
-
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.util.RamUsageEstimator;
 
 /** A double field that is indexed dimensionally such that finding
  *  all documents within an N-dimensional shape or range at search time is
@@ -30,7 +28,7 @@ public final class DoublePoint extends Field {
 
   private static FieldType getType(int numDims) {
     FieldType type = new FieldType();
-    type.setDimensions(numDims, RamUsageEstimator.NUM_BYTES_LONG);
+    type.setDimensions(numDims, Double.BYTES);
     type.freeze();
     return type;
   }
@@ -59,8 +57,8 @@ public final class DoublePoint extends Field {
       throw new IllegalStateException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot convert to a single numeric value");
     }
     BytesRef bytes = (BytesRef) fieldsData;
-    assert bytes.length == RamUsageEstimator.NUM_BYTES_LONG;
-    return NumericUtils.sortableLongToDouble(NumericUtils.bytesToLongDirect(bytes.bytes, bytes.offset));
+    assert bytes.length == Double.BYTES;
+    return decodeDimension(bytes.bytes, bytes.offset);
   }
 
   private static BytesRef pack(double... point) {
@@ -70,10 +68,10 @@ public final class DoublePoint extends Field {
     if (point.length == 0) {
       throw new IllegalArgumentException("point cannot be 0 dimensions");
     }
-    byte[] packed = new byte[point.length * RamUsageEstimator.NUM_BYTES_LONG];
+    byte[] packed = new byte[point.length * Double.BYTES];
     
-    for(int dim=0;dim<point.length;dim++) {
-      NumericUtils.longToBytesDirect(NumericUtils.doubleToSortableLong(point[dim]), packed, dim);
+    for (int dim = 0; dim < point.length ; dim++) {
+      encodeDimension(point[dim], packed, dim * Double.BYTES);
     }
 
     return new BytesRef(packed);
@@ -91,28 +89,26 @@ public final class DoublePoint extends Field {
   }
   
   // public helper methods (e.g. for queries)
-  // TODO: try to rectify with pack() above, which works on a single concatenated array...
 
   /** Encode n-dimensional double point into binary encoding */
   public static byte[][] encode(Double value[]) {
     byte[][] encoded = new byte[value.length][];
     for (int i = 0; i < value.length; i++) {
       if (value[i] != null) {
-        encoded[i] = encodeDimension(value[i]);
+        encoded[i] = new byte[Double.BYTES];
+        encodeDimension(value[i], encoded[i], 0);
       }
     }
     return encoded;
   }
   
   /** Encode single double dimension */
-  public static byte[] encodeDimension(Double value) {
-    byte encoded[] = new byte[Long.BYTES];
-    NumericUtils.longToBytesDirect(NumericUtils.doubleToSortableLong(value), encoded, 0);
-    return encoded;
+  public static void encodeDimension(Double value, byte dest[], int offset) {
+    NumericUtils.longToBytesDirect(NumericUtils.doubleToSortableLong(value), dest, offset);
   }
   
-  /** Decode single double value */
-  public static Double decodeDimension(byte value[]) {
-    return NumericUtils.sortableLongToDouble(NumericUtils.bytesToLongDirect(value, 0));
+  /** Decode single double dimension */
+  public static Double decodeDimension(byte value[], int offset) {
+    return NumericUtils.sortableLongToDouble(NumericUtils.bytesToLongDirect(value, offset));
   }
 }
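
Beyond the constant swap, this file (and the sibling FloatPoint/IntPoint/LongPoint classes below) changes the shape of the public helpers: encodeDimension() now writes into a caller-supplied array at an offset instead of allocating and returning a fresh byte[], and decodeDimension() takes a matching offset, so pack() and encode() can address one packed array per point. A small round-trip sketch against the new signatures (illustrative; the wrapper class is invented):

    import org.apache.lucene.document.DoublePoint;

    class PointCodecSketch {
      public static void main(String[] args) {
        // Two dimensions share one packed array; each dimension occupies
        // Double.BYTES bytes starting at its own offset.
        byte[] packed = new byte[2 * Double.BYTES];
        DoublePoint.encodeDimension(1.5, packed, 0);
        DoublePoint.encodeDimension(-2.25, packed, Double.BYTES);

        // decodeDimension reads the sortable encoding back at the same offsets.
        System.out.println(DoublePoint.decodeDimension(packed, 0));            // 1.5
        System.out.println(DoublePoint.decodeDimension(packed, Double.BYTES)); // -2.25
      }
    }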

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/document/FloatPoint.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/FloatPoint.java b/lucene/core/src/java/org/apache/lucene/document/FloatPoint.java
index cfd6c7c..c47f4ae 100644
--- a/lucene/core/src/java/org/apache/lucene/document/FloatPoint.java
+++ b/lucene/core/src/java/org/apache/lucene/document/FloatPoint.java
@@ -16,10 +16,8 @@
  */
 package org.apache.lucene.document;
 
-
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.util.RamUsageEstimator;
 
 /** A field that is indexed dimensionally such that finding
  *  all documents within an N-dimensional at search time is
@@ -30,7 +28,7 @@ public final class FloatPoint extends Field {
 
   private static FieldType getType(int numDims) {
     FieldType type = new FieldType();
-    type.setDimensions(numDims, RamUsageEstimator.NUM_BYTES_INT);
+    type.setDimensions(numDims, Float.BYTES);
     type.freeze();
     return type;
   }
@@ -59,8 +57,8 @@ public final class FloatPoint extends Field {
       throw new IllegalStateException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot convert to a single numeric value");
     }
     BytesRef bytes = (BytesRef) fieldsData;
-    assert bytes.length == RamUsageEstimator.NUM_BYTES_INT;
-    return NumericUtils.sortableIntToFloat(NumericUtils.bytesToIntDirect(bytes.bytes, bytes.offset));
+    assert bytes.length == Float.BYTES;
+    return decodeDimension(bytes.bytes, bytes.offset);
   }
 
   private static BytesRef pack(float... point) {
@@ -70,10 +68,10 @@ public final class FloatPoint extends Field {
     if (point.length == 0) {
       throw new IllegalArgumentException("point cannot be 0 dimensions");
     }
-    byte[] packed = new byte[point.length * RamUsageEstimator.NUM_BYTES_INT];
+    byte[] packed = new byte[point.length * Float.BYTES];
     
-    for(int dim=0;dim<point.length;dim++) {
-      NumericUtils.intToBytesDirect(NumericUtils.floatToSortableInt(point[dim]), packed, dim);
+    for (int dim = 0; dim < point.length; dim++) {
+      encodeDimension(point[dim], packed, dim * Float.BYTES);
     }
 
     return new BytesRef(packed);
@@ -91,28 +89,26 @@ public final class FloatPoint extends Field {
   }
   
   // public helper methods (e.g. for queries)
-  // TODO: try to rectify with pack() above, which works on a single concatenated array...
 
   /** Encode n-dimensional float values into binary encoding */
   public static byte[][] encode(Float value[]) {
     byte[][] encoded = new byte[value.length][];
     for (int i = 0; i < value.length; i++) {
       if (value[i] != null) {
-        encoded[i] = encodeDimension(value[i]);
+        encoded[i] = new byte[Float.BYTES];
+        encodeDimension(value[i], encoded[i], 0);
       }
     }
     return encoded;
   }
   
   /** Encode single float dimension */
-  public static byte[] encodeDimension(Float value) {
-    byte encoded[] = new byte[Integer.BYTES];
-    NumericUtils.intToBytesDirect(NumericUtils.floatToSortableInt(value), encoded, 0);
-    return encoded;
+  public static void encodeDimension(Float value, byte dest[], int offset) {
+    NumericUtils.intToBytesDirect(NumericUtils.floatToSortableInt(value), dest, offset);
   }
   
   /** Decode single float dimension */
-  public static Float decodeDimension(byte value[]) {
-    return NumericUtils.sortableIntToFloat(NumericUtils.bytesToIntDirect(value, 0));
+  public static Float decodeDimension(byte value[], int offset) {
+    return NumericUtils.sortableIntToFloat(NumericUtils.bytesToIntDirect(value, offset));
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/document/IntPoint.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/IntPoint.java b/lucene/core/src/java/org/apache/lucene/document/IntPoint.java
index 40ddf07..32d2b59 100644
--- a/lucene/core/src/java/org/apache/lucene/document/IntPoint.java
+++ b/lucene/core/src/java/org/apache/lucene/document/IntPoint.java
@@ -16,10 +16,8 @@
  */
 package org.apache.lucene.document;
 
-
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.util.RamUsageEstimator;
 
 /** An int field that is indexed dimensionally such that finding
  *  all documents within an N-dimensional shape or range at search time is
@@ -30,7 +28,7 @@ public final class IntPoint extends Field {
 
   private static FieldType getType(int numDims) {
     FieldType type = new FieldType();
-    type.setDimensions(numDims, RamUsageEstimator.NUM_BYTES_INT);
+    type.setDimensions(numDims, Integer.BYTES);
     type.freeze();
     return type;
   }
@@ -59,8 +57,8 @@ public final class IntPoint extends Field {
       throw new IllegalStateException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot convert to a single numeric value");
     }
     BytesRef bytes = (BytesRef) fieldsData;
-    assert bytes.length == RamUsageEstimator.NUM_BYTES_INT;
-    return NumericUtils.bytesToInt(bytes.bytes, bytes.offset);
+    assert bytes.length == Integer.BYTES;
+    return decodeDimension(bytes.bytes, bytes.offset);
   }
 
   private static BytesRef pack(int... point) {
@@ -70,10 +68,10 @@ public final class IntPoint extends Field {
     if (point.length == 0) {
       throw new IllegalArgumentException("point cannot be 0 dimensions");
     }
-    byte[] packed = new byte[point.length * RamUsageEstimator.NUM_BYTES_INT];
+    byte[] packed = new byte[point.length * Integer.BYTES];
     
-    for(int dim=0;dim<point.length;dim++) {
-      NumericUtils.intToBytes(point[dim], packed, dim);
+    for (int dim = 0; dim < point.length; dim++) {
+      encodeDimension(point[dim], packed, dim * Integer.BYTES);
     }
 
     return new BytesRef(packed);
@@ -91,28 +89,26 @@ public final class IntPoint extends Field {
   }
 
   // public helper methods (e.g. for queries)
-  // TODO: try to rectify with pack() above, which works on a single concatenated array...
 
   /** Encode n-dimensional integer values into binary encoding */
   public static byte[][] encode(Integer value[]) {
     byte[][] encoded = new byte[value.length][];
     for (int i = 0; i < value.length; i++) {
       if (value[i] != null) {
-        encoded[i] = encodeDimension(value[i]);
+        encoded[i] = new byte[Integer.BYTES];
+        encodeDimension(value[i], encoded[i], 0);
       }
     }
     return encoded;
   }
   
   /** Encode single integer dimension */
-  public static byte[] encodeDimension(Integer value) {
-    byte encoded[] = new byte[Integer.BYTES];
-    NumericUtils.intToBytes(value, encoded, 0);
-    return encoded;
+  public static void encodeDimension(Integer value, byte dest[], int offset) {
+    NumericUtils.intToBytes(value, dest, offset);
   }
   
   /** Decode single integer dimension */
-  public static Integer decodeDimension(byte value[]) {
-    return NumericUtils.bytesToInt(value, 0);
+  public static Integer decodeDimension(byte value[], int offset) {
+    return NumericUtils.bytesToInt(value, offset);
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/document/LongPoint.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/LongPoint.java b/lucene/core/src/java/org/apache/lucene/document/LongPoint.java
index 08a904c..b2ec29b 100644
--- a/lucene/core/src/java/org/apache/lucene/document/LongPoint.java
+++ b/lucene/core/src/java/org/apache/lucene/document/LongPoint.java
@@ -16,10 +16,8 @@
  */
 package org.apache.lucene.document;
 
-
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.util.RamUsageEstimator;
 
 /** A long field that is indexed dimensionally such that finding
  *  all documents within an N-dimensional shape or range at search time is
@@ -30,7 +28,7 @@ public final class LongPoint extends Field {
 
   private static FieldType getType(int numDims) {
     FieldType type = new FieldType();
-    type.setDimensions(numDims, RamUsageEstimator.NUM_BYTES_LONG);
+    type.setDimensions(numDims, Long.BYTES);
     type.freeze();
     return type;
   }
@@ -59,8 +57,8 @@ public final class LongPoint extends Field {
       throw new IllegalStateException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot convert to a single numeric value");
     }
     BytesRef bytes = (BytesRef) fieldsData;
-    assert bytes.length == RamUsageEstimator.NUM_BYTES_LONG;
-    return NumericUtils.bytesToLong(bytes.bytes, bytes.offset);
+    assert bytes.length == Long.BYTES;
+    return decodeDimension(bytes.bytes, bytes.offset);
   }
 
   private static BytesRef pack(long... point) {
@@ -70,10 +68,10 @@ public final class LongPoint extends Field {
     if (point.length == 0) {
       throw new IllegalArgumentException("point cannot be 0 dimensions");
     }
-    byte[] packed = new byte[point.length * RamUsageEstimator.NUM_BYTES_LONG];
+    byte[] packed = new byte[point.length * Long.BYTES];
     
-    for(int dim=0;dim<point.length;dim++) {
-      NumericUtils.longToBytes(point[dim], packed, dim);
+    for (int dim = 0; dim < point.length; dim++) {
+      encodeDimension(point[dim], packed, dim * Long.BYTES);
     }
 
     return new BytesRef(packed);
@@ -91,28 +89,26 @@ public final class LongPoint extends Field {
   }
   
   // public helper methods (e.g. for queries)
-  // TODO: try to rectify with pack() above, which works on a single concatenated array...
 
   /** Encode n-dimensional long values into binary encoding */
   public static byte[][] encode(Long value[]) {
     byte[][] encoded = new byte[value.length][];
     for (int i = 0; i < value.length; i++) {
       if (value[i] != null) {
-        encoded[i] = encodeDimension(value[i]);
+        encoded[i] = new byte[Long.BYTES];
+        encodeDimension(value[i], encoded[i], 0);
       }
     }
     return encoded;
   }
   
   /** Encode single long dimension */
-  public static byte[] encodeDimension(Long value) {
-    byte encoded[] = new byte[Long.BYTES];
-    NumericUtils.longToBytes(value, encoded, 0);
-    return encoded;
+  public static void encodeDimension(Long value, byte dest[], int offset) {
+    NumericUtils.longToBytes(value, dest, offset);
   }
   
   /** Decode single long dimension */
-  public static Long decodeDimension(byte value[]) {
-    return NumericUtils.bytesToLong(value, 0);
+  public static Long decodeDimension(byte value[], int offset) {
+    return NumericUtils.bytesToLong(value, offset);
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdates.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/BufferedUpdates.java b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdates.java
index 259547f..6225337 100644
--- a/lucene/core/src/java/org/apache/lucene/index/BufferedUpdates.java
+++ b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdates.java
@@ -52,19 +52,19 @@ class BufferedUpdates {
      Term's text is String (OBJ_HEADER + 4*INT + POINTER +
      OBJ_HEADER + string.length*CHAR).  Integer is
      OBJ_HEADER + INT. */
-  final static int BYTES_PER_DEL_TERM = 9*RamUsageEstimator.NUM_BYTES_OBJECT_REF + 7*RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 10*RamUsageEstimator.NUM_BYTES_INT;
+  final static int BYTES_PER_DEL_TERM = 9*RamUsageEstimator.NUM_BYTES_OBJECT_REF + 7*RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 10*Integer.BYTES;
 
   /* Rough logic: del docIDs are List<Integer>.  Say list
      allocates ~2X size (2*POINTER).  Integer is OBJ_HEADER
      + int */
-  final static int BYTES_PER_DEL_DOCID = 2*RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + RamUsageEstimator.NUM_BYTES_INT;
+  final static int BYTES_PER_DEL_DOCID = 2*RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + Integer.BYTES;
 
   /* Rough logic: HashMap has an array[Entry] w/ varying
      load factor (say 2 * POINTER).  Entry is object w/
      Query key, Integer val, int hash, Entry next
      (OBJ_HEADER + 3*POINTER + INT).  Query we often
      undercount (say 24 bytes).  Integer is OBJ_HEADER + INT. */
-  final static int BYTES_PER_DEL_QUERY = 5*RamUsageEstimator.NUM_BYTES_OBJECT_REF + 2*RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 2*RamUsageEstimator.NUM_BYTES_INT + 24;
+  final static int BYTES_PER_DEL_QUERY = 5*RamUsageEstimator.NUM_BYTES_OBJECT_REF + 2*RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 2*Integer.BYTES + 24;
 
   /* Rough logic: NumericUpdate calculates its actual size,
    * including the update Term and DV field (String). The 
@@ -82,7 +82,7 @@ class BufferedUpdates {
    */
   final static int BYTES_PER_NUMERIC_FIELD_ENTRY =
       7*RamUsageEstimator.NUM_BYTES_OBJECT_REF + 3*RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 
-      RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + 5*RamUsageEstimator.NUM_BYTES_INT + RamUsageEstimator.NUM_BYTES_FLOAT;
+      RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + 5*Integer.BYTES + Float.BYTES;
       
   /* Rough logic: Incremented when we see another Term for an already updated
    * field.
@@ -93,7 +93,7 @@ class BufferedUpdates {
    * Term (key) is counted only as POINTER.
    * NumericUpdate (val) counts its own size and isn't accounted for here.
    */
-  final static int BYTES_PER_NUMERIC_UPDATE_ENTRY = 7*RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + RamUsageEstimator.NUM_BYTES_INT;
+  final static int BYTES_PER_NUMERIC_UPDATE_ENTRY = 7*RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + Integer.BYTES;
 
   /* Rough logic: BinaryUpdate calculates its actual size,
    * including the update Term and DV field (String). The 
@@ -111,7 +111,7 @@ class BufferedUpdates {
    */
   final static int BYTES_PER_BINARY_FIELD_ENTRY =
       7*RamUsageEstimator.NUM_BYTES_OBJECT_REF + 3*RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 
-      RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + 5*RamUsageEstimator.NUM_BYTES_INT + RamUsageEstimator.NUM_BYTES_FLOAT;
+      RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + 5*Integer.BYTES + Float.BYTES;
       
   /* Rough logic: Incremented when we see another Term for an already updated
    * field.
@@ -122,7 +122,7 @@ class BufferedUpdates {
    * Term (key) is counted only as POINTER.
    * BinaryUpdate (val) counts its own size and isn't accounted for here.
    */
-  final static int BYTES_PER_BINARY_UPDATE_ENTRY = 7*RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + RamUsageEstimator.NUM_BYTES_INT;
+  final static int BYTES_PER_BINARY_UPDATE_ENTRY = 7*RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + Integer.BYTES;
   
   final AtomicInteger numTermDeletes = new AtomicInteger();
   final AtomicInteger numNumericUpdates = new AtomicInteger();
@@ -226,7 +226,7 @@ class BufferedUpdates {
     // is done to respect IndexWriterConfig.setMaxBufferedDeleteTerms.
     numTermDeletes.incrementAndGet();
     if (current == null) {
-      bytesUsed.addAndGet(BYTES_PER_DEL_TERM + term.bytes.length + (RamUsageEstimator.NUM_BYTES_CHAR * term.field().length()));
+      bytesUsed.addAndGet(BYTES_PER_DEL_TERM + term.bytes.length + (Character.BYTES * term.field().length()));
     }
   }
  

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/index/DocValuesUpdate.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocValuesUpdate.java b/lucene/core/src/java/org/apache/lucene/index/DocValuesUpdate.java
index e7b1781..c972964 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocValuesUpdate.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocValuesUpdate.java
@@ -16,16 +16,12 @@
  */
 package org.apache.lucene.index;
 
-
 import static org.apache.lucene.util.RamUsageEstimator.NUM_BYTES_ARRAY_HEADER;
-import static org.apache.lucene.util.RamUsageEstimator.NUM_BYTES_CHAR;
-import static org.apache.lucene.util.RamUsageEstimator.NUM_BYTES_INT;
 import static org.apache.lucene.util.RamUsageEstimator.NUM_BYTES_OBJECT_HEADER;
 import static org.apache.lucene.util.RamUsageEstimator.NUM_BYTES_OBJECT_REF;
 
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.RamUsageEstimator;
 
 /** An in-place update to a DocValues field. */
 abstract class DocValuesUpdate {
@@ -37,7 +33,7 @@ abstract class DocValuesUpdate {
    * String: 2*OBJ_HEADER + 4*INT + PTR + string.length*CHAR
    * T: OBJ_HEADER
    */
-  private static final int RAW_SIZE_IN_BYTES = 8*NUM_BYTES_OBJECT_HEADER + 8*NUM_BYTES_OBJECT_REF + 8*NUM_BYTES_INT;
+  private static final int RAW_SIZE_IN_BYTES = 8*NUM_BYTES_OBJECT_HEADER + 8*NUM_BYTES_OBJECT_REF + 8*Integer.BYTES;
   
   final DocValuesType type;
   final Term term;
@@ -63,9 +59,9 @@ abstract class DocValuesUpdate {
   
   final int sizeInBytes() {
     int sizeInBytes = RAW_SIZE_IN_BYTES;
-    sizeInBytes += term.field.length() * NUM_BYTES_CHAR;
+    sizeInBytes += term.field.length() * Character.BYTES;
     sizeInBytes += term.bytes.bytes.length;
-    sizeInBytes += field.length() * NUM_BYTES_CHAR;
+    sizeInBytes += field.length() * Character.BYTES;
     sizeInBytes += valueSizeInBytes();
     return sizeInBytes;
   }
@@ -79,7 +75,7 @@ abstract class DocValuesUpdate {
   static final class BinaryDocValuesUpdate extends DocValuesUpdate {
     
     /* Size of BytesRef: 2*INT + ARRAY_HEADER + PTR */
-    private static final long RAW_VALUE_SIZE_IN_BYTES = NUM_BYTES_ARRAY_HEADER + 2*NUM_BYTES_INT + NUM_BYTES_OBJECT_REF;
+    private static final long RAW_VALUE_SIZE_IN_BYTES = NUM_BYTES_ARRAY_HEADER + 2*Integer.BYTES + NUM_BYTES_OBJECT_REF;
     
     BinaryDocValuesUpdate(Term term, String field, BytesRef value) {
       super(DocValuesType.BINARY, term, field, value);
@@ -101,7 +97,7 @@ abstract class DocValuesUpdate {
 
     @Override
     long valueSizeInBytes() {
-      return RamUsageEstimator.NUM_BYTES_LONG;
+      return Long.BYTES;
     }
     
   }
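
The sizeInBytes() accounting above can be made concrete with a worked example. For a numeric update whose delete term is id:42 (field "id", a 2-byte term value) targeting DV field "price", and assuming the same compressed-oops constants as before (12-byte headers, 4-byte references; an assumption about the JVM, not something the patch fixes), the pieces are:

    RAW_SIZE_IN_BYTES     = 8*12 + 8*4 + 8*4          = 160
    term.field ("id")     = 2 chars * Character.BYTES =   4
    term.bytes ("42")     =                               2
    field ("price")       = 5 chars * Character.BYTES =  10
    valueSizeInBytes()    = Long.BYTES                =   8
                                                       ----
    sizeInBytes()                                     = 184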

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
index e5998de..65d6a14 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.index;
 
-
 import java.io.IOException;
 import java.text.NumberFormat;
 import java.util.Collections;
@@ -40,7 +39,6 @@ import org.apache.lucene.util.Counter;
 import org.apache.lucene.util.InfoStream;
 import org.apache.lucene.util.IntBlockPool;
 import org.apache.lucene.util.MutableBits;
-import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util.StringHelper;
 import org.apache.lucene.util.Version;
 
@@ -576,14 +574,13 @@ class DocumentsWriterPerThread {
     @Override
     public int[] getIntBlock() {
       int[] b = new int[IntBlockPool.INT_BLOCK_SIZE];
-      bytesUsed.addAndGet(IntBlockPool.INT_BLOCK_SIZE
-          * RamUsageEstimator.NUM_BYTES_INT);
+      bytesUsed.addAndGet(IntBlockPool.INT_BLOCK_SIZE * Integer.BYTES);
       return b;
     }
     
     @Override
     public void recycleIntBlocks(int[][] blocks, int offset, int length) {
-      bytesUsed.addAndGet(-(length * (IntBlockPool.INT_BLOCK_SIZE * RamUsageEstimator.NUM_BYTES_INT)));
+      bytesUsed.addAndGet(-(length * (IntBlockPool.INT_BLOCK_SIZE * Integer.BYTES)));
     }
     
   }
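
As a sanity check on the accounting above: IntBlockPool.INT_BLOCK_SIZE is 8192 ints (the long-standing default, if memory serves), so each block handed out by getIntBlock() is recorded as 8192 * Integer.BYTES = 32 KB, and recycleIntBlocks() subtracts the same amount per recycled block, keeping bytesUsed symmetric.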

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
index 0a7dcfd..28fe872 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
@@ -16,13 +16,11 @@
  */
 package org.apache.lucene.index;
 
-
 import java.io.IOException;
 
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.RamUsageEstimator;
 
 // TODO: break into separate freq and prox writers as
 // codecs; make separate container (tii/tis/skip/*) that can
@@ -257,15 +255,15 @@ final class FreqProxTermsWriterPerField extends TermsHashPerField {
 
     @Override
     int bytesPerPosting() {
-      int bytes = ParallelPostingsArray.BYTES_PER_POSTING + 2 * RamUsageEstimator.NUM_BYTES_INT;
+      int bytes = ParallelPostingsArray.BYTES_PER_POSTING + 2 * Integer.BYTES;
       if (lastPositions != null) {
-        bytes += RamUsageEstimator.NUM_BYTES_INT;
+        bytes += Integer.BYTES;
       }
       if (lastOffsets != null) {
-        bytes += RamUsageEstimator.NUM_BYTES_INT;
+        bytes += Integer.BYTES;
       }
       if (termFreqs != null) {
-        bytes += RamUsageEstimator.NUM_BYTES_INT;
+        bytes += Integer.BYTES;
       }
 
       return bytes;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java b/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java
index a3a57a3..4f482ad 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.index;
 
-
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
@@ -40,7 +39,7 @@ import org.apache.lucene.util.RamUsageEstimator;
 class FrozenBufferedUpdates {
 
   /* Query we often undercount (say 24 bytes), plus int. */
-  final static int BYTES_PER_DEL_QUERY = RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_INT + 24;
+  final static int BYTES_PER_DEL_QUERY = RamUsageEstimator.NUM_BYTES_OBJECT_REF + Integer.BYTES + 24;
   
   // Terms, in sorted order:
   final PrefixCodedTerms terms;
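
For scale: on a 64-bit JVM with compressed oops, NUM_BYTES_OBJECT_REF is 4,
so BYTES_PER_DEL_QUERY works out to 4 + 4 + 24 = 32 bytes per buffered
delete-by-query; without compressed oops the reference costs 8 bytes and
the total is 36.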

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/index/ParallelPostingsArray.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/ParallelPostingsArray.java b/lucene/core/src/java/org/apache/lucene/index/ParallelPostingsArray.java
index 400f441..35e8e4f 100644
--- a/lucene/core/src/java/org/apache/lucene/index/ParallelPostingsArray.java
+++ b/lucene/core/src/java/org/apache/lucene/index/ParallelPostingsArray.java
@@ -16,12 +16,10 @@
  */
 package org.apache.lucene.index;
 
-
 import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.RamUsageEstimator;
 
 class ParallelPostingsArray {
-  final static int BYTES_PER_POSTING = 3 * RamUsageEstimator.NUM_BYTES_INT;
+  final static int BYTES_PER_POSTING = 3 * Integer.BYTES;
 
   final int size;
   final int[] textStarts;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/index/PointValuesWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/PointValuesWriter.java b/lucene/core/src/java/org/apache/lucene/index/PointValuesWriter.java
index 1008f05..283f7bd 100644
--- a/lucene/core/src/java/org/apache/lucene/index/PointValuesWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/PointValuesWriter.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.index;
 
-
 import java.io.IOException;
 
 import org.apache.lucene.codecs.PointReader;
@@ -25,7 +24,6 @@ import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.ByteBlockPool;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.Counter;
-import org.apache.lucene.util.RamUsageEstimator;
 
 /** Buffers up pending byte[][] value(s) per doc, then flushes when segment flushes. */
 class PointValuesWriter {
@@ -41,7 +39,7 @@ class PointValuesWriter {
     this.iwBytesUsed = docWriter.bytesUsed;
     this.bytes = new ByteBlockPool(docWriter.byteBlockAllocator);
     docIDs = new int[16];
-    iwBytesUsed.addAndGet(16 * RamUsageEstimator.NUM_BYTES_INT);
+    iwBytesUsed.addAndGet(16 * Integer.BYTES);
     packedValue = new byte[fieldInfo.getPointDimensionCount() * fieldInfo.getPointNumBytes()];
   }
 
@@ -54,7 +52,7 @@ class PointValuesWriter {
     }
     if (docIDs.length == numDocs) {
       docIDs = ArrayUtil.grow(docIDs, numDocs+1);
-      iwBytesUsed.addAndGet((docIDs.length - numDocs) * RamUsageEstimator.NUM_BYTES_INT);
+      iwBytesUsed.addAndGet((docIDs.length - numDocs) * Integer.BYTES);
     }
     bytes.append(value);
     docIDs[numDocs] = docID;
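
Note the delta-style accounting: the constructor charges the initial
16-slot docIDs array up front (16 * 4 = 64 bytes), while each grow charges
only the newly added slots, (docIDs.length - numDocs) * Integer.BYTES,
since numDocs equals the old array length at that point.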

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/index/PrefixCodedTerms.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/PrefixCodedTerms.java b/lucene/core/src/java/org/apache/lucene/index/PrefixCodedTerms.java
index d4b23fe..87a9ae2 100644
--- a/lucene/core/src/java/org/apache/lucene/index/PrefixCodedTerms.java
+++ b/lucene/core/src/java/org/apache/lucene/index/PrefixCodedTerms.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.index;
 
-
 import java.io.IOException;
 import java.util.Objects;
 
@@ -27,7 +26,6 @@ import org.apache.lucene.store.RAMOutputStream;
 import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.RamUsageEstimator;
 
 /**
  * Prefix codes term instances (prefixes are shared)
@@ -45,7 +43,7 @@ public class PrefixCodedTerms implements Accountable {
 
   @Override
   public long ramBytesUsed() {
-    return buffer.ramBytesUsed() + 2 * RamUsageEstimator.NUM_BYTES_LONG;
+    return buffer.ramBytesUsed() + 2 * Long.BYTES;
   }
 
   /** Records del gen for this packet. */

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/index/SortedDocValuesWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/SortedDocValuesWriter.java b/lucene/core/src/java/org/apache/lucene/index/SortedDocValuesWriter.java
index 3231c60..2d8557b 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SortedDocValuesWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SortedDocValuesWriter.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.index;
 
-
 import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_SIZE;
 
 import java.io.IOException;
@@ -29,7 +28,6 @@ import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefHash.DirectBytesStartArray;
 import org.apache.lucene.util.BytesRefHash;
 import org.apache.lucene.util.Counter;
-import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util.packed.PackedInts;
 import org.apache.lucene.util.packed.PackedLongValues;
 
@@ -93,7 +91,7 @@ class SortedDocValuesWriter extends DocValuesWriter {
       // 1. when indexing, when hash is 50% full, rehash() suddenly needs 2*size ints.
       //    TODO: can this same OOM happen in THPF?
       // 2. when flushing, we need 1 int per value (slot in the ordMap).
-      iwBytesUsed.addAndGet(2 * RamUsageEstimator.NUM_BYTES_INT);
+      iwBytesUsed.addAndGet(2 * Integer.BYTES);
     }
     
     pending.add(termID);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/index/SortedSetDocValuesWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/SortedSetDocValuesWriter.java b/lucene/core/src/java/org/apache/lucene/index/SortedSetDocValuesWriter.java
index 52c6b5d..e98fc82 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SortedSetDocValuesWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SortedSetDocValuesWriter.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.index;
 
-
 import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_SIZE;
 
 import java.io.IOException;
@@ -31,7 +30,6 @@ import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefHash.DirectBytesStartArray;
 import org.apache.lucene.util.BytesRefHash;
 import org.apache.lucene.util.Counter;
-import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util.packed.PackedInts;
 import org.apache.lucene.util.packed.PackedLongValues;
 
@@ -125,14 +123,14 @@ class SortedSetDocValuesWriter extends DocValuesWriter {
       // 1. when indexing, when hash is 50% full, rehash() suddenly needs 2*size ints.
       //    TODO: can this same OOM happen in THPF?
       // 2. when flushing, we need 1 int per value (slot in the ordMap).
-      iwBytesUsed.addAndGet(2 * RamUsageEstimator.NUM_BYTES_INT);
+      iwBytesUsed.addAndGet(2 * Integer.BYTES);
     }
     
     if (currentUpto == currentValues.length) {
       currentValues = ArrayUtil.grow(currentValues, currentValues.length+1);
       // reserve additional space for max # values per-doc
       // when flushing, we need an int[] to sort the mapped-ords within the doc
-      iwBytesUsed.addAndGet((currentValues.length - currentUpto) * 2 * RamUsageEstimator.NUM_BYTES_INT);
+      iwBytesUsed.addAndGet((currentValues.length - currentUpto) * 2 * Integer.BYTES);
     }
     
     currentValues[currentUpto] = termID;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/index/TermVectorsConsumerPerField.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/TermVectorsConsumerPerField.java b/lucene/core/src/java/org/apache/lucene/index/TermVectorsConsumerPerField.java
index f252147..f69817a 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TermVectorsConsumerPerField.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TermVectorsConsumerPerField.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.index;
 
-
 import java.io.IOException;
 
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
@@ -283,7 +282,7 @@ final class TermVectorsConsumerPerField extends TermsHashPerField {
 
     @Override
     int bytesPerPosting() {
-      return super.bytesPerPosting() + 3 * RamUsageEstimator.NUM_BYTES_INT;
+      return super.bytesPerPosting() + 3 * Integer.BYTES;
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java b/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
index 55fb305..344ec3a 100644
--- a/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.search;
 
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -24,7 +23,6 @@ import java.util.List;
 
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.RamUsageEstimator;
 
 /**
  * Caches all docs, and optionally also scores, coming from
@@ -233,7 +231,7 @@ public abstract class CachingCollector extends FilterCollector {
           if (docCount >= maxDocsToCache) {
             invalidate();
           } else {
-            final int newLen = Math.min(ArrayUtil.oversize(docCount + 1, RamUsageEstimator.NUM_BYTES_INT), maxDocsToCache);
+            final int newLen = Math.min(ArrayUtil.oversize(docCount + 1, Integer.BYTES), maxDocsToCache);
             grow(newLen);
           }
         }
@@ -329,9 +327,9 @@ public abstract class CachingCollector extends FilterCollector {
    *          scores are cached.
    */
   public static CachingCollector create(Collector other, boolean cacheScores, double maxRAMMB) {
-    int bytesPerDoc = RamUsageEstimator.NUM_BYTES_INT;
+    int bytesPerDoc = Integer.BYTES;
     if (cacheScores) {
-      bytesPerDoc += RamUsageEstimator.NUM_BYTES_FLOAT;
+      bytesPerDoc += Float.BYTES;
     }
     final int maxDocsToCache = (int) ((maxRAMMB * 1024 * 1024) / bytesPerDoc);
     return create(other, cacheScores, maxDocsToCache);
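
A quick sizing example with the arithmetic above: create(other, true, 16.0)
gives bytesPerDoc = Integer.BYTES + Float.BYTES = 8, so maxDocsToCache =
(16 * 1024 * 1024) / 8 = 2,097,152 cached hits; with cacheScores=false,
bytesPerDoc drops to 4 and the cap doubles to 4,194,304.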

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java b/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
index 949cffe..9fe2a8d 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
@@ -194,7 +194,7 @@ public class PointRangeQuery extends Query {
     return new PointRangeQuery(field, IntPoint.encode(lowerValue), lowerInclusive, IntPoint.encode(upperValue), upperInclusive) {
       @Override
       protected String toString(byte[] value) {
-        return IntPoint.decodeDimension(value).toString();
+        return IntPoint.decodeDimension(value, 0).toString();
       }
     };
   }
@@ -264,7 +264,7 @@ public class PointRangeQuery extends Query {
     return new PointRangeQuery(field, LongPoint.encode(lowerValue), lowerInclusive, LongPoint.encode(upperValue), upperInclusive) {
       @Override
       protected String toString(byte[] value) {
-        return LongPoint.decodeDimension(value).toString();
+        return LongPoint.decodeDimension(value, 0).toString();
       }
     };
   }
@@ -334,7 +334,7 @@ public class PointRangeQuery extends Query {
     return new PointRangeQuery(field, FloatPoint.encode(lowerValue), lowerInclusive, FloatPoint.encode(upperValue), upperInclusive) {
       @Override
       protected String toString(byte[] value) {
-        return FloatPoint.decodeDimension(value).toString();
+        return FloatPoint.decodeDimension(value, 0).toString();
       }
     };
   }
@@ -404,7 +404,7 @@ public class PointRangeQuery extends Query {
     return new PointRangeQuery(field, DoublePoint.encode(lowerValue), lowerInclusive, DoublePoint.encode(upperValue), upperInclusive) {
       @Override
       protected String toString(byte[] value) {
-        return DoublePoint.decodeDimension(value).toString();
+        return DoublePoint.decodeDimension(value, 0).toString();
       }
     };
   }
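
The toString overrides now pass an explicit byte offset (0 for the first
dimension) instead of a dimension number, matching the NumericUtils cleanup
later in this commit. A minimal sketch of the sortable encoding behind
these helpers, assuming the post-patch offset-taking signatures of
NumericUtils.intToBytes and NumericUtils.bytesToInt:

    import org.apache.lucene.util.NumericUtils;

    public class SortableIntBytes {
      public static void main(String[] args) {
        byte[] a = new byte[Integer.BYTES];
        byte[] b = new byte[Integer.BYTES];
        NumericUtils.intToBytes(-1, a, 0); // sign flipped: 7F FF FF FF
        NumericUtils.intToBytes(0, b, 0);  // sign flipped: 80 00 00 00
        // Unsigned byte order now agrees with signed int order:
        System.out.println((a[0] & 0xff) < (b[0] & 0xff)); // true
        System.out.println(NumericUtils.bytesToInt(a, 0)); // -1 round-trips
      }
    }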

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/search/ScoringRewrite.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/ScoringRewrite.java b/lucene/core/src/java/org/apache/lucene/search/ScoringRewrite.java
index f472f5c..e0917eb 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ScoringRewrite.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ScoringRewrite.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.search;
 
-
 import java.io.IOException;
 
 import org.apache.lucene.index.IndexReader;
@@ -168,7 +167,7 @@ public abstract class ScoringRewrite<B> extends TermCollectingRewrite<B> {
     @Override
     public int[] init() {
       final int[] ord = super.init();
-      boost = new float[ArrayUtil.oversize(ord.length, RamUsageEstimator.NUM_BYTES_FLOAT)];
+      boost = new float[ArrayUtil.oversize(ord.length, Float.BYTES)];
       termState = new TermContext[ArrayUtil.oversize(ord.length, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
       assert termState.length >= ord.length && boost.length >= ord.length;
       return ord;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/util/ArrayUtil.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/util/ArrayUtil.java b/lucene/core/src/java/org/apache/lucene/util/ArrayUtil.java
index a826d35..3ab5da2 100644
--- a/lucene/core/src/java/org/apache/lucene/util/ArrayUtil.java
+++ b/lucene/core/src/java/org/apache/lucene/util/ArrayUtil.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.util;
 
-
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Comparator;
@@ -248,7 +247,7 @@ public final class ArrayUtil {
   public static short[] grow(short[] array, int minSize) {
     assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
     if (array.length < minSize) {
-      short[] newArray = new short[oversize(minSize, RamUsageEstimator.NUM_BYTES_SHORT)];
+      short[] newArray = new short[oversize(minSize, Short.BYTES)];
       System.arraycopy(array, 0, newArray, 0, array.length);
       return newArray;
     } else
@@ -262,7 +261,7 @@ public final class ArrayUtil {
   public static float[] grow(float[] array, int minSize) {
     assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
     if (array.length < minSize) {
-      float[] newArray = new float[oversize(minSize, RamUsageEstimator.NUM_BYTES_FLOAT)];
+      float[] newArray = new float[oversize(minSize, Float.BYTES)];
       System.arraycopy(array, 0, newArray, 0, array.length);
       return newArray;
     } else
@@ -276,7 +275,7 @@ public final class ArrayUtil {
   public static double[] grow(double[] array, int minSize) {
     assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
     if (array.length < minSize) {
-      double[] newArray = new double[oversize(minSize, RamUsageEstimator.NUM_BYTES_DOUBLE)];
+      double[] newArray = new double[oversize(minSize, Double.BYTES)];
       System.arraycopy(array, 0, newArray, 0, array.length);
       return newArray;
     } else
@@ -289,7 +288,7 @@ public final class ArrayUtil {
 
   public static short[] shrink(short[] array, int targetSize) {
     assert targetSize >= 0: "size must be positive (got " + targetSize + "): likely integer overflow?";
-    final int newSize = getShrinkSize(array.length, targetSize, RamUsageEstimator.NUM_BYTES_SHORT);
+    final int newSize = getShrinkSize(array.length, targetSize, Short.BYTES);
     if (newSize != array.length) {
       short[] newArray = new short[newSize];
       System.arraycopy(array, 0, newArray, 0, newSize);
@@ -301,7 +300,7 @@ public final class ArrayUtil {
   public static int[] grow(int[] array, int minSize) {
     assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
     if (array.length < minSize) {
-      int[] newArray = new int[oversize(minSize, RamUsageEstimator.NUM_BYTES_INT)];
+      int[] newArray = new int[oversize(minSize, Integer.BYTES)];
       System.arraycopy(array, 0, newArray, 0, array.length);
       return newArray;
     } else
@@ -314,7 +313,7 @@ public final class ArrayUtil {
 
   public static int[] shrink(int[] array, int targetSize) {
     assert targetSize >= 0: "size must be positive (got " + targetSize + "): likely integer overflow?";
-    final int newSize = getShrinkSize(array.length, targetSize, RamUsageEstimator.NUM_BYTES_INT);
+    final int newSize = getShrinkSize(array.length, targetSize, Integer.BYTES);
     if (newSize != array.length) {
       int[] newArray = new int[newSize];
       System.arraycopy(array, 0, newArray, 0, newSize);
@@ -326,7 +325,7 @@ public final class ArrayUtil {
   public static long[] grow(long[] array, int minSize) {
     assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
     if (array.length < minSize) {
-      long[] newArray = new long[oversize(minSize, RamUsageEstimator.NUM_BYTES_LONG)];
+      long[] newArray = new long[oversize(minSize, Long.BYTES)];
       System.arraycopy(array, 0, newArray, 0, array.length);
       return newArray;
     } else
@@ -339,7 +338,7 @@ public final class ArrayUtil {
 
   public static long[] shrink(long[] array, int targetSize) {
     assert targetSize >= 0: "size must be positive (got " + targetSize + "): likely integer overflow?";
-    final int newSize = getShrinkSize(array.length, targetSize, RamUsageEstimator.NUM_BYTES_LONG);
+    final int newSize = getShrinkSize(array.length, targetSize, Long.BYTES);
     if (newSize != array.length) {
       long[] newArray = new long[newSize];
       System.arraycopy(array, 0, newArray, 0, newSize);
@@ -401,7 +400,7 @@ public final class ArrayUtil {
   public static char[] grow(char[] array, int minSize) {
     assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
     if (array.length < minSize) {
-      char[] newArray = new char[oversize(minSize, RamUsageEstimator.NUM_BYTES_CHAR)];
+      char[] newArray = new char[oversize(minSize, Character.BYTES)];
       System.arraycopy(array, 0, newArray, 0, array.length);
       return newArray;
     } else
@@ -414,7 +413,7 @@ public final class ArrayUtil {
 
   public static char[] shrink(char[] array, int targetSize) {
     assert targetSize >= 0: "size must be positive (got " + targetSize + "): likely integer overflow?";
-    final int newSize = getShrinkSize(array.length, targetSize, RamUsageEstimator.NUM_BYTES_CHAR);
+    final int newSize = getShrinkSize(array.length, targetSize, Character.BYTES);
     if (newSize != array.length) {
       char[] newArray = new char[newSize];
       System.arraycopy(array, 0, newArray, 0, newSize);
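
The second argument to oversize() is only used to round the over-allocation
to the JVM's object alignment, so the java.lang constants document the
element width at each call site without changing behavior. A minimal usage
sketch, relying only on the grow() contract shown above:

    import org.apache.lucene.util.ArrayUtil;

    public class GrowDemo {
      public static void main(String[] args) {
        int[] buf = new int[10];
        // Copies into a new array of length oversize(25, Integer.BYTES),
        // i.e. at least 25 with roughly 1/8th extra headroom.
        buf = ArrayUtil.grow(buf, 25);
        System.out.println(buf.length >= 25); // true
      }
    }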

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/util/BytesRefArray.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/util/BytesRefArray.java b/lucene/core/src/java/org/apache/lucene/util/BytesRefArray.java
index d7394c7..47ca52b 100644
--- a/lucene/core/src/java/org/apache/lucene/util/BytesRefArray.java
+++ b/lucene/core/src/java/org/apache/lucene/util/BytesRefArray.java
@@ -51,8 +51,7 @@ public final class BytesRefArray {
     this.pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(
         bytesUsed));
     pool.nextBuffer();
-    bytesUsed.addAndGet(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER
-        + RamUsageEstimator.NUM_BYTES_INT);
+    bytesUsed.addAndGet(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + Integer.BYTES);
     this.bytesUsed = bytesUsed;
   }
  
@@ -75,8 +74,7 @@ public final class BytesRefArray {
     if (lastElement >= offsets.length) {
       int oldLen = offsets.length;
       offsets = ArrayUtil.grow(offsets, offsets.length + 1);
-      bytesUsed.addAndGet((offsets.length - oldLen)
-          * RamUsageEstimator.NUM_BYTES_INT);
+      bytesUsed.addAndGet((offsets.length - oldLen) * Integer.BYTES);
     }
     pool.append(bytes);
     offsets[lastElement++] = currentOffset;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/util/BytesRefHash.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/util/BytesRefHash.java b/lucene/core/src/java/org/apache/lucene/util/BytesRefHash.java
index 149e8b7..25b74a6 100644
--- a/lucene/core/src/java/org/apache/lucene/util/BytesRefHash.java
+++ b/lucene/core/src/java/org/apache/lucene/util/BytesRefHash.java
@@ -90,7 +90,7 @@ public final class BytesRefHash {
     this.bytesStartArray = bytesStartArray;
     bytesStart = bytesStartArray.init();
     bytesUsed = bytesStartArray.bytesUsed() == null? Counter.newCounter() : bytesStartArray.bytesUsed();
-    bytesUsed.addAndGet(hashSize * RamUsageEstimator.NUM_BYTES_INT);
+    bytesUsed.addAndGet(hashSize * Integer.BYTES);
   }
 
   /**
@@ -213,7 +213,7 @@ public final class BytesRefHash {
       newSize /= 2;
     }
     if (newSize != hashSize) {
-      bytesUsed.addAndGet(RamUsageEstimator.NUM_BYTES_INT * -(hashSize - newSize));
+      bytesUsed.addAndGet(Integer.BYTES * -(hashSize - newSize));
       hashSize = newSize;
       ids = new int[hashSize];
       Arrays.fill(ids, -1);
@@ -252,7 +252,7 @@ public final class BytesRefHash {
   public void close() {
     clear(true);
     ids = null;
-    bytesUsed.addAndGet(RamUsageEstimator.NUM_BYTES_INT * -hashSize);
+    bytesUsed.addAndGet(Integer.BYTES * -hashSize);
   }
 
   /**
@@ -408,7 +408,7 @@ public final class BytesRefHash {
    */
   private void rehash(final int newSize, boolean hashOnData) {
     final int newMask = newSize - 1;
-    bytesUsed.addAndGet(RamUsageEstimator.NUM_BYTES_INT * (newSize));
+    bytesUsed.addAndGet(Integer.BYTES * (newSize));
     final int[] newHash = new int[newSize];
     Arrays.fill(newHash, -1);
     for (int i = 0; i < hashSize; i++) {
@@ -449,7 +449,7 @@ public final class BytesRefHash {
     }
 
     hashMask = newMask;
-    bytesUsed.addAndGet(RamUsageEstimator.NUM_BYTES_INT * (-ids.length));
+    bytesUsed.addAndGet(Integer.BYTES * (-ids.length));
     ids = newHash;
     hashSize = newSize;
     hashHalfSize = newSize / 2;
@@ -472,7 +472,7 @@ public final class BytesRefHash {
     
     if (ids == null) {
       ids = new int[hashSize];
-      bytesUsed.addAndGet(RamUsageEstimator.NUM_BYTES_INT * hashSize);
+      bytesUsed.addAndGet(Integer.BYTES * hashSize);
     }
   }
 
@@ -570,8 +570,7 @@ public final class BytesRefHash {
 
     @Override
     public int[] init() {
-      return bytesStart = new int[ArrayUtil.oversize(initSize,
-          RamUsageEstimator.NUM_BYTES_INT)];
+      return bytesStart = new int[ArrayUtil.oversize(initSize, Integer.BYTES)];
     }
 
     @Override
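
The rehash bookkeeping nets out to the size difference: growing from 16 to
32 slots first charges 32 * Integer.BYTES = 128 bytes for the new table,
then credits back 16 * Integer.BYTES = 64 once the old ids array is
dropped, for a net of +64 bytes.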

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/util/DocIdSetBuilder.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/util/DocIdSetBuilder.java b/lucene/core/src/java/org/apache/lucene/util/DocIdSetBuilder.java
index 31a9762..e8b8b98 100644
--- a/lucene/core/src/java/org/apache/lucene/util/DocIdSetBuilder.java
+++ b/lucene/core/src/java/org/apache/lucene/util/DocIdSetBuilder.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.util;
 
-
 import java.io.IOException;
 
 import org.apache.lucene.search.DocIdSet;
@@ -68,7 +67,7 @@ public final class DocIdSetBuilder {
   private void growBuffer(int minSize) {
     assert minSize < threshold;
     if (buffer.length < minSize) {
-      int nextSize = Math.min(threshold, ArrayUtil.oversize(minSize, RamUsageEstimator.NUM_BYTES_INT));
+      int nextSize = Math.min(threshold, ArrayUtil.oversize(minSize, Integer.BYTES));
       int[] newBuffer = new int[nextSize];
       System.arraycopy(buffer, 0, newBuffer, 0, buffer.length);
       buffer = newBuffer;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ca1a19b/lucene/core/src/java/org/apache/lucene/util/NumericUtils.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/util/NumericUtils.java b/lucene/core/src/java/org/apache/lucene/util/NumericUtils.java
index b66f468..58e25f3 100644
--- a/lucene/core/src/java/org/apache/lucene/util/NumericUtils.java
+++ b/lucene/core/src/java/org/apache/lucene/util/NumericUtils.java
@@ -155,16 +155,15 @@ public final class NumericUtils {
     return true;
   }
 
-  public static void intToBytes(int x, byte[] dest, int index) {
+  public static void intToBytes(int x, byte[] dest, int offset) {
     // Flip the sign bit, so negative ints sort before positive ints correctly:
     x ^= 0x80000000;
-    intToBytesDirect(x, dest, index);
+    intToBytesDirect(x, dest, offset);
   }
 
-  public static void intToBytesDirect(int x, byte[] dest, int index) {
-    // Flip the sign bit, so negative ints sort before positive ints correctly:
-    for(int i=0;i<4;i++) {
-      dest[4*index+i] = (byte) (x >> 24-i*8);
+  public static void intToBytesDirect(int x, byte[] dest, int offset) {
+    for (int i = 0; i < 4; i++) {
+      dest[offset+i] = (byte) (x >> 24-i*8);
     }
   }
 
@@ -174,22 +173,21 @@ public final class NumericUtils {
     return x ^ 0x80000000;
   }
 
-  public static int bytesToIntDirect(byte[] src, int index) {
+  public static int bytesToIntDirect(byte[] src, int offset) {
     int x = 0;
-    for(int i=0;i<4;i++) {
-      x |= (src[4*index+i] & 0xff) << (24-i*8);
+    for (int i = 0; i < 4; i++) {
+      x |= (src[offset+i] & 0xff) << (24-i*8);
     }
     return x;
   }
 
-  public static void longToBytes(long v, byte[] bytes, int dim) {
+  public static void longToBytes(long v, byte[] bytes, int offset) {
     // Flip the sign bit so negative longs sort before positive longs:
     v ^= 0x8000000000000000L;
-    longToBytesDirect(v, bytes, dim);
+    longToBytesDirect(v, bytes, offset);
   }
 
-  public static void longToBytesDirect(long v, byte[] bytes, int dim) {
-    int offset = 8 * dim;
+  public static void longToBytesDirect(long v, byte[] bytes, int offset) {
     bytes[offset] = (byte) (v >> 56);
     bytes[offset+1] = (byte) (v >> 48);
     bytes[offset+2] = (byte) (v >> 40);
@@ -200,15 +198,14 @@ public final class NumericUtils {
     bytes[offset+7] = (byte) v;
   }
 
-  public static long bytesToLong(byte[] bytes, int index) {
-    long v = bytesToLongDirect(bytes, index);
+  public static long bytesToLong(byte[] bytes, int offset) {
+    long v = bytesToLongDirect(bytes, offset);
     // Flip the sign bit back
     v ^= 0x8000000000000000L;
     return v;
   }
 
-  public static long bytesToLongDirect(byte[] bytes, int index) {
-    int offset = 8 * index;
+  public static long bytesToLongDirect(byte[] bytes, int offset) {
     long v = ((bytes[offset] & 0xffL) << 56) |
       ((bytes[offset+1] & 0xffL) << 48) |
       ((bytes[offset+2] & 0xffL) << 40) |