Posted to commits@lucene.apache.org by ab...@apache.org on 2017/07/04 17:59:57 UTC

[07/23] lucene-solr:jira/solr-10879: Changes for version upgrade to 8.0 on master for the 7.0 release


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/189f2b3f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/189f2b3f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/189f2b3f

Branch: refs/heads/jira/solr-10879
Commit: 189f2b3f6e0dfb3c6f8e14a1a66c7bbaa4000970
Parents: 93fbfc2
Author: Anshum Gupta <an...@apple.com>
Authored: Fri Jun 30 11:18:28 2017 -0700
Committer: Anshum Gupta <an...@apple.com>
Committed: Fri Jun 30 11:18:28 2017 -0700

----------------------------------------------------------------------
 .../analysis/custom/TestCustomAnalyzer.java     |    8 +-
 .../lucene50/Lucene50SegmentInfoFormat.java     |   90 -
 .../apache/lucene/codecs/lucene50/package.html  |   25 -
 .../codecs/lucene53/Lucene53NormsFormat.java    |   91 -
 .../codecs/lucene53/Lucene53NormsProducer.java  |  236 ---
 .../lucene/codecs/lucene53/package-info.java    |   23 -
 .../lucene54/Lucene54DocValuesConsumer.java     |  797 --------
 .../lucene54/Lucene54DocValuesFormat.java       |  186 --
 .../lucene54/Lucene54DocValuesProducer.java     | 1803 ------------------
 .../lucene/codecs/lucene54/package-info.java    |  403 ----
 .../lucene50/Lucene50RWSegmentInfoFormat.java   |  125 --
 .../lucene50/TestLucene50SegmentInfoFormat.java |   50 -
 .../codecs/lucene53/Lucene53NormsConsumer.java  |  159 --
 .../codecs/lucene53/Lucene53RWNormsFormat.java  |   31 -
 .../lucene53/TestLucene53NormsFormat.java       |   44 -
 .../lucene54/TestLucene54DocValuesFormat.java   |  640 -------
 .../index/IndexFormatTooOldException.java       |    2 +-
 .../java/org/apache/lucene/util/Version.java    |   77 -
 .../org/apache/lucene/util/TestVersion.java     |   71 +-
 19 files changed, 41 insertions(+), 4820 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/189f2b3f/lucene/analysis/common/src/test/org/apache/lucene/analysis/custom/TestCustomAnalyzer.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/custom/TestCustomAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/custom/TestCustomAnalyzer.java
index d929bfd..d9ea43c 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/custom/TestCustomAnalyzer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/custom/TestCustomAnalyzer.java
@@ -107,7 +107,7 @@ public class TestCustomAnalyzer extends BaseTokenStreamTestCase {
 
   public void testFactoryHtmlStripClassicFolding() throws Exception {
     CustomAnalyzer a = CustomAnalyzer.builder()
-        .withDefaultMatchVersion(Version.LUCENE_6_0_0)
+        .withDefaultMatchVersion(Version.LUCENE_7_0_0)
         .addCharFilter(HTMLStripCharFilterFactory.class)
         .withTokenizer(ClassicTokenizerFactory.class)
         .addTokenFilter(ASCIIFoldingFilterFactory.class, "preserveOriginal", "true")
@@ -126,7 +126,7 @@ public class TestCustomAnalyzer extends BaseTokenStreamTestCase {
     assertSame(LowerCaseFilterFactory.class, tokenFilters.get(1).getClass());
     assertEquals(100, a.getPositionIncrementGap("dummy"));
     assertEquals(1000, a.getOffsetGap("dummy"));
-    assertSame(Version.LUCENE_6_0_0, a.getVersion());
+    assertSame(Version.LUCENE_7_0_0, a.getVersion());
 
     assertAnalyzesTo(a, "<p>foo bar</p> FOO BAR", 
         new String[] { "foo", "bar", "foo", "bar" },
@@ -139,7 +139,7 @@ public class TestCustomAnalyzer extends BaseTokenStreamTestCase {
   
   public void testHtmlStripClassicFolding() throws Exception {
     CustomAnalyzer a = CustomAnalyzer.builder()
-        .withDefaultMatchVersion(Version.LUCENE_6_0_0)
+        .withDefaultMatchVersion(Version.LUCENE_7_0_0)
         .addCharFilter("htmlstrip")
         .withTokenizer("classic")
         .addTokenFilter("asciifolding", "preserveOriginal", "true")
@@ -158,7 +158,7 @@ public class TestCustomAnalyzer extends BaseTokenStreamTestCase {
     assertSame(LowerCaseFilterFactory.class, tokenFilters.get(1).getClass());
     assertEquals(100, a.getPositionIncrementGap("dummy"));
     assertEquals(1000, a.getOffsetGap("dummy"));
-    assertSame(Version.LUCENE_6_0_0, a.getVersion());
+    assertSame(Version.LUCENE_7_0_0, a.getVersion());
 
     assertAnalyzesTo(a, "<p>foo bar</p> FOO BAR", 
         new String[] { "foo", "bar", "foo", "bar" },
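
The substance of these test changes: the default match version pinned by CustomAnalyzer's builder moves from 6.0 to 7.0 now that master targets 8.0. For reference, the pattern the tests exercise looks roughly like this -- a minimal sketch using only the factory names and gap settings that appear in the assertions above:

    import org.apache.lucene.analysis.custom.CustomAnalyzer;
    import org.apache.lucene.util.Version;

    // Components behave as they did in 7.0, regardless of which
    // Lucene version is actually on the classpath.
    CustomAnalyzer a = CustomAnalyzer.builder()
        .withDefaultMatchVersion(Version.LUCENE_7_0_0)
        .addCharFilter("htmlstrip")      // HTMLStripCharFilter
        .withTokenizer("classic")        // ClassicTokenizer
        .addTokenFilter("asciifolding", "preserveOriginal", "true")
        .addTokenFilter("lowercase")
        .withPositionIncrementGap(100)
        .withOffsetGap(1000)
        .build();                        // throws IOException if a name is unknown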

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/189f2b3f/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50SegmentInfoFormat.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50SegmentInfoFormat.java b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50SegmentInfoFormat.java
deleted file mode 100644
index d2a384e..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50SegmentInfoFormat.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.codecs.lucene50;
-
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.lucene.codecs.CodecUtil;
-import org.apache.lucene.codecs.SegmentInfoFormat;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.SegmentInfo; // javadocs
-import org.apache.lucene.store.ChecksumIndexInput;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.util.Version;
-
-/**
- * Lucene 5.0 Segment info format.
- * @deprecated Only for reading old 5.0-6.0 segments
- */
-@Deprecated
-public class Lucene50SegmentInfoFormat extends SegmentInfoFormat {
-
-  /** Sole constructor. */
-  public Lucene50SegmentInfoFormat() {
-  }
-  
-  @Override
-  public SegmentInfo read(Directory dir, String segment, byte[] segmentID, IOContext context) throws IOException {
-    final String fileName = IndexFileNames.segmentFileName(segment, "", Lucene50SegmentInfoFormat.SI_EXTENSION);
-    try (ChecksumIndexInput input = dir.openChecksumInput(fileName, context)) {
-      Throwable priorE = null;
-      SegmentInfo si = null;
-      try {
-        CodecUtil.checkIndexHeader(input, Lucene50SegmentInfoFormat.CODEC_NAME,
-                                          Lucene50SegmentInfoFormat.VERSION_START,
-                                          Lucene50SegmentInfoFormat.VERSION_CURRENT,
-                                          segmentID, "");
-        final Version version = Version.fromBits(input.readInt(), input.readInt(), input.readInt());
-        
-        final int docCount = input.readInt();
-        if (docCount < 0) {
-          throw new CorruptIndexException("invalid docCount: " + docCount, input);
-        }
-        final boolean isCompoundFile = input.readByte() == SegmentInfo.YES;
-        
-        final Map<String,String> diagnostics = input.readMapOfStrings();
-        final Set<String> files = input.readSetOfStrings();
-        final Map<String,String> attributes = input.readMapOfStrings();
-        
-        si = new SegmentInfo(dir, version, null, segment, docCount, isCompoundFile, null, diagnostics, segmentID, attributes, null);
-        si.setFiles(files);
-      } catch (Throwable exception) {
-        priorE = exception;
-      } finally {
-        CodecUtil.checkFooter(input, priorE);
-      }
-      return si;
-    }
-  }
-
-  @Override
-  public void write(Directory dir, SegmentInfo si, IOContext ioContext) throws IOException {
-    throw new UnsupportedOperationException("this codec can only be used for reading");
-  }
-
-  /** File extension used to store {@link SegmentInfo}. */
-  public final static String SI_EXTENSION = "si";
-  static final String CODEC_NAME = "Lucene50SegmentInfo";
-  static final int VERSION_SAFE_MAPS = 1;
-  static final int VERSION_START = VERSION_SAFE_MAPS;
-  static final int VERSION_CURRENT = VERSION_SAFE_MAPS;
-}
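
A note on the pattern this now-removed reader used, since the codec read paths in this commit all follow it: any exception thrown while decoding is captured and deferred to CodecUtil.checkFooter, so a truncated or corrupted .si file is reported as a checksum problem with the original failure attached, rather than as an arbitrary decode error. Condensed, the idiom is:

    try (ChecksumIndexInput input = dir.openChecksumInput(fileName, context)) {
      Throwable priorE = null;
      try {
        CodecUtil.checkIndexHeader(input, CODEC_NAME, VERSION_START,
                                   VERSION_CURRENT, segmentID, "");
        // ... decode the payload ...
      } catch (Throwable exception) {
        priorE = exception;                   // defer, do not rethrow yet
      } finally {
        CodecUtil.checkFooter(input, priorE); // verify checksum, rethrow with context
      }
    }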

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/189f2b3f/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/package.html
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/package.html b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/package.html
deleted file mode 100644
index 352cef4..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/package.html
+++ /dev/null
@@ -1,25 +0,0 @@
-<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<html>
-<head>
-   <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
-</head>
-<body>
-Lucene 5.0 file format.
-</body>
-</html>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/189f2b3f/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/Lucene53NormsFormat.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/Lucene53NormsFormat.java b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/Lucene53NormsFormat.java
deleted file mode 100644
index 1f7928f..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/Lucene53NormsFormat.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.codecs.lucene53;
-
-
-import java.io.IOException;
-
-import org.apache.lucene.codecs.CodecUtil;
-import org.apache.lucene.codecs.NormsConsumer;
-import org.apache.lucene.codecs.NormsFormat;
-import org.apache.lucene.codecs.NormsProducer;
-import org.apache.lucene.index.SegmentReadState;
-import org.apache.lucene.index.SegmentWriteState;
-import org.apache.lucene.store.DataOutput;
-
-/**
- * Lucene 5.3 Score normalization format.
- * <p>
- * Encodes normalization values by encoding each value with the minimum
- * number of bytes needed to represent the range (which can be zero).
- * <p>
- * Files:
- * <ol>
- *   <li><tt>.nvd</tt>: Norms data</li>
- *   <li><tt>.nvm</tt>: Norms metadata</li>
- * </ol>
- * <ol>
- *   <li><a name="nvm"></a>
- *   <p>The Norms metadata or .nvm file.</p>
- *   <p>For each norms field, this stores metadata, such as the offset into the 
- *      Norms data (.nvd)</p>
- *   <p>Norms metadata (.nvm) --&gt; Header,&lt;Entry&gt;<sup>NumFields</sup>,Footer</p>
- *   <ul>
- *     <li>Header --&gt; {@link CodecUtil#writeIndexHeader IndexHeader}</li>
- *     <li>Entry --&gt; FieldNumber,BytesPerValue,Offset</li>
- *     <li>FieldNumber --&gt; {@link DataOutput#writeVInt vInt}</li>
- *     <li>BytesPerValue --&gt; {@link DataOutput#writeByte byte}</li>
- *     <li>Offset --&gt; {@link DataOutput#writeLong Int64}</li>
- *     <li>Footer --&gt; {@link CodecUtil#writeFooter CodecFooter}</li>
- *   </ul>
- *   <p>FieldNumber of -1 indicates the end of metadata.</p>
- *   <p>Offset is the pointer to the start of the data in the norms data (.nvd), or the singleton value 
- *      when BytesPerValue = 0</p>
- *   <li><a name="nvd"></a>
- *   <p>The Norms data or .nvd file.</p>
- *   <p>For each Norms field, this stores the actual per-document data (the heavy-lifting)</p>
- *   <p>Norms data (.nvd) --&gt; Header,&lt; Data &gt;<sup>NumFields</sup>,Footer</p>
- *   <ul>
- *     <li>Header --&gt; {@link CodecUtil#writeIndexHeader IndexHeader}</li>
- *     <li>Data --&gt; {@link DataOutput#writeByte(byte) byte}<sup>MaxDoc * BytesPerValue</sup></li>
- *     <li>Footer --&gt; {@link CodecUtil#writeFooter CodecFooter}</li>
- *   </ul>
- * </ol>
- * @lucene.experimental
- */
-public class Lucene53NormsFormat extends NormsFormat {
-
-  /** Sole Constructor */
-  public Lucene53NormsFormat() {}
-  
-  @Override
-  public NormsConsumer normsConsumer(SegmentWriteState state) throws IOException {
-    throw new UnsupportedOperationException("This format can only be used for reading");
-  }
-
-  @Override
-  public NormsProducer normsProducer(SegmentReadState state) throws IOException {
-    return new Lucene53NormsProducer(state, DATA_CODEC, DATA_EXTENSION, METADATA_CODEC, METADATA_EXTENSION);
-  }
-  
-  static final String DATA_CODEC = "Lucene53NormsData";
-  static final String DATA_EXTENSION = "nvd";
-  static final String METADATA_CODEC = "Lucene53NormsMetadata";
-  static final String METADATA_EXTENSION = "nvm";
-  static final int VERSION_START = 0;
-  static final int VERSION_CURRENT = VERSION_START;
-}
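
Per the javadoc above, this format's one trick is a fixed per-field width: zero bytes when every document shares a single norm value (the value is then stored in the .nvm Offset field), otherwise the narrowest of 1, 2, 4, or 8 bytes that covers the field's range. A sketch of that selection (a hypothetical helper; the write side lived in Lucene53NormsConsumer, also removed in this commit):

    // Narrowest fixed width, in bytes, that holds every norm in [min, max].
    // 0 means "singleton": all documents share one value.
    static byte bytesPerValue(long min, long max) {
      if (min == max) {
        return 0;
      } else if (min >= Byte.MIN_VALUE && max <= Byte.MAX_VALUE) {
        return 1;
      } else if (min >= Short.MIN_VALUE && max <= Short.MAX_VALUE) {
        return 2;
      } else if (min >= Integer.MIN_VALUE && max <= Integer.MAX_VALUE) {
        return 4;
      } else {
        return 8;
      }
    }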

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/189f2b3f/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/Lucene53NormsProducer.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/Lucene53NormsProducer.java b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/Lucene53NormsProducer.java
deleted file mode 100644
index 718fcd6..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/Lucene53NormsProducer.java
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.codecs.lucene53;
-
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.lucene.codecs.CodecUtil;
-import org.apache.lucene.codecs.NormsProducer;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.NumericDocValues;
-import org.apache.lucene.index.SegmentReadState;
-import org.apache.lucene.store.ChecksumIndexInput;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.RandomAccessInput;
-import org.apache.lucene.util.IOUtils;
-
-import static org.apache.lucene.codecs.lucene53.Lucene53NormsFormat.VERSION_CURRENT;
-import static org.apache.lucene.codecs.lucene53.Lucene53NormsFormat.VERSION_START;
-
-/**
- * Reader for {@link Lucene53NormsFormat}
- */
-class Lucene53NormsProducer extends NormsProducer {
-  // metadata maps (just file pointers and minimal stuff)
-  private final Map<Integer,NormsEntry> norms = new HashMap<>();
-  private final IndexInput data;
-  private final int maxDoc;
-
-  Lucene53NormsProducer(SegmentReadState state, String dataCodec, String dataExtension, String metaCodec, String metaExtension) throws IOException {
-    maxDoc = state.segmentInfo.maxDoc();
-    String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, metaExtension);
-    int version = -1;
-
-    // read in the entries from the metadata file.
-    try (ChecksumIndexInput in = state.directory.openChecksumInput(metaName, state.context)) {
-      Throwable priorE = null;
-      try {
-        version = CodecUtil.checkIndexHeader(in, metaCodec, VERSION_START, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
-        readFields(in, state.fieldInfos);
-      } catch (Throwable exception) {
-        priorE = exception;
-      } finally {
-        CodecUtil.checkFooter(in, priorE);
-      }
-    }
-
-    String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension);
-    data = state.directory.openInput(dataName, state.context);
-    boolean success = false;
-    try {
-      final int version2 = CodecUtil.checkIndexHeader(data, dataCodec, VERSION_START, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
-      if (version != version2) {
-        throw new CorruptIndexException("Format versions mismatch: meta=" + version + ",data=" + version2, data);
-      }
-
-      // NOTE: data file is too costly to verify checksum against all the bytes on open,
-      // but for now we at least verify proper structure of the checksum footer: which looks
-      // for FOOTER_MAGIC + algorithmID. This is cheap and can detect some forms of corruption
-      // such as file truncation.
-      CodecUtil.retrieveChecksum(data);
-
-      success = true;
-    } finally {
-      if (!success) {
-        IOUtils.closeWhileHandlingException(this.data);
-      }
-    }
-  }
-
-  private void readFields(IndexInput meta, FieldInfos infos) throws IOException {
-    int fieldNumber = meta.readVInt();
-    while (fieldNumber != -1) {
-      FieldInfo info = infos.fieldInfo(fieldNumber);
-      if (info == null) {
-        throw new CorruptIndexException("Invalid field number: " + fieldNumber, meta);
-      } else if (!info.hasNorms()) {
-        throw new CorruptIndexException("Invalid field: " + info.name, meta);
-      }
-      NormsEntry entry = new NormsEntry();
-      entry.bytesPerValue = meta.readByte();
-      switch (entry.bytesPerValue) {
-        case 0: case 1: case 2: case 4: case 8:
-          break;
-        default:
-          throw new CorruptIndexException("Invalid bytesPerValue: " + entry.bytesPerValue + ", field: " + info.name, meta);
-      }
-      entry.offset = meta.readLong();
-      norms.put(info.number, entry);
-      fieldNumber = meta.readVInt();
-    }
-  }
-
-  @Override
-  public NumericDocValues getNorms(FieldInfo field) throws IOException {
-    final NormsEntry entry = norms.get(field.number);
-
-    if (entry.bytesPerValue == 0) {
-      final long value = entry.offset;
-      return new NormsIterator(maxDoc) {
-          @Override
-          public long longValue() {
-            return value;
-          }
-        };
-    } else {
-      RandomAccessInput slice;
-      synchronized (data) {
-        switch (entry.bytesPerValue) {
-        case 1: 
-          slice = data.randomAccessSlice(entry.offset, maxDoc);
-          return new NormsIterator(maxDoc) {
-            @Override
-            public long longValue() throws IOException {
-              return slice.readByte(docID);
-            }
-          };
-        case 2: 
-          slice = data.randomAccessSlice(entry.offset, maxDoc * 2L);
-          return new NormsIterator(maxDoc) {
-            @Override
-            public long longValue() throws IOException {
-              return slice.readShort(((long)docID) << 1L);
-            }
-          };
-        case 4: 
-          slice = data.randomAccessSlice(entry.offset, maxDoc * 4L);
-          return new NormsIterator(maxDoc) {
-            @Override
-            public long longValue() throws IOException {
-              return slice.readInt(((long)docID) << 2L);
-            }
-          };
-        case 8: 
-          slice = data.randomAccessSlice(entry.offset, maxDoc * 8L);
-          return new NormsIterator(maxDoc) {
-            @Override
-            public long longValue() throws IOException {
-              return slice.readLong(((long)docID) << 3L);
-            }
-          };
-        default:
-          throw new AssertionError();
-        }
-      }
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-    data.close();
-  }
-
-  @Override
-  public long ramBytesUsed() {
-    return 64L * norms.size(); // good enough
-  }
-
-  @Override
-  public void checkIntegrity() throws IOException {
-    CodecUtil.checksumEntireFile(data);
-  }
-
-  static class NormsEntry {
-    byte bytesPerValue;
-    long offset;
-  }
-
-  @Override
-  public String toString() {
-    return getClass().getSimpleName() + "(fields=" + norms.size() + ")";
-  }
-
-  private static abstract class NormsIterator extends NumericDocValues {
-    private final int maxDoc;
-    protected int docID = -1;
-  
-    public NormsIterator(int maxDoc) {
-      this.maxDoc = maxDoc;
-    }
-
-    @Override
-    public int docID() {
-      return docID;
-    }
-
-    @Override
-    public int nextDoc() {
-      docID++;
-      if (docID == maxDoc) {
-        docID = NO_MORE_DOCS;
-      }
-      return docID;
-    }
-
-    @Override
-    public int advance(int target) {
-      docID = target;
-      if (docID >= maxDoc) {
-        docID = NO_MORE_DOCS;
-      }
-      return docID;
-    }
-
-    @Override
-    public boolean advanceExact(int target) throws IOException {
-      docID = target;
-      return true;
-    }
-
-    @Override
-    public long cost() {
-      // TODO
-      return 0;
-    }
-  }
-}
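
The NormsIterator at the bottom of this producer reflects the 7.0 move from random-access to forward-only doc values: NumericDocValues is now an iterator, and longValue() decodes the current document's fixed-width slot from the .nvd slice. A consumer drains it with the standard loop, roughly (hypothetical variable names):

    import org.apache.lucene.index.NumericDocValues;
    import org.apache.lucene.search.DocIdSetIterator;

    NumericDocValues norms = producer.getNorms(fieldInfo);
    for (int doc = norms.nextDoc();
         doc != DocIdSetIterator.NO_MORE_DOCS;
         doc = norms.nextDoc()) {
      long value = norms.longValue();   // 1/2/4/8-byte slot, or the singleton
      // ... consume (doc, value) ...
    }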

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/189f2b3f/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/package-info.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/package-info.java b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/package-info.java
deleted file mode 100644
index 93fefb8..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Components from the Lucene 5.3 index format
- * See {@link org.apache.lucene.codecs.lucene53} for an overview
- * of the index format.
- */
-package org.apache.lucene.codecs.lucene53;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/189f2b3f/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/Lucene54DocValuesConsumer.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/Lucene54DocValuesConsumer.java b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/Lucene54DocValuesConsumer.java
deleted file mode 100644
index 217cd43..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/Lucene54DocValuesConsumer.java
+++ /dev/null
@@ -1,797 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.codecs.lucene54;
-
-
-import java.io.Closeable; // javadocs
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeSet;
-import java.util.stream.StreamSupport;
-
-import org.apache.lucene.codecs.CodecUtil;
-import org.apache.lucene.codecs.DocValuesConsumer;
-import org.apache.lucene.codecs.DocValuesProducer;
-import org.apache.lucene.codecs.LegacyDocValuesIterables;
-import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.SegmentWriteState;
-import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.RAMOutputStream;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.LongsRef;
-import org.apache.lucene.util.MathUtil;
-import org.apache.lucene.util.PagedBytes.PagedBytesDataInput;
-import org.apache.lucene.util.PagedBytes;
-import org.apache.lucene.util.StringHelper;
-import org.apache.lucene.util.packed.DirectMonotonicWriter;
-import org.apache.lucene.util.packed.DirectWriter;
-import org.apache.lucene.util.packed.MonotonicBlockPackedWriter;
-import org.apache.lucene.util.packed.PackedInts;
-
-import static org.apache.lucene.codecs.lucene54.Lucene54DocValuesFormat.*;
-
-/** writer for {@link Lucene54DocValuesFormat} */
-final class Lucene54DocValuesConsumer extends DocValuesConsumer implements Closeable {
-
-  enum NumberType {
-    /** Dense ordinals */
-    ORDINAL,
-    /** Random long values */
-    VALUE;
-  }
-
-  IndexOutput data, meta;
-  final int maxDoc;
-  
-  /** expert: Creates a new writer */
-  public Lucene54DocValuesConsumer(SegmentWriteState state, String dataCodec, String dataExtension, String metaCodec, String metaExtension) throws IOException {
-    boolean success = false;
-    try {
-      String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension);
-      data = state.directory.createOutput(dataName, state.context);
-      CodecUtil.writeIndexHeader(data, dataCodec, Lucene54DocValuesFormat.VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
-      String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, metaExtension);
-      meta = state.directory.createOutput(metaName, state.context);
-      CodecUtil.writeIndexHeader(meta, metaCodec, Lucene54DocValuesFormat.VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
-      maxDoc = state.segmentInfo.maxDoc();
-      success = true;
-    } finally {
-      if (!success) {
-        IOUtils.closeWhileHandlingException(this);
-      }
-    }
-  }
-  
-  @Override
-  public void addNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException {
-    addNumericField(field, LegacyDocValuesIterables.numericIterable(field, valuesProducer, maxDoc), NumberType.VALUE);
-  }
-
-  void addNumericField(FieldInfo field, Iterable<Number> values, NumberType numberType) throws IOException {
-    long count = 0;
-    long minValue = Long.MAX_VALUE;
-    long maxValue = Long.MIN_VALUE;
-    long gcd = 0;
-    long missingCount = 0;
-    long zeroCount = 0;
-    // TODO: more efficient?
-    HashSet<Long> uniqueValues = null;
-    long missingOrdCount = 0;
-    if (numberType == NumberType.VALUE) {
-      uniqueValues = new HashSet<>();
-
-      for (Number nv : values) {
-        final long v;
-        if (nv == null) {
-          v = 0;
-          missingCount++;
-          zeroCount++;
-        } else {
-          v = nv.longValue();
-          if (v == 0) {
-            zeroCount++;
-          }
-        }
-
-        if (gcd != 1) {
-          if (v < Long.MIN_VALUE / 2 || v > Long.MAX_VALUE / 2) {
-            // in that case v - minValue might overflow and make the GCD computation return
-            // wrong results. Since these extreme values are unlikely, we just discard
-            // GCD computation for them
-            gcd = 1;
-          } else if (count != 0) { // minValue needs to be set first
-            gcd = MathUtil.gcd(gcd, v - minValue);
-          }
-        }
-
-        minValue = Math.min(minValue, v);
-        maxValue = Math.max(maxValue, v);
-
-        if (uniqueValues != null) {
-          if (uniqueValues.add(v)) {
-            if (uniqueValues.size() > 256) {
-              uniqueValues = null;
-            }
-          }
-        }
-
-        ++count;
-      }
-    } else {
-      for (Number nv : values) {
-        long v = nv.longValue();
-        if (v == -1L) {
-          missingOrdCount++;
-        }
-        minValue = Math.min(minValue, v);
-        maxValue = Math.max(maxValue, v);
-        ++count;
-      }
-    }
-    
-    final long delta = maxValue - minValue;
-    final int deltaBitsRequired = DirectWriter.unsignedBitsRequired(delta);
-    final int tableBitsRequired = uniqueValues == null
-        ? Integer.MAX_VALUE
-        : DirectWriter.bitsRequired(uniqueValues.size() - 1);
-
-    final boolean sparse; // 1% of docs or less have a value
-    switch (numberType) {
-      case VALUE:
-        sparse = (double) missingCount / count >= 0.99;
-        break;
-      case ORDINAL:
-        sparse = (double) missingOrdCount / count >= 0.99;
-        break;
-      default:
-        throw new AssertionError();
-    }
-
-    final int format;
-    if (uniqueValues != null 
-        && count <= Integer.MAX_VALUE
-        && (uniqueValues.size() == 1
-           || (uniqueValues.size() == 2 && missingCount > 0 && zeroCount == missingCount))) {
-      // either one unique value C or two unique values: "missing" and C
-      format = CONST_COMPRESSED;
-    } else if (sparse && count >= 1024) {
-      // require at least 1024 docs to avoid flipping back and forth when doing NRT search
-      format = SPARSE_COMPRESSED;
-    } else if (uniqueValues != null && tableBitsRequired < deltaBitsRequired) {
-      format = TABLE_COMPRESSED;
-    } else if (gcd != 0 && gcd != 1) {
-      final long gcdDelta = (maxValue - minValue) / gcd;
-      final long gcdBitsRequired = DirectWriter.unsignedBitsRequired(gcdDelta);
-      format = gcdBitsRequired < deltaBitsRequired ? GCD_COMPRESSED : DELTA_COMPRESSED;
-    } else {
-      format = DELTA_COMPRESSED;
-    }
-    meta.writeVInt(field.number);
-    meta.writeByte(Lucene54DocValuesFormat.NUMERIC);
-    meta.writeVInt(format);
-    if (format == SPARSE_COMPRESSED) {
-      meta.writeLong(data.getFilePointer());
-      final long numDocsWithValue;
-      switch (numberType) {
-        case VALUE:
-          numDocsWithValue = count - missingCount;
-          break;
-        case ORDINAL:
-          numDocsWithValue = count - missingOrdCount;
-          break;
-        default:
-          throw new AssertionError();
-      }
-      final long maxDoc = writeSparseMissingBitset(values, numberType, numDocsWithValue);
-      assert maxDoc == count;
-    } else if (missingCount == 0) {
-      meta.writeLong(ALL_LIVE);
-    } else if (missingCount == count) {
-      meta.writeLong(ALL_MISSING);
-    } else {
-      meta.writeLong(data.getFilePointer());
-      writeMissingBitset(values);
-    }
-    meta.writeLong(data.getFilePointer());
-    meta.writeVLong(count);
-
-    switch (format) {
-      case CONST_COMPRESSED:
-        // write the constant (nonzero value in the n=2 case, singleton value otherwise)
-        meta.writeLong(minValue < 0 ? Collections.min(uniqueValues) : Collections.max(uniqueValues));
-        break;
-      case GCD_COMPRESSED:
-        meta.writeLong(minValue);
-        meta.writeLong(gcd);
-        final long maxDelta = (maxValue - minValue) / gcd;
-        final int bits = DirectWriter.unsignedBitsRequired(maxDelta);
-        meta.writeVInt(bits);
-        final DirectWriter quotientWriter = DirectWriter.getInstance(data, count, bits);
-        for (Number nv : values) {
-          long value = nv == null ? 0 : nv.longValue();
-          quotientWriter.add((value - minValue) / gcd);
-        }
-        quotientWriter.finish();
-        break;
-      case DELTA_COMPRESSED:
-        final long minDelta = delta < 0 ? 0 : minValue;
-        meta.writeLong(minDelta);
-        meta.writeVInt(deltaBitsRequired);
-        final DirectWriter writer = DirectWriter.getInstance(data, count, deltaBitsRequired);
-        for (Number nv : values) {
-          long v = nv == null ? 0 : nv.longValue();
-          writer.add(v - minDelta);
-        }
-        writer.finish();
-        break;
-      case TABLE_COMPRESSED:
-        final Long[] decode = uniqueValues.toArray(new Long[uniqueValues.size()]);
-        Arrays.sort(decode);
-        final HashMap<Long,Integer> encode = new HashMap<>();
-        meta.writeVInt(decode.length);
-        for (int i = 0; i < decode.length; i++) {
-          meta.writeLong(decode[i]);
-          encode.put(decode[i], i);
-        }
-        meta.writeVInt(tableBitsRequired);
-        final DirectWriter ordsWriter = DirectWriter.getInstance(data, count, tableBitsRequired);
-        for (Number nv : values) {
-          ordsWriter.add(encode.get(nv == null ? 0 : nv.longValue()));
-        }
-        ordsWriter.finish();
-        break;
-      case SPARSE_COMPRESSED:
-        final Iterable<Number> filteredMissingValues;
-        switch (numberType) {
-          case VALUE:
-            meta.writeByte((byte) 0);
-            filteredMissingValues = new Iterable<Number>() {
-              @Override
-              public Iterator<Number> iterator() {
-                return StreamSupport
-                    .stream(values.spliterator(), false)
-                    .filter(value -> value != null)
-                    .iterator();
-              }
-            };
-            break;
-          case ORDINAL:
-            meta.writeByte((byte) 1);
-            filteredMissingValues = new Iterable<Number>() {
-              @Override
-              public Iterator<Number> iterator() {
-                return StreamSupport
-                    .stream(values.spliterator(), false)
-                    .filter(value -> value.longValue() != -1L)
-                    .iterator();
-              }
-            };
-            break;
-          default:
-            throw new AssertionError();
-        }
-        // Write non-missing values as a numeric field
-        addNumericField(field, filteredMissingValues, numberType);
-        break;
-      default:
-        throw new AssertionError();
-    }
-    meta.writeLong(data.getFilePointer());
-  }
-  
-  // TODO: in some cases representing missing with minValue-1 wouldn't take up additional space and so on,
-  // but this is very simple, and algorithms only check this for values of 0 anyway (doesn't slow down normal decode)
-  void writeMissingBitset(Iterable<?> values) throws IOException {
-    byte bits = 0;
-    int count = 0;
-    for (Object v : values) {
-      if (count == 8) {
-        data.writeByte(bits);
-        count = 0;
-        bits = 0;
-      }
-      if (v != null) {
-        bits |= 1 << (count & 7);
-      }
-      count++;
-    }
-    if (count > 0) {
-      data.writeByte(bits);
-    }
-  }
-
-  long writeSparseMissingBitset(Iterable<Number> values, NumberType numberType, long numDocsWithValue) throws IOException {
-    meta.writeVLong(numDocsWithValue);
-
-    // Write doc IDs that have a value
-    meta.writeVInt(DIRECT_MONOTONIC_BLOCK_SHIFT);
-    final DirectMonotonicWriter docIdsWriter = DirectMonotonicWriter.getInstance(meta, data, numDocsWithValue, DIRECT_MONOTONIC_BLOCK_SHIFT);
-    long docID = 0;
-    for (Number nv : values) {
-      switch (numberType) {
-        case VALUE:
-          if (nv != null) {
-            docIdsWriter.add(docID);
-          }
-          break;
-        case ORDINAL:
-          if (nv.longValue() != -1L) {
-            docIdsWriter.add(docID);
-          }
-          break;
-        default:
-          throw new AssertionError();
-      }
-      docID++;
-    }
-    docIdsWriter.finish();
-    return docID;
-  }
-
-  @Override
-  public void addBinaryField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException {
-    addBinaryField(field, LegacyDocValuesIterables.binaryIterable(field, valuesProducer, maxDoc));
-  }
-  
-  private void addBinaryField(FieldInfo field, Iterable<BytesRef> values) throws IOException {
-    // write the byte[] data
-    meta.writeVInt(field.number);
-    meta.writeByte(Lucene54DocValuesFormat.BINARY);
-    int minLength = Integer.MAX_VALUE;
-    int maxLength = Integer.MIN_VALUE;
-    final long startFP = data.getFilePointer();
-    long count = 0;
-    long missingCount = 0;
-    for(BytesRef v : values) {
-      final int length;
-      if (v == null) {
-        length = 0;
-        missingCount++;
-      } else {
-        length = v.length;
-      }
-      minLength = Math.min(minLength, length);
-      maxLength = Math.max(maxLength, length);
-      if (v != null) {
-        data.writeBytes(v.bytes, v.offset, v.length);
-      }
-      count++;
-    }
-    meta.writeVInt(minLength == maxLength ? BINARY_FIXED_UNCOMPRESSED : BINARY_VARIABLE_UNCOMPRESSED);
-    if (missingCount == 0) {
-      meta.writeLong(ALL_LIVE);
-    } else if (missingCount == count) {
-      meta.writeLong(ALL_MISSING);
-    } else {
-      meta.writeLong(data.getFilePointer());
-      writeMissingBitset(values);
-    }
-    meta.writeVInt(minLength);
-    meta.writeVInt(maxLength);
-    meta.writeVLong(count);
-    meta.writeLong(startFP);
-    
-    // if minLength == maxLength, it's a fixed-length byte[], we are done (the addresses are implicit)
-    // otherwise, we need to record the length fields...
-    if (minLength != maxLength) {
-      meta.writeLong(data.getFilePointer());
-      meta.writeVInt(DIRECT_MONOTONIC_BLOCK_SHIFT);
-
-      final DirectMonotonicWriter writer = DirectMonotonicWriter.getInstance(meta, data, count + 1, DIRECT_MONOTONIC_BLOCK_SHIFT);
-      long addr = 0;
-      writer.add(addr);
-      for (BytesRef v : values) {
-        if (v != null) {
-          addr += v.length;
-        }
-        writer.add(addr);
-      }
-      writer.finish();
-      meta.writeLong(data.getFilePointer());
-    }
-  }
-  
-  /** expert: writes a value dictionary for a sorted/sortedset field */
-  private void addTermsDict(FieldInfo field, final Iterable<BytesRef> values) throws IOException {
-    // first check if it's a "fixed-length" terms dict, and compressibility if so
-    int minLength = Integer.MAX_VALUE;
-    int maxLength = Integer.MIN_VALUE;
-    long numValues = 0;
-    BytesRefBuilder previousValue = new BytesRefBuilder();
-    long prefixSum = 0; // only valid for fixed-width data, as we have a choice there
-    for (BytesRef v : values) {
-      minLength = Math.min(minLength, v.length);
-      maxLength = Math.max(maxLength, v.length);
-      if (minLength == maxLength) {
-        int termPosition = (int) (numValues & INTERVAL_MASK);
-        if (termPosition == 0) {
-          // first term in block, save it away to compare against the last term later
-          previousValue.copyBytes(v);
-        } else if (termPosition == INTERVAL_COUNT - 1) {
-          // last term in block, accumulate shared prefix against first term
-          prefixSum += StringHelper.bytesDifference(previousValue.get(), v);
-        }
-      }
-      numValues++;
-    }
-    // for fixed width data, look at the avg(shared prefix) before deciding how to encode:
-    // prefix compression "costs" worst case 2 bytes per term because we must store suffix lengths.
-    // so if we share at least 3 bytes on average, always compress.
-    if (minLength == maxLength && prefixSum <= 3*(numValues >> INTERVAL_SHIFT)) {
-      // no index needed: not very compressible, direct addressing by mult
-      addBinaryField(field, values);
-    } else if (numValues < REVERSE_INTERVAL_COUNT) {
-      // low cardinality: waste a few KB of ram, but can't really use fancy index etc
-      addBinaryField(field, values);
-    } else {
-      assert numValues > 0; // we don't have to handle the empty case
-      // header
-      meta.writeVInt(field.number);
-      meta.writeByte(Lucene54DocValuesFormat.BINARY);
-      meta.writeVInt(BINARY_PREFIX_COMPRESSED);
-      meta.writeLong(-1L);
-      // now write the bytes: sharing prefixes within a block
-      final long startFP = data.getFilePointer();
-      // currently, we have to store the delta from expected for every 1/nth term
-      // we could avoid this, but it's not much and less overall RAM than the previous approach!
-      RAMOutputStream addressBuffer = new RAMOutputStream();
-      MonotonicBlockPackedWriter termAddresses = new MonotonicBlockPackedWriter(addressBuffer, MONOTONIC_BLOCK_SIZE);
-      // buffers up 16 terms
-      RAMOutputStream bytesBuffer = new RAMOutputStream();
-      // buffers up block header
-      RAMOutputStream headerBuffer = new RAMOutputStream();
-      BytesRefBuilder lastTerm = new BytesRefBuilder();
-      lastTerm.grow(maxLength);
-      long count = 0;
-      int suffixDeltas[] = new int[INTERVAL_COUNT];
-      for (BytesRef v : values) {
-        int termPosition = (int) (count & INTERVAL_MASK);
-        if (termPosition == 0) {
-          termAddresses.add(data.getFilePointer() - startFP);
-          // abs-encode first term
-          headerBuffer.writeVInt(v.length);
-          headerBuffer.writeBytes(v.bytes, v.offset, v.length);
-          lastTerm.copyBytes(v);
-        } else {
-          // prefix-code: we only share at most 255 characters, to encode the length as a single
-          // byte and have random access. Larger terms just get less compression.
-          int sharedPrefix = Math.min(255, StringHelper.bytesDifference(lastTerm.get(), v));
-          bytesBuffer.writeByte((byte) sharedPrefix);
-          bytesBuffer.writeBytes(v.bytes, v.offset + sharedPrefix, v.length - sharedPrefix);
-          // we can encode one smaller, because terms are unique.
-          suffixDeltas[termPosition] = v.length - sharedPrefix - 1;
-        }
-        
-        count++;
-        // flush block
-        if ((count & INTERVAL_MASK) == 0) {
-          flushTermsDictBlock(headerBuffer, bytesBuffer, suffixDeltas);
-        }
-      }
-      // flush trailing crap
-      int leftover = (int) (count & INTERVAL_MASK);
-      if (leftover > 0) {
-        Arrays.fill(suffixDeltas, leftover, suffixDeltas.length, 0);
-        flushTermsDictBlock(headerBuffer, bytesBuffer, suffixDeltas);
-      }
-      final long indexStartFP = data.getFilePointer();
-      // write addresses of indexed terms
-      termAddresses.finish();
-      addressBuffer.writeTo(data);
-      addressBuffer = null;
-      termAddresses = null;
-      meta.writeVInt(minLength);
-      meta.writeVInt(maxLength);
-      meta.writeVLong(count);
-      meta.writeLong(startFP);
-      meta.writeLong(indexStartFP);
-      meta.writeVInt(PackedInts.VERSION_CURRENT);
-      meta.writeVInt(MONOTONIC_BLOCK_SIZE);
-      addReverseTermIndex(field, values, maxLength);
-    }
-  }
-  // writes term dictionary "block"
-  // first term is absolute encoded as vint length + bytes.
-  // lengths of subsequent N terms are encoded as either N bytes or N shorts.
-  // in the double-byte case, the first byte is indicated with -1.
-  // subsequent terms are encoded as byte suffixLength + bytes.
-  private void flushTermsDictBlock(RAMOutputStream headerBuffer, RAMOutputStream bytesBuffer, int suffixDeltas[]) throws IOException {
-    boolean twoByte = false;
-    for (int i = 1; i < suffixDeltas.length; i++) {
-      if (suffixDeltas[i] > 254) {
-        twoByte = true;
-      }
-    }
-    if (twoByte) {
-      headerBuffer.writeByte((byte)255);
-      for (int i = 1; i < suffixDeltas.length; i++) {
-        headerBuffer.writeShort((short) suffixDeltas[i]);
-      }
-    } else {
-      for (int i = 1; i < suffixDeltas.length; i++) {
-        headerBuffer.writeByte((byte) suffixDeltas[i]);
-      }
-    }
-    headerBuffer.writeTo(data);
-    headerBuffer.reset();
-    bytesBuffer.writeTo(data);
-    bytesBuffer.reset();
-  }
-  
-  // writes reverse term index: used for binary searching a term into a range of 64 blocks
-  // for every 64 blocks (1024 terms) we store a term, trimming any suffix unnecessary for comparison
-  // terms are written as a contiguous byte[], but never spanning 2^15 byte boundaries.
-  private void addReverseTermIndex(FieldInfo field, final Iterable<BytesRef> values, int maxLength) throws IOException {
-    long count = 0;
-    BytesRefBuilder priorTerm = new BytesRefBuilder();
-    priorTerm.grow(maxLength);
-    BytesRef indexTerm = new BytesRef();
-    long startFP = data.getFilePointer();
-    PagedBytes pagedBytes = new PagedBytes(15);
-    MonotonicBlockPackedWriter addresses = new MonotonicBlockPackedWriter(data, MONOTONIC_BLOCK_SIZE);
-    
-    for (BytesRef b : values) {
-      int termPosition = (int) (count & REVERSE_INTERVAL_MASK);
-      if (termPosition == 0) {
-        int len = StringHelper.sortKeyLength(priorTerm.get(), b);
-        indexTerm.bytes = b.bytes;
-        indexTerm.offset = b.offset;
-        indexTerm.length = len;
-        addresses.add(pagedBytes.copyUsingLengthPrefix(indexTerm));
-      } else if (termPosition == REVERSE_INTERVAL_MASK) {
-        priorTerm.copyBytes(b);
-      }
-      count++;
-    }
-    addresses.finish();
-    long numBytes = pagedBytes.getPointer();
-    pagedBytes.freeze(true);
-    PagedBytesDataInput in = pagedBytes.getDataInput();
-    meta.writeLong(startFP);
-    data.writeVLong(numBytes);
-    data.copyBytes(in, numBytes);
-  }
-
-  @Override
-  public void addSortedField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException {
-    meta.writeVInt(field.number);
-    meta.writeByte(Lucene54DocValuesFormat.SORTED);
-    addTermsDict(field, LegacyDocValuesIterables.valuesIterable(valuesProducer.getSorted(field)));
-    addNumericField(field, LegacyDocValuesIterables.sortedOrdIterable(valuesProducer, field, maxDoc), NumberType.ORDINAL);
-  }
-
-  private void addSortedField(FieldInfo field, Iterable<BytesRef> values, Iterable<Number> ords) throws IOException {
-    meta.writeVInt(field.number);
-    meta.writeByte(Lucene54DocValuesFormat.SORTED);
-    addTermsDict(field, values);
-    addNumericField(field, ords, NumberType.ORDINAL);
-  }
-
-  @Override
-  public void addSortedNumericField(FieldInfo field, final DocValuesProducer valuesProducer) throws IOException {
-
-    final Iterable<Number> docToValueCount = LegacyDocValuesIterables.sortedNumericToDocCount(valuesProducer, field, maxDoc);
-    final Iterable<Number> values = LegacyDocValuesIterables.sortedNumericToValues(valuesProducer, field);
-    
-    meta.writeVInt(field.number);
-    meta.writeByte(Lucene54DocValuesFormat.SORTED_NUMERIC);
-    if (isSingleValued(docToValueCount)) {
-      meta.writeVInt(SORTED_SINGLE_VALUED);
-      // The field is single-valued, we can encode it as NUMERIC
-      addNumericField(field, singletonView(docToValueCount, values, null), NumberType.VALUE);
-    } else {
-      final SortedSet<LongsRef> uniqueValueSets = uniqueValueSets(docToValueCount, values);
-      if (uniqueValueSets != null) {
-        meta.writeVInt(SORTED_SET_TABLE);
-
-        // write the set_id -> values mapping
-        writeDictionary(uniqueValueSets);
-
-        // write the doc -> set_id as a numeric field
-        addNumericField(field, docToSetId(uniqueValueSets, docToValueCount, values), NumberType.ORDINAL);
-      } else {
-        meta.writeVInt(SORTED_WITH_ADDRESSES);
-        // write the stream of values as a numeric field
-        addNumericField(field, values, NumberType.VALUE);
-        // write the doc -> ord count as an absolute index to the stream
-        addOrdIndex(field, docToValueCount);
-      }
-    }
-  }
-
-  @Override
-  public void addSortedSetField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException {
-
-    Iterable<BytesRef> values = LegacyDocValuesIterables.valuesIterable(valuesProducer.getSortedSet(field));
-    Iterable<Number> docToOrdCount = LegacyDocValuesIterables.sortedSetOrdCountIterable(valuesProducer, field, maxDoc);
-    Iterable<Number> ords = LegacyDocValuesIterables.sortedSetOrdsIterable(valuesProducer, field);
-
-    meta.writeVInt(field.number);
-    meta.writeByte(Lucene54DocValuesFormat.SORTED_SET);
-
-    if (isSingleValued(docToOrdCount)) {
-      meta.writeVInt(SORTED_SINGLE_VALUED);
-      // The field is single-valued, we can encode it as SORTED
-      addSortedField(field, values, singletonView(docToOrdCount, ords, -1L));
-    } else {
-      final SortedSet<LongsRef> uniqueValueSets = uniqueValueSets(docToOrdCount, ords);
-      if (uniqueValueSets != null) {
-        meta.writeVInt(SORTED_SET_TABLE);
-
-        // write the set_id -> ords mapping
-        writeDictionary(uniqueValueSets);
-
-        // write the ord -> byte[] as a binary field
-        addTermsDict(field, values);
-
-        // write the doc -> set_id as a numeric field
-        addNumericField(field, docToSetId(uniqueValueSets, docToOrdCount, ords), NumberType.ORDINAL);
-      } else {
-        meta.writeVInt(SORTED_WITH_ADDRESSES);
-
-        // write the ord -> byte[] as a binary field
-        addTermsDict(field, values);
-
-        // write the stream of ords as a numeric field
-        // NOTE: we could return an iterator that delta-encodes these within a doc
-        addNumericField(field, ords, NumberType.ORDINAL);
-
-        // write the doc -> ord count as an absolute index to the stream
-        addOrdIndex(field, docToOrdCount);
-      }
-    }
-  }
-
-  private SortedSet<LongsRef> uniqueValueSets(Iterable<Number> docToValueCount, Iterable<Number> values) {
-    Set<LongsRef> uniqueValueSet = new HashSet<>();
-    LongsRef docValues = new LongsRef(256);
-
-    Iterator<Number> valueCountIterator = docToValueCount.iterator();
-    Iterator<Number> valueIterator = values.iterator();
-    int totalDictSize = 0;
-    while (valueCountIterator.hasNext()) {
-      docValues.length = valueCountIterator.next().intValue();
-      if (docValues.length > 256) {
-        return null;
-      }
-      for (int i = 0; i < docValues.length; ++i) {
-        docValues.longs[i] = valueIterator.next().longValue();
-      }
-      if (uniqueValueSet.contains(docValues)) {
-        continue;
-      }
-      totalDictSize += docValues.length;
-      if (totalDictSize > 256) {
-        return null;
-      }
-      uniqueValueSet.add(new LongsRef(Arrays.copyOf(docValues.longs, docValues.length), 0, docValues.length));
-    }
-    assert valueIterator.hasNext() == false;
-    return new TreeSet<>(uniqueValueSet);
-  }
-
-  private void writeDictionary(SortedSet<LongsRef> uniqueValueSets) throws IOException {
-    int lengthSum = 0;
-    for (LongsRef longs : uniqueValueSets) {
-      lengthSum += longs.length;
-    }
-
-    meta.writeInt(lengthSum);
-    for (LongsRef valueSet : uniqueValueSets) {
-      for (int  i = 0; i < valueSet.length; ++i) {
-        meta.writeLong(valueSet.longs[valueSet.offset + i]);
-      }
-    }
-
-    meta.writeInt(uniqueValueSets.size());
-    for (LongsRef valueSet : uniqueValueSets) {
-      meta.writeInt(valueSet.length);
-    }
-  }
-
-  private Iterable<Number> docToSetId(SortedSet<LongsRef> uniqueValueSets, Iterable<Number> docToValueCount, Iterable<Number> values) {
-    final Map<LongsRef, Integer> setIds = new HashMap<>();
-    int i = 0;
-    for (LongsRef set : uniqueValueSets) {
-      setIds.put(set, i++);
-    }
-    assert i == uniqueValueSets.size();
-
-    return new Iterable<Number>() {
-
-      @Override
-      public Iterator<Number> iterator() {
-        final Iterator<Number> valueCountIterator = docToValueCount.iterator();
-        final Iterator<Number> valueIterator = values.iterator();
-        final LongsRef docValues = new LongsRef(256);
-        return new Iterator<Number>() {
-
-          @Override
-          public boolean hasNext() {
-            return valueCountIterator.hasNext();
-          }
-
-          @Override
-          public Number next() {
-            docValues.length = valueCountIterator.next().intValue();
-            for (int i = 0; i < docValues.length; ++i) {
-              docValues.longs[i] = valueIterator.next().longValue();
-            }
-            final Integer id = setIds.get(docValues);
-            assert id != null;
-            return id;
-          }
-
-        };
-
-      }
-    };
-  }
-
-  // writes addressing information as MONOTONIC_COMPRESSED integer
-  private void addOrdIndex(FieldInfo field, Iterable<Number> values) throws IOException {
-    meta.writeVInt(field.number);
-    meta.writeByte(Lucene54DocValuesFormat.NUMERIC);
-    meta.writeVInt(MONOTONIC_COMPRESSED);
-    meta.writeLong(-1L);
-    meta.writeLong(data.getFilePointer());
-    meta.writeVLong(maxDoc);
-    meta.writeVInt(DIRECT_MONOTONIC_BLOCK_SHIFT);
-
-    final DirectMonotonicWriter writer = DirectMonotonicWriter.getInstance(meta, data, maxDoc + 1, DIRECT_MONOTONIC_BLOCK_SHIFT);
-    long addr = 0;
-    writer.add(addr);
-    for (Number v : values) {
-      addr += v.longValue();
-      writer.add(addr);
-    }
-    writer.finish();
-    meta.writeLong(data.getFilePointer());
-  }
-
-  @Override
-  public void close() throws IOException {
-    boolean success = false;
-    try {
-      if (meta != null) {
-        meta.writeVInt(-1); // write EOF marker
-        CodecUtil.writeFooter(meta); // write checksum
-      }
-      if (data != null) {
-        CodecUtil.writeFooter(data); // write checksum
-      }
-      success = true;
-    } finally {
-      if (success) {
-        IOUtils.close(data, meta);
-      } else {
-        IOUtils.closeWhileHandlingException(data, meta);
-      }
-      meta = data = null;
-    }
-  }
-}
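
The deduplication logic deleted above is the core of the SORTED_SET_TABLE strategy: collect each document's set of values, keep only the unique sets, and give up (return null) once the combined dictionary would exceed 256 values, at which point the consumer falls back to the addressed encoding. A minimal self-contained sketch of the same idea, assuming plain java.util collections in place of LongsRef and using illustrative names that are not Lucene APIs:

import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;

/**
 * Toy sketch of the SORTED_SET_TABLE idea from the consumer removed above:
 * deduplicate per-document value sets into a small dictionary and store one
 * set id per document. Class and method names are illustrative, not Lucene
 * APIs, and List<Long> stands in for LongsRef.
 */
public class SetTableSketch {

  /** Lexicographic order on value sets, so set ids are deterministic. */
  private static final Comparator<List<Long>> LEX = (a, b) -> {
    int n = Math.min(a.size(), b.size());
    for (int i = 0; i < n; i++) {
      int cmp = Long.compare(a.get(i), b.get(i));
      if (cmp != 0) {
        return cmp;
      }
    }
    return Integer.compare(a.size(), b.size());
  };

  /** Returns the sorted unique sets, or null once the dictionary exceeds 256 values. */
  static SortedSet<List<Long>> uniqueValueSets(List<List<Long>> perDocValues) {
    Set<List<Long>> unique = new HashSet<>();
    int totalDictSize = 0;
    for (List<Long> docValues : perDocValues) {
      if (unique.add(List.copyOf(docValues))) {
        totalDictSize += docValues.size();
        if (totalDictSize > 256) {
          return null; // dictionary too large: caller falls back to addressed encoding
        }
      }
    }
    SortedSet<List<Long>> sorted = new TreeSet<>(LEX);
    sorted.addAll(unique);
    return sorted;
  }

  /** Maps each document's value set to its id in the dictionary. */
  static int[] docToSetId(SortedSet<List<Long>> dict, List<List<Long>> perDocValues) {
    Map<List<Long>, Integer> ids = new HashMap<>();
    int next = 0;
    for (List<Long> set : dict) {
      ids.put(set, next++);
    }
    int[] setIds = new int[perDocValues.size()];
    for (int doc = 0; doc < setIds.length; doc++) {
      setIds[doc] = ids.get(perDocValues.get(doc));
    }
    return setIds;
  }

  public static void main(String[] args) {
    List<List<Long>> docs = List.of(
        List.of(1L, 5L), List.of(2L), List.of(1L, 5L), List.<Long>of());
    SortedSet<List<Long>> dict = uniqueValueSets(docs);
    System.out.println("dictionary: " + dict);                 // [[], [1, 5], [2]]
    System.out.println("set ids:    " + Arrays.toString(docToSetId(dict, docs))); // [1, 2, 1, 0]
  }
}

Capping the dictionary at 256 total values keeps set ids small enough to bitpack cheaply, which is why the consumer stops and returns null past that point rather than building an unbounded table.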

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/189f2b3f/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/Lucene54DocValuesFormat.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/Lucene54DocValuesFormat.java b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/Lucene54DocValuesFormat.java
deleted file mode 100644
index 91ccfe2..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/Lucene54DocValuesFormat.java
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.codecs.lucene54;
-
-
-import java.io.IOException;
-
-import org.apache.lucene.codecs.DocValuesConsumer;
-import org.apache.lucene.codecs.DocValuesFormat;
-import org.apache.lucene.codecs.DocValuesProducer;
-import org.apache.lucene.codecs.lucene70.Lucene70DocValuesFormat;
-import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.SegmentReadState;
-import org.apache.lucene.index.SegmentWriteState;
-import org.apache.lucene.util.SmallFloat;
-import org.apache.lucene.util.packed.DirectWriter;
-
-/**
- * Lucene 5.4 DocValues format.
- * <p>
- * Encodes the five per-document value types (Numeric, Binary, Sorted, SortedSet, SortedNumeric) with these strategies:
- * <p>
- * {@link DocValuesType#NUMERIC NUMERIC}:
- * <ul>
- *    <li>Delta-compressed: per-document integers written as deltas from the minimum value,
- *        compressed with bitpacking. For more information, see {@link DirectWriter}.
- *    <li>Table-compressed: when the number of unique values is very small (&lt; 256), and
- *        when there are unused "gaps" in the range of values used (such as {@link SmallFloat}), 
- *        a lookup table is written instead. Each per-document entry is instead the ordinal 
- *        to this table, and those ordinals are compressed with bitpacking ({@link DirectWriter}). 
- *    <li>GCD-compressed: when all numbers share a common divisor (as date fields often do), the
- *        greatest common divisor (GCD) is computed, and quotients are stored using Delta-compressed Numerics.
- *    <li>Monotonic-compressed: when all numbers are monotonically increasing offsets, they are written
- *        as blocks of bitpacked integers, encoding the deviation from the expected delta.
- *    <li>Const-compressed: when there is only one possible non-missing value, only the missing
- *        bitset is encoded.
- *    <li>Sparse-compressed: only documents with a value are stored, and lookups are performed
- *        using binary search.
- * </ul>
- * <p>
- * {@link DocValuesType#BINARY BINARY}:
- * <ul>
- *    <li>Fixed-width Binary: one large concatenated byte[] is written, along with the fixed length.
- *        Each document's value can be addressed directly with multiplication ({@code docID * length}). 
- *    <li>Variable-width Binary: one large concatenated byte[] is written, along with end addresses 
- *        for each document. The addresses are written as Monotonic-compressed numerics.
- *    <li>Prefix-compressed Binary: values are written in chunks of 16, with the first value written
- *        completely and other values sharing prefixes. Chunk addresses are written as Monotonic-compressed
- *        numerics. A reverse lookup index is written from a portion of every 1024th term.
- * </ul>
- * <p>
- * {@link DocValuesType#SORTED SORTED}:
- * <ul>
- *    <li>Sorted: a mapping of ordinals to deduplicated terms is written as Binary, 
- *        along with the per-document ordinals written using one of the numeric strategies above.
- * </ul>
- * <p>
- * {@link DocValuesType#SORTED_SET SORTED_SET}:
- * <ul>
- *    <li>Single: if all documents have 0 or 1 value, then data are written like SORTED.
- *    <li>SortedSet table: when there are few unique sets of values (&lt; 256), each set is assigned
- *        an id, a lookup table is written and the mapping from document to set id is written using the
- *        numeric strategies above.
- *    <li>SortedSet: a mapping of ordinals to deduplicated terms is written as Binary, 
- *        an ordinal list and per-document index into this list are written using the numeric strategies 
- *        above.
- * </ul>
- * <p>
- * {@link DocValuesType#SORTED_NUMERIC SORTED_NUMERIC}:
- * <ul>
- *    <li>Single: if all documents have 0 or 1 value, then data are written like NUMERIC.
- *    <li>SortedSet table: when there are few unique sets of values (&lt; 256), each set is assigned
- *        an id, a lookup table is written and the mapping from document to set id is written using the
- *        numeric strategies above.
- *    <li>SortedNumeric: a value list and per-document index into this list are written using the numeric
- *        strategies above.
- * </ul>
- * <p>
- * Files:
- * <ol>
- *   <li><tt>.dvd</tt>: DocValues data</li>
- *   <li><tt>.dvm</tt>: DocValues metadata</li>
- * </ol>
- * @lucene.experimental
- * @deprecated Use {@link Lucene70DocValuesFormat}.
- */
-@Deprecated
-public final class Lucene54DocValuesFormat extends DocValuesFormat {
-
-  /** Sole constructor. */
-  public Lucene54DocValuesFormat() {
-    super("Lucene54");
-  }
-
-  @Override
-  public DocValuesConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
-    return new Lucene54DocValuesConsumer(state, DATA_CODEC, DATA_EXTENSION, META_CODEC, META_EXTENSION);
-  }
-
-  @Override
-  public DocValuesProducer fieldsProducer(SegmentReadState state) throws IOException {
-    return new Lucene54DocValuesProducer(state, DATA_CODEC, DATA_EXTENSION, META_CODEC, META_EXTENSION);
-  }
-  
-  static final String DATA_CODEC = "Lucene54DocValuesData";
-  static final String DATA_EXTENSION = "dvd";
-  static final String META_CODEC = "Lucene54DocValuesMetadata";
-  static final String META_EXTENSION = "dvm";
-  static final int VERSION_START = 0;
-  static final int VERSION_CURRENT = VERSION_START;
-  
-  // indicates docvalues type
-  static final byte NUMERIC = 0;
-  static final byte BINARY = 1;
-  static final byte SORTED = 2;
-  static final byte SORTED_SET = 3;
-  static final byte SORTED_NUMERIC = 4;
-  
-  // address terms in blocks of 16 terms
-  static final int INTERVAL_SHIFT = 4;
-  static final int INTERVAL_COUNT = 1 << INTERVAL_SHIFT;
-  static final int INTERVAL_MASK = INTERVAL_COUNT - 1;
-  
-  // build reverse index from every 1024th term
-  static final int REVERSE_INTERVAL_SHIFT = 10;
-  static final int REVERSE_INTERVAL_COUNT = 1 << REVERSE_INTERVAL_SHIFT;
-  static final int REVERSE_INTERVAL_MASK = REVERSE_INTERVAL_COUNT - 1;
-  
-  // for conversion from reverse index to block
-  static final int BLOCK_INTERVAL_SHIFT = REVERSE_INTERVAL_SHIFT - INTERVAL_SHIFT;
-  static final int BLOCK_INTERVAL_COUNT = 1 << BLOCK_INTERVAL_SHIFT;
-  static final int BLOCK_INTERVAL_MASK = BLOCK_INTERVAL_COUNT - 1;
-
-  /** Compressed using packed blocks of ints. */
-  static final int DELTA_COMPRESSED = 0;
-  /** Compressed by computing the GCD. */
-  static final int GCD_COMPRESSED = 1;
-  /** Compressed by giving IDs to unique values. */
-  static final int TABLE_COMPRESSED = 2;
-  /** Compressed with monotonically increasing values. */
-  static final int MONOTONIC_COMPRESSED = 3;
-  /** Compressed with constant value (uses only missing bitset). */
-  static final int CONST_COMPRESSED = 4;
-  /** Compressed with sparse arrays. */
-  static final int SPARSE_COMPRESSED = 5;
-
-  /** Uncompressed binary, written directly (fixed length). */
-  static final int BINARY_FIXED_UNCOMPRESSED = 0;
-  /** Uncompressed binary, written directly (variable length). */
-  static final int BINARY_VARIABLE_UNCOMPRESSED = 1;
-  /** Compressed binary with shared prefixes. */
-  static final int BINARY_PREFIX_COMPRESSED = 2;
-
-  /** Standard storage for sorted set values with 1 level of indirection:
-   *  {@code docId -> address -> ord}. */
-  static final int SORTED_WITH_ADDRESSES = 0;
-  /** Single-valued sorted set values, encoded as sorted values, so no level
-   *  of indirection: {@code docId -> ord}. */
-  static final int SORTED_SINGLE_VALUED = 1;
-  /** Compressed giving IDs to unique sets of values:
-   * {@code docId -> setId -> ords} */
-  static final int SORTED_SET_TABLE = 2;
-  
-  /** Placeholder for missing offset that means there are no missing values. */
-  static final int ALL_LIVE = -1;
-  /** Placeholder for missing offset that means all values are missing. */
-  static final int ALL_MISSING = -2;
-  
-  // addressing uses 16k blocks
-  static final int MONOTONIC_BLOCK_SIZE = 16384;
-  static final int DIRECT_MONOTONIC_BLOCK_SHIFT = 16;
-}
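
Of the numeric strategies enumerated above, TABLE_COMPRESSED is the easiest to picture concretely: assign ids to the (< 256) unique values, then bitpack one small ordinal per document. A toy sketch of that idea, assuming illustrative names and plain arrays in place of the DirectWriter bitpacking the real format used:

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.TreeSet;

/**
 * Toy sketch of the TABLE_COMPRESSED numeric strategy described in the
 * javadoc removed above: assign ids to the (< 256) unique values, then
 * store one bitpacked ordinal per document. Names are illustrative, not
 * Lucene APIs.
 */
public class TableCompressedSketch {

  /** Number of bits needed to represent values in [0, maxValue]. */
  static int bitsRequired(long maxValue) {
    return Math.max(1, 64 - Long.numberOfLeadingZeros(maxValue));
  }

  public static void main(String[] args) {
    // Few distinct values, with large unused gaps between them.
    long[] docValues = {1000, 4, 1000, 123456789, 4, 4};

    // 1. Build the lookup table of unique values in sorted order.
    TreeSet<Long> unique = new TreeSet<>();
    for (long v : docValues) {
      unique.add(v);
    }
    long[] table = unique.stream().mapToLong(Long::longValue).toArray();

    // 2. Replace each value by its ordinal in the table.
    Map<Long, Integer> ords = new HashMap<>();
    for (int i = 0; i < table.length; i++) {
      ords.put(table[i], i);
    }
    int[] ordinals = new int[docValues.length];
    for (int doc = 0; doc < docValues.length; doc++) {
      ordinals[doc] = ords.get(docValues[doc]);
    }

    // 3. Ordinals would be bitpacked; here we only report the per-document cost.
    int bitsPerDoc = bitsRequired(table.length - 1);
    System.out.println("table:    " + Arrays.toString(table));    // [4, 1000, 123456789]
    System.out.println("ordinals: " + Arrays.toString(ordinals)); // [1, 0, 1, 2, 0, 0]
    System.out.println("bits/doc: " + bitsPerDoc + " instead of 64");
  }
}

With three unique values, each document costs only 2 bits instead of 64, which is the saving the javadoc describes for fields with few distinct values and unused gaps, such as SmallFloat-encoded norms.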