Posted to commits@lucene.apache.org by no...@apache.org on 2016/09/07 09:56:49 UTC
[01/50] [abbrv] lucene-solr:apiv2: make javadoc less confusing
Repository: lucene-solr
Updated Branches:
refs/heads/apiv2 92411981a -> b49d9027b
make javadoc less confusing
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/312f4568
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/312f4568
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/312f4568
Branch: refs/heads/apiv2
Commit: 312f45685ec794ab27285a9d42d08d31ecebd17b
Parents: 4c0ff4e
Author: Robert Muir <rm...@apache.org>
Authored: Wed Aug 24 13:23:02 2016 -0400
Committer: Robert Muir <rm...@apache.org>
Committed: Wed Aug 24 13:23:02 2016 -0400
----------------------------------------------------------------------
.../src/java/org/apache/lucene/document/InetAddressPoint.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/312f4568/lucene/sandbox/src/java/org/apache/lucene/document/InetAddressPoint.java
----------------------------------------------------------------------
diff --git a/lucene/sandbox/src/java/org/apache/lucene/document/InetAddressPoint.java b/lucene/sandbox/src/java/org/apache/lucene/document/InetAddressPoint.java
index 88684f6..5cda742 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/document/InetAddressPoint.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/document/InetAddressPoint.java
@@ -41,7 +41,7 @@ import org.apache.lucene.util.StringHelper;
* <li>{@link #newExactQuery(String, InetAddress)} for matching an exact network address.
* <li>{@link #newPrefixQuery(String, InetAddress, int)} for matching a network based on CIDR prefix.
* <li>{@link #newRangeQuery(String, InetAddress, InetAddress)} for matching arbitrary network address ranges.
- * <li>{@link #newSetQuery(String, InetAddress...)} for matching a set of 1D values.
+ * <li>{@link #newSetQuery(String, InetAddress...)} for matching a set of network addresses.
* </ul>
* <p>
* This field supports both IPv4 and IPv6 addresses: IPv4 addresses are converted
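For readers of the updated javadoc, a minimal usage sketch of the four factory methods listed above (illustrative only, not part of the commit; the field name "ip" and the concrete addresses are made up):

import java.net.InetAddress;

import org.apache.lucene.document.InetAddressPoint;
import org.apache.lucene.search.Query;

public class InetAddressPointQueries {
  public static void main(String[] args) throws Exception {
    InetAddress host = InetAddress.getByName("1.2.3.4");

    // exact match on a single network address
    Query exact = InetAddressPoint.newExactQuery("ip", host);

    // CIDR-style match: all addresses in 1.2.3.0/24
    Query prefix = InetAddressPoint.newPrefixQuery("ip", host, 24);

    // arbitrary inclusive range of network addresses
    Query range = InetAddressPoint.newRangeQuery("ip",
        InetAddress.getByName("1.2.3.0"),
        InetAddress.getByName("1.2.3.255"));

    // the method whose javadoc this commit clarifies: a set of addresses
    Query set = InetAddressPoint.newSetQuery("ip",
        host, InetAddress.getByName("1.2.3.7"));

    System.out.println(exact + " " + prefix + " " + range + " " + set);
  }
}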
[07/50] [abbrv] lucene-solr:apiv2: remove unnecessary deprecated classes
Posted by no...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ada71497/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50NormsFormat.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50NormsFormat.java b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50NormsFormat.java
deleted file mode 100644
index 786dcf2..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50NormsFormat.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.codecs.lucene50;
-
-
-import java.io.IOException;
-
-import org.apache.lucene.codecs.NormsConsumer;
-import org.apache.lucene.codecs.NormsFormat;
-import org.apache.lucene.codecs.NormsProducer;
-import org.apache.lucene.index.SegmentReadState;
-import org.apache.lucene.index.SegmentWriteState;
-
-/**
- * Lucene 5.0 Score normalization format.
- * @deprecated Only for reading old 5.0-5.2 segments
- */
-@Deprecated
-class Lucene50NormsFormat extends NormsFormat {
-
- /** Sole Constructor */
- public Lucene50NormsFormat() {}
-
- @Override
- public NormsConsumer normsConsumer(SegmentWriteState state) throws IOException {
- throw new UnsupportedOperationException("this codec can only be used for reading");
- }
-
- @Override
- public NormsProducer normsProducer(SegmentReadState state) throws IOException {
- return new Lucene50NormsProducer(state, DATA_CODEC, DATA_EXTENSION, METADATA_CODEC, METADATA_EXTENSION);
- }
-
- static final String DATA_CODEC = "Lucene50NormsData";
- static final String DATA_EXTENSION = "nvd";
- static final String METADATA_CODEC = "Lucene50NormsMetadata";
- static final String METADATA_EXTENSION = "nvm";
- static final int VERSION_START = 0;
- static final int VERSION_CURRENT = VERSION_START;
-
- static final byte DELTA_COMPRESSED = 0;
- static final byte TABLE_COMPRESSED = 1;
- static final byte CONST_COMPRESSED = 2;
- static final byte UNCOMPRESSED = 3;
- static final byte INDIRECT = 4;
- static final byte PATCHED_BITSET = 5;
- static final byte PATCHED_TABLE = 6;
-}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ada71497/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50NormsProducer.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50NormsProducer.java b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50NormsProducer.java
deleted file mode 100644
index e087e27..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50NormsProducer.java
+++ /dev/null
@@ -1,481 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.codecs.lucene50;
-
-
-import static org.apache.lucene.codecs.lucene50.Lucene50NormsFormat.CONST_COMPRESSED;
-import static org.apache.lucene.codecs.lucene50.Lucene50NormsFormat.DELTA_COMPRESSED;
-import static org.apache.lucene.codecs.lucene50.Lucene50NormsFormat.INDIRECT;
-import static org.apache.lucene.codecs.lucene50.Lucene50NormsFormat.PATCHED_BITSET;
-import static org.apache.lucene.codecs.lucene50.Lucene50NormsFormat.PATCHED_TABLE;
-import static org.apache.lucene.codecs.lucene50.Lucene50NormsFormat.TABLE_COMPRESSED;
-import static org.apache.lucene.codecs.lucene50.Lucene50NormsFormat.UNCOMPRESSED;
-import static org.apache.lucene.codecs.lucene50.Lucene50NormsFormat.VERSION_CURRENT;
-import static org.apache.lucene.codecs.lucene50.Lucene50NormsFormat.VERSION_START;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.lucene.codecs.CodecUtil;
-import org.apache.lucene.codecs.NormsProducer;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.NumericDocValues;
-import org.apache.lucene.index.SegmentReadState;
-import org.apache.lucene.store.ChecksumIndexInput;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.util.Accountable;
-import org.apache.lucene.util.Accountables;
-import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.RamUsageEstimator;
-import org.apache.lucene.util.SparseFixedBitSet;
-import org.apache.lucene.util.packed.BlockPackedReader;
-import org.apache.lucene.util.packed.MonotonicBlockPackedReader;
-import org.apache.lucene.util.packed.PackedInts;
-
-/**
- * Reader for {@link Lucene50NormsFormat}
- * @deprecated Only for reading old 5.0-5.2 segments
- */
-@Deprecated
-final class Lucene50NormsProducer extends NormsProducer {
- // metadata maps (just file pointers and minimal stuff)
- private final Map<String,NormsEntry> norms = new HashMap<>();
- private final IndexInput data;
-
- // ram instances we have already loaded
- final Map<String,Norms> instances = new HashMap<>();
-
- private final AtomicLong ramBytesUsed;
- private final AtomicInteger activeCount = new AtomicInteger();
- private final int maxDoc;
-
- private final boolean merging;
-
- // clone for merge: when merging we don't do any instances.put()s
- Lucene50NormsProducer(Lucene50NormsProducer original) {
- assert Thread.holdsLock(original);
- norms.putAll(original.norms);
- data = original.data.clone();
- instances.putAll(original.instances);
- ramBytesUsed = new AtomicLong(original.ramBytesUsed.get());
- activeCount.set(original.activeCount.get());
- maxDoc = original.maxDoc;
- merging = true;
- }
-
- Lucene50NormsProducer(SegmentReadState state, String dataCodec, String dataExtension, String metaCodec, String metaExtension) throws IOException {
- merging = false;
- maxDoc = state.segmentInfo.maxDoc();
- String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, metaExtension);
- ramBytesUsed = new AtomicLong(RamUsageEstimator.shallowSizeOfInstance(getClass()));
- int version = -1;
-
- // read in the entries from the metadata file.
- try (ChecksumIndexInput in = state.directory.openChecksumInput(metaName, state.context)) {
- Throwable priorE = null;
- try {
- version = CodecUtil.checkIndexHeader(in, metaCodec, VERSION_START, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
- readFields(in, state.fieldInfos);
- } catch (Throwable exception) {
- priorE = exception;
- } finally {
- CodecUtil.checkFooter(in, priorE);
- }
- }
-
- String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension);
- this.data = state.directory.openInput(dataName, state.context);
- boolean success = false;
- try {
- final int version2 = CodecUtil.checkIndexHeader(data, dataCodec, VERSION_START, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
- if (version != version2) {
- throw new CorruptIndexException("Format versions mismatch: meta=" + version + ",data=" + version2, data);
- }
-
- // NOTE: data file is too costly to verify checksum against all the bytes on open,
- // but for now we at least verify proper structure of the checksum footer: which looks
- // for FOOTER_MAGIC + algorithmID. This is cheap and can detect some forms of corruption
- // such as file truncation.
- CodecUtil.retrieveChecksum(data);
-
- success = true;
- } finally {
- if (!success) {
- IOUtils.closeWhileHandlingException(this.data);
- }
- }
- }
-
- private void readFields(IndexInput meta, FieldInfos infos) throws IOException {
- int fieldNumber = meta.readVInt();
- while (fieldNumber != -1) {
- FieldInfo info = infos.fieldInfo(fieldNumber);
- if (info == null) {
- throw new CorruptIndexException("Invalid field number: " + fieldNumber, meta);
- } else if (!info.hasNorms()) {
- throw new CorruptIndexException("Invalid field: " + info.name, meta);
- }
- NormsEntry entry = readEntry(info, meta);
- norms.put(info.name, entry);
- fieldNumber = meta.readVInt();
- }
- }
-
- private NormsEntry readEntry(FieldInfo info, IndexInput meta) throws IOException {
- NormsEntry entry = new NormsEntry();
- entry.count = meta.readVInt();
- entry.format = meta.readByte();
- entry.offset = meta.readLong();
- switch(entry.format) {
- case CONST_COMPRESSED:
- case UNCOMPRESSED:
- case TABLE_COMPRESSED:
- case DELTA_COMPRESSED:
- break;
- case PATCHED_BITSET:
- case PATCHED_TABLE:
- case INDIRECT:
- if (meta.readVInt() != info.number) {
- throw new CorruptIndexException("indirect norms entry for field: " + info.name + " is corrupt", meta);
- }
- entry.nested = readEntry(info, meta);
- break;
- default:
- throw new CorruptIndexException("Unknown format: " + entry.format, meta);
- }
- return entry;
- }
-
- @Override
- public synchronized NumericDocValues getNorms(FieldInfo field) throws IOException {
- Norms instance = instances.get(field.name);
- if (instance == null) {
- instance = loadNorms(norms.get(field.name));
- if (!merging) {
- instances.put(field.name, instance);
- activeCount.incrementAndGet();
- ramBytesUsed.addAndGet(instance.ramBytesUsed());
- }
- }
- return instance;
- }
-
- @Override
- public long ramBytesUsed() {
- return ramBytesUsed.get();
- }
-
- @Override
- public synchronized Collection<Accountable> getChildResources() {
- return Accountables.namedAccountables("field", instances);
- }
-
- @Override
- public void checkIntegrity() throws IOException {
- CodecUtil.checksumEntireFile(data);
- }
-
- private Norms loadNorms(NormsEntry entry) throws IOException {
- switch(entry.format) {
- case CONST_COMPRESSED: {
- final long v = entry.offset;
- return new Norms() {
- @Override
- public long get(int docID) {
- return v;
- }
-
- @Override
- public long ramBytesUsed() {
- return 8;
- }
-
- @Override
- public String toString() {
- return "constant";
- }
- };
- }
- case UNCOMPRESSED: {
- data.seek(entry.offset);
- final byte bytes[] = new byte[entry.count];
- data.readBytes(bytes, 0, bytes.length);
- return new Norms() {
- @Override
- public long get(int docID) {
- return bytes[docID];
- }
-
- @Override
- public long ramBytesUsed() {
- return RamUsageEstimator.sizeOf(bytes);
- }
-
- @Override
- public String toString() {
- return "byte array";
- }
- };
- }
- case DELTA_COMPRESSED: {
- data.seek(entry.offset);
- int packedIntsVersion = data.readVInt();
- int blockSize = data.readVInt();
- final BlockPackedReader reader = new BlockPackedReader(data, packedIntsVersion, blockSize, entry.count, false);
- return new Norms() {
- @Override
- public long get(int docID) {
- return reader.get(docID);
- }
-
- @Override
- public long ramBytesUsed() {
- return reader.ramBytesUsed();
- }
-
- @Override
- public Collection<Accountable> getChildResources() {
- return Collections.singleton(reader);
- }
-
- @Override
- public String toString() {
- return "delta compressed";
- }
- };
- }
- case TABLE_COMPRESSED: {
- data.seek(entry.offset);
- int packedIntsVersion = data.readVInt();
- final int formatID = data.readVInt();
- final int bitsPerValue = data.readVInt();
-
- if (bitsPerValue != 1 && bitsPerValue != 2 && bitsPerValue != 4) {
- throw new CorruptIndexException("TABLE_COMPRESSED only supports bpv=1, bpv=2 and bpv=4, got=" + bitsPerValue, data);
- }
- int size = 1 << bitsPerValue;
- final byte decode[] = new byte[size];
- final int ordsSize = data.readVInt();
- for (int i = 0; i < ordsSize; ++i) {
- decode[i] = data.readByte();
- }
- for (int i = ordsSize; i < size; ++i) {
- decode[i] = 0;
- }
-
- final PackedInts.Reader ordsReader = PackedInts.getReaderNoHeader(data, PackedInts.Format.byId(formatID), packedIntsVersion, entry.count, bitsPerValue);
- return new Norms() {
- @Override
- public long get(int docID) {
- return decode[(int)ordsReader.get(docID)];
- }
-
- @Override
- public long ramBytesUsed() {
- return RamUsageEstimator.sizeOf(decode) + ordsReader.ramBytesUsed();
- }
-
- @Override
- public Collection<Accountable> getChildResources() {
- return Collections.singleton(ordsReader);
- }
-
- @Override
- public String toString() {
- return "table compressed";
- }
- };
- }
- case INDIRECT: {
- data.seek(entry.offset);
- final long common = data.readLong();
- int packedIntsVersion = data.readVInt();
- int blockSize = data.readVInt();
- final MonotonicBlockPackedReader live = MonotonicBlockPackedReader.of(data, packedIntsVersion, blockSize, entry.count, false);
- final Norms nestedInstance = loadNorms(entry.nested);
- final int upperBound = entry.count-1;
- return new Norms() {
- @Override
- public long get(int docID) {
- int low = 0;
- int high = upperBound;
- while (low <= high) {
- int mid = (low + high) >>> 1;
- long doc = live.get(mid);
-
- if (doc < docID) {
- low = mid + 1;
- } else if (doc > docID) {
- high = mid - 1;
- } else {
- return nestedInstance.get(mid);
- }
- }
- return common;
- }
-
- @Override
- public long ramBytesUsed() {
- return live.ramBytesUsed() + nestedInstance.ramBytesUsed();
- }
-
- @Override
- public Collection<Accountable> getChildResources() {
- List<Accountable> children = new ArrayList<>();
- children.add(Accountables.namedAccountable("keys", live));
- children.add(Accountables.namedAccountable("values", nestedInstance));
- return Collections.unmodifiableList(children);
- }
-
- @Override
- public String toString() {
- return "indirect";
- }
- };
- }
- case PATCHED_BITSET: {
- data.seek(entry.offset);
- final long common = data.readLong();
- int packedIntsVersion = data.readVInt();
- int blockSize = data.readVInt();
- MonotonicBlockPackedReader live = MonotonicBlockPackedReader.of(data, packedIntsVersion, blockSize, entry.count, true);
- final SparseFixedBitSet set = new SparseFixedBitSet(maxDoc);
- for (int i = 0; i < live.size(); i++) {
- int doc = (int) live.get(i);
- set.set(doc);
- }
- Norms nestedInstance = loadNorms(entry.nested);
- return new Norms() {
- @Override
- public long get(int docID) {
- if (set.get(docID)) {
- return nestedInstance.get(docID);
- } else {
- return common;
- }
- }
-
- @Override
- public long ramBytesUsed() {
- return set.ramBytesUsed() + nestedInstance.ramBytesUsed();
- }
-
- @Override
- public Collection<Accountable> getChildResources() {
- List<Accountable> children = new ArrayList<>();
- children.add(Accountables.namedAccountable("keys", set));
- children.add(Accountables.namedAccountable("values", nestedInstance));
- return Collections.unmodifiableList(children);
- }
-
- @Override
- public String toString() {
- return "patched bitset";
- }
- };
- }
- case PATCHED_TABLE: {
- data.seek(entry.offset);
- int packedIntsVersion = data.readVInt();
- final int formatID = data.readVInt();
- final int bitsPerValue = data.readVInt();
-
- if (bitsPerValue != 2 && bitsPerValue != 4) {
- throw new CorruptIndexException("PATCHED_TABLE only supports bpv=2 and bpv=4, got=" + bitsPerValue, data);
- }
- final int size = 1 << bitsPerValue;
- final int ordsSize = data.readVInt();
- final byte decode[] = new byte[ordsSize];
- assert ordsSize + 1 == size;
- for (int i = 0; i < ordsSize; ++i) {
- decode[i] = data.readByte();
- }
-
- final PackedInts.Reader ordsReader = PackedInts.getReaderNoHeader(data, PackedInts.Format.byId(formatID), packedIntsVersion, entry.count, bitsPerValue);
- final Norms nestedInstance = loadNorms(entry.nested);
-
- return new Norms() {
- @Override
- public long get(int docID) {
- int ord = (int)ordsReader.get(docID);
- try {
- // doing a try/catch here eliminates a seemingly unavoidable branch in hotspot...
- return decode[ord];
- } catch (IndexOutOfBoundsException e) {
- return nestedInstance.get(docID);
- }
- }
-
- @Override
- public long ramBytesUsed() {
- return RamUsageEstimator.sizeOf(decode) + ordsReader.ramBytesUsed() + nestedInstance.ramBytesUsed();
- }
-
- @Override
- public Collection<Accountable> getChildResources() {
- List<Accountable> children = new ArrayList<>();
- children.add(Accountables.namedAccountable("common", ordsReader));
- children.add(Accountables.namedAccountable("uncommon", nestedInstance));
- return Collections.unmodifiableList(children);
- }
-
- @Override
- public String toString() {
- return "patched table";
- }
- };
- }
- default:
- throw new AssertionError();
- }
- }
-
- @Override
- public void close() throws IOException {
- data.close();
- }
-
- static class NormsEntry {
- byte format;
- long offset;
- int count;
- NormsEntry nested;
- }
-
- static abstract class Norms extends NumericDocValues implements Accountable {
- }
-
- @Override
- public synchronized NormsProducer getMergeInstance() throws IOException {
- return new Lucene50NormsProducer(this);
- }
-
- @Override
- public String toString() {
- return getClass().getSimpleName() + "(fields=" + norms.size() + ",active=" + activeCount.get() + ")";
- }
-}
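The INDIRECT case above keeps only the documents whose norm differs from the common value: their doc IDs are stored sorted (the monotonic "live" reader), their values in a nested instance, and every other document implicitly gets the common value. A standalone sketch of that lookup using plain arrays instead of the packed readers (all names here are illustrative, not Lucene APIs):

// Sparse doc -> value table with a default: the core of the INDIRECT format.
final class SparseNorms {
  private final int[] docsWithValue; // sorted doc IDs that carry an uncommon norm
  private final long[] values;       // values[i] belongs to docsWithValue[i]
  private final long common;         // implicit value for every other document

  SparseNorms(int[] docsWithValue, long[] values, long common) {
    this.docsWithValue = docsWithValue;
    this.values = values;
    this.common = common;
  }

  long get(int docID) {
    int low = 0;
    int high = docsWithValue.length - 1;
    while (low <= high) {
      int mid = (low + high) >>> 1;  // unsigned shift avoids int overflow
      int doc = docsWithValue[mid];
      if (doc < docID) {
        low = mid + 1;
      } else if (doc > docID) {
        high = mid - 1;
      } else {
        return values[mid];          // uncommon value found at slot mid
      }
    }
    return common;                   // docID absent: it has the common value
  }
}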
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ada71497/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/Lucene53Codec.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/Lucene53Codec.java b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/Lucene53Codec.java
deleted file mode 100644
index 7630194..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/Lucene53Codec.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.codecs.lucene53;
-
-
-import java.util.Objects;
-
-import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.codecs.CompoundFormat;
-import org.apache.lucene.codecs.DocValuesFormat;
-import org.apache.lucene.codecs.FieldInfosFormat;
-import org.apache.lucene.codecs.FilterCodec;
-import org.apache.lucene.codecs.LiveDocsFormat;
-import org.apache.lucene.codecs.NormsFormat;
-import org.apache.lucene.codecs.PointsFormat;
-import org.apache.lucene.codecs.PostingsFormat;
-import org.apache.lucene.codecs.SegmentInfoFormat;
-import org.apache.lucene.codecs.StoredFieldsFormat;
-import org.apache.lucene.codecs.TermVectorsFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50FieldInfosFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50LiveDocsFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50SegmentInfoFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
-import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat;
-import org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat;
-import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat;
-
-/**
- * Implements the Lucene 5.3 index format, with configurable per-field postings
- * and docvalues formats.
- * <p>
- * If you want to reuse functionality of this codec in another codec, extend
- * {@link FilterCodec}.
- *
- * @see org.apache.lucene.codecs.lucene53 package documentation for file format details.
- * @deprecated Only for reading old 5.3 segments
- */
-@Deprecated
-public class Lucene53Codec extends Codec {
- private final TermVectorsFormat vectorsFormat = new Lucene50TermVectorsFormat();
- private final FieldInfosFormat fieldInfosFormat = new Lucene50FieldInfosFormat();
- private final SegmentInfoFormat segmentInfosFormat = new Lucene50SegmentInfoFormat();
- private final LiveDocsFormat liveDocsFormat = new Lucene50LiveDocsFormat();
- private final CompoundFormat compoundFormat = new Lucene50CompoundFormat();
-
- private final PostingsFormat postingsFormat = new PerFieldPostingsFormat() {
- @Override
- public PostingsFormat getPostingsFormatForField(String field) {
- return Lucene53Codec.this.getPostingsFormatForField(field);
- }
- };
-
- private final DocValuesFormat docValuesFormat = new PerFieldDocValuesFormat() {
- @Override
- public DocValuesFormat getDocValuesFormatForField(String field) {
- return Lucene53Codec.this.getDocValuesFormatForField(field);
- }
- };
-
- private final StoredFieldsFormat storedFieldsFormat;
-
- /**
- * Instantiates a new codec.
- */
- public Lucene53Codec() {
- this(Mode.BEST_SPEED);
- }
-
- /**
- * Instantiates a new codec, specifying the stored fields compression
- * mode to use.
- * @param mode stored fields compression mode to use for newly
- * flushed/merged segments.
- */
- public Lucene53Codec(Mode mode) {
- super("Lucene53");
- this.storedFieldsFormat = new Lucene50StoredFieldsFormat(Objects.requireNonNull(mode));
- }
-
- @Override
- public final StoredFieldsFormat storedFieldsFormat() {
- return storedFieldsFormat;
- }
-
- @Override
- public final TermVectorsFormat termVectorsFormat() {
- return vectorsFormat;
- }
-
- @Override
- public final PostingsFormat postingsFormat() {
- return postingsFormat;
- }
-
- @Override
- public final FieldInfosFormat fieldInfosFormat() {
- return fieldInfosFormat;
- }
-
- @Override
- public final SegmentInfoFormat segmentInfoFormat() {
- return segmentInfosFormat;
- }
-
- @Override
- public final LiveDocsFormat liveDocsFormat() {
- return liveDocsFormat;
- }
-
- @Override
- public final CompoundFormat compoundFormat() {
- return compoundFormat;
- }
-
- /** Returns the postings format that should be used for writing
- * new segments of <code>field</code>.
- *
- * The default implementation always returns "Lucene50".
- * <p>
- * <b>WARNING:</b> if you subclass, you are responsible for index
- * backwards compatibility: future versions of Lucene are only
- * guaranteed to be able to read the default implementation.
- */
- public PostingsFormat getPostingsFormatForField(String field) {
- return defaultFormat;
- }
-
- /** Returns the docvalues format that should be used for writing
- * new segments of <code>field</code>.
- *
- * The default implementation always returns "Lucene50".
- * <p>
- * <b>WARNING:</b> if you subclass, you are responsible for index
- * backwards compatibility: future versions of Lucene are only
- * guaranteed to be able to read the default implementation.
- */
- public DocValuesFormat getDocValuesFormatForField(String field) {
- return defaultDVFormat;
- }
-
- @Override
- public final DocValuesFormat docValuesFormat() {
- return docValuesFormat;
- }
-
- @Override
- public final PointsFormat pointsFormat() {
- return PointsFormat.EMPTY;
- }
-
- private final PostingsFormat defaultFormat = PostingsFormat.forName("Lucene50");
- private final DocValuesFormat defaultDVFormat = DocValuesFormat.forName("Lucene50");
-
- private final NormsFormat normsFormat = new Lucene53NormsFormat();
-
- @Override
- public final NormsFormat normsFormat() {
- return normsFormat;
- }
-}
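The WARNING javadoc above concerns the two per-field hooks. A hedged sketch of how a subclass would have used them before this removal (the field rule and the "Memory" format choice are hypothetical, and assume the lucene-codecs module is on the classpath):

import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.lucene53.Lucene53Codec;

@Deprecated
class PerFieldExampleCodec extends Lucene53Codec {
  @Override
  public PostingsFormat getPostingsFormatForField(String field) {
    if ("id".equals(field)) {
      // hypothetical: store a primary-key-like field in a different format
      return PostingsFormat.forName("Memory");
    }
    return super.getPostingsFormatForField(field); // default: "Lucene50"
  }
}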
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ada71497/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/package.html
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/package.html b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/package.html
deleted file mode 100644
index 325555c..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/package.html
+++ /dev/null
@@ -1,25 +0,0 @@
-<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<html>
-<head>
- <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
-</head>
-<body>
-Lucene 5.3 file format.
-</body>
-</html>
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ada71497/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/Lucene54Codec.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/Lucene54Codec.java b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/Lucene54Codec.java
deleted file mode 100644
index d982d3b..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/Lucene54Codec.java
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.codecs.lucene54;
-
-
-import java.util.Objects;
-
-import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.codecs.CompoundFormat;
-import org.apache.lucene.codecs.DocValuesFormat;
-import org.apache.lucene.codecs.FieldInfosFormat;
-import org.apache.lucene.codecs.FilterCodec;
-import org.apache.lucene.codecs.LiveDocsFormat;
-import org.apache.lucene.codecs.NormsFormat;
-import org.apache.lucene.codecs.PointsFormat;
-import org.apache.lucene.codecs.PostingsFormat;
-import org.apache.lucene.codecs.SegmentInfoFormat;
-import org.apache.lucene.codecs.StoredFieldsFormat;
-import org.apache.lucene.codecs.TermVectorsFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50FieldInfosFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50LiveDocsFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50SegmentInfoFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
-import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat;
-import org.apache.lucene.codecs.lucene53.Lucene53NormsFormat;
-import org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat;
-import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat;
-
-/**
- * Implements the Lucene 5.4 index format, with configurable per-field postings
- * and docvalues formats.
- * <p>
- * If you want to reuse functionality of this codec in another codec, extend
- * {@link FilterCodec}.
- *
- * @see org.apache.lucene.codecs.lucene54 package documentation for file format details.
- * @lucene.experimental
- * @deprecated Only for 5.x back compat
- */
-@Deprecated
-public class Lucene54Codec extends Codec {
- private final TermVectorsFormat vectorsFormat = new Lucene50TermVectorsFormat();
- private final FieldInfosFormat fieldInfosFormat = new Lucene50FieldInfosFormat();
- private final SegmentInfoFormat segmentInfosFormat = new Lucene50SegmentInfoFormat();
- private final LiveDocsFormat liveDocsFormat = new Lucene50LiveDocsFormat();
- private final CompoundFormat compoundFormat = new Lucene50CompoundFormat();
-
- private final PostingsFormat postingsFormat = new PerFieldPostingsFormat() {
- @Override
- public PostingsFormat getPostingsFormatForField(String field) {
- return Lucene54Codec.this.getPostingsFormatForField(field);
- }
- };
-
- private final DocValuesFormat docValuesFormat = new PerFieldDocValuesFormat() {
- @Override
- public DocValuesFormat getDocValuesFormatForField(String field) {
- return Lucene54Codec.this.getDocValuesFormatForField(field);
- }
- };
-
- private final StoredFieldsFormat storedFieldsFormat;
-
- /**
- * Instantiates a new codec.
- */
- public Lucene54Codec() {
- this(Mode.BEST_SPEED);
- }
-
- /**
- * Instantiates a new codec, specifying the stored fields compression
- * mode to use.
- * @param mode stored fields compression mode to use for newly
- * flushed/merged segments.
- */
- public Lucene54Codec(Mode mode) {
- super("Lucene54");
- this.storedFieldsFormat = new Lucene50StoredFieldsFormat(Objects.requireNonNull(mode));
- }
-
- @Override
- public final StoredFieldsFormat storedFieldsFormat() {
- return storedFieldsFormat;
- }
-
- @Override
- public final TermVectorsFormat termVectorsFormat() {
- return vectorsFormat;
- }
-
- @Override
- public final PostingsFormat postingsFormat() {
- return postingsFormat;
- }
-
- @Override
- public final FieldInfosFormat fieldInfosFormat() {
- return fieldInfosFormat;
- }
-
- @Override
- public final SegmentInfoFormat segmentInfoFormat() {
- return segmentInfosFormat;
- }
-
- @Override
- public final LiveDocsFormat liveDocsFormat() {
- return liveDocsFormat;
- }
-
- @Override
- public final CompoundFormat compoundFormat() {
- return compoundFormat;
- }
-
- /** Returns the postings format that should be used for writing
- * new segments of <code>field</code>.
- *
- * The default implementation always returns "Lucene50".
- * <p>
- * <b>WARNING:</b> if you subclass, you are responsible for index
- * backwards compatibility: future versions of Lucene are only
- * guaranteed to be able to read the default implementation.
- */
- public PostingsFormat getPostingsFormatForField(String field) {
- return defaultFormat;
- }
-
- /** Returns the docvalues format that should be used for writing
- * new segments of <code>field</code>.
- *
- * The default implementation always returns "Lucene54".
- * <p>
- * <b>WARNING:</b> if you subclass, you are responsible for index
- * backwards compatibility: future versions of Lucene are only
- * guaranteed to be able to read the default implementation.
- */
- public DocValuesFormat getDocValuesFormatForField(String field) {
- return defaultDVFormat;
- }
-
- @Override
- public final DocValuesFormat docValuesFormat() {
- return docValuesFormat;
- }
-
- @Override
- public final PointsFormat pointsFormat() {
- return PointsFormat.EMPTY;
- }
-
- private final PostingsFormat defaultFormat = PostingsFormat.forName("Lucene50");
- private final DocValuesFormat defaultDVFormat = DocValuesFormat.forName("Lucene54");
-
- private final NormsFormat normsFormat = new Lucene53NormsFormat();
-
- @Override
- public final NormsFormat normsFormat() {
- return normsFormat;
- }
-}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ada71497/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/package.html
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/package.html b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/package.html
deleted file mode 100644
index f60abbe..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/package.html
+++ /dev/null
@@ -1,25 +0,0 @@
-<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<html>
-<head>
- <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
-</head>
-<body>
-Lucene 5.4 file format.
-</body>
-</html>
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ada71497/lucene/backward-codecs/src/resources/META-INF/services/org.apache.lucene.codecs.Codec
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/resources/META-INF/services/org.apache.lucene.codecs.Codec b/lucene/backward-codecs/src/resources/META-INF/services/org.apache.lucene.codecs.Codec
index 71aa938..875aba5 100644
--- a/lucene/backward-codecs/src/resources/META-INF/services/org.apache.lucene.codecs.Codec
+++ b/lucene/backward-codecs/src/resources/META-INF/services/org.apache.lucene.codecs.Codec
@@ -13,7 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-org.apache.lucene.codecs.lucene50.Lucene50Codec
-org.apache.lucene.codecs.lucene53.Lucene53Codec
-org.apache.lucene.codecs.lucene54.Lucene54Codec
org.apache.lucene.codecs.lucene60.Lucene60Codec
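For context: codec names resolve through these META-INF/services entries via Java's SPI, so removing a line makes the corresponding name unloadable. A small sketch of the lookup (not part of the commit):

import org.apache.lucene.codecs.Codec;

public class CodecLookup {
  public static void main(String[] args) {
    // resolves "Lucene60" through the service entry kept above
    Codec codec = Codec.forName("Lucene60");
    System.out.println(codec.getName());

    // after this commit, Codec.forName("Lucene53") would throw
    // IllegalArgumentException, since its entry (and class) are gone
  }
}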
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ada71497/lucene/backward-codecs/src/resources/META-INF/services/org.apache.lucene.codecs.DocValuesFormat
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/resources/META-INF/services/org.apache.lucene.codecs.DocValuesFormat b/lucene/backward-codecs/src/resources/META-INF/services/org.apache.lucene.codecs.DocValuesFormat
index c5d3207..4a812de 100644
--- a/lucene/backward-codecs/src/resources/META-INF/services/org.apache.lucene.codecs.DocValuesFormat
+++ b/lucene/backward-codecs/src/resources/META-INF/services/org.apache.lucene.codecs.DocValuesFormat
@@ -13,4 +13,3 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-org.apache.lucene.codecs.lucene50.Lucene50DocValuesFormat
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ada71497/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/Lucene50NormsConsumer.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/Lucene50NormsConsumer.java b/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/Lucene50NormsConsumer.java
deleted file mode 100644
index 5c779de..0000000
--- a/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/Lucene50NormsConsumer.java
+++ /dev/null
@@ -1,403 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.codecs.lucene50;
-
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Iterator;
-
-import org.apache.lucene.codecs.CodecUtil;
-import org.apache.lucene.codecs.NormsConsumer;
-import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.SegmentWriteState;
-import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.util.FilterIterator;
-import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.InPlaceMergeSorter;
-import org.apache.lucene.util.packed.BlockPackedWriter;
-import org.apache.lucene.util.packed.MonotonicBlockPackedWriter;
-import org.apache.lucene.util.packed.PackedInts;
-import org.apache.lucene.util.packed.PackedInts.FormatAndBits;
-
-import static org.apache.lucene.codecs.lucene50.Lucene50NormsFormat.VERSION_CURRENT;
-
-/**
- * Writer for {@link Lucene50NormsFormat}
- * @deprecated Only for testing old 5.0-5.2 segments
- */
-@Deprecated
-final class Lucene50NormsConsumer extends NormsConsumer {
- static final int BLOCK_SIZE = 1 << 14;
-
- // threshold for indirect encoding, computed as 1 - 1/log2(maxint)
- // norms are only read for matching postings... so this is the threshold
- // where n log n operations < maxdoc (e.g. it performs similarly to other fields)
- static final float INDIRECT_THRESHOLD = 1 - 1 / 31F;
-
- IndexOutput data, meta;
-
- Lucene50NormsConsumer(SegmentWriteState state, String dataCodec, String dataExtension, String metaCodec, String metaExtension) throws IOException {
- boolean success = false;
- try {
- String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension);
- data = state.directory.createOutput(dataName, state.context);
- CodecUtil.writeIndexHeader(data, dataCodec, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
- String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, metaExtension);
- meta = state.directory.createOutput(metaName, state.context);
- CodecUtil.writeIndexHeader(meta, metaCodec, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
- success = true;
- } finally {
- if (!success) {
- IOUtils.closeWhileHandlingException(this);
- }
- }
- }
-
- // we explicitly use only certain bits per value and a specified format, so we statically check this will work
- static {
- assert PackedInts.Format.PACKED_SINGLE_BLOCK.isSupported(1);
- assert PackedInts.Format.PACKED_SINGLE_BLOCK.isSupported(2);
- assert PackedInts.Format.PACKED_SINGLE_BLOCK.isSupported(4);
- }
-
- @Override
- public void addNormsField(FieldInfo field, Iterable<Number> values) throws IOException {
- writeNormsField(field, values, 0);
- }
-
- private void writeNormsField(FieldInfo field, Iterable<Number> values, int level) throws IOException {
- assert level <= 1; // we only "recurse" once in the indirect case
- meta.writeVInt(field.number);
- NormMap uniqueValues = new NormMap();
- int count = 0;
-
- for (Number nv : values) {
- if (nv == null) {
- throw new IllegalStateException("illegal norms data for field " + field.name + ", got null for value: " + count);
- }
- final long v = nv.longValue();
-
- if (uniqueValues != null) {
- if (v >= Byte.MIN_VALUE && v <= Byte.MAX_VALUE) {
- if (uniqueValues.add((byte) v)) {
- if (uniqueValues.size > 256) {
- uniqueValues = null;
- }
- }
- } else {
- // anything outside an 8 bit float comes from a custom scorer, which is an extreme edge case
- uniqueValues = null;
- }
- }
- count++;
- }
-
- if (uniqueValues == null) {
- addDeltaCompressed(values, count);
- } else if (uniqueValues.size == 1) {
- // 0 bpv
- addConstant(uniqueValues.values[0]);
- } else {
- // small number of unique values: this is the typical case
- uniqueValues.optimizeOrdinals();
-
- int numCommonValues = -1;
- int commonValuesCount = 0;
- if (level == 0 && count > 256) {
- float threshold_count = count * INDIRECT_THRESHOLD;
- if (uniqueValues.freqs[0] > threshold_count) {
- numCommonValues = 1;
- } else if ((commonValuesCount = sum(uniqueValues.freqs, 0, 3)) > threshold_count && uniqueValues.size > 4) {
- numCommonValues = 3;
- } else if ((commonValuesCount = sum(uniqueValues.freqs, 0, 15)) > threshold_count && uniqueValues.size > 16) {
- numCommonValues = 15;
- }
- }
-
- if (numCommonValues == -1) {
- // no pattern in values, just find the most efficient way to pack the values
- FormatAndBits compression = fastestFormatAndBits(uniqueValues.size - 1);
- if (compression.bitsPerValue == 8) {
- addUncompressed(values, count);
- } else {
- addTableCompressed(values, compression, count, uniqueValues);
- }
-
- } else if (numCommonValues == 1) {
- byte commonValue = uniqueValues.values[0];
- if (commonValue == 0) {
- // if the common value is missing, don't waste RAM on a bitset, since we won't be searching those docs
- addIndirect(field, values, count, uniqueValues, 0);
- } else {
- // otherwise, write a sparse bitset, where 1 indicates 'uncommon value'.
- addPatchedBitset(field, values, count, uniqueValues);
- }
- } else {
- addPatchedTable(field, values, numCommonValues, commonValuesCount, count, uniqueValues);
- }
- }
- }
-
- private int sum(int[] freqs, int start, int end) {
- int accum = 0;
- for (int i = start; i < end; ++i) {
- accum += freqs[i];
- }
- return accum;
- }
-
- private FormatAndBits fastestFormatAndBits(int max) {
- // we only use bpv=1,2,4,8
- PackedInts.Format format = PackedInts.Format.PACKED_SINGLE_BLOCK;
- int bitsPerValue = PackedInts.bitsRequired(max);
- if (bitsPerValue == 3) {
- bitsPerValue = 4;
- } else if (bitsPerValue > 4) {
- bitsPerValue = 8;
- }
- return new FormatAndBits(format, bitsPerValue);
- }
-
- private void addConstant(byte constant) throws IOException {
- meta.writeVInt(0);
- meta.writeByte(Lucene50NormsFormat.CONST_COMPRESSED);
- meta.writeLong(constant);
- }
-
- private void addUncompressed(Iterable<Number> values, int count) throws IOException {
- meta.writeVInt(count);
- meta.writeByte(Lucene50NormsFormat.UNCOMPRESSED); // uncompressed byte[]
- meta.writeLong(data.getFilePointer());
- for (Number nv : values) {
- data.writeByte(nv.byteValue());
- }
- }
-
- private void addTableCompressed(Iterable<Number> values, FormatAndBits compression, int count, NormMap uniqueValues) throws IOException {
- meta.writeVInt(count);
- meta.writeByte(Lucene50NormsFormat.TABLE_COMPRESSED); // table-compressed
- meta.writeLong(data.getFilePointer());
-
- writeTable(values, compression, count, uniqueValues, uniqueValues.size);
- }
-
- private void writeTable(Iterable<Number> values, FormatAndBits compression, int count, NormMap uniqueValues, int numOrds) throws IOException {
- data.writeVInt(PackedInts.VERSION_CURRENT);
- data.writeVInt(compression.format.getId());
- data.writeVInt(compression.bitsPerValue);
-
- data.writeVInt(numOrds);
- for (int i = 0; i < numOrds; i++) {
- data.writeByte(uniqueValues.values[i]);
- }
-
- final PackedInts.Writer writer = PackedInts.getWriterNoHeader(data, compression.format, count, compression.bitsPerValue, PackedInts.DEFAULT_BUFFER_SIZE);
- for(Number nv : values) {
- int ord = uniqueValues.ord(nv.byteValue());
- if (ord < numOrds) {
- writer.add(ord);
- } else {
- writer.add(numOrds); // collapses all ords >= numOrds into a single value
- }
- }
- writer.finish();
- }
-
- private void addDeltaCompressed(Iterable<Number> values, int count) throws IOException {
- meta.writeVInt(count);
- meta.writeByte(Lucene50NormsFormat.DELTA_COMPRESSED); // delta-compressed
- meta.writeLong(data.getFilePointer());
- data.writeVInt(PackedInts.VERSION_CURRENT);
- data.writeVInt(BLOCK_SIZE);
-
- final BlockPackedWriter writer = new BlockPackedWriter(data, BLOCK_SIZE);
- for (Number nv : values) {
- writer.add(nv.longValue());
- }
- writer.finish();
- }
-
- // encodes only uncommon values in a sparse bitset
- // access is constant time, and the common case is predictable
- // exceptions nest either to CONST (if there are only 2 values), or INDIRECT (if there are > 2 values)
- private void addPatchedBitset(FieldInfo field, final Iterable<Number> values, int count, NormMap uniqueValues) throws IOException {
- int commonCount = uniqueValues.freqs[0];
-
- meta.writeVInt(count - commonCount);
- meta.writeByte(Lucene50NormsFormat.PATCHED_BITSET);
- meta.writeLong(data.getFilePointer());
-
- // write docs with value
- writeDocsWithValue(values, uniqueValues, 0);
-
- // write exceptions: only two cases make sense
- // bpv = 1 (folded into sparse bitset already)
- // bpv > 1 (add indirect exception table)
- meta.writeVInt(field.number);
- if (uniqueValues.size == 2) {
- // special case: implicit in bitset
- addConstant(uniqueValues.values[1]);
- } else {
- // exception table
- addIndirect(field, values, count, uniqueValues, 0);
- }
- }
-
- // encodes common values in a table, and the rest of the values as exceptions using INDIRECT.
- // the exceptions should not be accessed very often, since the values are uncommon
- private void addPatchedTable(FieldInfo field, final Iterable<Number> values, final int numCommonValues, int commonValuesCount, int count, final NormMap uniqueValues) throws IOException {
- meta.writeVInt(count);
- meta.writeByte(Lucene50NormsFormat.PATCHED_TABLE);
- meta.writeLong(data.getFilePointer());
-
- assert numCommonValues == 3 || numCommonValues == 15;
- FormatAndBits compression = fastestFormatAndBits(numCommonValues);
-
- writeTable(values, compression, count, uniqueValues, numCommonValues);
-
- meta.writeVInt(field.number);
- addIndirect(field, values, count - commonValuesCount, uniqueValues, numCommonValues);
- }
-
- // encodes values as sparse array: keys[] and values[]
- // access is log(N) where N = keys.length (slow!)
- // so this is only appropriate as an exception table for patched, or when common value is 0 (won't be accessed by searching)
- private void addIndirect(FieldInfo field, final Iterable<Number> values, int count, final NormMap uniqueValues, final int minOrd) throws IOException {
- int commonCount = uniqueValues.freqs[minOrd];
-
- meta.writeVInt(count - commonCount);
- meta.writeByte(Lucene50NormsFormat.INDIRECT);
- meta.writeLong(data.getFilePointer());
-
- // write docs with value
- writeDocsWithValue(values, uniqueValues, minOrd);
-
- // write actual values
- writeNormsField(field, new Iterable<Number>() {
- @Override
- public Iterator<Number> iterator() {
- return new FilterIterator<Number, Number>(values.iterator()) {
- @Override
- protected boolean predicateFunction(Number value) {
- return uniqueValues.ord(value.byteValue()) > minOrd;
- }
- };
- }
- }, 1);
- }
-
- private void writeDocsWithValue(final Iterable<Number> values, NormMap uniqueValues, int minOrd) throws IOException {
- data.writeLong(uniqueValues.values[minOrd]);
- data.writeVInt(PackedInts.VERSION_CURRENT);
- data.writeVInt(BLOCK_SIZE);
-
- // write docs with value
- final MonotonicBlockPackedWriter writer = new MonotonicBlockPackedWriter(data, BLOCK_SIZE);
- int doc = 0;
- for (Number n : values) {
- int ord = uniqueValues.ord(n.byteValue());
- if (ord > minOrd) {
- writer.add(doc);
- }
- doc++;
- }
- writer.finish();
- }
-
- @Override
- public void close() throws IOException {
- boolean success = false;
- try {
- if (meta != null) {
- meta.writeVInt(-1); // write EOF marker
- CodecUtil.writeFooter(meta); // write checksum
- }
- if (data != null) {
- CodecUtil.writeFooter(data); // write checksum
- }
- success = true;
- } finally {
- if (success) {
- IOUtils.close(data, meta);
- } else {
- IOUtils.closeWhileHandlingException(data, meta);
- }
- meta = data = null;
- }
- }
-
- // specialized deduplication of long->ord for norms: 99.99999% of the time this will be a single-byte range.
- static class NormMap {
- // we use short: at most we will add 257 values to this map before it's rejected as too big above.
- private final short[] ords = new short[256];
- final int[] freqs = new int[257];
- final byte[] values = new byte[257];
- int size;
-
- {
- Arrays.fill(ords, (short)-1);
- }
-
- // adds an item to the mapping. returns true if actually added
- public boolean add(byte l) {
- assert size <= 256; // once we add > 256 values, we nullify the map in writeNormsField and don't use this strategy
- int index = (int)l + 128;
- short previous = ords[index];
- if (previous < 0) {
- short slot = (short)size;
- ords[index] = slot;
- freqs[slot]++;
- values[slot] = l;
- size++;
- return true;
- } else {
- freqs[previous]++;
- return false;
- }
- }
-
- public int ord(byte value) {
- return ords[(int)value + 128];
- }
-
- // reassign ordinals so higher frequencies have lower ordinals
- public void optimizeOrdinals() {
- new InPlaceMergeSorter() {
- @Override
- protected int compare(int i, int j) {
- return freqs[j] - freqs[i]; // sort descending
- }
- @Override
- protected void swap(int i, int j) {
- // swap ordinal i with ordinal j
- ords[(int)values[i] + 128] = (short)j;
- ords[(int)values[j] + 128] = (short)i;
-
- int tmpFreq = freqs[i];
- byte tmpValue = values[i];
- freqs[i] = freqs[j];
- values[i] = values[j];
- freqs[j] = tmpFreq;
- values[j] = tmpValue;
- }
- }.sort(0, size);
- }
- }
-
-}
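The writer above picks one of seven encodings from the value statistics it gathers in a first pass. A condensed, side-effect-free restatement of that decision logic, for top-level fields only (nested exception tables skip the patched/indirect branch); the names are illustrative and the constants mirror the deleted file:

final class NormsEncodingChoice {
  // norms are only read for matching postings, so an O(log n) indirect
  // lookup pays off once ~96.8% of documents share the common value(s)
  static final float INDIRECT_THRESHOLD = 1 - 1 / 31F;

  /**
   * @param count      total number of documents
   * @param uniqueSize distinct values, or -1 once the writer gave up tracking
   *                   (more than 256 distinct values, or one outside byte range)
   * @param freqsDesc  per-value frequencies, sorted descending (one per value)
   * @param valuesDesc the byte values in the same descending-frequency order
   */
  static String choose(int count, int uniqueSize, int[] freqsDesc, byte[] valuesDesc) {
    if (uniqueSize == -1) return "DELTA_COMPRESSED";   // no small table possible
    if (uniqueSize == 1)  return "CONST_COMPRESSED";   // single value, zero bits per doc
    float threshold = count * INDIRECT_THRESHOLD;
    if (count > 256) {
      if (freqsDesc[0] > threshold) {
        // one dominant value; if it is 0 those docs are never searched,
        // so a plain sparse table beats spending RAM on a bitset
        return valuesDesc[0] == 0 ? "INDIRECT" : "PATCHED_BITSET";
      }
      if (uniqueSize > 4 && sum(freqsDesc, 3) > threshold)   return "PATCHED_TABLE";
      if (uniqueSize > 16 && sum(freqsDesc, 15) > threshold) return "PATCHED_TABLE";
    }
    // no dominant pattern: bpv 1/2/4 table if it fits, otherwise raw bytes
    return uniqueSize <= 16 ? "TABLE_COMPRESSED" : "UNCOMPRESSED";
  }

  private static int sum(int[] freqs, int n) {
    int total = 0;
    for (int i = 0; i < n; i++) total += freqs[i];
    return total;
  }
}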
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ada71497/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/Lucene50RWCodec.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/Lucene50RWCodec.java b/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/Lucene50RWCodec.java
deleted file mode 100644
index 8fdeb20..0000000
--- a/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/Lucene50RWCodec.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.codecs.lucene50;
-
-
-import org.apache.lucene.codecs.NormsFormat;
-import org.apache.lucene.codecs.SegmentInfoFormat;
-
-/**
- * Codec for testing 5.0 index format
- * @deprecated Only for testing old 5.0-5.2 segments
- */
-@Deprecated
-final class Lucene50RWCodec extends Lucene50Codec {
- private final NormsFormat normsFormat = new Lucene50RWNormsFormat();
- private final SegmentInfoFormat segmentInfoFormat = new Lucene50RWSegmentInfoFormat();
-
- @Override
- public NormsFormat normsFormat() {
- return normsFormat;
- }
-
- @Override
- public SegmentInfoFormat segmentInfoFormat() {
- return segmentInfoFormat;
- }
-}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ada71497/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/Lucene50RWNormsFormat.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/Lucene50RWNormsFormat.java b/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/Lucene50RWNormsFormat.java
deleted file mode 100644
index fcf07ef..0000000
--- a/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/Lucene50RWNormsFormat.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.codecs.lucene50;
-
-
-import java.io.IOException;
-
-import org.apache.lucene.codecs.NormsConsumer;
-import org.apache.lucene.index.SegmentWriteState;
-
-/**
- * Read-write version of 5.0 norms format for testing
- * @deprecated for test purposes only
- */
-@Deprecated
-final class Lucene50RWNormsFormat extends Lucene50NormsFormat {
-
- @Override
- public NormsConsumer normsConsumer(SegmentWriteState state) throws IOException {
- return new Lucene50NormsConsumer(state, DATA_CODEC, DATA_EXTENSION, METADATA_CODEC, METADATA_EXTENSION);
- }
-}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ada71497/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/TestLucene50DocValuesFormat.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/TestLucene50DocValuesFormat.java b/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/TestLucene50DocValuesFormat.java
deleted file mode 100644
index 9f174dd..0000000
--- a/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/TestLucene50DocValuesFormat.java
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.codecs.lucene50;
-
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.codecs.DocValuesFormat;
-import org.apache.lucene.codecs.PostingsFormat;
-import org.apache.lucene.codecs.asserting.AssertingCodec;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.BaseCompressingDocValuesFormatTestCase;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.SerialMergeScheduler;
-import org.apache.lucene.index.SortedSetDocValues;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.index.TermsEnum.SeekStatus;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.TestUtil;
-
-/**
- * Tests Lucene50DocValuesFormat
- */
-public class TestLucene50DocValuesFormat extends BaseCompressingDocValuesFormatTestCase {
- private final Codec codec = TestUtil.alwaysDocValuesFormat(new Lucene50DocValuesFormat());
-
- @Override
- protected Codec getCodec() {
- return codec;
- }
-
- // TODO: these big methods can easily blow up some of the other ram-hungry codecs...
- // for now just keep them here, as we want to test this for this format.
-
- @Slow
- public void testSortedSetVariableLengthBigVsStoredFields() throws Exception {
- int numIterations = atLeast(1);
- for (int i = 0; i < numIterations; i++) {
- doTestSortedSetVsStoredFields(atLeast(300), 1, 32766, 16, 100);
- }
- }
-
- @Nightly
- public void testSortedSetVariableLengthManyVsStoredFields() throws Exception {
- int numIterations = atLeast(1);
- for (int i = 0; i < numIterations; i++) {
- doTestSortedSetVsStoredFields(TestUtil.nextInt(random(), 1024, 2049), 1, 500, 16, 100);
- }
- }
-
- @Slow
- public void testSortedVariableLengthBigVsStoredFields() throws Exception {
- int numIterations = atLeast(1);
- for (int i = 0; i < numIterations; i++) {
- doTestSortedVsStoredFields(atLeast(300), 1, 32766);
- }
- }
-
- @Nightly
- public void testSortedVariableLengthManyVsStoredFields() throws Exception {
- int numIterations = atLeast(1);
- for (int i = 0; i < numIterations; i++) {
- doTestSortedVsStoredFields(TestUtil.nextInt(random(), 1024, 2049), 1, 500);
- }
- }
-
- @Slow
- public void testTermsEnumFixedWidth() throws Exception {
- int numIterations = atLeast(1);
- for (int i = 0; i < numIterations; i++) {
- doTestTermsEnumRandom(TestUtil.nextInt(random(), 1025, 5121), 10, 10);
- }
- }
-
- @Slow
- public void testTermsEnumVariableWidth() throws Exception {
- int numIterations = atLeast(1);
- for (int i = 0; i < numIterations; i++) {
- doTestTermsEnumRandom(TestUtil.nextInt(random(), 1025, 5121), 1, 500);
- }
- }
-
- @Nightly
- public void testTermsEnumRandomMany() throws Exception {
- int numIterations = atLeast(1);
- for (int i = 0; i < numIterations; i++) {
- doTestTermsEnumRandom(TestUtil.nextInt(random(), 1025, 8121), 1, 500);
- }
- }
-
- // TODO: try to refactor this and some termsenum tests into the base class.
- // to do this we need to fix the test class to get a DVF not a Codec so we can setup
- // the postings format correctly.
- private void doTestTermsEnumRandom(int numDocs, int minLength, int maxLength) throws Exception {
- Directory dir = newFSDirectory(createTempDir());
- IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
- conf.setMergeScheduler(new SerialMergeScheduler());
- // set to duel against a codec which has ordinals:
- final PostingsFormat pf = TestUtil.getPostingsFormatWithOrds(random());
- final DocValuesFormat dv = new Lucene50DocValuesFormat();
- conf.setCodec(new AssertingCodec() {
- @Override
- public PostingsFormat getPostingsFormatForField(String field) {
- return pf;
- }
-
- @Override
- public DocValuesFormat getDocValuesFormatForField(String field) {
- return dv;
- }
- });
- RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
-
- // index some docs
- for (int i = 0; i < numDocs; i++) {
- Document doc = new Document();
- Field idField = new StringField("id", Integer.toString(i), Field.Store.NO);
- doc.add(idField);
- final int length = TestUtil.nextInt(random(), minLength, maxLength);
- int numValues = random().nextInt(17);
- // create a random list of strings
- List<String> values = new ArrayList<>();
- for (int v = 0; v < numValues; v++) {
- values.add(TestUtil.randomSimpleString(random(), minLength, length));
- }
-
- // add in any order to the indexed field
- ArrayList<String> unordered = new ArrayList<>(values);
- Collections.shuffle(unordered, random());
- for (String v : values) {
- doc.add(newStringField("indexed", v, Field.Store.NO));
- }
-
- // add in any order to the dv field
- ArrayList<String> unordered2 = new ArrayList<>(values);
- Collections.shuffle(unordered2, random());
- for (String v : unordered2) {
- doc.add(new SortedSetDocValuesField("dv", new BytesRef(v)));
- }
-
- writer.addDocument(doc);
- if (random().nextInt(31) == 0) {
- writer.commit();
- }
- }
-
- // delete some docs
- int numDeletions = random().nextInt(numDocs/10);
- for (int i = 0; i < numDeletions; i++) {
- int id = random().nextInt(numDocs);
- writer.deleteDocuments(new Term("id", Integer.toString(id)));
- }
-
- // compare per-segment
- DirectoryReader ir = writer.getReader();
- for (LeafReaderContext context : ir.leaves()) {
- LeafReader r = context.reader();
- Terms terms = r.terms("indexed");
- if (terms != null) {
- SortedSetDocValues ssdv = r.getSortedSetDocValues("dv");
- assertEquals(terms.size(), ssdv.getValueCount());
- TermsEnum expected = terms.iterator();
- TermsEnum actual = r.getSortedSetDocValues("dv").termsEnum();
- assertEquals(terms.size(), expected, actual);
-
- doTestSortedSetEnumAdvanceIndependently(ssdv);
- }
- }
- ir.close();
-
- writer.forceMerge(1);
-
- // now compare again after the merge
- ir = writer.getReader();
- LeafReader ar = getOnlyLeafReader(ir);
- Terms terms = ar.terms("indexed");
- if (terms != null) {
- assertEquals(terms.size(), ar.getSortedSetDocValues("dv").getValueCount());
- TermsEnum expected = terms.iterator();
- TermsEnum actual = ar.getSortedSetDocValues("dv").termsEnum();
- assertEquals(terms.size(), expected, actual);
- }
- ir.close();
-
- writer.close();
- dir.close();
- }
-
- private void assertEquals(long numOrds, TermsEnum expected, TermsEnum actual) throws Exception {
- BytesRef ref;
-
- // sequential next() through all terms
- while ((ref = expected.next()) != null) {
- assertEquals(ref, actual.next());
- assertEquals(expected.ord(), actual.ord());
- assertEquals(expected.term(), actual.term());
- }
- assertNull(actual.next());
-
- // sequential seekExact(ord) through all terms
- for (long i = 0; i < numOrds; i++) {
- expected.seekExact(i);
- actual.seekExact(i);
- assertEquals(expected.ord(), actual.ord());
- assertEquals(expected.term(), actual.term());
- }
-
- // sequential seekExact(BytesRef) through all terms
- for (long i = 0; i < numOrds; i++) {
- expected.seekExact(i);
- assertTrue(actual.seekExact(expected.term()));
- assertEquals(expected.ord(), actual.ord());
- assertEquals(expected.term(), actual.term());
- }
-
- // sequential seekCeil(BytesRef) through all terms
- for (long i = 0; i < numOrds; i++) {
- expected.seekExact(i);
- assertEquals(SeekStatus.FOUND, actual.seekCeil(expected.term()));
- assertEquals(expected.ord(), actual.ord());
- assertEquals(expected.term(), actual.term());
- }
-
- // random seekExact(ord)
- for (long i = 0; i < numOrds; i++) {
- long randomOrd = TestUtil.nextLong(random(), 0, numOrds - 1);
- expected.seekExact(randomOrd);
- actual.seekExact(randomOrd);
- assertEquals(expected.ord(), actual.ord());
- assertEquals(expected.term(), actual.term());
- }
-
- // random seekExact(BytesRef)
- for (long i = 0; i < numOrds; i++) {
- long randomOrd = TestUtil.nextLong(random(), 0, numOrds - 1);
- expected.seekExact(randomOrd);
- actual.seekExact(expected.term());
- assertEquals(expected.ord(), actual.ord());
- assertEquals(expected.term(), actual.term());
- }
-
- // random seekCeil(BytesRef)
- for (long i = 0; i < numOrds; i++) {
- BytesRef target = new BytesRef(TestUtil.randomUnicodeString(random()));
- SeekStatus expectedStatus = expected.seekCeil(target);
- assertEquals(expectedStatus, actual.seekCeil(target));
- if (expectedStatus != SeekStatus.END) {
- assertEquals(expected.ord(), actual.ord());
- assertEquals(expected.term(), actual.term());
- }
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ada71497/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/TestLucene50NormsFormat.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/TestLucene50NormsFormat.java b/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/TestLucene50NormsFormat.java
deleted file mode 100644
index 5ea076d..0000000
--- a/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/TestLucene50NormsFormat.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.codecs.lucene50;
-
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.codecs.lucene50.Lucene50NormsConsumer.NormMap;
-import org.apache.lucene.index.BaseNormsFormatTestCase;
-import org.apache.lucene.util.TestUtil;
-
-/**
- * Tests Lucene50NormsFormat
- */
-public class TestLucene50NormsFormat extends BaseNormsFormatTestCase {
- private final Codec codec = new Lucene50RWCodec();
-
- @Override
- protected Codec getCodec() {
- return codec;
- }
-
- // NormMap is rather complicated, doing domain encoding / tracking frequencies etc.
- // test it directly some here...
-
- public void testNormMapSimple() {
- NormMap map = new NormMap();
- map.add((byte)4);
- map.add((byte) 10);
- map.add((byte) 5);
- map.add((byte)10);
- assertEquals(3, map.size);
-
- // first come, first serve ord assignment
- assertEquals(0, map.ord((byte) 4));
- assertEquals(1, map.ord((byte) 10));
- assertEquals(2, map.ord((byte) 5));
-
- assertEquals(4, map.values[0]);
- assertEquals(10, map.values[1]);
- assertEquals(5, map.values[2]);
-
- assertEquals(1, map.freqs[0]);
- assertEquals(2, map.freqs[1]);
- assertEquals(1, map.freqs[2]);
-
- // optimizing reorders the ordinals
- map.optimizeOrdinals();
- assertEquals(0, map.ord((byte)10));
- assertEquals(1, map.ord((byte)4));
- assertEquals(2, map.ord((byte)5));
-
- assertEquals(10, map.values[0]);
- assertEquals(4, map.values[1]);
- assertEquals(5, map.values[2]);
-
- assertEquals(2, map.freqs[0]);
- assertEquals(1, map.freqs[1]);
- assertEquals(1, map.freqs[2]);
- }
-
- public void testNormMapRandom() {
-
- Set<Byte> uniqueValuesSet = new HashSet<>();
- int numUniqValues = TestUtil.nextInt(random(), 1, 256);
- for (int i = 0; i < numUniqValues; i++) {
- uniqueValuesSet.add(Byte.valueOf((byte)TestUtil.nextInt(random(), Byte.MIN_VALUE, Byte.MAX_VALUE)));
- }
- Byte uniqueValues[] = uniqueValuesSet.toArray(new Byte[uniqueValuesSet.size()]);
-
- Map<Byte,Integer> freqs = new HashMap<>();
- NormMap map = new NormMap();
- int numdocs = TestUtil.nextInt(random(), 1, 100000);
- for (int i = 0; i < numdocs; i++) {
- byte value = uniqueValues[random().nextInt(uniqueValues.length)];
- // now add to both expected and actual
- map.add(value);
- if (freqs.containsKey(value)) {
- freqs.put(value, freqs.get(value) + 1);
- } else {
- freqs.put(value, 1);
- }
- }
-
- assertEquals(freqs.size(), map.size);
- for (Map.Entry<Byte,Integer> kv : freqs.entrySet()) {
- byte value = kv.getKey();
- int freq = kv.getValue();
- int ord = map.ord(value);
- assertEquals(freq, map.freqs[ord]);
- assertEquals(value, map.values[ord]);
- }
-
- // optimizing should reorder ordinals from greatest to least frequency
- map.optimizeOrdinals();
- // recheck consistency
- assertEquals(freqs.size(), map.size);
- for (Map.Entry<Byte,Integer> kv : freqs.entrySet()) {
- byte value = kv.getKey();
- int freq = kv.getValue();
- int ord = map.ord(value);
- assertEquals(freq, map.freqs[ord]);
- assertEquals(value, map.values[ord]);
- }
- // also check descending freq
- int prevFreq = map.freqs[0];
- for (int i = 1; i < map.size; ++i) {
- assertTrue(prevFreq >= map.freqs[i]);
- prevFreq = map.freqs[i];
- }
- }
-}
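
The deleted NormMap tests above pin down two invariants of the old 5.0 norms code: add() hands out ordinals in first-seen order, and optimizeOrdinals() reassigns them from greatest to least frequency. A small standalone sketch of those invariants, using a hypothetical FreqOrdMap rather than Lucene's actual NormMap:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class FreqOrdMap {
  private final Map<Byte, Integer> freqByValue = new LinkedHashMap<>();
  private final List<Byte> valueByOrd = new ArrayList<>();

  public void add(byte value) {
    if (freqByValue.merge(value, 1, Integer::sum) == 1) {
      valueByOrd.add(value); // first occurrence: assign the next free ordinal
    }
  }

  // Reassign ordinals from greatest to least frequency, the invariant
  // testNormMapRandom checked after optimizeOrdinals().
  public void optimizeOrdinals() {
    valueByOrd.sort(Comparator.<Byte>comparingInt(freqByValue::get).reversed());
  }

  public int ord(byte value) {
    return valueByOrd.indexOf(value);
  }

  public int freq(int ord) {
    return freqByValue.get(valueByOrd.get(ord));
  }
}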
[33/50] [abbrv] lucene-solr:apiv2: SOLR-9452: JsonRecordReader should
not deep copy document before handler.handle()
Posted by no...@apache.org.
SOLR-9452: JsonRecordReader should not deep copy document before handler.handle()
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/f0f92d87
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/f0f92d87
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/f0f92d87
Branch: refs/heads/apiv2
Commit: f0f92d875ecab8a9acc01e959b852faf99a41e8e
Parents: 26262f4
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Wed Aug 31 18:12:02 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Wed Aug 31 18:12:02 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 2 ++
.../solr/common/util/JsonRecordReader.java | 25 ++++----------------
2 files changed, 6 insertions(+), 21 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f0f92d87/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index f33138d..cc90e6e 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -103,6 +103,8 @@ Optimizations
* SOLR-9447: Do not clone SolrInputDocument if update processor chain does not contain custom processors.
(shalin)
+* SOLR-9452: JsonRecordReader should not deep copy document before handler.handle(). (noble, shalin)
+
Other Changes
----------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f0f92d87/solr/solrj/src/java/org/apache/solr/common/util/JsonRecordReader.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/util/JsonRecordReader.java b/solr/solrj/src/java/org/apache/solr/common/util/JsonRecordReader.java
index 2025197..782c25d 100644
--- a/solr/solrj/src/java/org/apache/solr/common/util/JsonRecordReader.java
+++ b/solr/solrj/src/java/org/apache/solr/common/util/JsonRecordReader.java
@@ -119,9 +119,8 @@ public class JsonRecordReader {
*/
public List<Map<String, Object>> getAllRecords(Reader r) throws IOException {
final List<Map<String, Object>> results = new ArrayList<>();
- streamRecords(r, (record, path) -> {
- results.add(record);
- });
+    // Deep copy is required here because the streaming parser may reuse the record map after the callback returns
+ streamRecords(r, (record, path) -> results.add(Utils.getDeepCopy(record, 2)));
return results;
}
@@ -279,23 +278,6 @@ public class JsonRecordReader {
return n;
}
- /**
- * Copies a supplied Map to a new Map which is returned. Used to copy a
- * records values. If a fields value is a List then they have to be
- * deep-copied for thread safety
- */
- private static Map<String, Object> getDeepCopy(Map<String, Object> values) {
- Map<String, Object> result = new LinkedHashMap<>();
- for (Map.Entry<String, Object> entry : values.entrySet()) {
- if (entry.getValue() instanceof List) {
- result.put(entry.getKey(), new ArrayList((List) entry.getValue()));
- } else {
- result.put(entry.getKey(), entry.getValue());
- }
- }
- return result;
- }
-
private void parse(JSONParser parser,
Handler handler,
Map<String, Object> values) throws IOException {
@@ -394,7 +376,7 @@ public class JsonRecordReader {
int event = parser.nextEvent();
if (event == OBJECT_END) {
if (isRecord()) {
- handler.handle(getDeepCopy(values), splitPath);
+ handler.handle(values, splitPath);
}
return;
}
@@ -456,6 +438,7 @@ public class JsonRecordReader {
}
private void addChildDoc2ParentDoc(Map<String, Object> record, Map<String, Object> values) {
+ record = Utils.getDeepCopy(record, 2);
Object oldVal = values.get(null);
if (oldVal == null) {
values.put(null, record);
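
A note on the contract change above: handler.handle() now receives the live record map, which the reader may reuse, so only consumers that retain records beyond the callback pay for a copy; getAllRecords does so via Utils.getDeepCopy. A minimal sketch of a retaining consumer under that contract, with RecordHandler as a hypothetical stand-in for JsonRecordReader.Handler:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class RetainingConsumer {
  // Hypothetical stand-in for JsonRecordReader.Handler.
  interface RecordHandler {
    void handle(Map<String, Object> record, String path);
  }

  private final List<Map<String, Object>> retained = new ArrayList<>();

  RecordHandler handler() {
    // The reader may mutate or reuse 'record' after handle() returns, so
    // copy before retaining. This copy is shallow; if nested values are
    // also reused, a deep copy (as Utils.getDeepCopy does) is needed.
    return (record, path) -> retained.add(new LinkedHashMap<>(record));
  }
}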
[47/50] [abbrv] lucene-solr:apiv2: Refactor out the serialization &
deserialization
Posted by no...@apache.org.
Refactor out the serialization & deserialization
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/6ca9aeb5
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/6ca9aeb5
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/6ca9aeb5
Branch: refs/heads/apiv2
Commit: 6ca9aeb510fb4b1fb06de04c29f2e71d66981efa
Parents: 757c245
Author: Noble Paul <no...@apache.org>
Authored: Tue Sep 6 11:39:04 2016 +0530
Committer: Noble Paul <no...@apache.org>
Committed: Tue Sep 6 11:39:04 2016 +0530
----------------------------------------------------------------------
.../solr/common/util/TestJavaBinCodec.java | 104 ++++++++++---------
1 file changed, 56 insertions(+), 48 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6ca9aeb5/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java b/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java
index cb6f9ed..96ddc8b 100644
--- a/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java
+++ b/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java
@@ -175,33 +175,36 @@ public class TestJavaBinCodec extends SolrTestCaseJ4 {
InputStream is = getClass().getResourceAsStream(SOLRJ_JAVABIN_BACKCOMPAT_BIN);
List<Object> unmarshaledObj = (List<Object>) javabin.unmarshal(is);
List<Object> matchObj = generateAllDataTypes();
-
- assertEquals(unmarshaledObj.size(), matchObj.size());
- for(int i=0; i < unmarshaledObj.size(); i++) {
-
- if(unmarshaledObj.get(i) instanceof byte[] && matchObj.get(i) instanceof byte[]) {
- byte[] b1 = (byte[]) unmarshaledObj.get(i);
- byte[] b2 = (byte[]) matchObj.get(i);
- assertTrue(Arrays.equals(b1, b2));
- } else if(unmarshaledObj.get(i) instanceof SolrDocument && matchObj.get(i) instanceof SolrDocument ) {
- assertTrue(compareSolrDocument(unmarshaledObj.get(i), matchObj.get(i)));
- } else if(unmarshaledObj.get(i) instanceof SolrDocumentList && matchObj.get(i) instanceof SolrDocumentList ) {
- assertTrue(compareSolrDocumentList(unmarshaledObj.get(i), matchObj.get(i)));
- } else if(unmarshaledObj.get(i) instanceof SolrInputDocument && matchObj.get(i) instanceof SolrInputDocument) {
- assertTrue(compareSolrInputDocument(unmarshaledObj.get(i), matchObj.get(i)));
- } else if(unmarshaledObj.get(i) instanceof SolrInputField && matchObj.get(i) instanceof SolrInputField) {
- assertTrue(assertSolrInputFieldEquals(unmarshaledObj.get(i), matchObj.get(i)));
- } else {
- assertEquals(unmarshaledObj.get(i), matchObj.get(i));
- }
-
- }
+ compareObjects(unmarshaledObj, matchObj);
} catch (IOException e) {
throw e;
}
}
+ private void compareObjects(List unmarshaledObj, List matchObj) {
+ assertEquals(unmarshaledObj.size(), matchObj.size());
+ for (int i = 0; i < unmarshaledObj.size(); i++) {
+
+ if (unmarshaledObj.get(i) instanceof byte[] && matchObj.get(i) instanceof byte[]) {
+ byte[] b1 = (byte[]) unmarshaledObj.get(i);
+ byte[] b2 = (byte[]) matchObj.get(i);
+ assertTrue(Arrays.equals(b1, b2));
+ } else if (unmarshaledObj.get(i) instanceof SolrDocument && matchObj.get(i) instanceof SolrDocument) {
+ assertTrue(compareSolrDocument(unmarshaledObj.get(i), matchObj.get(i)));
+ } else if (unmarshaledObj.get(i) instanceof SolrDocumentList && matchObj.get(i) instanceof SolrDocumentList) {
+ assertTrue(compareSolrDocumentList(unmarshaledObj.get(i), matchObj.get(i)));
+ } else if (unmarshaledObj.get(i) instanceof SolrInputDocument && matchObj.get(i) instanceof SolrInputDocument) {
+ assertTrue(compareSolrInputDocument(unmarshaledObj.get(i), matchObj.get(i)));
+ } else if (unmarshaledObj.get(i) instanceof SolrInputField && matchObj.get(i) instanceof SolrInputField) {
+ assertTrue(assertSolrInputFieldEquals(unmarshaledObj.get(i), matchObj.get(i)));
+ } else {
+ assertEquals(unmarshaledObj.get(i), matchObj.get(i));
+ }
+
+ }
+ }
+
@Test
public void testBackCompatForSolrDocumentWithChildDocs() throws IOException {
JavaBinCodec javabin = new JavaBinCodec(){
@@ -267,14 +270,33 @@ public class TestJavaBinCodec extends SolrTestCaseJ4 {
}
@Test
- public void testResponseChildDocuments() throws IOException {
+ public void testAllTypes() throws IOException {
+ List<Object> obj = generateAllDataTypes();
+ compareObjects(
+ (List) getObject(getBytes(obj)),
+ (List) obj
+ );
+ }
+ private static Object serializeAndDeserialize(Object o) throws IOException {
+ return getObject(getBytes(o));
+ }
+ private static byte[] getBytes(Object o) throws IOException {
JavaBinCodec javabin = new JavaBinCodec();
ByteArrayOutputStream baos = new ByteArrayOutputStream();
- javabin.marshal(generateSolrDocumentWithChildDocs(), baos);
+ javabin.marshal(o, baos);
+ return baos.toByteArray();
+ }
+
+ private static Object getObject(byte[] bytes) throws IOException {
+ return new JavaBinCodec().unmarshal(new ByteArrayInputStream(bytes));
+ }
- SolrDocument result = (SolrDocument) javabin.unmarshal(new ByteArrayInputStream(baos.toByteArray()));
+
+ @Test
+ public void testResponseChildDocuments() throws IOException {
+ SolrDocument result = (SolrDocument) serializeAndDeserialize(generateSolrDocumentWithChildDocs());
assertEquals(2, result.size());
assertEquals("1", result.getFieldValue("id"));
assertEquals("parentDocument", result.getFieldValue("subject"));
@@ -305,13 +327,11 @@ public class TestJavaBinCodec extends SolrTestCaseJ4 {
@Test
public void testStringCaching() throws Exception {
Map<String, Object> m = Utils.makeMap("key1", "val1", "key2", "val2");
+ byte[] b1 = getBytes(m);//copy 1
+ byte[] b2 = getBytes(m);//copy 2
+ Map m1 = (Map) getObject(b1);
+    Map m2 = (Map) getObject(b2);
- ByteArrayOutputStream os1 = new ByteArrayOutputStream();
- new JavaBinCodec().marshal(m, os1);
- Map m1 = (Map) new JavaBinCodec().unmarshal(new ByteArrayInputStream(os1.toByteArray()));
- ByteArrayOutputStream os2 = new ByteArrayOutputStream();
- new JavaBinCodec().marshal(m, os2);
- Map m2 = (Map) new JavaBinCodec().unmarshal(new ByteArrayInputStream(os2.toByteArray()));
List l1 = new ArrayList<>(m1.keySet());
List l2 = new ArrayList<>(m2.keySet());
@@ -346,8 +366,8 @@ public class TestJavaBinCodec extends SolrTestCaseJ4 {
});
- m1 = (Map) new JavaBinCodec(null, stringCache).unmarshal(new ByteArrayInputStream(os1.toByteArray()));
- m2 = (Map) new JavaBinCodec(null, stringCache).unmarshal(new ByteArrayInputStream(os2.toByteArray()));
+ m1 = (Map) new JavaBinCodec(null, stringCache).unmarshal(new ByteArrayInputStream(b1));
+ m2 = (Map) new JavaBinCodec(null, stringCache).unmarshal(new ByteArrayInputStream(b2));
l1 = new ArrayList<>(m1.keySet());
l2 = new ArrayList<>(m2.keySet());
assertTrue(l1.get(0).equals(l2.get(0)));
@@ -359,26 +379,19 @@ public class TestJavaBinCodec extends SolrTestCaseJ4 {
}
public void genBinaryFiles() throws IOException {
- JavaBinCodec javabin = new JavaBinCodec();
- ByteArrayOutputStream os = new ByteArrayOutputStream();
-
+
Object data = generateAllDataTypes();
-
- javabin.marshal(data, os);
- byte[] out = os.toByteArray();
+ byte[] out = getBytes(data);
FileOutputStream fs = new FileOutputStream(new File(BIN_FILE_LOCATION));
BufferedOutputStream bos = new BufferedOutputStream(fs);
bos.write(out);
bos.close();
//Binary file with child documents
- javabin = new JavaBinCodec();
SolrDocument sdoc = generateSolrDocumentWithChildDocs();
- os = new ByteArrayOutputStream();
- javabin.marshal(sdoc, os);
fs = new FileOutputStream(new File(BIN_FILE_LOCATION_CHILD_DOCS));
bos = new BufferedOutputStream(fs);
- bos.write(os.toByteArray());
+ bos.write(getBytes(sdoc));
bos.close();
}
@@ -553,12 +566,7 @@ public class TestJavaBinCodec extends SolrTestCaseJ4 {
sdoc.put("some_boolean", ""+r.nextBoolean());
sdoc.put("another_boolean", ""+r.nextBoolean());
-
- JavaBinCodec javabin = new JavaBinCodec();
- ByteArrayOutputStream os = new ByteArrayOutputStream();
- javabin.marshal(sdoc, os);
- os.toByteArray();
- buffers[bufnum] = os.toByteArray();
+ buffers[bufnum] = getBytes(sdoc);
}
int ret = 0;
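
The refactor above funnels every marshal/unmarshal round trip through the getBytes and getObject helpers. A condensed standalone sketch of the same round-trip pattern, using only the JavaBinCodec calls already visible in the diff:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Arrays;

import org.apache.solr.common.util.JavaBinCodec;

public class JavaBinRoundTrip {
  // Serialize any JavaBin-supported object graph to bytes.
  static byte[] getBytes(Object o) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    new JavaBinCodec().marshal(o, baos);
    return baos.toByteArray();
  }

  // Deserialize the bytes back into an object graph.
  static Object getObject(byte[] bytes) throws IOException {
    return new JavaBinCodec().unmarshal(new ByteArrayInputStream(bytes));
  }

  public static void main(String[] args) throws IOException {
    Object original = Arrays.asList("a", 1L, true);
    System.out.println(getObject(getBytes(original))); // prints: [a, 1, true]
  }
}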
[24/50] [abbrv] lucene-solr:apiv2: SOLR-9445: Fix failures in
TestLocalFSCloudBackupRestore due to a changed code path which returns
SolrExceptions instead of SolrServerExceptions
Posted by no...@apache.org.
SOLR-9445: Fix failures in TestLocalFSCloudBackupRestore due to a changed code path which returns SolrExceptions instead of SolrServerExceptions
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/df9a642a
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/df9a642a
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/df9a642a
Branch: refs/heads/apiv2
Commit: df9a642a8918ea626579a590f573f7205a081cd7
Parents: 44c30f0
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Sun Aug 28 00:38:53 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Sun Aug 28 00:39:16 2016 +0530
----------------------------------------------------------------------
.../solr/cloud/AbstractCloudBackupRestoreTestCase.java | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/df9a642a/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java b/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
index fd74eaf..8e7a4b0 100644
--- a/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
+++ b/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
@@ -33,6 +33,7 @@ import org.apache.solr.client.solrj.impl.HttpSolrClient.RemoteSolrException;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.CollectionAdminRequest.ClusterProp;
import org.apache.solr.client.solrj.response.RequestStatusState;
+import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.cloud.DocCollection;
@@ -147,9 +148,8 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
try {
backup.process(solrClient);
fail("This request should have failed since the cluster property value for backup location property is invalid.");
- } catch (SolrServerException ex) {
- assertTrue(ex.getCause() instanceof RemoteSolrException);
- assertEquals(ErrorCode.SERVER_ERROR.code, ((RemoteSolrException)ex.getCause()).code());
+ } catch (SolrException ex) {
+ assertEquals(ErrorCode.SERVER_ERROR.code, ex.code());
}
String restoreCollectionName = collectionName + "_invalidrequest";
@@ -158,9 +158,8 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
try {
restore.process(solrClient);
fail("This request should have failed since the cluster property value for backup location property is invalid.");
- } catch (SolrServerException ex) {
- assertTrue(ex.getCause() instanceof RemoteSolrException);
- assertEquals(ErrorCode.SERVER_ERROR.code, ((RemoteSolrException)ex.getCause()).code());
+ } catch (SolrException ex) {
+ assertEquals(ErrorCode.SERVER_ERROR.code, ex.code());
}
}
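
Separate from Solr's exception hierarchy, the pattern being adjusted above is: run a request that must fail, then assert on its error code rather than on how the exception happens to be wrapped. A generic sketch, with ServiceException as a hypothetical unchecked exception carrying a code:

public class ExpectFailure {
  // Hypothetical unchecked exception carrying an HTTP-style error code.
  static class ServiceException extends RuntimeException {
    final int code;
    ServiceException(int code, String msg) {
      super(msg);
      this.code = code;
    }
  }

  // Run the request, insist that it fails, and assert on the code itself.
  static void assertFailsWithCode(Runnable request, int expectedCode) {
    try {
      request.run();
      throw new AssertionError("request should have failed");
    } catch (ServiceException ex) {
      if (ex.code != expectedCode) {
        throw new AssertionError("expected code " + expectedCode + ", got " + ex.code);
      }
    }
  }
}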
[30/50] [abbrv] lucene-solr:apiv2: SOLR-9455: Deleting a sub-shard in
recovery state can mark parent shard as inactive
Posted by no...@apache.org.
SOLR-9455: Deleting a sub-shard in recovery state can mark parent shard as inactive
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/2700b952
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/2700b952
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/2700b952
Branch: refs/heads/apiv2
Commit: 2700b952119feb2d53a163d3374f56c85a0de339
Parents: 757c245
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Tue Aug 30 00:01:17 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Tue Aug 30 00:01:17 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 2 ++
.../java/org/apache/solr/cloud/DeleteShardCmd.java | 14 ++++++++++++++
2 files changed, 16 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2700b952/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 70c9f1e..5f8694f 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -89,6 +89,8 @@ Bug Fixes
* SOLR-9188: blockUnknown property makes inter-node communication impossible (noble)
+* SOLR-9455: Deleting a sub-shard in recovery state can mark parent shard as inactive. (shalin)
+
Optimizations
----------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2700b952/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java
index f2ae5ca..a7f6d5b 100644
--- a/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java
@@ -23,6 +23,7 @@ import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
+import org.apache.solr.cloud.overseer.OverseerAction;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
@@ -73,6 +74,19 @@ public class DeleteShardCmd implements Cmd {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The slice: " + slice.getName() + " is currently " + state
+ ". Only non-active (or custom-hashed) slices can be deleted.");
}
+
+ if (state == Slice.State.RECOVERY) {
+ // mark the slice as 'construction' and only then try to delete the cores
+ // see SOLR-9455
+ DistributedQueue inQueue = Overseer.getStateUpdateQueue(ocmh.zkStateReader.getZkClient());
+ Map<String, Object> propMap = new HashMap<>();
+ propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
+ propMap.put(sliceId, Slice.State.CONSTRUCTION.toString());
+ propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
+ ZkNodeProps m = new ZkNodeProps(propMap);
+ inQueue.offer(Utils.toJSON(m));
+ }
+
ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
String asyncId = message.getStr(ASYNC);
[42/50] [abbrv] lucene-solr:apiv2: LUCENE-7433: remove unused
LogMergePolicy.SegmentInfoAndLevel.index private member
Posted by no...@apache.org.
LUCENE-7433: remove unused LogMergePolicy.SegmentInfoAndLevel.index private member
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/abd4cfb6
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/abd4cfb6
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/abd4cfb6
Branch: refs/heads/apiv2
Commit: abd4cfb69433e019e7ea111a1a406c66417fea34
Parents: c56d832
Author: Christine Poerschke <cp...@apache.org>
Authored: Fri Sep 2 16:13:30 2016 +0100
Committer: Christine Poerschke <cp...@apache.org>
Committed: Fri Sep 2 16:13:30 2016 +0100
----------------------------------------------------------------------
.../core/src/java/org/apache/lucene/index/LogMergePolicy.java | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/abd4cfb6/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java b/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java
index 11869dc..4d0ce51 100644
--- a/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java
+++ b/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java
@@ -429,12 +429,10 @@ public abstract class LogMergePolicy extends MergePolicy {
private static class SegmentInfoAndLevel implements Comparable<SegmentInfoAndLevel> {
SegmentCommitInfo info;
float level;
- int index;
- public SegmentInfoAndLevel(SegmentCommitInfo info, float level, int index) {
+ public SegmentInfoAndLevel(SegmentCommitInfo info, float level) {
this.info = info;
this.level = level;
- this.index = index;
}
// Sorts largest to smallest
@@ -475,7 +473,7 @@ public abstract class LogMergePolicy extends MergePolicy {
size = 1;
}
- final SegmentInfoAndLevel infoLevel = new SegmentInfoAndLevel(info, (float) Math.log(size)/norm, i);
+ final SegmentInfoAndLevel infoLevel = new SegmentInfoAndLevel(info, (float) Math.log(size)/norm);
levels.add(infoLevel);
if (verbose(writer)) {
[25/50] [abbrv] lucene-solr:apiv2: SOLR-9445: Removed unused import
Posted by no...@apache.org.
SOLR-9445: Removed unused import
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/4f316bc1
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/4f316bc1
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/4f316bc1
Branch: refs/heads/apiv2
Commit: 4f316bc199359188d4fcad534f1f49cb260bd877
Parents: df9a642
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Sun Aug 28 00:39:46 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Sun Aug 28 00:39:46 2016 +0530
----------------------------------------------------------------------
.../org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java | 1 -
1 file changed, 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4f316bc1/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java b/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
index 8e7a4b0..c7b2745 100644
--- a/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
+++ b/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
@@ -29,7 +29,6 @@ import org.apache.lucene.util.TestUtil;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient.RemoteSolrException;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.CollectionAdminRequest.ClusterProp;
import org.apache.solr.client.solrj.response.RequestStatusState;
[10/50] [abbrv] lucene-solr:apiv2: LUCENE-7424: GeoPolygon
computation of intersection bounds was incorrect.
Posted by no...@apache.org.
LUCENE-7424: GeoPolygon computation of intersection bounds was incorrect.
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/884aa160
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/884aa160
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/884aa160
Branch: refs/heads/apiv2
Commit: 884aa1609a72e273307a93a6a07f181eab25faad
Parents: e325973
Author: Karl Wright <Da...@gmail.com>
Authored: Thu Aug 25 18:09:50 2016 -0400
Committer: Karl Wright <Da...@gmail.com>
Committed: Thu Aug 25 18:09:50 2016 -0400
----------------------------------------------------------------------
.../spatial3d/geom/GeoConcavePolygon.java | 51 ++++++++------------
.../lucene/spatial3d/geom/GeoConvexPolygon.java | 47 ++++++++----------
.../lucene/spatial3d/geom/GeoPolygonTest.java | 39 +++++++++++++++
3 files changed, 79 insertions(+), 58 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/884aa160/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoConcavePolygon.java
----------------------------------------------------------------------
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoConcavePolygon.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoConcavePolygon.java
index 1abc06c..e2a4c1e 100644
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoConcavePolygon.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoConcavePolygon.java
@@ -50,10 +50,10 @@ class GeoConcavePolygon extends GeoBasePolygon {
protected boolean isDone = false;
/** A bounds object for each sided plane */
protected Map<SidedPlane, Membership> eitherBounds = null;
- /** Edge plane for one side of intersection */
- protected Map<SidedPlane, Plane> edgePlanes = null;
- /** Intersection bounds */
- protected Map<SidedPlane, Membership> intersectionBounds = null;
+ /** Map from edge to its previous non-coplanar brother */
+ protected Map<SidedPlane, SidedPlane> prevBrotherMap = null;
+ /** Map from edge to its next non-coplanar brother */
+ protected Map<SidedPlane, SidedPlane> nextBrotherMap = null;
/**
* Create a concave polygon from a list of points. The first point must be on the
@@ -214,8 +214,8 @@ class GeoConcavePolygon extends GeoBasePolygon {
// For each edge, create a bounds object.
eitherBounds = new HashMap<>(edges.length);
- intersectionBounds = new HashMap<>(edges.length);
- edgePlanes = new HashMap<>(edges.length);
+ prevBrotherMap = new HashMap<>(edges.length);
+ nextBrotherMap = new HashMap<>(edges.length);
for (int edgeIndex = 0; edgeIndex < edges.length; edgeIndex++) {
final SidedPlane edge = edges[edgeIndex];
final SidedPlane invertedEdge = invertedEdges[edgeIndex];
@@ -224,16 +224,6 @@ class GeoConcavePolygon extends GeoBasePolygon {
bound1Index++;
}
int bound2Index = legalIndex(edgeIndex-1);
- int otherIndex = bound2Index;
- final SidedPlane otherEdge;
- final SidedPlane otherInvertedEdge;
- if (invertedEdges[legalIndex(otherIndex)].isNumericallyIdentical(invertedEdge)) {
- otherInvertedEdge = null;
- otherEdge = null;
- } else {
- otherInvertedEdge = invertedEdges[legalIndex(otherIndex)];
- otherEdge = edges[legalIndex(otherIndex)];
- }
while (invertedEdges[legalIndex(bound2Index)].isNumericallyIdentical(invertedEdge)) {
bound2Index--;
}
@@ -252,15 +242,10 @@ class GeoConcavePolygon extends GeoBasePolygon {
}
}
eitherBounds.put(edge, new EitherBound(invertedEdges[bound1Index], invertedEdges[bound2Index]));
- // For intersections, we look at the point at the intersection between the previous edge and this one. We need to locate the
- // Intersection bounds needs to look even further forwards/backwards
- if (otherInvertedEdge != null) {
- while (invertedEdges[legalIndex(otherIndex)].isNumericallyIdentical(otherInvertedEdge)) {
- otherIndex--;
- }
- intersectionBounds.put(edge, new EitherBound(invertedEdges[legalIndex(otherIndex)], invertedEdges[legalIndex(bound2Index)]));
- edgePlanes.put(edge, otherEdge);
- }
+ // When we are done with this cycle, we'll need to build the intersection bound for each edge and its brother.
+ // For now, keep track of the relationships.
+ nextBrotherMap.put(invertedEdge, invertedEdges[bound1Index]);
+ prevBrotherMap.put(invertedEdge, invertedEdges[bound2Index]);
}
// Pick an edge point arbitrarily from the outer polygon. Glom this together with all edge points from
@@ -383,7 +368,7 @@ class GeoConcavePolygon extends GeoBasePolygon {
/** A membership implementation representing polygon edges that must apply.
*/
- protected class EitherBound implements Membership {
+ protected static class EitherBound implements Membership {
protected final SidedPlane sideBound1;
protected final SidedPlane sideBound2;
@@ -406,6 +391,12 @@ class GeoConcavePolygon extends GeoBasePolygon {
public boolean isWithin(final double x, final double y, final double z) {
return sideBound1.isWithin(x,y,z) && sideBound2.isWithin(x,y,z);
}
+
+ @Override
+ public String toString() {
+ return "(" + sideBound1 + "," + sideBound2 + ")";
+ }
+
}
@Override
@@ -442,10 +433,10 @@ class GeoConcavePolygon extends GeoBasePolygon {
// Add planes with membership.
for (final SidedPlane edge : edges) {
bounds.addPlane(planetModel, edge, eitherBounds.get(edge));
- final Membership m = intersectionBounds.get(edge);
- if (m != null) {
- bounds.addIntersection(planetModel, edgePlanes.get(edge), edge, m);
- }
+ }
+ for (final SidedPlane invertedEdge : invertedEdges) {
+ final SidedPlane nextEdge = nextBrotherMap.get(invertedEdge);
+ bounds.addIntersection(planetModel, invertedEdge, nextEdge, prevBrotherMap.get(invertedEdge), nextBrotherMap.get(nextEdge));
}
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/884aa160/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoConvexPolygon.java
----------------------------------------------------------------------
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoConvexPolygon.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoConvexPolygon.java
index dbf8f9f..6bd0aad 100755
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoConvexPolygon.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoConvexPolygon.java
@@ -48,10 +48,10 @@ class GeoConvexPolygon extends GeoBasePolygon {
protected boolean isDone = false;
/** A bounds object for each sided plane */
protected Map<SidedPlane, Membership> eitherBounds = null;
- /** Edge plane for one side of intersection */
- protected Map<SidedPlane, Plane> edgePlanes = null;
- /** Intersection bounds */
- protected Map<SidedPlane, Membership> intersectionBounds = null;
+ /** Map from edge to its previous non-coplanar brother */
+ protected Map<SidedPlane, SidedPlane> prevBrotherMap = null;
+ /** Map from edge to its next non-coplanar brother */
+ protected Map<SidedPlane, SidedPlane> nextBrotherMap = null;
/**
* Create a convex polygon from a list of points. The first point must be on the
@@ -210,8 +210,8 @@ class GeoConvexPolygon extends GeoBasePolygon {
// For each edge, create a bounds object.
eitherBounds = new HashMap<>(edges.length);
- intersectionBounds = new HashMap<>(edges.length);
- edgePlanes = new HashMap<>(edges.length);
+ prevBrotherMap = new HashMap<>(edges.length);
+ nextBrotherMap = new HashMap<>(edges.length);
for (int edgeIndex = 0; edgeIndex < edges.length; edgeIndex++) {
final SidedPlane edge = edges[edgeIndex];
int bound1Index = legalIndex(edgeIndex+1);
@@ -219,13 +219,6 @@ class GeoConvexPolygon extends GeoBasePolygon {
bound1Index++;
}
int bound2Index = legalIndex(edgeIndex-1);
- int otherIndex = bound2Index;
- final SidedPlane otherEdge;
- if (edges[legalIndex(otherIndex)].isNumericallyIdentical(edge)) {
- otherEdge = null;
- } else {
- otherEdge = edges[legalIndex(otherIndex)];
- }
// Look for bound2
while (edges[legalIndex(bound2Index)].isNumericallyIdentical(edge)) {
bound2Index--;
@@ -245,17 +238,12 @@ class GeoConvexPolygon extends GeoBasePolygon {
}
}
eitherBounds.put(edge, new EitherBound(edges[bound1Index], edges[bound2Index]));
- // For intersections, we look at the point at the intersection between the previous edge and this one. We need to locate the
- // Intersection bounds needs to look even further forwards/backwards
- if (otherEdge != null) {
- while (edges[legalIndex(otherIndex)].isNumericallyIdentical(otherEdge)) {
- otherIndex--;
- }
- intersectionBounds.put(edge, new EitherBound(edges[legalIndex(otherIndex)], edges[legalIndex(bound2Index)]));
- edgePlanes.put(edge, otherEdge);
- }
+ // When we are done with this cycle, we'll need to build the intersection bound for each edge and its brother.
+ // For now, keep track of the relationships.
+ nextBrotherMap.put(edge, edges[bound1Index]);
+ prevBrotherMap.put(edge, edges[bound2Index]);
}
-
+
// Pick an edge point arbitrarily from the outer polygon. Glom this together with all edge points from
// inner polygons.
int edgePointCount = 1;
@@ -370,7 +358,7 @@ class GeoConvexPolygon extends GeoBasePolygon {
/** A membership implementation representing polygon edges that must apply.
*/
- protected class EitherBound implements Membership {
+ protected static class EitherBound implements Membership {
protected final SidedPlane sideBound1;
protected final SidedPlane sideBound2;
@@ -393,6 +381,11 @@ class GeoConvexPolygon extends GeoBasePolygon {
public boolean isWithin(final double x, final double y, final double z) {
return sideBound1.isWithin(x,y,z) && sideBound2.isWithin(x,y,z);
}
+
+ @Override
+ public String toString() {
+ return "(" + sideBound1 + "," + sideBound2 + ")";
+ }
}
@@ -428,10 +421,8 @@ class GeoConvexPolygon extends GeoBasePolygon {
// Add planes with membership.
for (final SidedPlane edge : edges) {
bounds.addPlane(planetModel, edge, eitherBounds.get(edge));
- final Membership m = intersectionBounds.get(edge);
- if (m != null) {
- bounds.addIntersection(planetModel, edgePlanes.get(edge), edge, m);
- }
+ final SidedPlane nextEdge = nextBrotherMap.get(edge);
+ bounds.addIntersection(planetModel, edge, nextEdge, prevBrotherMap.get(edge), nextBrotherMap.get(nextEdge));
}
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/884aa160/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
----------------------------------------------------------------------
diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
index 6745060..8527e99 100755
--- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
+++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
@@ -927,5 +927,44 @@ shape:
assertTrue(!result);
}
+
+ @Test
+ public void testPolygonFailureCase2() {
+ /*
+ [junit4] 1> shape=GeoCompositeMembershipShape: {[GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=[
+ [lat=1.079437865394857, lon=-1.720224083538152E-11([X=0.47111944719262044, Y=-8.104310192839264E-12, Z=0.8803759987367299])],
+ [lat=-1.5707963267948966, lon=0.017453291479645996([X=6.108601474971234E-17, Y=1.066260290095308E-18, Z=-0.997762292022105])],
+ [lat=0.017453291479645996, lon=2.4457272005608357E-47([X=1.0009653513901666, Y=2.448088186713865E-47, Z=0.01747191415779267])]], internalEdges={2}},
+ GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=[
+ [lat=1.079437865394857, lon=-1.720224083538152E-11([X=0.47111944719262044, Y=-8.104310192839264E-12, Z=0.8803759987367299])],
+ [lat=0.017453291479645996, lon=2.4457272005608357E-47([X=1.0009653513901666, Y=2.448088186713865E-47, Z=0.01747191415779267])],
+ [lat=0.0884233366943164, lon=0.4323234231678824([X=0.9054355304510789, Y=0.4178006803188124, Z=0.08840463683725623])]], internalEdges={0}}]}
+ */
+ final List<GeoPoint> poly1List = new ArrayList<>();
+ poly1List.add(new GeoPoint(PlanetModel.WGS84, 1.079437865394857, -1.720224083538152E-11));
+ poly1List.add(new GeoPoint(PlanetModel.WGS84, -1.5707963267948966, 0.017453291479645996));
+ poly1List.add(new GeoPoint(PlanetModel.WGS84, 0.017453291479645996, 2.4457272005608357E-47));
+
+ final GeoConvexPolygon poly1 = new GeoConvexPolygon(PlanetModel.WGS84, poly1List);
+
+ /*
+ [junit4] 1> unquantized=[lat=-1.5316724989005415, lon=3.141592653589793([X=-0.03902652216795768, Y=4.779370545484258E-18, Z=-0.9970038705813589])]
+ [junit4] 1> quantized=[X=-0.03902652216283731, Y=2.3309121299774915E-10, Z=-0.9970038706538652]
+ */
+
+ final GeoPoint point = new GeoPoint(PlanetModel.WGS84, -1.5316724989005415, 3.141592653589793);
+
+ assertTrue(poly1.isWithin(point));
+
+ final XYZBounds actualBounds1 = new XYZBounds();
+ poly1.getBounds(actualBounds1);
+
+ final XYZSolid solid = XYZSolidFactory.makeXYZSolid(PlanetModel.WGS84,
+ actualBounds1.getMinimumX(), actualBounds1.getMaximumX(),
+ actualBounds1.getMinimumY(), actualBounds1.getMaximumY(),
+ actualBounds1.getMinimumZ(), actualBounds1.getMaximumZ());
+
+ assertTrue(solid.isWithin(point));
+ }
}
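
The heart of the fix above is replacing the ad-hoc backward scan with explicit prev/next "brother" maps: for each edge, the nearest preceding and following edge that is not numerically coplanar with it, wrapping around the polygon. A sketch of that lookup over a circular edge array, assuming at least one non-coplanar edge exists; Edge and isNumericallyIdentical are stand-ins for the spatial3d types:

public class BrotherLookup {
  // Stand-in for spatial3d's SidedPlane coplanarity check.
  interface Edge {
    boolean isNumericallyIdentical(Edge other);
  }

  // Nearest following edge not coplanar with edges[i], wrapping around;
  // mirrors how bound1Index is advanced in the constructors above.
  static int nextBrother(Edge[] edges, int i) {
    int j = (i + 1) % edges.length;
    while (edges[j].isNumericallyIdentical(edges[i])) {
      j = (j + 1) % edges.length;
    }
    return j;
  }

  // Nearest preceding edge not coplanar with edges[i], wrapping around.
  static int prevBrother(Edge[] edges, int i) {
    int j = Math.floorMod(i - 1, edges.length);
    while (edges[j].isNumericallyIdentical(edges[i])) {
      j = Math.floorMod(j - 1, edges.length);
    }
    return j;
  }
}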
[03/50] [abbrv] lucene-solr:apiv2: LUCENE-7422: fix bugs in test's
retry loop
Posted by no...@apache.org.
LUCENE-7422: fix bugs in test's retry loop
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/18306628
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/18306628
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/18306628
Branch: refs/heads/apiv2
Commit: 18306628a9658fe6b9fcb1e409e63a5876257e84
Parents: 9811802
Author: Robert Muir <rm...@apache.org>
Authored: Thu Aug 25 08:20:53 2016 -0400
Committer: Robert Muir <rm...@apache.org>
Committed: Thu Aug 25 08:20:53 2016 -0400
----------------------------------------------------------------------
.../org/apache/lucene/index/TestAllFilesCheckIndexHeader.java | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/18306628/lucene/core/src/test/org/apache/lucene/index/TestAllFilesCheckIndexHeader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestAllFilesCheckIndexHeader.java b/lucene/core/src/test/org/apache/lucene/index/TestAllFilesCheckIndexHeader.java
index dfb2f1b..b4c6368 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestAllFilesCheckIndexHeader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestAllFilesCheckIndexHeader.java
@@ -28,6 +28,8 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LineFileDocs;
import org.apache.lucene.util.LuceneTestCase.SuppressFileSystems;
import org.apache.lucene.util.LuceneTestCase;
@@ -84,7 +86,8 @@ public class TestAllFilesCheckIndexHeader extends LuceneTestCase {
}
private void checkOneFile(Directory dir, String victim) throws IOException {
- try (BaseDirectoryWrapper dirCopy = newDirectory()) {
+    // use a RAMDirectory explicitly: we do evil things like trying to generate broken files, and deletes must work.
+ try (BaseDirectoryWrapper dirCopy = new MockDirectoryWrapper(random(), new RAMDirectory())) {
dirCopy.setCheckIndexOnClose(false);
long victimLength = dir.fileLength(victim);
@@ -117,6 +120,8 @@ public class TestAllFilesCheckIndexHeader extends LuceneTestCase {
break;
}
}
+          // we have to try again: delete the first attempt and retry the loop
+ dirCopy.deleteFile(name);
}
}
dirCopy.sync(Collections.singleton(name));
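
The bug fixed above is a classic retry-loop leak: a failed attempt left its file behind, so the next iteration collided with it. A generic sketch of the corrected pattern, with Attempt as a hypothetical interface:

import java.io.IOException;

public class RetryWithCleanup {
  // Hypothetical single attempt that may leave a partial artifact behind.
  interface Attempt {
    boolean tryOnce(String name) throws IOException; // true on success
    void cleanup(String name) throws IOException;    // remove the failed artifact
  }

  static void run(Attempt attempt, String name, int maxTries) throws IOException {
    for (int i = 0; i < maxTries; i++) {
      if (attempt.tryOnce(name)) {
        return;
      }
      // Delete the failed attempt before retrying, as the fix above does
      // with dirCopy.deleteFile(name).
      attempt.cleanup(name);
    }
    throw new IOException("gave up on " + name + " after " + maxTries + " tries");
  }
}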
[32/50] [abbrv] lucene-solr:apiv2: SOLR-9447: Do not clone
SolrInputDocument if update processor chain does not contain custom
processors.
Posted by no...@apache.org.
SOLR-9447: Do not clone SolrInputDocument if update processor chain does not contain custom processors.
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/26262f40
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/26262f40
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/26262f40
Branch: refs/heads/apiv2
Commit: 26262f4074d43c3167efcfb83452daf081b0d9b9
Parents: 02b97a2
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Wed Aug 31 01:19:42 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Wed Aug 31 01:19:42 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 3 +++
.../processor/DistributedUpdateProcessor.java | 22 ++++++++++++++++++--
2 files changed, 23 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/26262f40/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 15071db..f33138d 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -100,6 +100,9 @@ Optimizations
* SOLR-9449: Example schemas do not index _version_ field anymore because the field
has DocValues enabled already. (shalin)
+* SOLR-9447: Do not clone SolrInputDocument if update processor chain does not contain custom processors.
+ (shalin)
+
Other Changes
----------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/26262f40/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java b/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
index f19352d..b8bdd16 100644
--- a/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
@@ -266,6 +266,11 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
//used for keeping track of replicas that have processed an add/update from the leader
private RequestReplicationTracker replicationTracker = null;
+ // should we clone the document before sending it to replicas?
+ // this is set to true in the constructor if the next processors in the chain
+ // are custom and may modify the SolrInputDocument racing with its serialization for replication
+ private final boolean cloneRequiredOnLeader;
+
public DistributedUpdateProcessor(SolrQueryRequest req, SolrQueryResponse rsp, UpdateRequestProcessor next) {
this(req, rsp, new AtomicUpdateDocumentMerger(req), next);
}
@@ -314,6 +319,19 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
collection = null;
}
+ boolean shouldClone = false;
+ UpdateRequestProcessor nextInChain = next;
+ while (nextInChain != null) {
+ Class<? extends UpdateRequestProcessor> klass = nextInChain.getClass();
+ if (klass != LogUpdateProcessorFactory.LogUpdateProcessor.class
+ && klass != RunUpdateProcessor.class
+ && klass != TolerantUpdateProcessor.class) {
+ shouldClone = true;
+ break;
+ }
+ nextInChain = nextInChain.next;
+ }
+ cloneRequiredOnLeader = shouldClone;
}
private List<Node> setupRequest(String id, SolrInputDocument doc) {
@@ -1086,14 +1104,14 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
boolean willDistrib = isLeader && nodes != null && nodes.size() > 0;
SolrInputDocument clonedDoc = null;
- if (willDistrib) {
+ if (willDistrib && cloneRequiredOnLeader) {
clonedDoc = cmd.solrDoc.deepCopy();
}
// TODO: possibly set checkDeleteByQueries as a flag on the command?
doLocalAdd(cmd);
- if (willDistrib) {
+ if (willDistrib && cloneRequiredOnLeader) {
cmd.solrDoc = clonedDoc;
}
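
For illustration: per the chain walk above, any processor after DistributedUpdateProcessor other than LogUpdateProcessor, RunUpdateProcessor, or TolerantUpdateProcessor counts as custom and sets cloneRequiredOnLeader. A minimal sketch of such a processor (class and field names are hypothetical); it mutates the document, which is exactly the race with replication serialization that the deep copy guards against:

    import java.io.IOException;
    import org.apache.solr.update.AddUpdateCommand;
    import org.apache.solr.update.processor.UpdateRequestProcessor;

    // Hypothetical custom processor: its presence in the chain makes
    // cloneRequiredOnLeader=true, so the leader deep-copies the document
    // before this code can mutate it concurrently with replication.
    public class StampingUpdateProcessor extends UpdateRequestProcessor {
      public StampingUpdateProcessor(UpdateRequestProcessor next) {
        super(next);
      }

      @Override
      public void processAdd(AddUpdateCommand cmd) throws IOException {
        cmd.getSolrInputDocument().setField("processed_ts_l", System.currentTimeMillis());
        super.processAdd(cmd);
      }
    }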
[34/50] [abbrv] lucene-solr:apiv2: SOLR-9452: Altered javadocs to
reflect the new behavior
Posted by no...@apache.org.
SOLR-9452: Altered javadocs to reflect the new behavior
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/23825b24
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/23825b24
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/23825b24
Branch: refs/heads/apiv2
Commit: 23825b248039c907d7eccc9b9fe381f836076539
Parents: f0f92d8
Author: Noble Paul <no...@apache.org>
Authored: Wed Aug 31 18:47:43 2016 +0530
Committer: Noble Paul <no...@apache.org>
Committed: Wed Aug 31 18:47:43 2016 +0530
----------------------------------------------------------------------
.../src/java/org/apache/solr/common/util/JsonRecordReader.java | 2 ++
1 file changed, 2 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/23825b24/solr/solrj/src/java/org/apache/solr/common/util/JsonRecordReader.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/util/JsonRecordReader.java b/solr/solrj/src/java/org/apache/solr/common/util/JsonRecordReader.java
index 782c25d..b9766fb 100644
--- a/solr/solrj/src/java/org/apache/solr/common/util/JsonRecordReader.java
+++ b/solr/solrj/src/java/org/apache/solr/common/util/JsonRecordReader.java
@@ -533,6 +533,8 @@ public class JsonRecordReader {
* @param record The record map. The key is the field name as provided in
* the addField() methods. The value can be a single String (for single
* valued fields) or a List<String> (for multiValued).
+ * This map is mutable. DO NOT alter the map or store it for later use.
+ * If it must be stored, make a deep copy before doing so.
* @param path The forEach path for which this record is being emitted
* If there is any change all parsing will be aborted and the Exception
* is propagated up
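
A short usage sketch of the rule this javadoc adds (the split path, field mapping, and input are illustrative; streamRecords throws IOException, so call it from a method that handles that): copy the record inside handle() before keeping a reference, since the reader reuses the map.

    import java.io.StringReader;
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;
    import org.apache.solr.common.util.JsonRecordReader;

    List<Map<String, Object>> kept = new ArrayList<>();
    JsonRecordReader reader = JsonRecordReader.getInst("/docs", Arrays.asList("id:/docs/id"));
    reader.streamRecords(new StringReader("{\"docs\":[{\"id\":\"1\"},{\"id\":\"2\"}]}"),
        // copy before storing; for nested values a deep copy would be needed
        (record, path) -> kept.add(new LinkedHashMap<>(record)));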
[31/50] [abbrv] lucene-solr:apiv2: SOLR-9439: The delete shard API
has been made more resilient against failures resulting from non-existent
cores.
Posted by no...@apache.org.
SOLR-9439: The delete shard API has been made more resilient against failures resulting from non-existent cores.
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/02b97a29
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/02b97a29
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/02b97a29
Branch: refs/heads/apiv2
Commit: 02b97a29b747e439bba8ad95a0269f959bea965e
Parents: 2700b95
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Tue Aug 30 23:44:22 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Tue Aug 30 23:44:22 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 3 +-
.../org/apache/solr/cloud/DeleteShardCmd.java | 80 +++++++++++++++-----
.../org/apache/solr/cloud/SplitShardCmd.java | 24 +-----
.../org/apache/solr/core/CoreContainer.java | 4 +-
4 files changed, 64 insertions(+), 47 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/02b97a29/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 5f8694f..15071db 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -81,7 +81,8 @@ Bug Fixes
* SOLR-9445: Admin requests are retried by CloudSolrClient and LBHttpSolrClient on failure. (shalin)
-* SOLR-9439: Shard split clean up logic for older failed splits is faulty. (shalin)
+* SOLR-9439: Shard split clean up logic for older failed splits is faulty. The delete shard API
+ has also been made more resilient against failures resulting from non-existent cores. (shalin)
* SOLR-9430: Fix locale lookup in DIH <propertyWriter/> to use BCP47 language tags
to be consistent with other places in Solr. Language names still work for backwards
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/02b97a29/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java
index a7f6d5b..41b74d5 100644
--- a/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java
@@ -16,10 +16,14 @@
* limitations under the License.
*/
package org.apache.solr.cloud;
+
import java.lang.invoke.MethodHandles;
-import java.util.Collections;
+import java.util.ArrayList;
import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
import java.util.Map;
+import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
@@ -27,18 +31,23 @@ import org.apache.solr.cloud.overseer.OverseerAction;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.component.ShardHandler;
import org.apache.solr.util.TimeOut;
+import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESHARD;
import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
@@ -87,24 +96,42 @@ public class DeleteShardCmd implements Cmd {
inQueue.offer(Utils.toJSON(m));
}
- ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-
String asyncId = message.getStr(ASYNC);
- Map<String, String> requestMap = null;
- if (asyncId != null) {
- requestMap = new HashMap<>(slice.getReplicas().size(), 1.0f);
- }
try {
- ModifiableSolrParams params = new ModifiableSolrParams();
- params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.UNLOAD.toString());
- params.set(CoreAdminParams.DELETE_INDEX, message.getBool(CoreAdminParams.DELETE_INDEX, true));
- params.set(CoreAdminParams.DELETE_INSTANCE_DIR, message.getBool(CoreAdminParams.DELETE_INSTANCE_DIR, true));
- params.set(CoreAdminParams.DELETE_DATA_DIR, message.getBool(CoreAdminParams.DELETE_DATA_DIR, true));
-
- ocmh.sliceCmd(clusterState, params, null, slice, shardHandler, asyncId, requestMap);
-
- ocmh.processResponses(results, shardHandler, true, "Failed to delete shard", asyncId, requestMap, Collections.emptySet());
+ List<ZkNodeProps> replicas = getReplicasForSlice(collectionName, slice);
+ CountDownLatch cleanupLatch = new CountDownLatch(replicas.size());
+ for (ZkNodeProps r : replicas) {
+ final ZkNodeProps replica = r.plus(message.getProperties()).plus("parallel", "true").plus(ASYNC, asyncId);
+ log.info("Deleting replica for collection={} shard={} on node={}", replica.getStr(COLLECTION_PROP), replica.getStr(SHARD_ID_PROP), replica.getStr(CoreAdminParams.NODE));
+ NamedList deleteResult = new NamedList();
+ try {
+ ((DeleteReplicaCmd)ocmh.commandMap.get(DELETEREPLICA)).deleteReplica(clusterState, replica, deleteResult, () -> {
+ cleanupLatch.countDown();
+ if (deleteResult.get("failure") != null) {
+ synchronized (results) {
+ results.add("failure", String.format(Locale.ROOT, "Failed to delete replica for collection=%s shard=%s" +
+ " on node=%s", replica.getStr(COLLECTION_PROP), replica.getStr(SHARD_ID_PROP), replica.getStr(NODE_NAME_PROP)));
+ }
+ }
+ SimpleOrderedMap success = (SimpleOrderedMap) deleteResult.get("success");
+ if (success != null) {
+ synchronized (results) {
+ results.add("success", success);
+ }
+ }
+ });
+ } catch (KeeperException e) {
+ log.warn("Error deleting replica: " + r, e);
+ cleanupLatch.countDown();
+ } catch (Exception e) {
+ log.warn("Error deleting replica: " + r, e);
+ cleanupLatch.countDown();
+ throw e;
+ }
+ }
+ log.debug("Waiting for delete shard action to complete");
+ cleanupLatch.await(5, TimeUnit.MINUTES);
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, DELETESHARD.toLower(), ZkStateReader.COLLECTION_PROP,
collectionName, ZkStateReader.SHARD_ID_PROP, sliceId);
@@ -114,7 +141,7 @@ public class DeleteShardCmd implements Cmd {
// wait for a while until we don't see the shard
TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS);
boolean removed = false;
- while (! timeout.hasTimedOut()) {
+ while (!timeout.hasTimedOut()) {
Thread.sleep(100);
DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName);
removed = collection.getSlice(sliceId) == null;
@@ -129,7 +156,6 @@ public class DeleteShardCmd implements Cmd {
}
log.info("Successfully deleted collection: " + collectionName + ", shard: " + sliceId);
-
} catch (SolrException e) {
throw e;
} catch (Exception e) {
@@ -137,4 +163,18 @@ public class DeleteShardCmd implements Cmd {
"Error executing delete operation for collection: " + collectionName + " shard: " + sliceId, e);
}
}
+
+ private List<ZkNodeProps> getReplicasForSlice(String collectionName, Slice slice) {
+ List<ZkNodeProps> sourceReplicas = new ArrayList<>();
+ for (Replica replica : slice.getReplicas()) {
+ ZkNodeProps props = new ZkNodeProps(
+ COLLECTION_PROP, collectionName,
+ SHARD_ID_PROP, slice.getName(),
+ ZkStateReader.CORE_NAME_PROP, replica.getCoreName(),
+ ZkStateReader.REPLICA_PROP, replica.getName(),
+ CoreAdminParams.NODE, replica.getNodeName());
+ sourceReplicas.add(props);
+ }
+ return sourceReplicas;
+ }
}
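
The coordination pattern above, issuing per-replica deletes and waiting on a latch that every callback counts down exactly once, can be sketched independently of Solr's types (all names below are illustrative):

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    // Each task counts the latch down exactly once, success or failure,
    // so await() cannot hang on a lost callback; the wait is bounded,
    // mirroring the command's 5-minute timeout.
    static boolean deleteAll(List<Runnable> deletes) throws InterruptedException {
      CountDownLatch latch = new CountDownLatch(deletes.size());
      ExecutorService pool = Executors.newFixedThreadPool(4);
      for (Runnable delete : deletes) {
        pool.submit(() -> {
          try {
            delete.run();
          } finally {
            latch.countDown(); // count down even on failure
          }
        });
      }
      try {
        return latch.await(5, TimeUnit.MINUTES);
      } finally {
        pool.shutdown();
      }
    }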
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/02b97a29/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java
index 4463285..3361a5f 100644
--- a/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java
@@ -219,8 +219,6 @@ public class SplitShardCmd implements Cmd {
ZkNodeProps m = new ZkNodeProps(propMap);
try {
ocmh.commandMap.get(DELETESHARD).call(clusterState, m, new NamedList());
- } catch (SolrException e) {
- throwIfNotNonExistentCoreException(subSlice, e);
} catch (Exception e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to delete already existing sub shard: " + subSlice,
e);
@@ -233,7 +231,7 @@ public class SplitShardCmd implements Cmd {
if (oldShardsDeleted) {
// refresh the locally cached cluster state
- zkStateReader.forceUpdateCollection(collectionName);
+ // we know we have the latest because otherwise deleteshard would have failed
clusterState = zkStateReader.getClusterState();
collection = clusterState.getCollection(collectionName);
}
@@ -471,24 +469,4 @@ public class SplitShardCmd implements Cmd {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, null, e);
}
}
-
- private void throwIfNotNonExistentCoreException(String subSlice, SolrException e) {
- Throwable t = e;
- String cause = null;
- while (t != null) {
- if (t instanceof SolrException) {
- SolrException solrException = (SolrException) t;
- cause = solrException.getMetadata("cause");
- if (cause != null && !"NonExistentCore".equals(cause)) {
- throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to delete already existing sub shard: " + subSlice,
- e);
- }
- }
- t = t.getCause();
- }
- if (!"NonExistentCore".equals(cause)) {
- throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to delete already existing sub shard: " + subSlice,
- e);
- }
- }
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/02b97a29/solr/core/src/java/org/apache/solr/core/CoreContainer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/CoreContainer.java b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
index 59fe383..0b996b8 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
@@ -1019,9 +1019,7 @@ public class CoreContainer {
CoreDescriptor cd = solrCores.getCoreDescriptor(name);
if (cd == null) {
- SolrException solrException = new SolrException(ErrorCode.BAD_REQUEST, "Cannot unload non-existent core [" + name + "]");
- solrException.setMetadata("cause", "NonExistentCore");
- throw solrException;
+ throw new SolrException(ErrorCode.BAD_REQUEST, "Cannot unload non-existent core [" + name + "]");
}
boolean close = solrCores.isLoadedNotPendingClose(name);
[35/50] [abbrv] lucene-solr:apiv2: SOLR-9142: json.facet: new
method=dvhash which works on terms. Also: (1) method=stream now requires you
set sort=index asc to work (2) faceting on numerics with prefix or mincount=0
will give you an error (3) refactored
Posted by no...@apache.org.
SOLR-9142: json.facet: new method=dvhash which works on terms. Also:
(1) method=stream now requires you set sort=index asc to work
(2) faceting on numerics with prefix or mincount=0 will give you an error
(3) refactored similar findTopSlots into one common one in FacetFieldProcessor
(4) new DocSet.collectSortedDocSet utility
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/7b5df8a1
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/7b5df8a1
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/7b5df8a1
Branch: refs/heads/apiv2
Commit: 7b5df8a10391f5b824e8ea1793917ff60b64b8a8
Parents: 23825b2
Author: David Smiley <ds...@apache.org>
Authored: Wed Aug 31 16:54:24 2016 -0400
Committer: David Smiley <ds...@apache.org>
Committed: Wed Aug 31 16:54:24 2016 -0400
----------------------------------------------------------------------
solr/CHANGES.txt | 11 +
.../java/org/apache/solr/search/DocSetUtil.java | 33 ++
.../apache/solr/search/facet/FacetField.java | 68 ++--
.../solr/search/facet/FacetFieldProcessor.java | 150 ++++++++-
.../facet/FacetFieldProcessorByArray.java | 144 +--------
.../facet/FacetFieldProcessorByHashNumeric.java | 324 ++++++++++---------
.../org/apache/solr/search/facet/SlotAcc.java | 15 +-
.../solr/search/facet/TestJsonFacets.java | 47 ++-
8 files changed, 449 insertions(+), 343 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7b5df8a1/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index cc90e6e..cc28449 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -66,6 +66,15 @@ Jetty 9.3.8.v20160314
Detailed Change List
----------------------
+Upgrade Notes
+----------------------
+
+* If you use the JSON Facet API (json.facet) with method=stream, you must now set sort='index asc' to get the streaming
+behavior; otherwise it won't stream. Reminder: "method" is a hint that doesn't change defaults of other parameters.
+
+* If you use the JSON Facet API (json.facet) to facet on a numeric field and if you use mincount=0 or if you set the
+prefix, then you will now get an error as these options are incompatible with numeric faceting.
+
New Features
----------------------
@@ -105,6 +114,8 @@ Optimizations
* SOLR-9452: JsonRecordReader should not deep copy document before handler.handle(). (noble, shalin)
+* SOLR-9142: JSON Facet API: new method=dvhash can be chosen for fields with high cardinality. (David Smiley)
+
Other Changes
----------------------
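
A hedged SolrJ sketch of a request that still streams under the new rule (URL, collection, and field names are placeholders; the SolrJ 6.x Builder API is assumed):

    import org.apache.solr.client.solrj.SolrQuery;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;

    public class StreamFacetExample {
      public static void main(String[] args) throws Exception {
        try (HttpSolrClient client =
                 new HttpSolrClient.Builder("http://localhost:8983/solr/techproducts").build()) {
          SolrQuery q = new SolrQuery("*:*");
          q.setRows(0);
          // method=stream only streams when sort='index asc' is set explicitly:
          q.add("json.facet", "{cats:{terms:{field:'cat_s', method:'stream', sort:'index asc'}}}");
          System.out.println(client.query(q).getResponse().get("facets"));
        }
      }
    }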
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7b5df8a1/solr/core/src/java/org/apache/solr/search/DocSetUtil.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/DocSetUtil.java b/solr/core/src/java/org/apache/solr/search/DocSetUtil.java
index cc2393d..b7545e6 100644
--- a/solr/core/src/java/org/apache/solr/search/DocSetUtil.java
+++ b/solr/core/src/java/org/apache/solr/search/DocSetUtil.java
@@ -17,10 +17,12 @@
package org.apache.solr.search;
import java.io.IOException;
+import java.util.Iterator;
import java.util.List;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PostingsEnum;
@@ -29,7 +31,9 @@ import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Collector;
import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.Bits;
@@ -208,4 +212,33 @@ public class DocSetUtil {
return new SortedIntDocSet(docs);
}
+ public static void collectSortedDocSet(DocSet docs, IndexReader reader, Collector collector) throws IOException {
+ // TODO add SortedDocSet sub-interface and take that.
+ // TODO collectUnsortedDocSet: iterate segment, then all docSet per segment.
+
+ final List<LeafReaderContext> leaves = reader.leaves();
+ final Iterator<LeafReaderContext> ctxIt = leaves.iterator();
+ int segBase = 0;
+ int segMax;
+ int adjustedMax = 0;
+ LeafReaderContext ctx = null;
+ LeafCollector leafCollector = null;
+ for (DocIterator docsIt = docs.iterator(); docsIt.hasNext(); ) {
+ final int doc = docsIt.nextDoc();
+ if (doc >= adjustedMax) {
+ do {
+ ctx = ctxIt.next();
+ segBase = ctx.docBase;
+ segMax = ctx.reader().maxDoc();
+ adjustedMax = segBase + segMax;
+ } while (doc >= adjustedMax);
+ leafCollector = collector.getLeafCollector(ctx);
+ }
+ if (doc < segBase) {
+ throw new IllegalStateException("algorithm expects a sorted DocSet but got: " + docs.getClass());
+ }
+ leafCollector.collect(doc - segBase); // per-seg collectors
+ }
+ }
+
}
\ No newline at end of file
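
A usage sketch for the new utility (a fragment; it assumes a sorted DocSet docs and the searcher's top-level IndexReader reader are in scope): per-segment state is set up in doSetNextReader, and collect() receives segment-local doc ids.

    import java.io.IOException;
    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.search.SimpleCollector;
    import org.apache.solr.search.DocSetUtil;

    final int[] total = {0};
    DocSetUtil.collectSortedDocSet(docs, reader, new SimpleCollector() {
      @Override public boolean needsScores() { return false; }
      @Override protected void doSetNextReader(LeafReaderContext ctx) throws IOException {
        // per-segment setup goes here, e.g. pulling DocValues from ctx.reader()
      }
      @Override public void collect(int segDoc) {
        total[0]++; // segDoc is segment-local, not a global doc id
      }
    });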
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7b5df8a1/solr/core/src/java/org/apache/solr/search/facet/FacetField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/facet/FacetField.java b/solr/core/src/java/org/apache/solr/search/facet/FacetField.java
index 9cc5420..4d56513 100644
--- a/solr/core/src/java/org/apache/solr/search/facet/FacetField.java
+++ b/solr/core/src/java/org/apache/solr/search/facet/FacetField.java
@@ -66,28 +66,29 @@ public class FacetField extends FacetRequestSorted {
}
public enum FacetMethod {
- DV, // DocValues
- UIF, // UnInvertedField
- ENUM,
- STREAM,
+ DV, // DocValues, collect into ordinal array
+ UIF, // UnInvertedField, collect into ordinal array
+ DVHASH, // DocValues, collect into hash
+ ENUM, // TermsEnum then intersect DocSet (stream-able)
+ STREAM, // presently equivalent to ENUM
SMART,
;
public static FacetMethod fromString(String method) {
- if (method == null || method.length()==0) return null;
- if ("dv".equals(method)) {
- return DV;
- } else if ("uif".equals(method)) {
- return UIF;
- } else if ("enum".equals(method)) {
- return ENUM;
- } else if ("smart".equals(method)) {
- return SMART;
- } else if ("stream".equals(method)) {
- return STREAM;
+ if (method == null || method.length()==0) return DEFAULT_METHOD;
+ switch (method) {
+ case "dv": return DV;
+ case "uif": return UIF;
+ case "dvhash": return DVHASH;
+ case "enum": return ENUM;
+ case "stream": return STREAM; // TODO replace with enum?
+ case "smart": return SMART;
+ default:
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown FacetField method " + method);
}
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown FacetField method " + method);
}
+
+ static FacetMethod DEFAULT_METHOD = SMART; // non-final for tests to vary
}
@Override
@@ -96,21 +97,42 @@ public class FacetField extends FacetRequestSorted {
FieldType ft = sf.getType();
boolean multiToken = sf.multiValued() || ft.multiValuedFieldCache();
- if (method == FacetMethod.ENUM && sf.indexed()) {
- throw new UnsupportedOperationException();
- } else if (method == FacetMethod.STREAM && sf.indexed()) {
+ LegacyNumericType ntype = ft.getNumericType();
+ // ensure we can support the requested options for numeric faceting:
+ if (ntype != null) {
+ if (prefix != null) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+ "Doesn't make sense to set facet prefix on a numeric field");
+ }
+ if (mincount == 0) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+ "Numeric fields do not support facet mincount=0; try indexing as terms");
+ // TODO if indexed=true then we could add support
+ }
+ }
+
+ // TODO auto-pick ENUM/STREAM SOLR-9351 when index asc and DocSet cardinality is *not* much smaller than term cardinality
+ if (method == FacetMethod.ENUM) { // at the moment these two are the same
+ method = FacetMethod.STREAM;
+ }
+ if (method == FacetMethod.STREAM && sf.indexed() &&
+ "index".equals(sortVariable) && sortDirection == SortDirection.asc) {
return new FacetFieldProcessorByEnumTermsStream(fcontext, this, sf);
}
- LegacyNumericType ntype = ft.getNumericType();
+ // TODO if method=UIF and not single-valued numerics then simply choose that now? TODO add FieldType.getDocValuesType()
if (!multiToken) {
- if (ntype != null) {
- // single valued numeric (docvalues or fieldcache)
+ if (mincount > 0 && prefix == null && (ntype != null || method == FacetMethod.DVHASH)) {
+ // TODO can we auto-pick for strings when term cardinality is much greater than DocSet cardinality?
+ // or if we don't know cardinality but DocSet size is very small
return new FacetFieldProcessorByHashNumeric(fcontext, this, sf);
- } else {
+ } else if (ntype == null) {
// single valued string...
return new FacetFieldProcessorByArrayDV(fcontext, this, sf);
+ } else {
+ throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+ "Couldn't pick facet algorithm for field " + sf);
}
}
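
For reference, a hedged request fragment selecting the new algorithm explicitly (the field name is a placeholder; it reuses the SolrQuery setup from the sketch above). Per this commit, dvhash requires a single-valued docValues field, mincount > 0, and no prefix:

    q.add("json.facet", "{authors:{terms:{field:'author_s', method:'dvhash', mincount:1}}}");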
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7b5df8a1/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessor.java b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessor.java
index a737321..3c1a40c 100644
--- a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessor.java
+++ b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessor.java
@@ -18,12 +18,18 @@
package org.apache.solr.search.facet;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
+import java.util.List;
import java.util.Map;
+import java.util.function.BiPredicate;
+import java.util.function.Function;
+import java.util.function.IntFunction;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Query;
+import org.apache.lucene.util.PriorityQueue;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.search.DocSet;
@@ -57,7 +63,7 @@ abstract class FacetFieldProcessor extends FacetProcessor<FacetField> {
this.effectiveMincount = (int)(fcontext.isShard() ? Math.min(1 , freq.mincount) : freq.mincount);
}
- // This is used to create accs for second phase (or to create accs for all aggs)
+ /** This is used to create accs for second phase (or to create accs for all aggs) */
@Override
protected void createAccs(int docCount, int slotCount) throws IOException {
if (accMap == null) {
@@ -195,7 +201,140 @@ abstract class FacetFieldProcessor extends FacetProcessor<FacetField> {
}
}
- void fillBucket(SimpleOrderedMap<Object> target, int count, int slotNum, DocSet subDomain, Query filter) throws IOException {
+ /** Processes the collected data to find the top slots, and composes it in the response NamedList. */
+ SimpleOrderedMap<Object> findTopSlots(final int numSlots, final int slotCardinality,
+ IntFunction<Comparable> bucketValFromSlotNumFunc,
+ Function<Comparable, String> fieldQueryValFunc) throws IOException {
+ int numBuckets = 0;
+ List<Object> bucketVals = null;
+ if (freq.numBuckets && fcontext.isShard()) {
+ bucketVals = new ArrayList<>(100);
+ }
+
+ final int off = fcontext.isShard() ? 0 : (int) freq.offset;
+ // add a modest amount of over-request if this is a shard request
+ final int lim = freq.limit >= 0 ? (fcontext.isShard() ? (int)(freq.limit*1.1+4) : (int)freq.limit) : Integer.MAX_VALUE;
+
+ final int sortMul = freq.sortDirection.getMultiplier();
+
+ int maxTopVals = (int) (lim >= 0 ? (long) off + lim : Integer.MAX_VALUE - 1);
+ maxTopVals = Math.min(maxTopVals, slotCardinality);
+ final SlotAcc sortAcc = this.sortAcc, indexOrderAcc = this.indexOrderAcc;
+ final BiPredicate<Slot,Slot> orderPredicate;
+ if (indexOrderAcc != null && indexOrderAcc != sortAcc) {
+ orderPredicate = (a, b) -> {
+ int cmp = sortAcc.compare(a.slot, b.slot) * sortMul;
+ return cmp == 0 ? (indexOrderAcc.compare(a.slot, b.slot) > 0) : cmp < 0;
+ };
+ } else {
+ orderPredicate = (a, b) -> {
+ int cmp = sortAcc.compare(a.slot, b.slot) * sortMul;
+ return cmp == 0 ? b.slot < a.slot : cmp < 0;
+ };
+ }
+ final PriorityQueue<Slot> queue = new PriorityQueue<Slot>(maxTopVals) {
+ @Override
+ protected boolean lessThan(Slot a, Slot b) { return orderPredicate.test(a, b); }
+ };
+
+ // note: We avoid object allocation by having a Slot and re-using the 'bottom'.
+ Slot bottom = null;
+ Slot scratchSlot = new Slot();
+ for (int slotNum = 0; slotNum < numSlots; slotNum++) {
+ // screen out buckets not matching mincount immediately (i.e. don't even increment numBuckets)
+ if (effectiveMincount > 0 && countAcc.getCount(slotNum) < effectiveMincount) {
+ continue;
+ }
+
+ numBuckets++;
+ if (bucketVals != null && bucketVals.size()<100) {
+ Object val = bucketValFromSlotNumFunc.apply(slotNum);
+ bucketVals.add(val);
+ }
+
+ if (bottom != null) {
+ scratchSlot.slot = slotNum; // scratchSlot is only used to hold this slotNum for the following line
+ if (orderPredicate.test(bottom, scratchSlot)) {
+ bottom.slot = slotNum;
+ bottom = queue.updateTop();
+ }
+ } else if (lim > 0) {
+ // queue not full
+ Slot s = new Slot();
+ s.slot = slotNum;
+ queue.add(s);
+ if (queue.size() >= maxTopVals) {
+ bottom = queue.top();
+ }
+ }
+ }
+
+ assert queue.size() <= numBuckets;
+
+ SimpleOrderedMap<Object> res = new SimpleOrderedMap<>();
+ if (freq.numBuckets) {
+ if (!fcontext.isShard()) {
+ res.add("numBuckets", numBuckets);
+ } else {
+ SimpleOrderedMap<Object> map = new SimpleOrderedMap<>(2);
+ map.add("numBuckets", numBuckets);
+ map.add("vals", bucketVals);
+ res.add("numBuckets", map);
+ }
+ }
+
+ FacetDebugInfo fdebug = fcontext.getDebugInfo();
+ if (fdebug != null) fdebug.putInfoItem("numBuckets", (long) numBuckets);
+
+ if (freq.allBuckets) {
+ SimpleOrderedMap<Object> allBuckets = new SimpleOrderedMap<>();
+ // countAcc.setValues(allBuckets, allBucketsSlot);
+ allBuckets.add("count", allBucketsAcc.getSpecialCount());
+ allBucketsAcc.setValues(allBuckets, -1); // -1 slotNum is unused for SpecialSlotAcc
+ // allBuckets currently doesn't execute sub-facets (because it doesn't change the domain?)
+ res.add("allBuckets", allBuckets);
+ }
+
+ if (freq.missing) {
+ // TODO: it would be more efficient to build up a missing DocSet if we need it here anyway.
+ SimpleOrderedMap<Object> missingBucket = new SimpleOrderedMap<>();
+ fillBucket(missingBucket, getFieldMissingQuery(fcontext.searcher, freq.field), null);
+ res.add("missing", missingBucket);
+ }
+
+ // if we are deep paging, we don't have to order the highest "offset" counts.
+ int collectCount = Math.max(0, queue.size() - off);
+ assert collectCount <= lim;
+ int[] sortedSlots = new int[collectCount];
+ for (int i = collectCount - 1; i >= 0; i--) {
+ sortedSlots[i] = queue.pop().slot;
+ }
+
+ ArrayList<SimpleOrderedMap> bucketList = new ArrayList<>(collectCount);
+ res.add("buckets", bucketList);
+
+ boolean needFilter = deferredAggs != null || freq.getSubFacets().size() > 0;
+
+ for (int slotNum : sortedSlots) {
+ SimpleOrderedMap<Object> bucket = new SimpleOrderedMap<>();
+ Comparable val = bucketValFromSlotNumFunc.apply(slotNum);
+ bucket.add("val", val);
+
+ Query filter = needFilter ? sf.getType().getFieldQuery(null, sf, fieldQueryValFunc.apply(val)) : null;
+
+ fillBucket(bucket, countAcc.getCount(slotNum), slotNum, null, filter);
+
+ bucketList.add(bucket);
+ }
+
+ return res;
+ }
+
+ private static class Slot {
+ int slot;
+ }
+
+ private void fillBucket(SimpleOrderedMap<Object> target, int count, int slotNum, DocSet subDomain, Query filter) throws IOException {
target.add("count", count);
if (count <= 0 && !freq.processEmpty) return;
@@ -272,13 +411,6 @@ abstract class FacetFieldProcessor extends FacetProcessor<FacetField> {
}
}
- static class Slot {
- int slot;
- public int tiebreakCompare(int slotA, int slotB) {
- return slotB - slotA;
- }
- }
-
static class SpecialSlotAcc extends SlotAcc {
SlotAcc collectAcc;
SlotAcc[] otherAccs;
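
The selection loop in findTopSlots follows a standard bounded top-K pattern over Lucene's PriorityQueue: fill the queue up to K, then reuse the current worst entry ("bottom") in place, so the scan allocates at most K Slot objects regardless of numSlots. A stand-alone sketch under the assumption that cmp(a, b) < 0 means slot a ranks below slot b, and k > 0:

    import java.util.function.IntBinaryOperator;
    import org.apache.lucene.util.PriorityQueue;

    class TopSlots {
      static class Slot { int slot; }

      // Returns up to k slot numbers in rank order, best first.
      static int[] topSlots(int numSlots, int k, IntBinaryOperator cmp) {
        PriorityQueue<Slot> queue = new PriorityQueue<Slot>(k) {
          @Override protected boolean lessThan(Slot a, Slot b) {
            return cmp.applyAsInt(a.slot, b.slot) < 0; // worst kept slot stays on top
          }
        };
        Slot bottom = null;
        for (int s = 0; s < numSlots; s++) {
          if (bottom != null) {
            if (cmp.applyAsInt(bottom.slot, s) < 0) { // candidate beats the worst kept
              bottom.slot = s;
              bottom = queue.updateTop();
            }
          } else { // queue not full yet
            Slot holder = new Slot();
            holder.slot = s;
            queue.add(holder);
            if (queue.size() >= k) bottom = queue.top();
          }
        }
        int[] sorted = new int[queue.size()];
        for (int i = sorted.length - 1; i >= 0; i--) sorted[i] = queue.pop().slot;
        return sorted;
      }
    }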
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7b5df8a1/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessorByArray.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessorByArray.java b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessorByArray.java
index 10aa4d9..767bb55 100644
--- a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessorByArray.java
+++ b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessorByArray.java
@@ -18,19 +18,15 @@
package org.apache.solr.search.facet;
import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.PriorityQueue;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.schema.SchemaField;
/**
- * Base class for DV/UIF accumulating counts into an array by ordinal.
+ * Base class for DV/UIF accumulating counts into an array by ordinal. It's
+ * for {@link org.apache.lucene.index.SortedDocValues} and {@link org.apache.lucene.index.SortedSetDocValues} only.
* It can handle terms (strings), not numbers directly but those encoded as terms, and is multi-valued capable.
*/
abstract class FacetFieldProcessorByArray extends FacetFieldProcessor {
@@ -57,11 +53,10 @@ abstract class FacetFieldProcessorByArray extends FacetFieldProcessor {
@Override
public void process() throws IOException {
super.process();
- sf = fcontext.searcher.getSchema().getField(freq.field);
- response = getFieldCacheCounts();
+ response = calcFacets();
}
- private SimpleOrderedMap<Object> getFieldCacheCounts() throws IOException {
+ private SimpleOrderedMap<Object> calcFacets() throws IOException {
String prefix = freq.prefix;
if (prefix == null || prefix.length() == 0) {
prefixRef = null;
@@ -86,128 +81,15 @@ abstract class FacetFieldProcessorByArray extends FacetFieldProcessor {
collectDocs();
- return findTopSlots();
- }
-
- private SimpleOrderedMap<Object> findTopSlots() throws IOException {
- SimpleOrderedMap<Object> res = new SimpleOrderedMap<>();
-
- int numBuckets = 0;
- List<Object> bucketVals = null;
- if (freq.numBuckets && fcontext.isShard()) {
- bucketVals = new ArrayList<>(100);
- }
-
- int off = fcontext.isShard() ? 0 : (int) freq.offset;
- // add a modest amount of over-request if this is a shard request
- int lim = freq.limit >= 0 ? (fcontext.isShard() ? (int)(freq.limit*1.1+4) : (int)freq.limit) : Integer.MAX_VALUE;
-
- int maxsize = (int)(freq.limit >= 0 ? freq.offset + lim : Integer.MAX_VALUE - 1);
- maxsize = Math.min(maxsize, nTerms);
-
- final int sortMul = freq.sortDirection.getMultiplier();
- final SlotAcc sortAcc = this.sortAcc;
-
- PriorityQueue<Slot> queue = new PriorityQueue<Slot>(maxsize) {
- @Override
- protected boolean lessThan(Slot a, Slot b) {
- int cmp = sortAcc.compare(a.slot, b.slot) * sortMul;
- return cmp == 0 ? b.slot < a.slot : cmp < 0;
- }
- };
-
- Slot bottom = null;
- for (int i = 0; i < nTerms; i++) {
- // screen out buckets not matching mincount immediately (i.e. don't even increment numBuckets)
- if (effectiveMincount > 0 && countAcc.getCount(i) < effectiveMincount) {
- continue;
- }
-
- numBuckets++;
- if (bucketVals != null && bucketVals.size()<100) {
- int ord = startTermIndex + i;
- BytesRef br = lookupOrd(ord);
- Object val = sf.getType().toObject(sf, br);
- bucketVals.add(val);
- }
-
- if (bottom != null) {
- if (sortAcc.compare(bottom.slot, i) * sortMul < 0) {
- bottom.slot = i;
- bottom = queue.updateTop();
- }
- } else if (lim > 0) {
- // queue not full
- Slot s = new Slot();
- s.slot = i;
- queue.add(s);
- if (queue.size() >= maxsize) {
- bottom = queue.top();
- }
- }
- }
-
- if (freq.numBuckets) {
- if (!fcontext.isShard()) {
- res.add("numBuckets", numBuckets);
- } else {
- SimpleOrderedMap<Object> map = new SimpleOrderedMap<>(2);
- map.add("numBuckets", numBuckets);
- map.add("vals", bucketVals);
- res.add("numBuckets", map);
- }
- }
-
- FacetDebugInfo fdebug = fcontext.getDebugInfo();
- if (fdebug != null) fdebug.putInfoItem("numBuckets", (long) numBuckets);
-
- // if we are deep paging, we don't have to order the highest "offset" counts.
- int collectCount = Math.max(0, queue.size() - off);
- assert collectCount <= lim;
- int[] sortedSlots = new int[collectCount];
- for (int i = collectCount - 1; i >= 0; i--) {
- sortedSlots[i] = queue.pop().slot;
- }
-
- if (freq.allBuckets) {
- SimpleOrderedMap<Object> allBuckets = new SimpleOrderedMap<>();
- allBuckets.add("count", allBucketsAcc.getSpecialCount());
- if (allBucketsAcc != null) {
- allBucketsAcc.setValues(allBuckets, allBucketsSlot);
- }
- res.add("allBuckets", allBuckets);
- }
-
- ArrayList<SimpleOrderedMap<Object>> bucketList = new ArrayList<>(collectCount);
- res.add("buckets", bucketList);
-
- // TODO: do this with a callback instead?
- boolean needFilter = deferredAggs != null || freq.getSubFacets().size() > 0;
-
- for (int slotNum : sortedSlots) {
- SimpleOrderedMap<Object> bucket = new SimpleOrderedMap<>();
-
- // get the ord of the slot...
- int ord = startTermIndex + slotNum;
-
- BytesRef br = lookupOrd(ord);
- Object val = sf.getType().toObject(sf, br);
-
- bucket.add("val", val);
-
- TermQuery filter = needFilter ? new TermQuery(new Term(sf.getName(), br)) : null;
- fillBucket(bucket, countAcc.getCount(slotNum), slotNum, null, filter);
-
- bucketList.add(bucket);
- }
-
- if (freq.missing) {
- SimpleOrderedMap<Object> missingBucket = new SimpleOrderedMap<>();
- fillBucket(missingBucket, getFieldMissingQuery(fcontext.searcher, freq.field), null);
- res.add("missing", missingBucket);
- }
-
- return res;
+ return super.findTopSlots(nTerms, nTerms,
+ slotNum -> { // getBucketValFromSlotNum
+ try {
+ return (Comparable) sf.getType().toObject(sf, lookupOrd(slotNum + startTermIndex));
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ },
+ Object::toString); // getFieldQueryVal
}
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7b5df8a1/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessorByHashNumeric.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessorByHashNumeric.java b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessorByHashNumeric.java
index 842df20..6d5aec5 100644
--- a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessorByHashNumeric.java
+++ b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessorByHashNumeric.java
@@ -17,25 +17,37 @@
package org.apache.solr.search.facet;
import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
+import java.text.ParseException;
+import java.util.function.IntFunction;
import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.MultiDocValues;
import org.apache.lucene.index.NumericDocValues;
-import org.apache.lucene.search.Query;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.search.SimpleCollector;
import org.apache.lucene.util.BitUtil;
import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.PriorityQueue;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LongValues;
+import org.apache.solr.common.SolrException;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.schema.SchemaField;
-import org.apache.solr.search.DocIterator;
+import org.apache.solr.search.DocSetUtil;
/**
- * Facets numbers into a hash table.
- * It currently only works with {@link NumericDocValues} (single-valued).
+ * Facets numbers into a hash table. The number is either a raw numeric DocValues value, or
+ * a term global ordinal integer.
+ * Limitations:
+ * <ul>
+ * <li>doesn't handle multiValued, but could easily be added</li>
+ * <li>doesn't handle prefix, but could easily be added</li>
+ * <li>doesn't handle mincount==0 -- you're better off with an array alg</li>
+ * </ul>
*/
+// TODO rename: FacetFieldProcessorByHashDV
class FacetFieldProcessorByHashNumeric extends FacetFieldProcessor {
static int MAXIMUM_STARTING_TABLE_SIZE=1024; // must be a power of two, non-final to support setting by tests
@@ -44,7 +56,6 @@ class FacetFieldProcessorByHashNumeric extends FacetFieldProcessor {
static final float LOAD_FACTOR = 0.7f;
- long numAdds;
long[] vals;
int[] counts; // maintain the counts here since we need them to tell if there was actually a value anyway
int[] oldToNewMapping;
@@ -82,7 +93,6 @@ class FacetFieldProcessorByHashNumeric extends FacetFieldProcessor {
rehash();
}
- numAdds++;
int h = hash(val);
for (int slot = h & (vals.length-1); ;slot = (slot + ((h>>7)|1)) & (vals.length-1)) {
int count = counts[slot];
@@ -135,29 +145,93 @@ class FacetFieldProcessorByHashNumeric extends FacetFieldProcessor {
}
+ /** A hack instance of Calc for Term ordinals in DocValues. */
+ // TODO consider making FacetRangeProcessor.Calc facet top level; then less of a hack?
+ private class TermOrdCalc extends FacetRangeProcessor.Calc {
+
+ IntFunction<BytesRef> lookupOrdFunction; // set in collectDocs()!
+
+ TermOrdCalc() throws IOException {
+ super(sf);
+ }
+
+ @Override
+ public long bitsToSortableBits(long globalOrd) {
+ return globalOrd;
+ }
+
+ /** To be returned in "buckets"/"val" */
+ @Override
+ public Comparable bitsToValue(long globalOrd) {
+ BytesRef bytesRef = lookupOrdFunction.apply((int) globalOrd);
+ // note FacetFieldProcessorByArray.findTopSlots also calls SchemaFieldType.toObject
+ return sf.getType().toObject(sf, bytesRef).toString();
+ }
+
+ @Override
+ public String formatValue(Comparable val) {
+ return (String) val;
+ }
+
+ @Override
+ protected Comparable parseStr(String rawval) throws ParseException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ protected Comparable parseAndAddGap(Comparable value, String gap) throws ParseException {
+ throw new UnsupportedOperationException();
+ }
+
+ }
+
+ FacetRangeProcessor.Calc calc;
+ LongCounts table;
int allBucketsSlot = -1;
FacetFieldProcessorByHashNumeric(FacetContext fcontext, FacetField freq, SchemaField sf) {
super(fcontext, freq, sf);
+ if (freq.mincount == 0) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+ getClass()+" doesn't support mincount=0");
+ }
+ if (freq.prefix != null) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+ getClass()+" doesn't support prefix"); // yet, but it could
+ }
+ FieldInfo fieldInfo = fcontext.searcher.getLeafReader().getFieldInfos().fieldInfo(sf.getName());
+ if (fieldInfo != null &&
+ fieldInfo.getDocValuesType() != DocValuesType.NUMERIC &&
+ fieldInfo.getDocValuesType() != DocValuesType.SORTED) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+ getClass()+" only support single valued number/string with docValues");
+ }
}
@Override
public void process() throws IOException {
super.process();
response = calcFacets();
+ table = null; // gc
}
private SimpleOrderedMap<Object> calcFacets() throws IOException {
- final FacetRangeProcessor.Calc calc = FacetRangeProcessor.getNumericCalc(sf);
+ if (sf.getType().getNumericType() != null) {
+ calc = FacetRangeProcessor.getNumericCalc(sf);
+ } else {
+ calc = new TermOrdCalc(); // kind of a hack
+ }
- // TODO: it would be really nice to know the number of unique values!!!!
+ // TODO: Use the number of indexed terms, if present, as an estimate!
+ // Even for NumericDocValues, we could check for a terms index for an estimate.
+ // Our estimation should aim high to avoid expensive rehashes.
int possibleValues = fcontext.base.size();
// size smaller tables so that no resize will be necessary
int currHashSize = BitUtil.nextHighestPowerOfTwo((int) (possibleValues * (1 / LongCounts.LOAD_FACTOR) + 1));
currHashSize = Math.min(currHashSize, MAXIMUM_STARTING_TABLE_SIZE);
- final LongCounts table = new LongCounts(currHashSize) {
+ table = new LongCounts(currHashSize) {
@Override
protected void rehash() {
super.rehash();
@@ -166,9 +240,19 @@ class FacetFieldProcessorByHashNumeric extends FacetFieldProcessor {
}
};
- int numSlots = currHashSize;
+ // note: these methods/phases align with FacetFieldProcessorByArray's
- int numMissing = 0;
+ createCollectAcc();
+
+ collectDocs();
+
+ return super.findTopSlots(table.numSlots(), table.cardinality(),
+ slotNum -> calc.bitsToValue(table.vals[slotNum]), // getBucketValFromSlotNum
+ val -> calc.formatValue(val)); // getFieldQueryVal
+ }
+
+ private void createCollectAcc() throws IOException {
+ int numSlots = table.numSlots();
if (freq.allBuckets) {
allBucketsSlot = numSlots++;
@@ -238,160 +322,80 @@ class FacetFieldProcessorByHashNumeric extends FacetFieldProcessor {
};
// we set the countAcc & indexAcc first so generic ones won't be created for us.
- createCollectAcc(fcontext.base.size(), numSlots);
+ super.createCollectAcc(fcontext.base.size(), numSlots);
if (freq.allBuckets) {
allBucketsAcc = new SpecialSlotAcc(fcontext, collectAcc, allBucketsSlot, otherAccs, 0);
}
+ }
- NumericDocValues values = null;
- Bits docsWithField = null;
-
- // TODO: factor this code out so it can be shared...
- final List<LeafReaderContext> leaves = fcontext.searcher.getIndexReader().leaves();
- final Iterator<LeafReaderContext> ctxIt = leaves.iterator();
- LeafReaderContext ctx = null;
- int segBase = 0;
- int segMax;
- int adjustedMax = 0;
- for (DocIterator docsIt = fcontext.base.iterator(); docsIt.hasNext(); ) {
- final int doc = docsIt.nextDoc();
- if (doc >= adjustedMax) {
- do {
- ctx = ctxIt.next();
- segBase = ctx.docBase;
- segMax = ctx.reader().maxDoc();
- adjustedMax = segBase + segMax;
- } while (doc >= adjustedMax);
- assert doc >= ctx.docBase;
- setNextReaderFirstPhase(ctx);
-
- values = DocValues.getNumeric(ctx.reader(), sf.getName());
- docsWithField = DocValues.getDocsWithField(ctx.reader(), sf.getName());
- }
-
- int segDoc = doc - segBase;
- long val = values.get(segDoc);
- if (val != 0 || docsWithField.get(segDoc)) {
- int slot = table.add(val); // this can trigger a rehash rehash
-
- // countAcc.incrementCount(slot, 1);
- // our countAcc is virtual, so this is not needed
-
- collectFirstPhase(segDoc, slot);
- }
- }
-
- //
- // collection done, time to find the top slots
- //
-
- int numBuckets = 0;
- List<Object> bucketVals = null;
- if (freq.numBuckets && fcontext.isShard()) {
- bucketVals = new ArrayList<>(100);
- }
-
- int off = fcontext.isShard() ? 0 : (int) freq.offset;
- // add a modest amount of over-request if this is a shard request
- int lim = freq.limit >= 0 ? (fcontext.isShard() ? (int)(freq.limit*1.1+4) : (int)freq.limit) : Integer.MAX_VALUE;
-
- int maxsize = (int)(freq.limit >= 0 ? freq.offset + lim : Integer.MAX_VALUE - 1);
- maxsize = Math.min(maxsize, table.cardinality);
-
- final int sortMul = freq.sortDirection.getMultiplier();
-
- PriorityQueue<Slot> queue = new PriorityQueue<Slot>(maxsize) {
- @Override
- protected boolean lessThan(Slot a, Slot b) {
- // TODO: sort-by-index-order
- int cmp = sortAcc.compare(a.slot, b.slot) * sortMul;
- return cmp == 0 ? (indexOrderAcc.compare(a.slot, b.slot) > 0) : cmp < 0;
- }
- };
-
- // TODO: create a countAcc that wrapps the table so we can reuse more code?
-
- Slot bottom = null;
- for (int i=0; i<table.counts.length; i++) {
- int count = table.counts[i];
- if (count < effectiveMincount) {
- // either not a valid slot, or count not high enough
- continue;
- }
- numBuckets++; // can be different from the table cardinality if mincount > 1
-
- long val = table.vals[i];
- if (bucketVals != null && bucketVals.size()<100) {
- bucketVals.add( calc.bitsToValue(val) );
- }
-
- if (bottom == null) {
- bottom = new Slot();
- }
- bottom.slot = i;
-
- bottom = queue.insertWithOverflow(bottom);
- }
-
- SimpleOrderedMap<Object> res = new SimpleOrderedMap<>();
- if (freq.numBuckets) {
- if (!fcontext.isShard()) {
- res.add("numBuckets", numBuckets);
- } else {
- SimpleOrderedMap<Object> map = new SimpleOrderedMap<>(2);
- map.add("numBuckets", numBuckets);
- map.add("vals", bucketVals);
- res.add("numBuckets", map);
- }
- }
-
- FacetDebugInfo fdebug = fcontext.getDebugInfo();
- if (fdebug != null) fdebug.putInfoItem("numBuckets", (long) numBuckets);
-
- if (freq.allBuckets) {
- SimpleOrderedMap<Object> allBuckets = new SimpleOrderedMap<>();
- // countAcc.setValues(allBuckets, allBucketsSlot);
- allBuckets.add("count", table.numAdds);
- allBucketsAcc.setValues(allBuckets, -1);
- // allBuckets currently doesn't execute sub-facets (because it doesn't change the domain?)
- res.add("allBuckets", allBuckets);
- }
-
- if (freq.missing) {
- // TODO: it would be more efficient to buid up a missing DocSet if we need it here anyway.
-
- SimpleOrderedMap<Object> missingBucket = new SimpleOrderedMap<>();
- fillBucket(missingBucket, getFieldMissingQuery(fcontext.searcher, freq.field), null);
- res.add("missing", missingBucket);
- }
-
- // if we are deep paging, we don't have to order the highest "offset" counts.
- int collectCount = Math.max(0, queue.size() - off);
- assert collectCount <= lim;
- int[] sortedSlots = new int[collectCount];
- for (int i = collectCount - 1; i >= 0; i--) {
- sortedSlots[i] = queue.pop().slot;
+ private void collectDocs() throws IOException {
+ if (calc instanceof TermOrdCalc) { // Strings
+
+ // TODO support SortedSetDocValues
+ SortedDocValues globalDocValues = FieldUtil.getSortedDocValues(fcontext.qcontext, sf, null);
+ ((TermOrdCalc)calc).lookupOrdFunction = globalDocValues::lookupOrd;
+
+ DocSetUtil.collectSortedDocSet(fcontext.base, fcontext.searcher.getIndexReader(), new SimpleCollector() {
+ SortedDocValues docValues = globalDocValues; // this segment/leaf. NN
+ LongValues toGlobal = LongValues.IDENTITY; // this segment to global ordinal. NN
+
+ @Override public boolean needsScores() { return false; }
+
+ @Override
+ protected void doSetNextReader(LeafReaderContext ctx) throws IOException {
+ setNextReaderFirstPhase(ctx);
+ if (globalDocValues instanceof MultiDocValues.MultiSortedDocValues) {
+ MultiDocValues.MultiSortedDocValues multiDocValues = (MultiDocValues.MultiSortedDocValues) globalDocValues;
+ docValues = multiDocValues.values[ctx.ord];
+ toGlobal = multiDocValues.mapping.getGlobalOrds(ctx.ord);
+ }
+ }
+
+ @Override
+ public void collect(int segDoc) throws IOException {
+ long ord = docValues.getOrd(segDoc);
+ if (ord != -1) {
+ long val = toGlobal.get(ord);
+ collectValFirstPhase(segDoc, val);
+ }
+ }
+ });
+
+ } else { // Numeric:
+
+ // TODO support SortedNumericDocValues
+ DocSetUtil.collectSortedDocSet(fcontext.base, fcontext.searcher.getIndexReader(), new SimpleCollector() {
+ NumericDocValues values = null; //NN
+ Bits docsWithField = null; //NN
+
+ @Override public boolean needsScores() { return false; }
+
+ @Override
+ protected void doSetNextReader(LeafReaderContext ctx) throws IOException {
+ setNextReaderFirstPhase(ctx);
+ values = DocValues.getNumeric(ctx.reader(), sf.getName());
+ docsWithField = DocValues.getDocsWithField(ctx.reader(), sf.getName());
+ }
+
+ @Override
+ public void collect(int segDoc) throws IOException {
+ long val = values.get(segDoc);
+ if (val != 0 || docsWithField.get(segDoc)) {
+ collectValFirstPhase(segDoc, val);
+ }
+ }
+ });
}
+ }
- ArrayList<SimpleOrderedMap> bucketList = new ArrayList<>(collectCount);
- res.add("buckets", bucketList);
-
- boolean needFilter = deferredAggs != null || freq.getSubFacets().size() > 0;
-
- for (int slotNum : sortedSlots) {
- SimpleOrderedMap<Object> bucket = new SimpleOrderedMap<>();
- Comparable val = calc.bitsToValue(table.vals[slotNum]);
- bucket.add("val", val);
-
- Query filter = needFilter ? sf.getType().getFieldQuery(null, sf, calc.formatValue(val)) : null;
-
- fillBucket(bucket, table.counts[slotNum], slotNum, null, filter);
+ private void collectValFirstPhase(int segDoc, long val) throws IOException {
+ int slot = table.add(val); // this can trigger a rehash
- bucketList.add(bucket);
- }
+ // Our countAcc is virtual, so this is not needed:
+ // countAcc.incrementCount(slot, 1);
- return res;
+ super.collectFirstPhase(segDoc, slot);
}
private void doRehash(LongCounts table) {
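
The LongCounts table used above is an open-addressed hash over parallel vals/counts arrays. A simplified sketch of its probing scheme (the mixing function is illustrative and rehashing is omitted; the real table rehashes before the load factor is exceeded, so a free slot always exists):

    // Table length must be a power of two; counts[slot] == 0 marks an empty slot.
    static int add(long val, long[] vals, int[] counts) {
      int h = (int) (val + (val >>> 32)); // illustrative mix of both halves
      int step = (h >> 7) | 1;            // odd step visits every slot of a 2^n table
      for (int slot = h & (vals.length - 1); ; slot = (slot + step) & (vals.length - 1)) {
        if (counts[slot] == 0) {          // empty: claim the slot
          vals[slot] = val;
          counts[slot] = 1;
          return slot;
        } else if (vals[slot] == val) {   // existing entry: bump its count
          counts[slot]++;
          return slot;
        }
      }
    }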
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7b5df8a1/solr/core/src/java/org/apache/solr/search/facet/SlotAcc.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/facet/SlotAcc.java b/solr/core/src/java/org/apache/solr/search/facet/SlotAcc.java
index 37b9d9b..de1636e 100644
--- a/solr/core/src/java/org/apache/solr/search/facet/SlotAcc.java
+++ b/solr/core/src/java/org/apache/solr/search/facet/SlotAcc.java
@@ -32,7 +32,12 @@ import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
-
+/**
+ * Accumulates statistics separated by a slot number.
+ * There is a separate statistic per slot. The slot is usually an ordinal into a set of values, e.g. tracking a count
+ * frequency <em>per term</em>.
+ * Sometimes there doesn't need to be a slot distinction, in which case there is just one nominal slot.
+ */
public abstract class SlotAcc implements Closeable {
String key; // todo...
protected final FacetContext fcontext;
@@ -210,9 +215,7 @@ abstract class DoubleFuncSlotAcc extends FuncSlotAcc {
@Override
public void reset() {
- for (int i=0; i<result.length; i++) {
- result[i] = initialValue;
- }
+ Arrays.fill(result, initialValue);
}
@Override
@@ -246,9 +249,7 @@ abstract class IntSlotAcc extends SlotAcc {
@Override
public void reset() {
- for (int i=0; i<result.length; i++) {
- result[i] = initialValue;
- }
+ Arrays.fill(result, initialValue);
}
@Override
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7b5df8a1/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java b/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java
index 7b5a561..6ab25bb 100644
--- a/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java
+++ b/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java
@@ -25,6 +25,7 @@ import java.util.List;
import java.util.Map;
import java.util.Random;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import com.tdunning.math.stats.AVLTreeDigest;
import org.apache.solr.common.SolrException;
import org.apache.solr.util.hll.HLL;
@@ -43,12 +44,19 @@ public class TestJsonFacets extends SolrTestCaseHS {
private static SolrInstances servers; // for distributed testing
private static int origTableSize;
+ private static FacetField.FacetMethod origDefaultFacetMethod;
@BeforeClass
public static void beforeTests() throws Exception {
JSONTestUtil.failRepeatedKeys = true;
+
origTableSize = FacetFieldProcessorByHashNumeric.MAXIMUM_STARTING_TABLE_SIZE;
FacetFieldProcessorByHashNumeric.MAXIMUM_STARTING_TABLE_SIZE=2; // stress test resizing
+
+ origDefaultFacetMethod = FacetField.FacetMethod.DEFAULT_METHOD;
+ // instead of the following, see the constructor
+ //FacetField.FacetMethod.DEFAULT_METHOD = rand(FacetField.FacetMethod.values());
+
initCore("solrconfig-tlog.xml","schema_latest.xml");
}
@@ -62,12 +70,25 @@ public class TestJsonFacets extends SolrTestCaseHS {
public static void afterTests() throws Exception {
JSONTestUtil.failRepeatedKeys = false;
FacetFieldProcessorByHashNumeric.MAXIMUM_STARTING_TABLE_SIZE=origTableSize;
+ FacetField.FacetMethod.DEFAULT_METHOD = origDefaultFacetMethod;
if (servers != null) {
servers.stop();
servers = null;
}
}
+ // tip: when debugging a test, comment out the @ParametersFactory and edit the constructor to be no-arg
+
+ @ParametersFactory
+ public static Iterable<Object[]> parameters() {
+ // wrap each enum val in an Object[] and return as Iterable
+ return () -> Arrays.stream(FacetField.FacetMethod.values()).map(it -> new Object[]{it}).iterator();
+ }
+
+ public TestJsonFacets(FacetField.FacetMethod defMethod) {
+ FacetField.FacetMethod.DEFAULT_METHOD = defMethod; // note: the real default is restored in afterTests
+ }
+
// attempt to reproduce https://github.com/Heliosearch/heliosearch/issues/33
@Test
public void testComplex() throws Exception {
@@ -180,8 +201,8 @@ public class TestJsonFacets extends SolrTestCaseHS {
client.commit();
}
-
- public void testStatsSimple() throws Exception {
+ @Test
+ public void testMethodStream() throws Exception {
Client client = Client.localClient();
indexSimple(client);
@@ -196,15 +217,15 @@ public class TestJsonFacets extends SolrTestCaseHS {
// test streaming
assertJQ(req("q", "*:*", "rows", "0"
- , "json.facet", "{ cat:{terms:{field:'cat_s', method:stream }}" +
- ", cat2:{terms:{field:'cat_s', method:stream, sort:'index asc' }}" + // default sort
- ", cat3:{terms:{field:'cat_s', method:stream, mincount:3 }}" + // mincount
- ", cat4:{terms:{field:'cat_s', method:stream, prefix:B }}" + // prefix
- ", cat5:{terms:{field:'cat_s', method:stream, offset:1 }}" + // offset
+ , "json.facet", "{ cat:{terms:{field:'cat_s', method:stream }}" + // won't stream; need sort:index asc
+ ", cat2:{terms:{field:'cat_s', method:stream, sort:'index asc' }}" +
+ ", cat3:{terms:{field:'cat_s', method:stream, sort:'index asc', mincount:3 }}" + // mincount
+ ", cat4:{terms:{field:'cat_s', method:stream, sort:'index asc', prefix:B }}" + // prefix
+ ", cat5:{terms:{field:'cat_s', method:stream, sort:'index asc', offset:1 }}" + // offset
" }"
)
, "facets=={count:6 " +
- ", cat :{buckets:[{val:A, count:2},{val:B, count:3}]}" +
+ ", cat :{buckets:[{val:B, count:3},{val:A, count:2}]}" +
", cat2:{buckets:[{val:A, count:2},{val:B, count:3}]}" +
", cat3:{buckets:[{val:B, count:3}]}" +
", cat4:{buckets:[{val:B, count:3}]}" +
@@ -215,7 +236,7 @@ public class TestJsonFacets extends SolrTestCaseHS {
// test nested streaming under non-streaming
assertJQ(req("q", "*:*", "rows", "0"
- , "json.facet", "{ cat:{terms:{field:'cat_s', sort:'index asc', facet:{where:{terms:{field:where_s,method:stream}}} }}}"
+ , "json.facet", "{ cat:{terms:{field:'cat_s', sort:'index asc', facet:{where:{terms:{field:where_s,method:stream,sort:'index asc'}}} }}}"
)
, "facets=={count:6 " +
", cat :{buckets:[{val:A, count:2, where:{buckets:[{val:NJ,count:1},{val:NY,count:1}]} },{val:B, count:3, where:{buckets:[{val:NJ,count:2},{val:NY,count:1}]} }]}"
@@ -224,7 +245,7 @@ public class TestJsonFacets extends SolrTestCaseHS {
// test nested streaming under streaming
assertJQ(req("q", "*:*", "rows", "0"
- , "json.facet", "{ cat:{terms:{field:'cat_s', method:stream, facet:{where:{terms:{field:where_s,method:stream}}} }}}"
+ , "json.facet", "{ cat:{terms:{field:'cat_s', method:stream,sort:'index asc', facet:{where:{terms:{field:where_s,method:stream,sort:'index asc'}}} }}}"
)
, "facets=={count:6 " +
", cat :{buckets:[{val:A, count:2, where:{buckets:[{val:NJ,count:1},{val:NY,count:1}]} },{val:B, count:3, where:{buckets:[{val:NJ,count:2},{val:NY,count:1}]} }]}"
@@ -233,7 +254,7 @@ public class TestJsonFacets extends SolrTestCaseHS {
// test nested streaming with stats under streaming
assertJQ(req("q", "*:*", "rows", "0"
- , "json.facet", "{ cat:{terms:{field:'cat_s', method:stream, facet:{ where:{terms:{field:where_s,method:stream, facet:{x:'max(num_d)'} }}} }}}"
+ , "json.facet", "{ cat:{terms:{field:'cat_s', method:stream,sort:'index asc', facet:{ where:{terms:{field:where_s,method:stream,sort:'index asc',sort:'index asc', facet:{x:'max(num_d)'} }}} }}}"
)
, "facets=={count:6 " +
", cat :{buckets:[{val:A, count:2, where:{buckets:[{val:NJ,count:1,x:2.0},{val:NY,count:1,x:4.0}]} },{val:B, count:3, where:{buckets:[{val:NJ,count:2,x:11.0},{val:NY,count:1,x:-5.0}]} }]}"
@@ -243,7 +264,7 @@ public class TestJsonFacets extends SolrTestCaseHS {
// test nested streaming with stats under streaming with stats
assertJQ(req("q", "*:*", "rows", "0",
"facet","true"
- , "json.facet", "{ cat:{terms:{field:'cat_s', method:stream, facet:{ y:'min(num_d)', where:{terms:{field:where_s,method:stream, facet:{x:'max(num_d)'} }}} }}}"
+ , "json.facet", "{ cat:{terms:{field:'cat_s', method:stream,sort:'index asc', facet:{ y:'min(num_d)', where:{terms:{field:where_s,method:stream,sort:'index asc', facet:{x:'max(num_d)'} }}} }}}"
)
, "facets=={count:6 " +
", cat :{buckets:[{val:A, count:2, y:2.0, where:{buckets:[{val:NJ,count:1,x:2.0},{val:NY,count:1,x:4.0}]} },{val:B, count:3, y:-9.0, where:{buckets:[{val:NJ,count:2,x:11.0},{val:NY,count:1,x:-5.0}]} }]}"
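As the revised assertions make explicit, method:stream only streams buckets in index order, so sort:'index asc' has to accompany it at every nesting level. A request shape that keeps both the outer and the nested facet streamable, using the same field names as the test, would be:

json.facet = { cat : { terms : { field:'cat_s', method:stream, sort:'index asc',
                                 facet : { where : { terms : { field:where_s, method:stream, sort:'index asc' } } } } } }

Without the explicit sort, the facet silently falls back to the default count-ordered, non-streaming pass, which is what the first cat assertion above now documents.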
@@ -294,7 +315,7 @@ public class TestJsonFacets extends SolrTestCaseHS {
}
@Test
- public void testDistrib() throws Exception {
+ public void testStatsDistrib() throws Exception {
initServers();
Client client = servers.getClient(random().nextInt());
client.queryDefaults().set( "shards", servers.getShards(), "debugQuery", Boolean.toString(random().nextBoolean()) );
[05/50] [abbrv] lucene-solr:apiv2: LUCENE-7426: remove deprecated
DataInput/Output map/string methods
Posted by no...@apache.org.
LUCENE-7426: remove deprecated DataInput/Output map/string methods
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/f6253d5e
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/f6253d5e
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/f6253d5e
Branch: refs/heads/apiv2
Commit: f6253d5ed1755b6dd14cdb235d818ade7ac4f0b4
Parents: 81c796a
Author: Robert Muir <rm...@apache.org>
Authored: Thu Aug 25 12:02:01 2016 -0400
Committer: Robert Muir <rm...@apache.org>
Committed: Thu Aug 25 12:02:52 2016 -0400
----------------------------------------------------------------------
.../lucene50/Lucene50SegmentInfoFormat.java | 21 +++------
.../lucene50/Lucene50RWSegmentInfoFormat.java | 21 +++------
.../lucene50/Lucene50FieldInfosFormat.java | 12 ++----
.../org/apache/lucene/index/SegmentInfos.java | 24 ++---------
.../java/org/apache/lucene/store/DataInput.java | 33 --------------
.../org/apache/lucene/store/DataOutput.java | 45 --------------------
.../lucene/store/BaseDirectoryTestCase.java | 30 -------------
.../lucene/store/MockIndexInputWrapper.java | 20 ++++-----
8 files changed, 28 insertions(+), 178 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f6253d5e/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50SegmentInfoFormat.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50SegmentInfoFormat.java b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50SegmentInfoFormat.java
index 5d615ec..69cda34 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50SegmentInfoFormat.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50SegmentInfoFormat.java
@@ -18,7 +18,6 @@ package org.apache.lucene.codecs.lucene50;
import java.io.IOException;
-import java.util.Collections;
import java.util.Map;
import java.util.Set;
@@ -50,7 +49,7 @@ public class Lucene50SegmentInfoFormat extends SegmentInfoFormat {
Throwable priorE = null;
SegmentInfo si = null;
try {
- int format = CodecUtil.checkIndexHeader(input, Lucene50SegmentInfoFormat.CODEC_NAME,
+ CodecUtil.checkIndexHeader(input, Lucene50SegmentInfoFormat.CODEC_NAME,
Lucene50SegmentInfoFormat.VERSION_START,
Lucene50SegmentInfoFormat.VERSION_CURRENT,
segmentID, "");
@@ -62,19 +61,9 @@ public class Lucene50SegmentInfoFormat extends SegmentInfoFormat {
}
final boolean isCompoundFile = input.readByte() == SegmentInfo.YES;
- final Map<String,String> diagnostics;
- final Set<String> files;
- final Map<String,String> attributes;
-
- if (format >= VERSION_SAFE_MAPS) {
- diagnostics = input.readMapOfStrings();
- files = input.readSetOfStrings();
- attributes = input.readMapOfStrings();
- } else {
- diagnostics = Collections.unmodifiableMap(input.readStringStringMap());
- files = Collections.unmodifiableSet(input.readStringSet());
- attributes = Collections.unmodifiableMap(input.readStringStringMap());
- }
+ final Map<String,String> diagnostics = input.readMapOfStrings();
+ final Set<String> files = input.readSetOfStrings();
+ final Map<String,String> attributes = input.readMapOfStrings();
si = new SegmentInfo(dir, version, segment, docCount, isCompoundFile, null, diagnostics, segmentID, attributes, null);
si.setFiles(files);
@@ -95,7 +84,7 @@ public class Lucene50SegmentInfoFormat extends SegmentInfoFormat {
/** File extension used to store {@link SegmentInfo}. */
public final static String SI_EXTENSION = "si";
static final String CODEC_NAME = "Lucene50SegmentInfo";
- static final int VERSION_START = 0;
static final int VERSION_SAFE_MAPS = 1;
+ static final int VERSION_START = VERSION_SAFE_MAPS;
static final int VERSION_CURRENT = VERSION_SAFE_MAPS;
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f6253d5e/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/Lucene50RWSegmentInfoFormat.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/Lucene50RWSegmentInfoFormat.java b/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/Lucene50RWSegmentInfoFormat.java
index 41c817a..965ee96 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/Lucene50RWSegmentInfoFormat.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene50/Lucene50RWSegmentInfoFormat.java
@@ -18,7 +18,6 @@ package org.apache.lucene.codecs.lucene50;
import java.io.IOException;
-import java.util.Collections;
import java.util.Map;
import java.util.Set;
@@ -50,7 +49,7 @@ public class Lucene50RWSegmentInfoFormat extends Lucene50SegmentInfoFormat {
Throwable priorE = null;
SegmentInfo si = null;
try {
- int format = CodecUtil.checkIndexHeader(input, Lucene50SegmentInfoFormat.CODEC_NAME,
+ CodecUtil.checkIndexHeader(input, Lucene50SegmentInfoFormat.CODEC_NAME,
Lucene50SegmentInfoFormat.VERSION_START,
Lucene50SegmentInfoFormat.VERSION_CURRENT,
segmentID, "");
@@ -62,19 +61,9 @@ public class Lucene50RWSegmentInfoFormat extends Lucene50SegmentInfoFormat {
}
final boolean isCompoundFile = input.readByte() == SegmentInfo.YES;
- final Map<String,String> diagnostics;
- final Set<String> files;
- final Map<String,String> attributes;
-
- if (format >= VERSION_SAFE_MAPS) {
- diagnostics = input.readMapOfStrings();
- files = input.readSetOfStrings();
- attributes = input.readMapOfStrings();
- } else {
- diagnostics = Collections.unmodifiableMap(input.readStringStringMap());
- files = Collections.unmodifiableSet(input.readStringSet());
- attributes = Collections.unmodifiableMap(input.readStringStringMap());
- }
+ final Map<String,String> diagnostics = input.readMapOfStrings();
+ final Set<String> files = input.readSetOfStrings();
+ final Map<String,String> attributes = input.readMapOfStrings();
si = new SegmentInfo(dir, version, segment, docCount, isCompoundFile, null, diagnostics, segmentID, attributes, null);
si.setFiles(files);
@@ -130,7 +119,7 @@ public class Lucene50RWSegmentInfoFormat extends Lucene50SegmentInfoFormat {
/** File extension used to store {@link SegmentInfo}. */
public final static String SI_EXTENSION = "si";
static final String CODEC_NAME = "Lucene50SegmentInfo";
- static final int VERSION_START = 0;
static final int VERSION_SAFE_MAPS = 1;
+ static final int VERSION_START = VERSION_SAFE_MAPS;
static final int VERSION_CURRENT = VERSION_SAFE_MAPS;
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f6253d5e/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50FieldInfosFormat.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50FieldInfosFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50FieldInfosFormat.java
index 35931de..a76bfeb 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50FieldInfosFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50FieldInfosFormat.java
@@ -112,7 +112,7 @@ public final class Lucene50FieldInfosFormat extends FieldInfosFormat {
Throwable priorE = null;
FieldInfo infos[] = null;
try {
- int format = CodecUtil.checkIndexHeader(input, Lucene50FieldInfosFormat.CODEC_NAME,
+ CodecUtil.checkIndexHeader(input, Lucene50FieldInfosFormat.CODEC_NAME,
Lucene50FieldInfosFormat.FORMAT_START,
Lucene50FieldInfosFormat.FORMAT_CURRENT,
segmentInfo.getId(), segmentSuffix);
@@ -139,12 +139,8 @@ public final class Lucene50FieldInfosFormat extends FieldInfosFormat {
// DV Types are packed in one byte
final DocValuesType docValuesType = getDocValuesType(input, input.readByte());
final long dvGen = input.readLong();
- Map<String,String> attributes;
- if (format >= FORMAT_SAFE_MAPS) {
- attributes = input.readMapOfStrings();
- } else {
- attributes = Collections.unmodifiableMap(input.readStringStringMap());
- }
+ Map<String,String> attributes = input.readMapOfStrings();
+
// just use the last field's map if it's the same
if (attributes.equals(lastAttributes)) {
attributes = lastAttributes;
@@ -288,8 +284,8 @@ public final class Lucene50FieldInfosFormat extends FieldInfosFormat {
// Codec header
static final String CODEC_NAME = "Lucene50FieldInfos";
- static final int FORMAT_START = 0;
static final int FORMAT_SAFE_MAPS = 1;
+ static final int FORMAT_START = FORMAT_SAFE_MAPS;
static final int FORMAT_CURRENT = FORMAT_SAFE_MAPS;
// Field flags
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f6253d5e/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java b/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java
index d87fc84..8f627cd 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java
@@ -119,10 +119,6 @@ import org.apache.lucene.util.Version;
*/
public final class SegmentInfos implements Cloneable, Iterable<SegmentCommitInfo> {
- /** The file format version for the segments_N codec header, since 5.0+ */
- public static final int VERSION_50 = 4;
- /** The file format version for the segments_N codec header, since 5.1+ */
- public static final int VERSION_51 = 5; // use safe maps
/** Adds the {@link Version} that committed this segments_N file, as well as the {@link Version} of the oldest segment, since 5.3+ */
public static final int VERSION_53 = 6;
@@ -294,7 +290,7 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentCommitInfo
if (magic != CodecUtil.CODEC_MAGIC) {
throw new IndexFormatTooOldException(input, magic, CodecUtil.CODEC_MAGIC, CodecUtil.CODEC_MAGIC);
}
- int format = CodecUtil.checkHeaderNoMagic(input, "segments", VERSION_50, VERSION_CURRENT);
+ int format = CodecUtil.checkHeaderNoMagic(input, "segments", VERSION_53, VERSION_CURRENT);
byte id[] = new byte[StringHelper.ID_LENGTH];
input.readBytes(id, 0, id.length);
CodecUtil.checkIndexHeaderSuffix(input, Long.toString(generation, Character.MAX_RADIX));
@@ -351,11 +347,7 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentCommitInfo
long fieldInfosGen = input.readLong();
long dvGen = input.readLong();
SegmentCommitInfo siPerCommit = new SegmentCommitInfo(info, delCount, delGen, fieldInfosGen, dvGen);
- if (format >= VERSION_51) {
- siPerCommit.setFieldInfosFiles(input.readSetOfStrings());
- } else {
- siPerCommit.setFieldInfosFiles(Collections.unmodifiableSet(input.readStringSet()));
- }
+ siPerCommit.setFieldInfosFiles(input.readSetOfStrings());
final Map<Integer,Set<String>> dvUpdateFiles;
final int numDVFields = input.readInt();
if (numDVFields == 0) {
@@ -363,11 +355,7 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentCommitInfo
} else {
Map<Integer,Set<String>> map = new HashMap<>(numDVFields);
for (int i = 0; i < numDVFields; i++) {
- if (format >= VERSION_51) {
- map.put(input.readInt(), input.readSetOfStrings());
- } else {
- map.put(input.readInt(), Collections.unmodifiableSet(input.readStringSet()));
- }
+ map.put(input.readInt(), input.readSetOfStrings());
}
dvUpdateFiles = Collections.unmodifiableMap(map);
}
@@ -381,11 +369,7 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentCommitInfo
}
}
- if (format >= VERSION_51) {
- infos.userData = input.readMapOfStrings();
- } else {
- infos.userData = Collections.unmodifiableMap(input.readStringStringMap());
- }
+ infos.userData = input.readMapOfStrings();
CodecUtil.checkFooter(input);
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f6253d5e/lucene/core/src/java/org/apache/lucene/store/DataInput.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/store/DataInput.java b/lucene/core/src/java/org/apache/lucene/store/DataInput.java
index 037211f..d834995 100644
--- a/lucene/core/src/java/org/apache/lucene/store/DataInput.java
+++ b/lucene/core/src/java/org/apache/lucene/store/DataInput.java
@@ -255,24 +255,6 @@ public abstract class DataInput implements Cloneable {
throw new Error("This cannot happen: Failing to clone DataInput");
}
}
-
- /** Reads a Map<String,String> previously written
- * with {@link DataOutput#writeStringStringMap(Map)}.
- * @deprecated Only for reading existing formats. Encode maps with
- * {@link DataOutput#writeMapOfStrings(Map)} instead.
- */
- @Deprecated
- public Map<String,String> readStringStringMap() throws IOException {
- final Map<String,String> map = new HashMap<>();
- final int count = readInt();
- for(int i=0;i<count;i++) {
- final String key = readString();
- final String val = readString();
- map.put(key, val);
- }
-
- return map;
- }
/**
* Reads a Map<String,String> previously written
@@ -295,21 +277,6 @@ public abstract class DataInput implements Cloneable {
return Collections.unmodifiableMap(map);
}
}
-
- /** Reads a Set<String> previously written
- * with {@link DataOutput#writeStringSet(Set)}.
- * @deprecated Only for reading existing formats. Encode maps with
- * {@link DataOutput#writeSetOfStrings(Set)} instead. */
- @Deprecated
- public Set<String> readStringSet() throws IOException {
- final Set<String> set = new HashSet<>();
- final int count = readInt();
- for(int i=0;i<count;i++) {
- set.add(readString());
- }
-
- return set;
- }
/**
* Reads a Set<String> previously written
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f6253d5e/lucene/core/src/java/org/apache/lucene/store/DataOutput.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/store/DataOutput.java b/lucene/core/src/java/org/apache/lucene/store/DataOutput.java
index 1f6dd74..9c11249 100644
--- a/lucene/core/src/java/org/apache/lucene/store/DataOutput.java
+++ b/lucene/core/src/java/org/apache/lucene/store/DataOutput.java
@@ -283,29 +283,6 @@ public abstract class DataOutput {
/**
* Writes a String map.
* <p>
- * First the size is written as an {@link #writeInt(int) Int32},
- * followed by each key-value pair written as two consecutive
- * {@link #writeString(String) String}s.
- *
- * @param map Input map. May be null (equivalent to an empty map)
- * @deprecated Use {@link #writeMapOfStrings(Map)} instead.
- */
- @Deprecated
- public void writeStringStringMap(Map<String,String> map) throws IOException {
- if (map == null) {
- writeInt(0);
- } else {
- writeInt(map.size());
- for(final Map.Entry<String, String> entry: map.entrySet()) {
- writeString(entry.getKey());
- writeString(entry.getValue());
- }
- }
- }
-
- /**
- * Writes a String map.
- * <p>
* First the size is written as an {@link #writeVInt(int) vInt},
* followed by each key-value pair written as two consecutive
* {@link #writeString(String) String}s.
@@ -320,28 +297,6 @@ public abstract class DataOutput {
writeString(entry.getValue());
}
}
-
- /**
- * Writes a String set.
- * <p>
- * First the size is written as an {@link #writeInt(int) Int32},
- * followed by each value written as a
- * {@link #writeString(String) String}.
- *
- * @param set Input set. May be null (equivalent to an empty set)
- * @deprecated Use {@link #writeMapOfStrings(Map)} instead.
- */
- @Deprecated
- public void writeStringSet(Set<String> set) throws IOException {
- if (set == null) {
- writeInt(0);
- } else {
- writeInt(set.size());
- for(String value : set) {
- writeString(value);
- }
- }
- }
/**
* Writes a String set.
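With the Int32-prefixed writeStringStringMap/writeStringSet codepaths gone, the vInt-prefixed encodings are the only ones left. A minimal round trip through the surviving methods (a sketch assuming Lucene 6.x store APIs and an in-memory RAMDirectory):

import java.util.Collections;
import java.util.Map;
import org.apache.lucene.store.*;

public class MapRoundTrip {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    try (IndexOutput out = dir.createOutput("demo", IOContext.DEFAULT)) {
      // size as a vInt, then each key and value as consecutive Strings
      out.writeMapOfStrings(Collections.singletonMap("key", "value"));
    }
    try (IndexInput in = dir.openInput("demo", IOContext.DEFAULT)) {
      Map<String,String> m = in.readMapOfStrings(); // returns an unmodifiable map
      System.out.println(m);
    }
    dir.close();
  }
}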
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f6253d5e/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryTestCase.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryTestCase.java
index 1aae60b..2860ff9 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryTestCase.java
@@ -290,36 +290,6 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase {
dir.close();
}
- public void testStringSet() throws Exception {
- Directory dir = getDirectory(createTempDir("testStringSet"));
- IndexOutput output = dir.createOutput("stringset", newIOContext(random()));
- output.writeStringSet(asSet("test1", "test2"));
- output.close();
-
- IndexInput input = dir.openInput("stringset", newIOContext(random()));
- assertEquals(16, input.length());
- assertEquals(asSet("test1", "test2"), input.readStringSet());
- input.close();
- dir.close();
- }
-
- public void testStringMap() throws Exception {
- Map<String,String> m = new HashMap<>();
- m.put("test1", "value1");
- m.put("test2", "value2");
-
- Directory dir = getDirectory(createTempDir("testStringMap"));
- IndexOutput output = dir.createOutput("stringmap", newIOContext(random()));
- output.writeStringStringMap(m);
- output.close();
-
- IndexInput input = dir.openInput("stringmap", newIOContext(random()));
- assertEquals(30, input.length());
- assertEquals(m, input.readStringStringMap());
- input.close();
- dir.close();
- }
-
public void testSetOfStrings() throws Exception {
Directory dir = getDirectory(createTempDir("testSetOfStrings"));
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f6253d5e/lucene/test-framework/src/java/org/apache/lucene/store/MockIndexInputWrapper.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/MockIndexInputWrapper.java b/lucene/test-framework/src/java/org/apache/lucene/store/MockIndexInputWrapper.java
index f68e18c..515c0dc 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/store/MockIndexInputWrapper.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/store/MockIndexInputWrapper.java
@@ -178,12 +178,6 @@ public class MockIndexInputWrapper extends IndexInput {
}
@Override
- public Map<String,String> readStringStringMap() throws IOException {
- ensureOpen();
- return delegate.readStringStringMap();
- }
-
- @Override
public int readVInt() throws IOException {
ensureOpen();
return delegate.readVInt();
@@ -208,15 +202,21 @@ public class MockIndexInputWrapper extends IndexInput {
}
@Override
- public Set<String> readStringSet() throws IOException {
+ public void skipBytes(long numBytes) throws IOException {
+ ensureOpen();
+ super.skipBytes(numBytes);
+ }
+
+ @Override
+ public Map<String,String> readMapOfStrings() throws IOException {
ensureOpen();
- return delegate.readStringSet();
+ return delegate.readMapOfStrings();
}
@Override
- public void skipBytes(long numBytes) throws IOException {
+ public Set<String> readSetOfStrings() throws IOException {
ensureOpen();
- super.skipBytes(numBytes);
+ return delegate.readSetOfStrings();
}
@Override
[26/50] [abbrv] lucene-solr:apiv2: SOLR-9374: Speed up Jmx MBean
retrieval for FieldCache
Posted by no...@apache.org.
SOLR-9374: Speed up Jmx MBean retrieval for FieldCache
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/b1b933eb
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/b1b933eb
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/b1b933eb
Branch: refs/heads/apiv2
Commit: b1b933eb43730a819a37ab0b33d78b09df838b33
Parents: 4f316bc
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Sun Aug 28 01:04:36 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Sun Aug 28 01:04:36 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 2 ++
.../src/java/org/apache/solr/search/SolrFieldCacheMBean.java | 6 ++++--
.../java/org/apache/solr/uninverting/UninvertingReader.java | 4 ++++
3 files changed, 10 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b1b933eb/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index a4f918c..de75a39 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -92,6 +92,8 @@ Bug Fixes
Optimizations
----------------------
+* SOLR-9374: Speed up Jmx MBean retrieval for FieldCache. (Tim Owen via shalin)
+
Other Changes
----------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b1b933eb/solr/core/src/java/org/apache/solr/search/SolrFieldCacheMBean.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/SolrFieldCacheMBean.java b/solr/core/src/java/org/apache/solr/search/SolrFieldCacheMBean.java
index 4c8c0c1..62bc4fa 100644
--- a/solr/core/src/java/org/apache/solr/search/SolrFieldCacheMBean.java
+++ b/solr/core/src/java/org/apache/solr/search/SolrFieldCacheMBean.java
@@ -61,12 +61,14 @@ public class SolrFieldCacheMBean implements JmxAugmentedSolrInfoMBean {
private NamedList getStats(boolean listEntries) {
NamedList stats = new SimpleOrderedMap();
- String[] entries = UninvertingReader.getUninvertedStats();
- stats.add("entries_count", entries.length);
if (listEntries) {
+ String[] entries = UninvertingReader.getUninvertedStats();
+ stats.add("entries_count", entries.length);
for (int i = 0; i < entries.length; i++) {
stats.add("entry#" + i, entries[i]);
}
+ } else {
+ stats.add("entries_count", UninvertingReader.getUninvertedStatsSize());
}
return stats;
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b1b933eb/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java b/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java
index 42b2f76..8d49fcb 100644
--- a/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java
+++ b/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java
@@ -388,4 +388,8 @@ public class UninvertingReader extends FilterLeafReader {
}
return info;
}
+
+ public static int getUninvertedStatsSize() {
+ return FieldCache.DEFAULT.getCacheEntries().length;
+ }
}
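The pattern the commit applies is general: materialize the expensive per-entry listing only when it was actually requested, and report a cheap count otherwise. A sketch (the two helper methods are hypothetical stand-ins, not the UninvertingReader API):

import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;

class LazyCacheStats {
  NamedList<Object> getStats(boolean listEntries) {
    NamedList<Object> stats = new SimpleOrderedMap<>();
    if (listEntries) {
      String[] entries = entrySnapshot();        // expensive: formats every entry
      stats.add("entries_count", entries.length);
      for (int i = 0; i < entries.length; i++) {
        stats.add("entry#" + i, entries[i]);
      }
    } else {
      stats.add("entries_count", entryCount());  // cheap: just the count
    }
    return stats;
  }

  String[] entrySnapshot() { return new String[0]; } // hypothetical stand-in
  int entryCount() { return 0; }                     // hypothetical stand-in
}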
[20/50] [abbrv] lucene-solr:apiv2: SOLR-9445: Admin requests are
retried by CloudSolrClient and LBHttpSolrClient on failure
Posted by no...@apache.org.
SOLR-9445: Admin requests are retried by CloudSolrClient and LBHttpSolrClient on failure
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/ae40929f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/ae40929f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/ae40929f
Branch: refs/heads/apiv2
Commit: ae40929f0b13f5cbf83b0700bab694fd7a65f660
Parents: b3d12d2
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Sat Aug 27 09:08:02 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Sat Aug 27 09:08:02 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 2 +
.../solr/client/solrj/impl/CloudSolrClient.java | 13 +---
.../client/solrj/impl/LBHttpSolrClient.java | 20 +++---
.../apache/solr/common/params/CommonParams.java | 10 +++
.../client/solrj/impl/CloudSolrClientTest.java | 68 ++++++++++++++++++--
5 files changed, 87 insertions(+), 26 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ae40929f/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index b502bf0..2dee6ab 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -79,6 +79,8 @@ Bug Fixes
* SOLR-6744: fl renaming / alias of uniqueKey field generates null pointer exception in SolrCloud configuration
(Mike Drob via Tomás Fernández Löbbe)
+* SOLR-9445: Admin requests are retried by CloudSolrClient and LBHttpSolrClient on failure. (shalin)
+
Optimizations
----------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ae40929f/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
index 1f1c675..958cf14 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
@@ -22,7 +22,6 @@ import java.net.ConnectException;
import java.net.SocketException;
import java.nio.file.Path;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
@@ -85,11 +84,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
-import static org.apache.solr.common.params.CommonParams.AUTHC_PATH;
-import static org.apache.solr.common.params.CommonParams.AUTHZ_PATH;
-import static org.apache.solr.common.params.CommonParams.COLLECTIONS_HANDLER_PATH;
-import static org.apache.solr.common.params.CommonParams.CONFIGSETS_HANDLER_PATH;
-import static org.apache.solr.common.params.CommonParams.CORES_HANDLER_PATH;
+import static org.apache.solr.common.params.CommonParams.ADMIN_PATHS;
/**
* SolrJ client class to communicate with SolrCloud.
@@ -996,12 +991,6 @@ public class CloudSolrClient extends SolrClient {
collection = (reqParams != null) ? reqParams.get("collection", getDefaultCollection()) : getDefaultCollection();
return requestWithRetryOnStaleState(request, 0, collection);
}
- private static final Set<String> ADMIN_PATHS = new HashSet<>(Arrays.asList(
- CORES_HANDLER_PATH,
- COLLECTIONS_HANDLER_PATH,
- CONFIGSETS_HANDLER_PATH,
- AUTHC_PATH,
- AUTHZ_PATH));
/**
* As this class doesn't watch external collections on the client side,
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ae40929f/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java
index c65a328..74b0943 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java
@@ -54,6 +54,8 @@ import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SolrjNamedThreadFactory;
import org.slf4j.MDC;
+import static org.apache.solr.common.params.CommonParams.ADMIN_PATHS;
+
/**
* LBHttpSolrClient or "LoadBalanced HttpSolrClient" is a load balancing wrapper around
* {@link HttpSolrClient}. This is useful when you
@@ -331,7 +333,7 @@ public class LBHttpSolrClient extends SolrClient {
public Rsp request(Req req) throws SolrServerException, IOException {
Rsp rsp = new Rsp();
Exception ex = null;
- boolean isUpdate = req.request instanceof IsUpdateRequest;
+ boolean isNonRetryable = req.request instanceof IsUpdateRequest || ADMIN_PATHS.contains(req.request.getPath());
List<ServerWrapper> skipped = null;
long timeAllowedNano = getTimeAllowedInNanos(req.getRequest());
@@ -362,7 +364,7 @@ public class LBHttpSolrClient extends SolrClient {
MDC.put("LBHttpSolrClient.url", serverStr);
HttpSolrClient client = makeSolrClient(serverStr);
- ex = doRequest(client, req, rsp, isUpdate, false, null);
+ ex = doRequest(client, req, rsp, isNonRetryable, false, null);
if (ex == null) {
return rsp; // SUCCESS
}
@@ -378,7 +380,7 @@ public class LBHttpSolrClient extends SolrClient {
break;
}
- ex = doRequest(wrapper.client, req, rsp, isUpdate, true, wrapper.getKey());
+ ex = doRequest(wrapper.client, req, rsp, isNonRetryable, true, wrapper.getKey());
if (ex == null) {
return rsp; // SUCCESS
}
@@ -405,7 +407,7 @@ public class LBHttpSolrClient extends SolrClient {
return e;
}
- protected Exception doRequest(HttpSolrClient client, Req req, Rsp rsp, boolean isUpdate,
+ protected Exception doRequest(HttpSolrClient client, Req req, Rsp rsp, boolean isNonRetryable,
boolean isZombie, String zombieKey) throws SolrServerException, IOException {
Exception ex = null;
try {
@@ -417,7 +419,7 @@ public class LBHttpSolrClient extends SolrClient {
} catch (SolrException e) {
// we retry on 404 or 403 or 503 or 500
// unless it's an update - then we only retry on connect exception
- if (!isUpdate && RETRY_CODES.contains(e.code())) {
+ if (!isNonRetryable && RETRY_CODES.contains(e.code())) {
ex = (!isZombie) ? addZombie(client, e) : e;
} else {
// Server is alive but the request was likely malformed or invalid
@@ -427,22 +429,22 @@ public class LBHttpSolrClient extends SolrClient {
throw e;
}
} catch (SocketException e) {
- if (!isUpdate || e instanceof ConnectException) {
+ if (!isNonRetryable || e instanceof ConnectException) {
ex = (!isZombie) ? addZombie(client, e) : e;
} else {
throw e;
}
} catch (SocketTimeoutException e) {
- if (!isUpdate) {
+ if (!isNonRetryable) {
ex = (!isZombie) ? addZombie(client, e) : e;
} else {
throw e;
}
} catch (SolrServerException e) {
Throwable rootCause = e.getRootCause();
- if (!isUpdate && rootCause instanceof IOException) {
+ if (!isNonRetryable && rootCause instanceof IOException) {
ex = (!isZombie) ? addZombie(client, e) : e;
- } else if (isUpdate && rootCause instanceof ConnectException) {
+ } else if (isNonRetryable && rootCause instanceof ConnectException) {
ex = (!isZombie) ? addZombie(client, e) : e;
} else {
throw e;
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ae40929f/solr/solrj/src/java/org/apache/solr/common/params/CommonParams.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/params/CommonParams.java b/solr/solrj/src/java/org/apache/solr/common/params/CommonParams.java
index 5ccd70f..b830b41 100644
--- a/solr/solrj/src/java/org/apache/solr/common/params/CommonParams.java
+++ b/solr/solrj/src/java/org/apache/solr/common/params/CommonParams.java
@@ -16,7 +16,10 @@
*/
package org.apache.solr.common.params;
+import java.util.Arrays;
+import java.util.HashSet;
import java.util.Locale;
+import java.util.Set;
/**
@@ -178,6 +181,13 @@ public interface CommonParams {
public static final String AUTHC_PATH = "/admin/authentication";
public static final String ZK_PATH = "/admin/zookeeper";
+ public static final Set<String> ADMIN_PATHS = new HashSet<>(Arrays.asList(
+ CORES_HANDLER_PATH,
+ COLLECTIONS_HANDLER_PATH,
+ CONFIGSETS_HANDLER_PATH,
+ AUTHC_PATH,
+ AUTHZ_PATH));
+
/** valid values for: <code>echoParams</code> */
public enum EchoParamStyle {
EXPLICIT,
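Moving ADMIN_PATHS into CommonParams means any SolrJ code can apply the same guard LBHttpSolrClient now uses. A sketch of that check (the wrapper class is hypothetical; the instanceof and path tests mirror the diff above):

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.request.IsUpdateRequest;
import org.apache.solr.common.params.CommonParams;

final class RetryGuard {
  // Updates and admin requests must not be blind-retried against another
  // node: both can have non-idempotent side effects.
  static boolean isNonRetryable(SolrRequest request) {
    return request instanceof IsUpdateRequest
        || CommonParams.ADMIN_PATHS.contains(request.getPath());
  }
}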
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ae40929f/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
index cf12036..5e8f6ce 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
@@ -22,6 +22,7 @@ import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
@@ -38,11 +39,13 @@ import org.apache.lucene.util.TestUtil;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.client.solrj.response.RequestStatusState;
import org.apache.solr.client.solrj.response.UpdateResponse;
import org.apache.solr.cloud.AbstractDistribZkTestBase;
import org.apache.solr.cloud.SolrCloudTestCase;
@@ -60,6 +63,9 @@ import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.ShardParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
+import org.apache.solr.handler.admin.CollectionsHandler;
+import org.apache.solr.handler.admin.ConfigSetsHandler;
+import org.apache.solr.handler.admin.CoreAdminHandler;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
@@ -80,10 +86,11 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
private static final String id = "id";
private static final int TIMEOUT = 30;
+ private static final int NODE_COUNT = 3;
@BeforeClass
public static void setupCluster() throws Exception {
- configureCluster(3)
+ configureCluster(NODE_COUNT)
.addConfig("conf", getFile("solrj").toPath().resolve("solr").resolve("configsets").resolve("streaming").resolve("conf"))
.configure();
@@ -384,6 +391,11 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
private Long getNumRequests(String baseUrl, String collectionName) throws
SolrServerException, IOException {
+ return getNumRequests(baseUrl, collectionName, "QUERYHANDLER", "standard", false);
+ }
+
+ private Long getNumRequests(String baseUrl, String collectionName, String category, String key, boolean returnNumErrors) throws
+ SolrServerException, IOException {
NamedList<Object> resp;
try (HttpSolrClient client = getHttpSolrClient(baseUrl + "/"+ collectionName)) {
@@ -392,14 +404,60 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("qt", "/admin/mbeans");
params.set("stats", "true");
- params.set("key", "standard");
- params.set("cat", "QUERYHANDLER");
+ params.set("key", key);
+ params.set("cat", category);
// use generic request to avoid extra processing of queries
QueryRequest req = new QueryRequest(params);
resp = client.request(req);
}
- return (Long) resp.findRecursive("solr-mbeans", "QUERYHANDLER",
- "standard", "stats", "requests");
+ return (Long) resp.findRecursive("solr-mbeans", category, key, "stats", returnNumErrors ? "errors" : "requests");
+ }
+
+ @Test
+ public void testNonRetryableRequests() throws Exception {
+ try (CloudSolrClient client = getCloudSolrClient(cluster.getZkServer().getZkAddress())) {
+ // important to have one replica on each node
+ RequestStatusState state = CollectionAdminRequest.createCollection("foo", "conf", 1, NODE_COUNT).processAndWait(client, 60);
+ if (state == RequestStatusState.COMPLETED) {
+ AbstractDistribZkTestBase.waitForRecoveriesToFinish("foo", client.getZkStateReader(), true, true, TIMEOUT);
+ client.setDefaultCollection("foo");
+
+ Map<String, String> adminPathToMbean = new HashMap<>(CommonParams.ADMIN_PATHS.size());
+ adminPathToMbean.put(CommonParams.COLLECTIONS_HANDLER_PATH, CollectionsHandler.class.getName());
+ adminPathToMbean.put(CommonParams.CORES_HANDLER_PATH, CoreAdminHandler.class.getName());
+ adminPathToMbean.put(CommonParams.CONFIGSETS_HANDLER_PATH, ConfigSetsHandler.class.getName());
+ // we do not add the authc/authz handlers because they do not currently expose any mbeans
+
+ for (String adminPath : adminPathToMbean.keySet()) {
+ long errorsBefore = 0;
+ for (JettySolrRunner runner : cluster.getJettySolrRunners()) {
+ Long numRequests = getNumRequests(runner.getBaseUrl().toString(), "foo", "QUERYHANDLER", adminPathToMbean.get(adminPath), true);
+ errorsBefore += numRequests;
+ log.info("Found {} requests to {} on {}", numRequests, adminPath, runner.getBaseUrl());
+ }
+
+ ModifiableSolrParams params = new ModifiableSolrParams();
+ params.set("qt", adminPath);
+ params.set("action", "foobar"); // this should cause an error
+ QueryRequest req = new QueryRequest(params);
+ try {
+ NamedList<Object> resp = client.request(req);
+ fail("call to foo for admin path " + adminPath + " should have failed");
+ } catch (Exception e) {
+ // expected
+ }
+ long errorsAfter = 0;
+ for (JettySolrRunner runner : cluster.getJettySolrRunners()) {
+ Long numRequests = getNumRequests(runner.getBaseUrl().toString(), "foo", "QUERYHANDLER", adminPathToMbean.get(adminPath), true);
+ errorsAfter += numRequests;
+ log.info("Found {} requests to {} on {}", numRequests, adminPath, runner.getBaseUrl());
+ }
+ assertEquals(errorsBefore + 1, errorsAfter);
+ }
+ } else {
+ fail("Collection could not be created within 60 seconds");
+ }
+ }
}
@Test
[45/50] [abbrv] lucene-solr:apiv2: SOLR-9460: Fully fix test setup
Posted by no...@apache.org.
SOLR-9460: Fully fix test setup
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e026ac4f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e026ac4f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e026ac4f
Branch: refs/heads/apiv2
Commit: e026ac4fe5d1bbb1c798699feeb39bf2efa15be2
Parents: 62f8b8d
Author: Uwe Schindler <us...@apache.org>
Authored: Sat Sep 3 20:30:30 2016 +0200
Committer: Uwe Schindler <us...@apache.org>
Committed: Sat Sep 3 20:30:30 2016 +0200
----------------------------------------------------------------------
.../TestSolrCloudWithSecureImpersonation.java | 22 ++++++++++++--------
1 file changed, 13 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e026ac4f/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithSecureImpersonation.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithSecureImpersonation.java b/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithSecureImpersonation.java
index 3727620..ef41e4d 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithSecureImpersonation.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithSecureImpersonation.java
@@ -58,16 +58,18 @@ public class TestSolrCloudWithSecureImpersonation extends SolrTestCaseJ4 {
private static SolrClient solrClient;
private static String getUsersFirstGroup() throws Exception {
- org.apache.hadoop.security.Groups hGroups =
- new org.apache.hadoop.security.Groups(new Configuration());
String group = "*"; // accept any group if a group can't be found
- try {
- List<String> g = hGroups.getGroups(System.getProperty("user.name"));
- if (g != null && g.size() > 0) {
- group = g.get(0);
+ if (!Constants.WINDOWS) { // does not work on Windows!
+ org.apache.hadoop.security.Groups hGroups =
+ new org.apache.hadoop.security.Groups(new Configuration());
+ try {
+ List<String> g = hGroups.getGroups(System.getProperty("user.name"));
+ if (g != null && g.size() > 0) {
+ group = g.get(0);
+ }
+ } catch (NullPointerException npe) {
+ // if user/group doesn't exist on test box
}
- } catch (NullPointerException npe) {
- // if user/group doesn't exist on test box
}
return group;
}
@@ -154,7 +156,9 @@ public class TestSolrCloudWithSecureImpersonation extends SolrTestCaseJ4 {
miniCluster.shutdown();
}
miniCluster = null;
- solrClient.close();
+ if (solrClient != null) {
+ solrClient.close();
+ }
solrClient = null;
System.clearProperty("authenticationPlugin");
System.clearProperty(KerberosPlugin.DELEGATION_TOKEN_ENABLED);
[11/50] [abbrv] lucene-solr:apiv2: Merge branch 'master' of
https://git-wip-us.apache.org/repos/asf/lucene-solr
Posted by no...@apache.org.
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/lucene-solr
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/8683da80
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/8683da80
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/8683da80
Branch: refs/heads/apiv2
Commit: 8683da80ed2befd4abe0a7028ae95aefd4b3eb21
Parents: 884aa16 d489b8c
Author: Karl Wright <Da...@gmail.com>
Authored: Thu Aug 25 18:10:16 2016 -0400
Committer: Karl Wright <Da...@gmail.com>
Committed: Thu Aug 25 18:10:16 2016 -0400
----------------------------------------------------------------------
dev-tools/idea/lucene/join/join.iml | 1 -
.../idea/lucene/queryparser/queryparser.iml | 1 -
dev-tools/scripts/smokeTestRelease.py | 17 +-
lucene/CHANGES.txt | 7 +-
.../analysis/miscellaneous/TestTrimFilter.java | 46 +-
.../lucene/codecs/lucene50/Lucene50Codec.java | 170 ---
.../lucene50/Lucene50DocValuesConsumer.java | 658 ---------
.../lucene50/Lucene50DocValuesFormat.java | 115 --
.../lucene50/Lucene50DocValuesProducer.java | 1299 ------------------
.../codecs/lucene50/Lucene50NormsFormat.java | 62 -
.../codecs/lucene50/Lucene50NormsProducer.java | 481 -------
.../lucene50/Lucene50SegmentInfoFormat.java | 21 +-
.../lucene/codecs/lucene53/Lucene53Codec.java | 176 ---
.../apache/lucene/codecs/lucene53/package.html | 25 -
.../lucene/codecs/lucene54/Lucene54Codec.java | 178 ---
.../apache/lucene/codecs/lucene54/package.html | 25 -
.../services/org.apache.lucene.codecs.Codec | 3 -
.../org.apache.lucene.codecs.DocValuesFormat | 1 -
.../codecs/lucene50/Lucene50NormsConsumer.java | 403 ------
.../lucene/codecs/lucene50/Lucene50RWCodec.java | 41 -
.../codecs/lucene50/Lucene50RWNormsFormat.java | 36 -
.../lucene50/Lucene50RWSegmentInfoFormat.java | 21 +-
.../lucene50/TestLucene50DocValuesFormat.java | 281 ----
.../lucene50/TestLucene50NormsFormat.java | 130 --
.../index/TestBackwardsCompatibility.java | 4 +-
.../org/apache/lucene/index/index.6.2.0-cfs.zip | Bin 0 -> 15880 bytes
.../apache/lucene/index/index.6.2.0-nocfs.zip | Bin 0 -> 15867 bytes
.../lucene50/Lucene50FieldInfosFormat.java | 12 +-
.../org/apache/lucene/index/IndexWriter.java | 18 -
.../org/apache/lucene/index/SegmentInfos.java | 24 +-
.../org/apache/lucene/search/BooleanQuery.java | 40 +
.../java/org/apache/lucene/store/DataInput.java | 33 -
.../org/apache/lucene/store/DataOutput.java | 45 -
.../index/TestAllFilesCheckIndexHeader.java | 7 +-
.../lucene/search/TestBooleanRewrites.java | 59 +
.../CustomSeparatorBreakIterator.java | 4 +-
.../postingshighlight/WholeBreakIterator.java | 4 +-
lucene/join/build.xml | 6 +-
.../search/join/DocValuesTermsCollector.java | 82 --
.../org/apache/lucene/search/join/JoinUtil.java | 45 -
.../search/join/TermsIncludingScoreQuery.java | 9 -
.../apache/lucene/search/join/TestJoinUtil.java | 23 +-
lucene/queryparser/build.xml | 6 +-
.../flexible/standard/StandardQueryParser.java | 19 -
.../LegacyNumericRangeQueryNodeBuilder.java | 93 --
.../builders/StandardQueryTreeBuilder.java | 4 -
.../standard/config/LegacyNumericConfig.java | 165 ---
.../LegacyNumericFieldConfigListener.java | 75 -
.../config/StandardQueryConfigHandler.java | 29 +-
.../standard/nodes/LegacyNumericQueryNode.java | 153 ---
.../nodes/LegacyNumericRangeQueryNode.java | 152 --
.../LegacyNumericQueryNodeProcessor.java | 154 ---
.../LegacyNumericRangeQueryNodeProcessor.java | 170 ---
.../StandardQueryNodeProcessorPipeline.java | 2 -
.../lucene/queryparser/xml/CoreParser.java | 1 -
.../LegacyNumericRangeQueryBuilder.java | 135 --
.../standard/TestLegacyNumericQueryParser.java | 535 --------
.../xml/CoreParserTestIndexData.java | 2 -
.../queryparser/xml/LegacyNumericRangeQuery.xml | 31 -
.../LegacyNumericRangeQueryWithoutLowerTerm.xml | 31 -
.../xml/LegacyNumericRangeQueryWithoutRange.xml | 31 -
.../LegacyNumericRangeQueryWithoutUpperTerm.xml | 31 -
.../lucene/queryparser/xml/TestCoreParser.java | 20 -
.../builders/TestNumericRangeQueryBuilder.java | 179 ---
.../lucene/document/InetAddressPoint.java | 2 +-
.../lucene/store/BaseDirectoryTestCase.java | 30 -
.../lucene/store/MockIndexInputWrapper.java | 20 +-
lucene/tools/junit4/solr-tests.policy | 1 +
lucene/tools/junit4/tests.policy | 1 +
solr/CHANGES.txt | 22 +-
.../org/apache/solr/core/RequestParams.java | 7 +-
.../src/java/org/apache/solr/core/SolrCore.java | 20 +-
.../apache/solr/handler/SolrConfigHandler.java | 25 +-
.../handler/component/RealTimeGetComponent.java | 18 +-
.../solr/request/macro/MacroExpander.java | 12 +
.../solr/rest/ManagedResourceStorage.java | 9 +-
.../search/LegacyNumericRangeQueryBuilder.java | 136 ++
.../org/apache/solr/search/SolrCoreParser.java | 2 +-
.../apache/solr/search/SolrIndexSearcher.java | 18 +-
.../apache/solr/search/facet/FacetField.java | 47 +-
.../solr/search/facet/FacetFieldMerger.java | 211 +++
.../apache/solr/search/facet/FacetMerger.java | 126 +-
.../apache/solr/search/facet/FacetModule.java | 550 +++-----
.../apache/solr/search/facet/FacetRange.java | 8 +-
.../solr/search/facet/FacetRangeMerger.java | 123 ++
.../apache/solr/search/facet/FacetRequest.java | 69 +-
.../search/facet/FacetRequestSortedMerger.java | 234 ++++
.../org/apache/solr/search/facet/HLLAgg.java | 2 +-
.../apache/solr/search/facet/PercentileAgg.java | 2 +-
.../org/apache/solr/search/facet/UniqueAgg.java | 2 +-
.../solr/spelling/suggest/SolrSuggester.java | 20 +-
.../java/org/apache/solr/update/PeerSync.java | 27 +-
.../java/org/apache/solr/update/UpdateLog.java | 7 +-
.../processor/DistributedUpdateProcessor.java | 10 +-
.../solr/collection1/conf/solrconfig-tlog.xml | 2 +-
.../solr/cloud/PeerSyncReplicationTest.java | 360 +++++
.../apache/solr/handler/TestReqParamsAPI.java | 73 +-
.../solr/request/macro/TestMacroExpander.java | 116 ++
.../TestLegacyNumericRangeQueryBuilder.java | 179 +++
.../apache/solr/common/cloud/ZkStateReader.java | 6 +-
.../src/java/org/apache/solr/JSONTestUtil.java | 13 +
101 files changed, 2146 insertions(+), 6995 deletions(-)
----------------------------------------------------------------------
[18/50] [abbrv] lucene-solr:apiv2: SOLR-9389: HDFS Transaction logs
stay open for writes which leaks Xceivers.
Posted by no...@apache.org.
SOLR-9389: HDFS Transaction logs stay open for writes which leaks Xceivers.
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/6bff06ce
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/6bff06ce
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/6bff06ce
Branch: refs/heads/apiv2
Commit: 6bff06ce4fad8edbe2a45e9b3639dfc8d3d2bb87
Parents: 5861a2e
Author: markrmiller <ma...@apache.org>
Authored: Fri Aug 26 13:39:59 2016 -0400
Committer: markrmiller <ma...@apache.org>
Committed: Fri Aug 26 13:39:59 2016 -0400
----------------------------------------------------------------------
solr/CHANGES.txt | 2 +
.../apache/solr/update/HdfsTransactionLog.java | 141 ++++++++++---------
.../org/apache/solr/update/HdfsUpdateLog.java | 9 +-
.../org/apache/solr/update/TransactionLog.java | 5 +
.../java/org/apache/solr/update/UpdateLog.java | 2 +
5 files changed, 93 insertions(+), 66 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6bff06ce/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 1473fa9..f4bf7a7 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -74,6 +74,8 @@ Bug Fixes
* SOLR-9310: PeerSync fails on a node restart due to IndexFingerPrint mismatch (Pushkar Raste, noble)
+* SOLR-9389: HDFS Transaction logs stay open for writes which leaks Xceivers. (Tim Owen via Mark Miller)
+
Optimizations
----------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6bff06ce/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java b/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java
index 7ccbb95..e725127 100644
--- a/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java
+++ b/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java
@@ -64,7 +64,7 @@ public class HdfsTransactionLog extends TransactionLog {
Path tlogFile;
-
+ private long finalLogSize;
private FSDataOutputStream tlogOutStream;
private FileSystem fs;
@@ -144,13 +144,8 @@ public class HdfsTransactionLog extends TransactionLog {
@Override
public boolean endsWithCommit() throws IOException {
- long size;
- synchronized (this) {
- fos.flush();
- tlogOutStream.hflush();
- size = fos.size();
- }
-
+ ensureFlushed();
+ long size = getLogSize();
// the end of the file should have the end message (added during a commit) plus a 4 byte size
byte[] buf = new byte[ END_MESSAGE.length() ];
@@ -159,11 +154,10 @@ public class HdfsTransactionLog extends TransactionLog {
FSDataFastInputStream dis = new FSDataFastInputStream(fs.open(tlogFile), pos);
try {
- //ChannelFastInputStream is = new ChannelFastInputStream(channel, pos);
- dis.read(buf);
- for (int i=0; i<buf.length; i++) {
- if (buf[i] != END_MESSAGE.charAt(i)) return false;
- }
+ dis.read(buf);
+ for (int i=0; i<buf.length; i++) {
+ if (buf[i] != END_MESSAGE.charAt(i)) return false;
+ }
} finally {
dis.close();
}
@@ -176,10 +170,8 @@ public class HdfsTransactionLog extends TransactionLog {
public void rollback(long pos) throws IOException {
synchronized (this) {
assert snapshot_size == pos;
- fos.flush();
- tlogOutStream.hflush();
+ ensureFlushed();
// TODO: how do we rollback with hdfs?? We need HDFS-3107
- //raf.setLength(pos);
fos.setWritten(pos);
assert fos.size() == pos;
numRecords = snapshot_numRecords;
@@ -233,8 +225,10 @@ public class HdfsTransactionLog extends TransactionLog {
endRecord(pos);
- fos.flush(); // flush since this will be the last record in a log fill
- tlogOutStream.hflush();
+ ensureFlushed(); // flush since this will be the last record in a log file
+
+ // now the commit command is written we will never write to this log again
+ closeOutput();
//assert fos.size() == channel.size();
@@ -255,19 +249,7 @@ public class HdfsTransactionLog extends TransactionLog {
try {
// make sure any unflushed buffer has been flushed
- synchronized (this) {
- // TODO: optimize this by keeping track of what we have flushed up to
- fos.flushBuffer();
-
- // flush to hdfs
- tlogOutStream.hflush();
- /***
- System.out.println("###flushBuffer to " + fos.size() + " raf.length()=" + raf.length() + " pos="+pos);
- if (fos.size() != raf.length() || pos >= fos.size() ) {
- throw new RuntimeException("ERROR" + "###flushBuffer to " + fos.size() + " raf.length()=" + raf.length() + " pos="+pos);
- }
- ***/
- }
+ ensureFlushed();
FSDataFastInputStream dis = new FSDataFastInputStream(fs.open(tlogFile),
pos);
@@ -284,6 +266,52 @@ public class HdfsTransactionLog extends TransactionLog {
}
@Override
+ public void closeOutput() {
+ try {
+ doCloseOutput();
+ } catch (IOException e) {
+ log.error("Could not close tlog output", e);
+ // This situation is not fatal to the caller
+ }
+ }
+
+ private void doCloseOutput() throws IOException {
+ synchronized (this) {
+ if (fos == null) return;
+ if (debug) {
+ log.debug("Closing output for " + tlogFile);
+ }
+ fos.flushBuffer();
+ finalLogSize = fos.size();
+ fos = null;
+ }
+
+ tlogOutStream.hflush();
+ tlogOutStream.close();
+ tlogOutStream = null;
+ }
+
+ private void ensureFlushed() throws IOException {
+ synchronized (this) {
+ if (fos != null) {
+ fos.flush();
+ tlogOutStream.hflush();
+ }
+ }
+ }
+
+ @Override
+ public long getLogSize() {
+ synchronized (this) {
+ if (fos != null) {
+ return fos.size();
+ } else {
+ return finalLogSize;
+ }
+ }
+ }
+
+ @Override
public void finish(UpdateLog.SyncLevel syncLevel) {
if (syncLevel == UpdateLog.SyncLevel.NONE) return;
try {
@@ -309,12 +337,7 @@ public class HdfsTransactionLog extends TransactionLog {
log.debug("Closing tlog" + this);
}
- synchronized (this) {
- fos.flushBuffer();
- }
-
- tlogOutStream.hflush();
- tlogOutStream.close();
+ doCloseOutput();
} catch (IOException e) {
log.error("Exception closing tlog.", e);
@@ -359,17 +382,19 @@ public class HdfsTransactionLog extends TransactionLog {
public HDFSLogReader(long startingPos) {
super();
incref();
+ initStream(startingPos);
+ }
+
+ private void initStream(long pos) {
try {
synchronized (HdfsTransactionLog.this) {
- fos.flushBuffer();
- sz = fos.size();
+ ensureFlushed();
+ sz = getLogSize();
}
-
- tlogOutStream.hflush();
-
+
FSDataInputStream fdis = fs.open(tlogFile);
- fis = new FSDataFastInputStream(fdis, startingPos);
+ fis = new FSDataFastInputStream(fdis, pos);
} catch (IOException e) {
throw new RuntimeException(e);
}
@@ -385,10 +410,10 @@ public class HdfsTransactionLog extends TransactionLog {
synchronized (HdfsTransactionLog.this) {
if (trace) {
- log.trace("Reading log record. pos="+pos+" currentSize="+fos.size());
+ log.trace("Reading log record. pos="+pos+" currentSize="+getLogSize());
}
- if (pos >= fos.size()) {
+ if (pos >= getLogSize()) {
return null;
}
}
@@ -398,16 +423,8 @@ public class HdfsTransactionLog extends TransactionLog {
if (pos >= sz) {
log.info("Read available inputstream data, opening new inputstream pos={} sz={}", pos, sz);
- synchronized (HdfsTransactionLog.this) {
- fos.flushBuffer();
- sz = fos.size();
- }
-
- tlogOutStream.hflush();
fis.close();
-
- FSDataInputStream fdis = fs.open(tlogFile);
- fis = new FSDataFastInputStream(fdis, pos);
+ initStream(pos);
}
if (pos == 0) {
@@ -415,7 +432,7 @@ public class HdfsTransactionLog extends TransactionLog {
// shouldn't currently happen - header and first record are currently written at the same time
synchronized (HdfsTransactionLog.this) {
- if (fis.position() >= fos.size()) {
+ if (fis.position() >= getLogSize()) {
return null;
}
pos = fis.position();
@@ -443,7 +460,7 @@ public class HdfsTransactionLog extends TransactionLog {
@Override
public String toString() {
synchronized (HdfsTransactionLog.this) {
- return "LogReader{" + "file=" + tlogFile + ", position=" + fis.position() + ", end=" + fos.size() + "}";
+ return "LogReader{" + "file=" + tlogFile + ", position=" + fis.position() + ", end=" + getLogSize() + "}";
}
}
@@ -454,7 +471,7 @@ public class HdfsTransactionLog extends TransactionLog {
@Override
public long currentSize() {
- return fos.size();
+ return getLogSize();
}
}
@@ -478,12 +495,8 @@ public class HdfsTransactionLog extends TransactionLog {
long sz;
synchronized (HdfsTransactionLog.this) {
- fos.flushBuffer();
-
- // this must be an hflush
- tlogOutStream.hflush();
- sz = fos.size();
- //assert sz == channel.size();
+ ensureFlushed();
+ sz = getLogSize();
}
fis = new FSDataFastInputStream(fs.open(tlogFile), 0);
@@ -554,7 +567,7 @@ public class HdfsTransactionLog extends TransactionLog {
@Override
public String toString() {
synchronized (HdfsTransactionLog.this) {
- return "LogReader{" + "file=" + tlogFile + ", position=" + fis.position() + ", end=" + fos.size() + "}";
+ return "LogReader{" + "file=" + tlogFile + ", position=" + fis.position() + ", end=" + getLogSize() + "}";
}
}
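
Taken together, the new methods implement one pattern: once a tlog is known to be capped, flush it, freeze its size, and release the write path while readers keep working against the frozen size. A minimal sketch of that pattern in plain Java (the class and field names below are invented for illustration, not the actual Solr types; the append path is omitted):

import java.io.IOException;
import java.io.OutputStream;

class AppendOnlyLog {
  private OutputStream out;    // null once the log has gone read-only
  private long written;        // bytes handed to 'out' so far
  private long finalSize;      // size frozen when the output was closed

  synchronized void closeOutput() throws IOException {
    if (out == null) return;   // idempotent, like doCloseOutput() above
    out.flush();
    finalSize = written;
    OutputStream o = out;
    out = null;                // readers switch to the frozen-size branch
    o.close();
  }

  synchronized long getLogSize() {
    return out != null ? written : finalSize;
  }
}
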
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6bff06ce/solr/core/src/java/org/apache/solr/update/HdfsUpdateLog.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/HdfsUpdateLog.java b/solr/core/src/java/org/apache/solr/update/HdfsUpdateLog.java
index 4cbcf4f..764b099 100644
--- a/solr/core/src/java/org/apache/solr/update/HdfsUpdateLog.java
+++ b/solr/core/src/java/org/apache/solr/update/HdfsUpdateLog.java
@@ -219,8 +219,13 @@ public class HdfsUpdateLog extends UpdateLog {
// It's possible that at abnormal close both "tlog" and "prevTlog" were
// uncapped.
for (TransactionLog ll : logs) {
- newestLogsOnStartup.addFirst(ll);
- if (newestLogsOnStartup.size() >= 2) break;
+ if (newestLogsOnStartup.size() < 2) {
+ newestLogsOnStartup.addFirst(ll);
+ } else {
+ // We're never going to modify old non-recovery logs - no need to hold their output open
+ log.info("Closing output for old non-recovery log " + ll);
+ ll.closeOutput();
+ }
}
try {
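
The loop above encodes a simple retention policy: after startup only the two newest logs may still be appended to, so every older log can release its write resources immediately. A generic sketch of that policy (names invented; iteration order assumed newest-first, as in the patch):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;
import java.util.function.Consumer;

// Keep write access on the k newest logs, close the output of everything older.
static <T> Deque<T> keepNewestWritable(List<T> newestFirst, int k, Consumer<T> closeOutput) {
  Deque<T> kept = new ArrayDeque<>();
  for (T log : newestFirst) {
    if (kept.size() < k) {
      kept.addFirst(log);          // may still receive writes
    } else {
      closeOutput.accept(log);     // will never be written again
    }
  }
  return kept;
}
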
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6bff06ce/solr/core/src/java/org/apache/solr/update/TransactionLog.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/TransactionLog.java b/solr/core/src/java/org/apache/solr/update/TransactionLog.java
index f7213ed..997485a 100644
--- a/solr/core/src/java/org/apache/solr/update/TransactionLog.java
+++ b/solr/core/src/java/org/apache/solr/update/TransactionLog.java
@@ -520,6 +520,11 @@ public class TransactionLog implements Closeable {
}
}
+ /** Move to a read-only state, closing and releasing resources while keeping the log available for reads */
+ public void closeOutput() {
+
+ }
+
public void finish(UpdateLog.SyncLevel syncLevel) {
if (syncLevel == UpdateLog.SyncLevel.NONE) return;
try {
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6bff06ce/solr/core/src/java/org/apache/solr/update/UpdateLog.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/UpdateLog.java b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
index 0b4fc18..5b917b8 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateLog.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
@@ -821,11 +821,13 @@ public class UpdateLog implements PluginInfoInitialized {
try {
if (ll.endsWithCommit()) {
+ ll.closeOutput();
ll.decref();
continue;
}
} catch (IOException e) {
log.error("Error inspecting tlog " + ll, e);
+ ll.closeOutput();
ll.decref();
continue;
}
[04/50] [abbrv] lucene-solr:apiv2: LUCENE-7416: Rewrite optimizations
for BooleanQuery.
Posted by no...@apache.org.
LUCENE-7416: Rewrite optimizations for BooleanQuery.
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/81c796a1
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/81c796a1
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/81c796a1
Branch: refs/heads/apiv2
Commit: 81c796a1fa30a9bec77712a8f8e188b347dc490a
Parents: 1830662
Author: Adrien Grand <jp...@gmail.com>
Authored: Thu Aug 25 15:42:28 2016 +0200
Committer: Adrien Grand <jp...@gmail.com>
Committed: Thu Aug 25 15:48:50 2016 +0200
----------------------------------------------------------------------
lucene/CHANGES.txt | 7 ++-
.../org/apache/lucene/search/BooleanQuery.java | 40 +++++++++++++
.../lucene/search/TestBooleanRewrites.java | 59 ++++++++++++++++++++
3 files changed, 105 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/81c796a1/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index fbe016b..cb90ec1 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -31,7 +31,12 @@ Other
* LUCENE-7360: Remove Explanation.toHtml() (Alan Woodward)
======================= Lucene 6.3.0 =======================
-(No Changes)
+
+Optimizations
+
+* LUCENE-7416: BooleanQuery optimizes queries that have queries that occur both
+ in the sets of SHOULD and FILTER clauses, or both in MUST/FILTER and MUST_NOT
+ clauses. (Spyros Kapnissis via Adrien Grand)
======================= Lucene 6.2.0 =======================
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/81c796a1/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java b/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
index 3742bfc..b2477e8 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
@@ -272,6 +272,17 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
}
}
+ // Check whether some clauses are both required and excluded
+ if (clauseSets.get(Occur.MUST_NOT).size() > 0) {
+ final Set<Query> reqAndExclQueries = new HashSet<Query>(clauseSets.get(Occur.FILTER));
+ reqAndExclQueries.addAll(clauseSets.get(Occur.MUST));
+ reqAndExclQueries.retainAll(clauseSets.get(Occur.MUST_NOT));
+
+ if (reqAndExclQueries.isEmpty() == false) {
+ return new MatchNoDocsQuery("FILTER or MUST clause also in MUST_NOT");
+ }
+ }
+
// remove FILTER clauses that are also MUST clauses
// or that match all documents
if (clauseSets.get(Occur.MUST).size() > 0 && clauseSets.get(Occur.FILTER).size() > 0) {
@@ -293,6 +304,35 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
}
}
+ // convert FILTER clauses that are also SHOULD clauses to MUST clauses
+ if (clauseSets.get(Occur.SHOULD).size() > 0 && clauseSets.get(Occur.FILTER).size() > 0) {
+ final Collection<Query> filters = clauseSets.get(Occur.FILTER);
+ final Collection<Query> shoulds = clauseSets.get(Occur.SHOULD);
+
+ Set<Query> intersection = new HashSet<>(filters);
+ intersection.retainAll(shoulds);
+
+ if (intersection.isEmpty() == false) {
+ BooleanQuery.Builder builder = new BooleanQuery.Builder();
+ int minShouldMatch = getMinimumNumberShouldMatch();
+
+ for (BooleanClause clause : clauses) {
+ if (intersection.contains(clause.getQuery())) {
+ if (clause.getOccur() == Occur.SHOULD) {
+ builder.add(new BooleanClause(clause.getQuery(), Occur.MUST));
+ minShouldMatch--;
+ }
+ } else {
+ builder.add(clause);
+ }
+ }
+
+ builder.setMinimumNumberShouldMatch(Math.max(0, minShouldMatch));
+ return builder.build();
+ }
+ }
+
+
// Rewrite queries whose single scoring clause is a MUST clause on a
// MatchAllDocsQuery to a ConstantScoreQuery
{
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/81c796a1/lucene/core/src/test/org/apache/lucene/search/TestBooleanRewrites.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanRewrites.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanRewrites.java
index 0886340..4470841 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanRewrites.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanRewrites.java
@@ -205,6 +205,65 @@ public class TestBooleanRewrites extends LuceneTestCase {
.build();
assertEquals(expected, searcher.rewrite(bq));
}
+
+ // Duplicate Should and Filter query is converted to Must (with minShouldMatch -1)
+ public void testConvertShouldAndFilterToMust() throws IOException {
+ IndexSearcher searcher = newSearcher(new MultiReader());
+
+ // no minShouldMatch
+ BooleanQuery bq = new BooleanQuery.Builder()
+ .add(new TermQuery(new Term("foo", "bar")), Occur.SHOULD)
+ .add(new TermQuery(new Term("foo", "bar")), Occur.FILTER)
+ .build();
+ assertEquals(new TermQuery(new Term("foo", "bar")), searcher.rewrite(bq));
+
+
+ // minShouldMatch is set to -1
+ bq = new BooleanQuery.Builder()
+ .add(new TermQuery(new Term("foo", "bar")), Occur.SHOULD)
+ .add(new TermQuery(new Term("foo", "bar")), Occur.FILTER)
+ .add(new TermQuery(new Term("foo", "baz")), Occur.SHOULD)
+ .add(new TermQuery(new Term("foo", "quz")), Occur.SHOULD)
+ .setMinimumNumberShouldMatch(2)
+ .build();
+
+ BooleanQuery expected = new BooleanQuery.Builder()
+ .add(new TermQuery(new Term("foo", "bar")), Occur.MUST)
+ .add(new TermQuery(new Term("foo", "baz")), Occur.SHOULD)
+ .add(new TermQuery(new Term("foo", "quz")), Occur.SHOULD)
+ .setMinimumNumberShouldMatch(1)
+ .build();
+ assertEquals(expected, searcher.rewrite(bq));
+ }
+
+ // Duplicate Must or Filter with MustNot returns no match
+ public void testDuplicateMustOrFilterWithMustNot() throws IOException {
+ IndexSearcher searcher = newSearcher(new MultiReader());
+
+ // Test Must with MustNot
+ BooleanQuery bq = new BooleanQuery.Builder()
+ .add(new TermQuery(new Term("foo", "bar")), Occur.MUST)
+ // other terms
+ .add(new TermQuery(new Term("foo", "baz")), Occur.MUST)
+ .add(new TermQuery(new Term("foo", "bad")), Occur.SHOULD)
+ //
+ .add(new TermQuery(new Term("foo", "bar")), Occur.MUST_NOT)
+ .build();
+
+ assertEquals(new MatchNoDocsQuery(), searcher.rewrite(bq));
+
+ // Test Filter with MustNot
+ BooleanQuery bq2 = new BooleanQuery.Builder()
+ .add(new TermQuery(new Term("foo", "bar")), Occur.FILTER)
+ // other terms
+ .add(new TermQuery(new Term("foo", "baz")), Occur.MUST)
+ .add(new TermQuery(new Term("foo", "bad")), Occur.SHOULD)
+ //
+ .add(new TermQuery(new Term("foo", "bar")), Occur.MUST_NOT)
+ .build();
+
+ assertEquals(new MatchNoDocsQuery(), searcher.rewrite(bq2));
+ }
public void testRemoveMatchAllFilter() throws IOException {
IndexSearcher searcher = newSearcher(new MultiReader());
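
For reference, the first rewrite can be exercised directly, mirroring the test above: a query that repeats the same term as both SHOULD and FILTER rewrites to a single scoring MUST clause. A small snippet (the field and term are illustrative):

import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

IndexSearcher searcher = new IndexSearcher(new MultiReader());
BooleanQuery bq = new BooleanQuery.Builder()
    .add(new TermQuery(new Term("foo", "bar")), Occur.SHOULD)
    .add(new TermQuery(new Term("foo", "bar")), Occur.FILTER)
    .build();
Query rewritten = searcher.rewrite(bq);  // equals new TermQuery(new Term("foo", "bar"))
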
[29/50] [abbrv] lucene-solr:apiv2: SOLR-9188: Trying to revert a change
and fix the unexpected IOException in the jenkins failure.
Posted by no...@apache.org.
SOLR-9188: Trying to revert a change and fix the unexpected IOException in the jenkins failure.
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/757c245b
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/757c245b
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/757c245b
Branch: refs/heads/apiv2
Commit: 757c245bee057b899107be113fcfc0e4cce3b4a2
Parents: 0ed8c2a
Author: Noble Paul <no...@apache.org>
Authored: Mon Aug 29 13:07:03 2016 +0530
Committer: Noble Paul <no...@apache.org>
Committed: Mon Aug 29 13:07:03 2016 +0530
----------------------------------------------------------------------
.../java/org/apache/solr/servlet/SolrDispatchFilter.java | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/757c245b/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java b/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
index 8c792e9..17cd6b5 100644
--- a/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
+++ b/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
@@ -299,10 +299,15 @@ public class SolrDispatchFilter extends BaseSolrFilter {
boolean requestContinues = false;
final AtomicBoolean isAuthenticated = new AtomicBoolean(false);
AuthenticationPlugin authenticationPlugin = cores.getAuthenticationPlugin();
- if (authenticationPlugin == null ||
- PKIAuthenticationPlugin.PATH.equals(((HttpServletRequest)request).getPathInfo())) {
+ if (authenticationPlugin == null) {
return true;
} else {
+ try {
+ if (PKIAuthenticationPlugin.PATH.equals(((HttpServletRequest) request).getPathInfo())) return true;
+ } catch (Exception e) {
+ log.error("Unexpected error ", e);
+ }
+
//special case when solr is securing inter-node requests
String header = ((HttpServletRequest) request).getHeader(PKIAuthenticationPlugin.HEADER);
if (header != null && cores.getPkiAuthenticationPlugin() != null)
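
The net effect of the change, sketched with the surrounding names from the patch: the null-plugin short circuit stays unconditional, while the PKI-path short circuit is now guarded so an unexpected runtime error during request introspection merely falls through to normal authentication instead of failing the request.

if (authenticationPlugin == null) {
  return true;                                   // no authentication configured
}
try {
  // inter-node requests to the PKI path bypass the authentication plugin
  if (PKIAuthenticationPlugin.PATH.equals(((HttpServletRequest) request).getPathInfo())) {
    return true;
  }
} catch (Exception e) {
  log.error("Unexpected error ", e);             // fall through: authenticate normally
}
// ... continue with header-based PKI handling and the configured plugin ...
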
[27/50] [abbrv] lucene-solr:apiv2: SOLR-9449: Example schemas do not
index the _version_ field anymore because the field already has DocValues enabled
Posted by no...@apache.org.
SOLR-9449: Example schemas do not index the _version_ field anymore because the field already has DocValues enabled
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/738d5270
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/738d5270
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/738d5270
Branch: refs/heads/apiv2
Commit: 738d52700a4387d201509aec8bbf82d983fa7ed5
Parents: b1b933e
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Mon Aug 29 09:32:31 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Mon Aug 29 09:32:31 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 3 +++
solr/server/solr/configsets/basic_configs/conf/managed-schema | 3 ++-
.../configsets/data_driven_schema_configs/conf/managed-schema | 3 ++-
.../configsets/sample_techproducts_configs/conf/managed-schema | 3 ++-
4 files changed, 9 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/738d5270/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index de75a39..70c9f1e 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -94,6 +94,9 @@ Optimizations
* SOLR-9374: Speed up Jmx MBean retrieval for FieldCache. (Tim Owen via shalin)
+* SOLR-9449: Example schemas do not index _version_ field anymore because the field
+ has DocValues enabled already. (shalin)
+
Other Changes
----------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/738d5270/solr/server/solr/configsets/basic_configs/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/server/solr/configsets/basic_configs/conf/managed-schema b/solr/server/solr/configsets/basic_configs/conf/managed-schema
index 95a9027..60a0e98 100644
--- a/solr/server/solr/configsets/basic_configs/conf/managed-schema
+++ b/solr/server/solr/configsets/basic_configs/conf/managed-schema
@@ -118,7 +118,8 @@
If you don't need it, consider removing it and the corresponding copyField directive.
-->
<field name="id" type="string" indexed="true" stored="true" required="true" multiValued="false" />
- <field name="_version_" type="long" indexed="true" stored="false"/>
+ <!-- doc values are enabled by default for primitive types such as long so we don't index the version field -->
+ <field name="_version_" type="long" indexed="false" stored="false"/>
<field name="_root_" type="string" indexed="true" stored="false" docValues="false" />
<field name="_text_" type="text_general" indexed="true" stored="false" multiValued="true"/>
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/738d5270/solr/server/solr/configsets/data_driven_schema_configs/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/server/solr/configsets/data_driven_schema_configs/conf/managed-schema b/solr/server/solr/configsets/data_driven_schema_configs/conf/managed-schema
index 5b969dd..b1373d8 100644
--- a/solr/server/solr/configsets/data_driven_schema_configs/conf/managed-schema
+++ b/solr/server/solr/configsets/data_driven_schema_configs/conf/managed-schema
@@ -118,7 +118,8 @@
If you don't need it, consider removing it and the corresponding copyField directive.
-->
<field name="id" type="string" indexed="true" stored="true" required="true" multiValued="false" />
- <field name="_version_" type="long" indexed="true" stored="false"/>
+ <!-- doc values are enabled by default for primitive types such as long so we don't index the version field -->
+ <field name="_version_" type="long" indexed="false" stored="false"/>
<field name="_root_" type="string" indexed="true" stored="false" docValues="false" />
<field name="_text_" type="text_general" indexed="true" stored="false" multiValued="true"/>
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/738d5270/solr/server/solr/configsets/sample_techproducts_configs/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/managed-schema b/solr/server/solr/configsets/sample_techproducts_configs/conf/managed-schema
index 87b84df..4980540 100644
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/managed-schema
+++ b/solr/server/solr/configsets/sample_techproducts_configs/conf/managed-schema
@@ -112,7 +112,8 @@
<!-- If you remove this field, you must _also_ disable the update log in solrconfig.xml
or Solr won't start. _version_ and update log are required for SolrCloud
-->
- <field name="_version_" type="long" indexed="true" stored="false" />
+ <!-- doc values are enabled by default for primitive types such as long so we don't index the version field -->
+ <field name="_version_" type="long" indexed="false" stored="false"/>
<!-- points to the root document of a block of nested documents. Required for nested
document support, may be removed otherwise
[13/50] [abbrv] lucene-solr:apiv2: LUCENE-7416: Make 7.0 only.
Posted by no...@apache.org.
LUCENE-7416: Make 7.0 only.
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/63b2e805
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/63b2e805
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/63b2e805
Branch: refs/heads/apiv2
Commit: 63b2e8052f4f8bdb28e04d026e8388a0a77fd970
Parents: d44e731
Author: Adrien Grand <jp...@gmail.com>
Authored: Fri Aug 26 09:12:02 2016 +0200
Committer: Adrien Grand <jp...@gmail.com>
Committed: Fri Aug 26 09:12:02 2016 +0200
----------------------------------------------------------------------
lucene/CHANGES.txt | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/63b2e805/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index cb90ec1..00de5a5 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -22,6 +22,12 @@ Bug Fixes
Improvements
+Optimizations
+
+* LUCENE-7416: BooleanQuery optimizes queries that have queries that occur both
+ in the sets of SHOULD and FILTER clauses, or both in MUST/FILTER and MUST_NOT
+ clauses. (Spyros Kapnissis via Adrien Grand)
+
Other
* LUCENE-7328: Remove LegacyNumericEncoding from GeoPointField. (Nick Knize)
@@ -32,11 +38,7 @@ Other
======================= Lucene 6.3.0 =======================
-Optimizations
-
-* LUCENE-7416: BooleanQuery optimizes queries that have queries that occur both
- in the sets of SHOULD and FILTER clauses, or both in MUST/FILTER and MUST_NOT
- clauses. (Spyros Kapnissis via Adrien Grand)
+(No changes)
======================= Lucene 6.2.0 =======================
[19/50] [abbrv] lucene-solr:apiv2: SOLR-6744: Consider uniqueKey
rename when handling shard responses in distributed search
Posted by no...@apache.org.
SOLR-6744: Consider uniqueKey rename when handling shard responses in distributed search
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/b3d12d26
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/b3d12d26
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/b3d12d26
Branch: refs/heads/apiv2
Commit: b3d12d265bb389f1ec239e8a96f044f7b89c01b1
Parents: 6bff06c
Author: Tomas Fernandez Lobbe <tf...@apache.org>
Authored: Fri Aug 26 16:10:48 2016 -0700
Committer: Tomas Fernandez Lobbe <tf...@apache.org>
Committed: Fri Aug 26 16:10:48 2016 -0700
----------------------------------------------------------------------
solr/CHANGES.txt | 3 +++
.../solr/handler/component/QueryComponent.java | 5 ++++-
.../java/org/apache/solr/search/ReturnFields.java | 7 +++++++
.../org/apache/solr/search/SolrReturnFields.java | 11 +++++++++++
.../DistributedQueryComponentCustomSortTest.java | 15 +++++++++++++--
5 files changed, 38 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b3d12d26/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index f4bf7a7..b502bf0 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -76,6 +76,9 @@ Bug Fixes
* SOLR-9389: HDFS Transaction logs stay open for writes which leaks Xceivers. (Tim Owen via Mark Miller)
+* SOLR-6744: fl renaming / alias of uniqueKey field generates null pointer exception in SolrCloud configuration
+ (Mike Drob via Tomás Fernández Lóbbe)
+
Optimizations
----------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b3d12d26/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
index 0e37439..ba6a68d 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
@@ -1305,6 +1305,10 @@ public class QueryComponent extends SearchComponent
String keyFieldName = rb.req.getSchema().getUniqueKeyField().getName();
boolean removeKeyField = !rb.rsp.getReturnFields().wantsField(keyFieldName);
+ if (rb.rsp.getReturnFields().getFieldRenames().get(keyFieldName) != null) {
+ // if id was renamed we need to use the new name
+ keyFieldName = rb.rsp.getReturnFields().getFieldRenames().get(keyFieldName);
+ }
for (ShardResponse srsp : sreq.responses) {
if (srsp.getException() != null) {
@@ -1330,7 +1334,6 @@ public class QueryComponent extends SearchComponent
continue;
}
SolrDocumentList docs = (SolrDocumentList) srsp.getSolrResponse().getResponse().get("response");
-
for (SolrDocument doc : docs) {
Object id = doc.getFieldValue(keyFieldName);
ShardDoc sdoc = rb.resultIds.get(id.toString());
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b3d12d26/solr/core/src/java/org/apache/solr/search/ReturnFields.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/ReturnFields.java b/solr/core/src/java/org/apache/solr/search/ReturnFields.java
index dabfdd6..ec2b878 100644
--- a/solr/core/src/java/org/apache/solr/search/ReturnFields.java
+++ b/solr/core/src/java/org/apache/solr/search/ReturnFields.java
@@ -16,6 +16,7 @@
*/
package org.apache.solr.search;
+import java.util.Map;
import java.util.Set;
import org.apache.solr.response.transform.DocTransformer;
@@ -53,6 +54,12 @@ public abstract class ReturnFields {
*/
public abstract Set<String> getRequestedFieldNames();
+ /**
+ * Get the fields which have been renamed
+ * @return a mapping of renamed fields
+ */
+ public abstract Map<String,String> getFieldRenames();
+
/** Returns <code>true</code> if the specified field should be returned. */
public abstract boolean wantsField(String name);
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b3d12d26/solr/core/src/java/org/apache/solr/search/SolrReturnFields.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/SolrReturnFields.java b/solr/core/src/java/org/apache/solr/search/SolrReturnFields.java
index 6382f45..2b1b303 100644
--- a/solr/core/src/java/org/apache/solr/search/SolrReturnFields.java
+++ b/solr/core/src/java/org/apache/solr/search/SolrReturnFields.java
@@ -35,9 +35,11 @@ import org.apache.solr.response.transform.TransformerFactory;
import org.apache.solr.response.transform.ValueSourceAugmenter;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
+import java.util.Map;
import java.util.Set;
/**
@@ -64,6 +66,7 @@ public class SolrReturnFields extends ReturnFields {
protected DocTransformer transformer;
protected boolean _wantsScore = false;
protected boolean _wantsAllFields = false;
+ protected Map<String,String> renameFields = Collections.emptyMap();
public SolrReturnFields() {
_wantsAllFields = true;
@@ -129,6 +132,9 @@ public class SolrReturnFields extends ReturnFields {
}
augmenters.addTransformer( new RenameFieldTransformer( from, to, copy ) );
}
+ if (rename.size() > 0 ) {
+ renameFields = rename.asShallowMap();
+ }
if( !_wantsAllFields && !globs.isEmpty() ) {
// TODO??? need to fill up the fields with matching field names in the index
// and add them to okFieldNames?
@@ -145,6 +151,11 @@ public class SolrReturnFields extends ReturnFields {
}
}
+ @Override
+ public Map<String,String> getFieldRenames() {
+ return renameFields;
+ }
+
// like getId, but also accepts dashes for legacy fields
public static String getFieldName(StrParser sp) {
sp.eatws();
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b3d12d26/solr/core/src/test/org/apache/solr/handler/component/DistributedQueryComponentCustomSortTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/component/DistributedQueryComponentCustomSortTest.java b/solr/core/src/test/org/apache/solr/handler/component/DistributedQueryComponentCustomSortTest.java
index 9e9401b..4b3e92a 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/DistributedQueryComponentCustomSortTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/DistributedQueryComponentCustomSortTest.java
@@ -60,12 +60,19 @@ public class DistributedQueryComponentCustomSortTest extends BaseDistributedSear
index(id, "12", "text", "d", "payload", ByteBuffer.wrap(new byte[] { 0x34, (byte)0xdd, 0x4d })); // 7
index(id, "13", "text", "d", "payload", ByteBuffer.wrap(new byte[] { (byte)0x80, 0x11, 0x33 })); // 12
commit();
-
+
QueryResponse rsp;
+
rsp = query("q", "*:*", "fl", "id", "sort", "payload asc", "rows", "20");
assertFieldValues(rsp.getResults(), id, 7, 1, 6, 4, 2, 10, 12, 3, 5, 9, 8, 13, 11);
rsp = query("q", "*:*", "fl", "id", "sort", "payload desc", "rows", "20");
assertFieldValues(rsp.getResults(), id, 11, 13, 8, 9, 5, 3, 12, 10, 2, 4, 6, 1, 7);
+
+ // SOLR-6744
+ rsp = query("q", "*:*", "fl", "key:id", "sort", "payload asc", "rows", "20");
+ assertFieldValues(rsp.getResults(), "key", 7, 1, 6, 4, 2, 10, 12, 3, 5, 9, 8, 13, 11);
+ rsp = query("q", "*:*", "fl", "key:id,id:text", "sort", "payload asc", "rows", "20");
+ assertFieldValues(rsp.getResults(), "key", 7, 1, 6, 4, 2, 10, 12, 3, 5, 9, 8, 13, 11);
rsp = query("q", "text:a", "fl", "id", "sort", "payload asc", "rows", "20");
assertFieldValues(rsp.getResults(), id, 1, 3, 5, 9);
@@ -76,7 +83,11 @@ public class DistributedQueryComponentCustomSortTest extends BaseDistributedSear
assertFieldValues(rsp.getResults(), id, 4, 2, 10);
rsp = query("q", "text:b", "fl", "id", "sort", "payload desc", "rows", "20");
assertFieldValues(rsp.getResults(), id, 10, 2, 4);
-
+
+ // SOLR-6744
+ rsp = query("q", "text:b", "fl", "key:id", "sort", "payload asc", "rows", "20");
+ assertFieldValues(rsp.getResults(), id, null, null, null);
+
rsp = query("q", "text:c", "fl", "id", "sort", "payload asc", "rows", "20");
assertFieldValues(rsp.getResults(), id, 7, 6, 8);
rsp = query("q", "text:c", "fl", "id", "sort", "payload desc", "rows", "20");
[17/50] [abbrv] lucene-solr:apiv2: sync up 6.2.0 CHANGES.txt
Posted by no...@apache.org.
sync up 6.2.0 CHANGES.txt
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/5861a2e2
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/5861a2e2
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/5861a2e2
Branch: refs/heads/apiv2
Commit: 5861a2e22373d594555ef4e00791366cc1812462
Parents: 7f3d865
Author: Mike McCandless <mi...@apache.org>
Authored: Fri Aug 26 09:53:39 2016 -0400
Committer: Mike McCandless <mi...@apache.org>
Committed: Fri Aug 26 09:53:39 2016 -0400
----------------------------------------------------------------------
lucene/CHANGES.txt | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/5861a2e2/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 214badc..1387d25 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -32,8 +32,6 @@ Other
* LUCENE-7328: Remove LegacyNumericEncoding from GeoPointField. (Nick Knize)
-* LUCENE-6968: LSH Filter (Tommaso Teofili, Andy Hind, Cao Manh Dat)
-
* LUCENE-7360: Remove Explanation.toHtml() (Alan Woodward)
======================= Lucene 6.3.0 =======================
@@ -54,6 +52,8 @@ New Features
* LUCENE-7381: Add point based DoubleRangeField and RangeFieldQuery for
indexing and querying on Ranges up to 4 dimensions (Nick Knize)
+* LUCENE-6968: LSH Filter (Tommaso Teofili, Andy Hind, Cao Manh Dat)
+
* LUCENE-7302: IndexWriter methods that change the index now return a
long "sequence number" indicating the effective equivalent
single-threaded execution order (Mike McCandless)
@@ -78,6 +78,10 @@ New Features
Polygon instances from a standard GeoJSON string (Robert Muir, Mike
McCandless)
+* LUCENE-7395: PerFieldSimilarityWrapper requires a default similarity
+ for calculating query norm and coordination factor in Lucene 6.x.
+ Lucene 7 will no longer have those factors. (Uwe Schindler, Sascha Markus)
+
* SOLR-9279: Queries module: new ComparisonBoolFunction base class
(Doug Turnbull via David Smiley)
@@ -98,6 +102,10 @@ Bug Fixes
* LUCENE-7391: Fix performance regression in MemoryIndex's fields() introduced
in Lucene 6. (Steve Mason via David Smiley)
+* LUCENE-7395, SOLR-9315: Fix PerFieldSimilarityWrapper to also delegate query
+ norm and coordination factor using a default similarity added as ctor param.
+ (Uwe Schindler, Sascha Markus)
+
* SOLR-9413: Fix analysis/kuromoji's CSVUtil.quoteEscape logic, add TestCSVUtil test.
(AppChecker, Christine Poerschke)
[46/50] [abbrv] lucene-solr:apiv2: SOLR-5725: test fix for an unfortunate
difference between cloud and non-cloud facets - when
facet.limit=0&facet.missing=true the former responds with missing counts,
but the latter doesn't.
Posted by no...@apache.org.
SOLR-5725: test fix for an unfortunate difference between cloud and non-cloud
facets - when facet.limit=0&facet.missing=true the former responds with
missing counts, but the latter doesn't.
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/9ac5c1cf
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/9ac5c1cf
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/9ac5c1cf
Branch: refs/heads/apiv2
Commit: 9ac5c1cf149fdd393209795226dd7ee792b767b2
Parents: e026ac4
Author: Mikhail Khludnev <mk...@apache.org>
Authored: Sat Sep 3 23:52:47 2016 +0300
Committer: Mikhail Khludnev <mk...@apache.org>
Committed: Sat Sep 3 23:54:50 2016 +0300
----------------------------------------------------------------------
.../handler/component/DistributedFacetExistsSmallTest.java | 9 +++++++++
1 file changed, 9 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ac5c1cf/solr/core/src/test/org/apache/solr/handler/component/DistributedFacetExistsSmallTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/component/DistributedFacetExistsSmallTest.java b/solr/core/src/test/org/apache/solr/handler/component/DistributedFacetExistsSmallTest.java
index 4a827be..22dfca3 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/DistributedFacetExistsSmallTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/DistributedFacetExistsSmallTest.java
@@ -133,7 +133,16 @@ public class DistributedFacetExistsSmallTest extends BaseDistributedSearchTestCa
params.add("facet.mincount", rand.nextBoolean() ? "0": "1" );
}
+ final boolean shardRespondsWithMissingEvenLimitIsZero =
+ params.getBool("facet.missing", false) && params.getInt("facet.limit", 100)==0;
+ // skip miss count check, here cloud is different to non-distrib
+ if (shardRespondsWithMissingEvenLimitIsZero ) {
+ handle.put(null, SKIP);
+ }
query(params);
+ if (shardRespondsWithMissingEvenLimitIsZero ) {
+ handle.remove(null);
+ }
}
private void checkInvalidMincount() throws SolrServerException, IOException {
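
The problematic parameter combination, for concreteness (a SolrJ sketch; the field name is illustrative):

import org.apache.solr.common.params.ModifiableSolrParams;

ModifiableSolrParams params = new ModifiableSolrParams();
params.set("q", "*:*");
params.set("facet", true);
params.add("facet.field", "foo_s");
params.set("facet.limit", 0);        // no term buckets requested...
params.set("facet.missing", true);   // ...but the missing bucket is
// Distributed responses still include the missing count for this combination;
// a single node does not, which is why the test skips the comparison above.
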
[21/50] [abbrv] lucene-solr:apiv2: SOLR-9439: Shard split clean up
logic for older failed splits is faulty
Posted by no...@apache.org.
SOLR-9439: Shard split clean up logic for older failed splits is faulty
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/7d2f42e5
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/7d2f42e5
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/7d2f42e5
Branch: refs/heads/apiv2
Commit: 7d2f42e5436dc669cd48df8dafd45036bd6f9d76
Parents: ae40929
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Sat Aug 27 09:08:53 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Sat Aug 27 09:08:53 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 2 +
.../org/apache/solr/cloud/SplitShardCmd.java | 62 ++++++++++++++++----
.../org/apache/solr/core/CoreContainer.java | 7 ++-
.../org/apache/solr/util/TestInjection.java | 20 +++++++
.../org/apache/solr/cloud/ShardSplitTest.java | 54 +++++++++++++++++
5 files changed, 130 insertions(+), 15 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7d2f42e5/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 2dee6ab..62c6d5f 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -81,6 +81,8 @@ Bug Fixes
* SOLR-9445: Admin requests are retried by CloudSolrClient and LBHttpSolrClient on failure. (shalin)
+* SOLR-9439: Shard split clean up logic for older failed splits is faulty. (shalin)
+
Optimizations
----------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7d2f42e5/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java
index d7bbf66..4463285 100644
--- a/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java
@@ -46,6 +46,7 @@ import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.Utils;
import org.apache.solr.handler.component.ShardHandler;
+import org.apache.solr.util.TestInjection;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -79,6 +80,7 @@ public class SplitShardCmd implements Cmd {
log.info("Split shard invoked");
ZkStateReader zkStateReader = ocmh.zkStateReader;
+ zkStateReader.forceUpdateCollection(collectionName);
String splitKey = message.getStr("split.key");
ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
@@ -197,7 +199,10 @@ public class SplitShardCmd implements Cmd {
subSlices.add(subSlice);
String subShardName = collectionName + "_" + subSlice + "_replica1";
subShardNames.add(subShardName);
+ }
+ boolean oldShardsDeleted = false;
+ for (String subSlice : subSlices) {
Slice oSlice = collection.getSlice(subSlice);
if (oSlice != null) {
final Slice.State state = oSlice.getState();
@@ -206,24 +211,33 @@ public class SplitShardCmd implements Cmd {
"Sub-shard: " + subSlice + " exists in active state. Aborting split shard.");
} else if (state == Slice.State.CONSTRUCTION || state == Slice.State.RECOVERY) {
// delete the shards
- for (String sub : subSlices) {
- log.info("Sub-shard: {} already exists therefore requesting its deletion", sub);
- Map<String, Object> propMap = new HashMap<>();
- propMap.put(Overseer.QUEUE_OPERATION, "deleteshard");
- propMap.put(COLLECTION_PROP, collectionName);
- propMap.put(SHARD_ID_PROP, sub);
- ZkNodeProps m = new ZkNodeProps(propMap);
- try {
- ocmh.commandMap.get(DELETESHARD).call(clusterState, m, new NamedList());
- } catch (Exception e) {
- throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to delete already existing sub shard: " + sub,
- e);
- }
+ log.info("Sub-shard: {} already exists therefore requesting its deletion", subSlice);
+ Map<String, Object> propMap = new HashMap<>();
+ propMap.put(Overseer.QUEUE_OPERATION, "deleteshard");
+ propMap.put(COLLECTION_PROP, collectionName);
+ propMap.put(SHARD_ID_PROP, subSlice);
+ ZkNodeProps m = new ZkNodeProps(propMap);
+ try {
+ ocmh.commandMap.get(DELETESHARD).call(clusterState, m, new NamedList());
+ } catch (SolrException e) {
+ throwIfNotNonExistentCoreException(subSlice, e);
+ } catch (Exception e) {
+ throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to delete already existing sub shard: " + subSlice,
+ e);
}
+
+ oldShardsDeleted = true;
}
}
}
+ if (oldShardsDeleted) {
+ // refresh the locally cached cluster state
+ zkStateReader.forceUpdateCollection(collectionName);
+ clusterState = zkStateReader.getClusterState();
+ collection = clusterState.getCollection(collectionName);
+ }
+
final String asyncId = message.getStr(ASYNC);
Map<String, String> requestMap = new HashMap<>();
@@ -406,6 +420,8 @@ public class SplitShardCmd implements Cmd {
replicas.add(propMap);
}
+ assert TestInjection.injectSplitFailureBeforeReplicaCreation();
+
// we must set the slice state into recovery before actually creating the replica cores
// this ensures that the logic inside Overseer to update sub-shard state to 'active'
// always gets a chance to execute. See SOLR-7673
@@ -455,4 +471,24 @@ public class SplitShardCmd implements Cmd {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, null, e);
}
}
+
+ private void throwIfNotNonExistentCoreException(String subSlice, SolrException e) {
+ Throwable t = e;
+ String cause = null;
+ while (t != null) {
+ if (t instanceof SolrException) {
+ SolrException solrException = (SolrException) t;
+ cause = solrException.getMetadata("cause");
+ if (cause != null && !"NonExistentCore".equals(cause)) {
+ throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to delete already existing sub shard: " + subSlice,
+ e);
+ }
+ }
+ t = t.getCause();
+ }
+ if (!"NonExistentCore".equals(cause)) {
+ throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to delete already existing sub shard: " + subSlice,
+ e);
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7d2f42e5/solr/core/src/java/org/apache/solr/core/CoreContainer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/CoreContainer.java b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
index 1bdf3e3..59fe383 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
@@ -1018,8 +1018,11 @@ public class CoreContainer {
}
CoreDescriptor cd = solrCores.getCoreDescriptor(name);
- if (cd == null)
- throw new SolrException(ErrorCode.BAD_REQUEST, "Cannot unload non-existent core [" + name + "]");
+ if (cd == null) {
+ SolrException solrException = new SolrException(ErrorCode.BAD_REQUEST, "Cannot unload non-existent core [" + name + "]");
+ solrException.setMetadata("cause", "NonExistentCore");
+ throw solrException;
+ }
boolean close = solrCores.isLoadedNotPendingClose(name);
SolrCore core = solrCores.remove(name);
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7d2f42e5/solr/core/src/java/org/apache/solr/util/TestInjection.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/util/TestInjection.java b/solr/core/src/java/org/apache/solr/util/TestInjection.java
index 03de74d..efd80bf 100644
--- a/solr/core/src/java/org/apache/solr/util/TestInjection.java
+++ b/solr/core/src/java/org/apache/solr/util/TestInjection.java
@@ -113,6 +113,8 @@ public class TestInjection {
public static String randomDelayInCoreCreation = null;
public static int randomDelayMaxInCoreCreationInSec = 10;
+
+ public static String splitFailureBeforeReplicaCreation = null;
private static Set<Timer> timers = Collections.synchronizedSet(new HashSet<Timer>());
@@ -124,6 +126,7 @@ public class TestInjection {
updateLogReplayRandomPause = null;
updateRandomPause = null;
randomDelayInCoreCreation = null;
+ splitFailureBeforeReplicaCreation = null;
for (Timer timer : timers) {
timer.cancel();
@@ -285,6 +288,23 @@ public class TestInjection {
return true;
}
+
+ public static boolean injectSplitFailureBeforeReplicaCreation() {
+ if (splitFailureBeforeReplicaCreation != null) {
+ Random rand = random();
+ if (null == rand) return true;
+
+ Pair<Boolean,Integer> pair = parseValue(splitFailureBeforeReplicaCreation);
+ boolean enabled = pair.first();
+ int chanceIn100 = pair.second();
+ if (enabled && rand.nextInt(100) >= (100 - chanceIn100)) {
+ log.info("Injecting failure in creating replica for sub-shard");
+ throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to create replica");
+ }
+ }
+
+ return true;
+ }
private static Pair<Boolean,Integer> parseValue(String raw) {
Matcher m = ENABLED_PERCENT.matcher(raw);
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7d2f42e5/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
index 08e8277..13e45cd 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
@@ -40,6 +40,7 @@ import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.CompositeIdRouter;
+import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.DocRouter;
import org.apache.solr.common.cloud.HashBasedRouter;
import org.apache.solr.common.cloud.Replica;
@@ -49,6 +50,7 @@ import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.Utils;
+import org.apache.solr.util.TestInjection;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -89,6 +91,58 @@ public class ShardSplitTest extends BasicDistributedZkTest {
//waitForThingsToLevelOut(15);
}
+ /**
+ * Used to test that we can split a shard when a previous split event
+ * left sub-shards in construction or recovery state.
+ *
+ * See SOLR-9439
+ */
+ @Test
+ public void testSplitAfterFailedSplit() throws Exception {
+ waitForThingsToLevelOut(15);
+
+ TestInjection.splitFailureBeforeReplicaCreation = "true:100"; // we definitely want split to fail
+ try {
+ try {
+ CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+ splitShard.setShardName(SHARD1);
+ splitShard.process(cloudClient);
+ fail("Shard split was not supposed to succeed after failure injection!");
+ } catch (Exception e) {
+ // expected
+ }
+
+ // assert that sub-shards cores exist and sub-shard is in construction state
+ ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+ zkStateReader.forceUpdateCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+ ClusterState state = zkStateReader.getClusterState();
+ DocCollection collection = state.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+
+ Slice shard10 = collection.getSlice(SHARD1_0);
+ assertEquals(Slice.State.CONSTRUCTION, shard10.getState());
+ assertEquals(1, shard10.getReplicas().size());
+
+ Slice shard11 = collection.getSlice(SHARD1_1);
+ assertEquals(Slice.State.CONSTRUCTION, shard11.getState());
+ assertEquals(1, shard11.getReplicas().size());
+
+ // lets retry the split
+ TestInjection.reset(); // let the split succeed
+ try {
+ CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+ splitShard.setShardName(SHARD1);
+ splitShard.process(cloudClient);
+ // Yay!
+ } catch (Exception e) {
+ log.error("Shard split failed", e);
+ fail("Shard split did not succeed after a previous failed split attempt left sub-shards in construction state");
+ }
+
+ } finally {
+ TestInjection.reset();
+ }
+ }
+
@Test
public void testSplitShardWithRule() throws Exception {
waitForThingsToLevelOut(15);
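
The injection strings used above follow an "enabled:percent" convention ("true:100" fires every time). A self-contained sketch of how such a value can be interpreted (not the actual parseValue implementation):

import java.util.Random;

static boolean shouldInject(String raw, Random rand) {
  if (raw == null) return false;          // injection point disabled
  String[] parts = raw.split(":");
  boolean enabled = Boolean.parseBoolean(parts[0]);
  int chanceIn100 = parts.length > 1 ? Integer.parseInt(parts[1]) : 100;
  return enabled && rand.nextInt(100) >= (100 - chanceIn100);
}
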
[43/50] [abbrv] lucene-solr:apiv2: SOLR-5725: facet.exists=true caps
counts at 1 to make facet.method=enum faster.
Posted by no...@apache.org.
SOLR-5725: facet.exists=true caps counts at 1 to make facet.method=enum
faster.
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/ff69d148
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/ff69d148
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/ff69d148
Branch: refs/heads/apiv2
Commit: ff69d14868555c43708823df23c90abd58a14d86
Parents: abd4cfb
Author: Mikhail Khludnev <mk...@apache.org>
Authored: Sat Aug 27 22:52:39 2016 +0300
Committer: Mikhail Khludnev <mk...@apache.org>
Committed: Sat Sep 3 07:24:48 2016 +0300
----------------------------------------------------------------------
solr/CHANGES.txt | 2 +
.../solr/handler/component/FacetComponent.java | 31 +-
.../org/apache/solr/request/SimpleFacets.java | 71 ++++-
.../apache/solr/search/SolrIndexSearcher.java | 5 +
.../org/apache/solr/TestRandomFaceting.java | 261 ++++++++++++++---
.../DistributedFacetExistsSmallTest.java | 227 +++++++++++++++
.../apache/solr/request/SimpleFacetsTest.java | 286 ++++++++++++++++++-
.../apache/solr/common/params/FacetParams.java | 8 +
8 files changed, 842 insertions(+), 49 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ff69d148/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 3b220d2..0d507e3 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -77,6 +77,8 @@ prefix, then you will now get an error as these options are incompatible with nu
New Features
----------------------
+* SOLR-5725: facet.method=enum can bypass exact counts calculation with facet.exists=true; it just returns 1 for
+ terms which exist in the result docset. (Alexey Kozhemiakin, Sebastian Koziel, Radoslaw Zielinski via Mikhail Khludnev)
Bug Fixes
----------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ff69d148/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java b/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java
index 26b2e59..90608c0 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java
@@ -1265,7 +1265,14 @@ public class FacetComponent extends SearchComponent {
if (facetFs != null) {
for (String field : facetFs) {
- DistribFieldFacet ff = new DistribFieldFacet(rb, field);
+ final DistribFieldFacet ff;
+
+ if (params.getFieldBool(field, FacetParams.FACET_EXISTS, false)) {
+ // cap facet count by 1 with this method
+ ff = new DistribFacetExistsField(rb, field);
+ } else {
+ ff = new DistribFieldFacet(rb, field);
+ }
facets.put(ff.getKey(), ff);
}
}
@@ -1469,7 +1476,7 @@ public class FacetComponent extends SearchComponent {
sfc.termNum = termNum++;
counts.put(name, sfc);
}
- sfc.count += count;
+ incCount(sfc, count);
terms.set(sfc.termNum);
last = count;
}
@@ -1485,6 +1492,10 @@ public class FacetComponent extends SearchComponent {
missingMax[shardNum] = last;
counted[shardNum] = terms;
}
+
+ protected void incCount(ShardFacetCount sfc, long count) {
+ sfc.count += count;
+ }
public ShardFacetCount[] getLexSorted() {
ShardFacetCount[] arr
@@ -1530,7 +1541,7 @@ public class FacetComponent extends SearchComponent {
}
}
}
-
+
/**
* <b>This API is experimental and subject to change</b>
*/
@@ -1547,4 +1558,18 @@ public class FacetComponent extends SearchComponent {
}
}
+
+ private static final class DistribFacetExistsField extends DistribFieldFacet {
+ private DistribFacetExistsField(ResponseBuilder rb, String facetStr) {
+ super(rb, facetStr);
+ SimpleFacets.checkMincountOnExists(field, minCount);
+ }
+
+ @Override
+ protected void incCount(ShardFacetCount sfc, long count) {
+ if (count>0) {
+ sfc.count = 1;
+ }
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ff69d148/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/request/SimpleFacets.java b/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
index c2f68f9..52c2129 100644
--- a/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
+++ b/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
@@ -406,7 +406,8 @@ public class SimpleFacets {
String prefix = params.getFieldParam(field, FacetParams.FACET_PREFIX);
String contains = params.getFieldParam(field, FacetParams.FACET_CONTAINS);
boolean ignoreCase = params.getFieldBool(field, FacetParams.FACET_CONTAINS_IGNORE_CASE, false);
-
+ boolean exists = params.getFieldBool(field, FacetParams.FACET_EXISTS, false);
+
NamedList<Integer> counts;
SchemaField sf = searcher.getSchema().getField(field);
FieldType ft = sf.getType();
@@ -422,13 +423,15 @@ public class SimpleFacets {
requestedMethod = FacetMethod.FC;
} else if(FacetParams.FACET_METHOD_uif.equals(methodStr)) {
requestedMethod = FacetMethod.UIF;
- }else{
+ } else {
requestedMethod=null;
}
final boolean multiToken = sf.multiValued() || ft.multiValuedFieldCache();
- FacetMethod appliedFacetMethod = selectFacetMethod(sf, requestedMethod, mincount);
+ FacetMethod appliedFacetMethod = selectFacetMethod(field,
+ sf, requestedMethod, mincount,
+ exists);
RTimer timer = null;
if (fdebug != null) {
@@ -446,7 +449,8 @@ public class SimpleFacets {
switch (appliedFacetMethod) {
case ENUM:
assert TrieField.getMainValuePrefix(ft) == null;
- counts = getFacetTermEnumCounts(searcher, docs, field, offset, limit, mincount,missing,sort,prefix, contains, ignoreCase, params);
+ counts = getFacetTermEnumCounts(searcher, docs, field, offset, limit, mincount,missing,sort,prefix, contains, ignoreCase,
+ exists);
break;
case FCS:
assert !multiToken;
@@ -538,6 +542,29 @@ public class SimpleFacets {
return counts;
}
+ /**
+ * @param existsRequested facet.exists=true is passed for the given field
+ * */
+ static FacetMethod selectFacetMethod(String fieldName,
+ SchemaField field, FacetMethod method, Integer mincount,
+ boolean existsRequested) {
+ if (existsRequested) {
+ checkMincountOnExists(fieldName, mincount);
+ if (method == null) {
+ method = FacetMethod.ENUM;
+ }
+ }
+ final FacetMethod facetMethod = selectFacetMethod(field, method, mincount);
+
+ if (existsRequested && facetMethod!=FacetMethod.ENUM) {
+ throw new SolrException (ErrorCode.BAD_REQUEST,
+ FacetParams.FACET_EXISTS + "=true is requested, but "+
+ FacetParams.FACET_METHOD+"="+FacetParams.FACET_METHOD_enum+ " can't be used with "+fieldName
+ );
+ }
+ return facetMethod;
+ }
+
/**
* This method will force the appropriate facet method even if the user provided a different one as a request parameter
*
@@ -811,7 +838,8 @@ public class SimpleFacets {
* @see FacetParams#FACET_ZEROS
* @see FacetParams#FACET_MISSING
*/
- public NamedList<Integer> getFacetTermEnumCounts(SolrIndexSearcher searcher, DocSet docs, String field, int offset, int limit, int mincount, boolean missing, String sort, String prefix, String contains, boolean ignoreCase, SolrParams params)
+ public NamedList<Integer> getFacetTermEnumCounts(SolrIndexSearcher searcher, DocSet docs, String field, int offset, int limit, int mincount, boolean missing,
+ String sort, String prefix, String contains, boolean ignoreCase, boolean intersectsCheck)
throws IOException {
/* :TODO: potential optimization...
@@ -901,7 +929,11 @@ public class SimpleFacets {
deState.postingsEnum = postingsEnum;
}
- c = searcher.numDocs(docs, deState);
+ if (intersectsCheck) {
+ c = searcher.intersects(docs, deState) ? 1 : 0;
+ } else {
+ c = searcher.numDocs(docs, deState);
+ }
postingsEnum = deState.postingsEnum;
} else {
@@ -916,19 +948,33 @@ public class SimpleFacets {
if (postingsEnum instanceof MultiPostingsEnum) {
MultiPostingsEnum.EnumWithSlice[] subs = ((MultiPostingsEnum) postingsEnum).getSubs();
int numSubs = ((MultiPostingsEnum) postingsEnum).getNumSubs();
+
+ SEGMENTS_LOOP:
for (int subindex = 0; subindex < numSubs; subindex++) {
MultiPostingsEnum.EnumWithSlice sub = subs[subindex];
if (sub.postingsEnum == null) continue;
int base = sub.slice.start;
int docid;
while ((docid = sub.postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
- if (fastForRandomSet.exists(docid + base)) c++;
+ if (fastForRandomSet.exists(docid + base)) {
+ c++;
+ if (intersectsCheck) {
+ assert c==1;
+ break SEGMENTS_LOOP;
+ }
+ }
}
}
} else {
int docid;
while ((docid = postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
- if (fastForRandomSet.exists(docid)) c++;
+ if (fastForRandomSet.exists(docid)) {
+ c++;
+ if (intersectsCheck) {
+ assert c==1;
+ break;
+ }
+ }
}
}
@@ -969,6 +1015,15 @@ public class SimpleFacets {
return res;
}
+ public static void checkMincountOnExists(String fieldName, int mincount) {
+ if (mincount > 1) {
+ throw new SolrException (ErrorCode.BAD_REQUEST,
+ FacetParams.FACET_MINCOUNT + "="+mincount+" exceed 1 that's not supported with " +
+ FacetParams.FACET_EXISTS + "=true for " + fieldName
+ );
+ }
+ }
+
/**
* A simple key=>val pair whose natural order is such that
* <b>higher</b> vals come before lower vals.
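A compact sketch of the rules that the new selectFacetMethod overload and checkMincountOnExists enforce above: facet.exists defaults the method to ENUM, rejects mincount greater than 1, and rejects any resolved method other than ENUM. Names are illustrative only, and the per-field heuristics of the underlying selectFacetMethod are reduced to a stand-in default:

enum Method { ENUM, FC, FCS, UIF }

final class FacetExistsRulesSketch {
  // hypothetical stand-in for SimpleFacets.selectFacetMethod(String, SchemaField, FacetMethod, Integer, boolean)
  static Method resolve(String field, Method requested, int mincount, boolean exists) {
    if (exists) {
      if (mincount > 1) {
        throw new IllegalArgumentException(
            "facet.mincount=" + mincount + " exceeds 1, which is not supported with facet.exists=true for " + field);
      }
      if (requested == null) {
        requested = Method.ENUM; // facet.exists defaults to the term-enum strategy
      }
      if (requested != Method.ENUM) {
        throw new IllegalArgumentException("facet.exists=true requires facet.method=enum for " + field);
      }
    }
    // FC as a stand-in default; the real code applies per-field heuristics here
    return requested == null ? Method.FC : requested;
  }

  public static void main(String[] args) {
    System.out.println(resolve("t_s", null, 0, true));       // ENUM
    System.out.println(resolve("t_s", Method.FC, 0, false)); // FC
  }
}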
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ff69d148/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
index 7f15574..4c18809 100644
--- a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
+++ b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@@ -2285,6 +2285,11 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable, SolrI
return all.andNotSize(positiveA.union(positiveB));
}
+ /** @lucene.internal */
+ public boolean intersects(DocSet a, DocsEnumState deState) throws IOException {
+ return a.intersects(getDocSet(deState));
+ }
+
/**
* Takes a list of document IDs, and returns an array of Documents containing all of the stored fields.
*/
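The point of the new intersects() is that it can stop at the first shared document, whereas numDocs() must count them all; that is what makes the facet.exists path cheaper. A minimal sketch over two sorted doc-id arrays standing in for the DocSet and the term's postings (illustrative code, not the Solr classes):

final class IntersectsSketch {
  // true as soon as the two sorted doc-id lists share one element
  static boolean intersects(int[] a, int[] b) {
    int i = 0, j = 0;
    while (i < a.length && j < b.length) {
      if (a[i] == b[j]) return true;      // first match is enough: short-circuit
      if (a[i] < b[j]) i++; else j++;
    }
    return false;
  }

  // the full count, for comparison: must walk both lists to the end
  static int numDocs(int[] a, int[] b) {
    int i = 0, j = 0, count = 0;
    while (i < a.length && j < b.length) {
      if (a[i] == b[j]) { count++; i++; j++; }
      else if (a[i] < b[j]) i++;
      else j++;
    }
    return count;
  }

  public static void main(String[] args) {
    int[] baseDocs = {2, 5, 9, 14};  // docs matching the base query
    int[] termDocs = {5, 9, 21};     // docs containing the facet term
    System.out.println(intersects(baseDocs, termDocs)); // true
    System.out.println(numDocs(baseDocs, termDocs));    // 2
  }
}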
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ff69d148/solr/core/src/test/org/apache/solr/TestRandomFaceting.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/TestRandomFaceting.java b/solr/core/src/test/org/apache/solr/TestRandomFaceting.java
index daafca1..2ffefdc 100644
--- a/solr/core/src/test/org/apache/solr/TestRandomFaceting.java
+++ b/solr/core/src/test/org/apache/solr/TestRandomFaceting.java
@@ -16,22 +16,39 @@
*/
package org.apache.solr;
-import org.apache.lucene.util.TestUtil;
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
+import java.util.Set;
+import java.util.function.Consumer;
+import java.util.regex.Pattern;
+
import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.lucene.util.TestUtil;
+import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.schema.SchemaField;
import org.junit.BeforeClass;
import org.junit.Test;
+import org.noggit.JSONUtil;
+import org.noggit.ObjectBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.lang.invoke.MethodHandles;
-import java.util.*;
-
@Slow
public class TestRandomFaceting extends SolrTestCaseJ4 {
+ private static final Pattern trieFields = Pattern.compile(".*_t.");
+
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
public static final String FOO_STRING_FIELD = "foo_s1";
@@ -80,6 +97,21 @@ public class TestRandomFaceting extends SolrTestCaseJ4 {
types.add(new FldType("missing_ss",new IRange(0,0), new SVal('a','b',1,1)));
// TODO: doubles, multi-floats, ints with precisionStep>0, booleans
+ types.add(new FldType("small_tf",ZERO_ONE, new FVal(-4,5)));
+ assert trieFields.matcher("small_tf").matches();
+ assert !trieFields.matcher("small_f").matches();
+
+ types.add(new FldType("foo_ti",ZERO_ONE, new IRange(-2,indexSize)));
+ assert trieFields.matcher("foo_ti").matches();
+ assert !trieFields.matcher("foo_i").matches();
+
+ types.add(new FldType("bool_b",ZERO_ONE, new Vals(){
+ @Override
+ public Comparable get() {
+ return random().nextBoolean();
+ }
+
+ }));
}
void addMoreDocs(int ndocs) throws Exception {
@@ -144,8 +176,8 @@ public class TestRandomFaceting extends SolrTestCaseJ4 {
}
- List<String> multiValuedMethods = Arrays.asList(new String[]{"enum","fc"});
- List<String> singleValuedMethods = Arrays.asList(new String[]{"enum","fc","fcs"});
+ List<String> multiValuedMethods = Arrays.asList(new String[]{"enum","fc", null});
+ List<String> singleValuedMethods = Arrays.asList(new String[]{"enum","fc","fcs", null});
void doFacetTests(FldType ftype) throws Exception {
@@ -154,10 +186,9 @@ public class TestRandomFaceting extends SolrTestCaseJ4 {
Random rand = random();
boolean validate = validateResponses;
ModifiableSolrParams params = params("facet","true", "wt","json", "indent","true", "omitHeader","true");
- params.add("q","*:*", "rows","0"); // TODO: select subsets
+ params.add("q","*:*"); // TODO: select subsets
params.add("rows","0");
-
SchemaField sf = req.getSchema().getField(ftype.fname);
boolean multiValued = sf.getType().multiValuedFieldCache();
@@ -198,6 +229,10 @@ public class TestRandomFaceting extends SolrTestCaseJ4 {
params.add("facet.missing", "true");
}
+ if (rand.nextBoolean()) {
+ params.add("facet.enum.cache.minDf",""+ rand.nextInt(indexSize));
+ }
+
// TODO: randomly add other facet params
String key = ftype.fname;
String facet_field = ftype.fname;
@@ -210,45 +245,207 @@ public class TestRandomFaceting extends SolrTestCaseJ4 {
List<String> methods = multiValued ? multiValuedMethods : singleValuedMethods;
List<String> responses = new ArrayList<>(methods.size());
for (String method : methods) {
- // params.add("facet.field", "{!key="+method+"}" + ftype.fname);
- // TODO: allow method to be passed on local params?
-
- params.set("facet.method", method);
-
- // if (random().nextBoolean()) params.set("facet.mincount", "1"); // uncomment to test that validation fails
-
- String strResponse = h.query(req(params));
- // Object realResponse = ObjectBuilder.fromJSON(strResponse);
- // System.out.println(strResponse);
-
- responses.add(strResponse);
+ for (boolean exists : new boolean [] {false, true}) {
+ // params.add("facet.field", "{!key="+method+"}" + ftype.fname);
+ // TODO: allow method to be passed on local params?
+ if (method!=null) {
+ params.set("facet.method", method);
+ } else {
+ params.remove("facet.method");
+ }
+
+ params.set("facet.exists", ""+exists);
+ if (!exists && rand.nextBoolean()) {
+ params.remove("facet.exists");
+ }
+
+ // if (random().nextBoolean()) params.set("facet.mincount", "1"); // uncomment to test that validation fails
+ if (params.getInt("facet.limit", 100)!=0) { // it bypasses all processing, and we can go to empty validation
+ if (exists && params.getInt("facet.mincount", 0)>1) {
+ assertQEx("no mincount on facet.exists",
+ rand.nextBoolean() ? "facet.exists":"facet.mincount",
+ req(params), ErrorCode.BAD_REQUEST);
+ continue;
+ }
+ // facet.exists can't be combined with a non-enum method, nor with enum on trie fields, because the method gets flipped to FC/FCS
+ final boolean notEnum = method != null && !method.equals("enum");
+ final boolean trieField = trieFields.matcher(ftype.fname).matches();
+ if ((notEnum || trieField) && exists) {
+ assertQEx("facet.exists only when enum or ommitted",
+ "facet.exists", req(params), ErrorCode.BAD_REQUEST);
+ continue;
+ }
+ }
+ String strResponse = h.query(req(params));
+ responses.add(strResponse);
+
+ if (responses.size()>1) {
+ validateResponse(responses.get(0), strResponse, params, method, methods);
+ }
+ }
+
}
-
+
/**
String strResponse = h.query(req(params));
Object realResponse = ObjectBuilder.fromJSON(strResponse);
**/
+ } finally {
+ req.close();
+ }
+ }
+ private void validateResponse(String expected, String actual, ModifiableSolrParams params, String method,
+ List<String> methods) throws Exception {
+ if (params.getBool("facet.exists", false)) {
+ if (isSortByCount(params)) { // facet.sort=count is the tricky case
+ expected = getExpectationForSortByCount(params, methods); // which requires recalculating the expectation
+ } else { // facet.sort=index
+ expected = capFacetCountsTo1(expected);
+ }
+ }
+
+ String err = JSONTestUtil.match("/", actual, expected, 0.0);
+ if (err != null) {
+ log.error("ERROR: mismatch facet response: " + err +
+ "\n expected =" + expected +
+ "\n response = " + actual +
+ "\n request = " + params
+ );
+ fail(err);
+ }
+ }
- if (validate) {
- for (int i=1; i<methods.size(); i++) {
- String err = JSONTestUtil.match("/", responses.get(i), responses.get(0), 0.0);
- if (err != null) {
- log.error("ERROR: mismatch facet response: " + err +
- "\n expected =" + responses.get(0) +
- "\n response = " + responses.get(i) +
- "\n request = " + params
- );
- fail(err);
+ /** If facet.exists=true is combined with facet.sort=count,
+ * the response should list all values with count 1 ordered by label index,
+ * then all values with count 0, and finally the missing count with a null label.
+ * The implementation below calls these the three stratas.
+ * */
+ private String getExpectationForSortByCount( ModifiableSolrParams params, List<String> methods) throws Exception {
+ String indexSortedResponse = getIndexSortedAllFacetValues(params, methods);
+
+ return transformFacetFields(indexSortedResponse, e -> {
+ List<Object> facetSortedByIndex = (List<Object>) e.getValue();
+ Map<Integer,List<Object>> stratas = new HashMap<Integer,List<Object>>(){
+ @Override // poor man's multimap; I won't do that again, I swear.
+ public List<Object> get(Object key) {
+ if (!containsKey(key)) {
+ put((Integer) key, new ArrayList<>());
}
+ return super.get(key);
}
+ };
+
+ for (Iterator iterator = facetSortedByIndex.iterator(); iterator.hasNext();) {
+ Object label = (Object) iterator.next();
+ Long count = (Long) iterator.next();
+ final Integer strata;
+ if (label==null) { // missing count (the "stratas" notion is arguably overengineering here)
+ strata = null;
+ } else {
+ if (count>0) {
+ count = 1L; // capping the count here
+ strata = 1; // non-zero counts are capped to one
+ } else {
+ strata = 0; // zero count
+ }
+ }
+ final List<Object> facet = stratas.get(strata);
+ facet.add(label);
+ facet.add(count);
}
+ List stratified = new ArrayList<>();
+ for(Integer s : new Integer[]{1, 0}) { // non-zero counts (capped to one) go first, then zeroes
+ stratified.addAll(stratas.get(s));
+ } // now crop to facet.offset and facet.limit
+ int offset=params.getInt("facet.offset", 0) * 2;
+ int end = offset + params.getInt("facet.limit", 100) * 2 ;
+ int fromIndex = offset > stratified.size() ? stratified.size() : offset;
+ stratified = stratified.subList(fromIndex,
+ end > stratified.size() ? stratified.size() : end);
+
+ if (params.getInt("facet.limit", 100)>0) { /// limit=0 omits even miss count
+ stratified.addAll(stratas.get(null));
+ }
+ facetSortedByIndex.clear();
+ facetSortedByIndex.addAll(stratified);
+ });
+ }
-
+ private String getIndexSortedAllFacetValues(ModifiableSolrParams in, List<String> methods) throws Exception {
+ ModifiableSolrParams params = new ModifiableSolrParams(in);
+ params.set("facet.sort", "index");
+ String goodOldMethod = methods.get(random().nextInt( methods.size()));
+ params.set("facet.method", goodOldMethod);
+ params.set("facet.exists", "false");
+ if (random().nextBoolean()) {
+ params.remove("facet.exists");
+ }
+ params.set("facet.limit",-1);
+ params.set("facet.offset",0);
+ final String query;
+ SolrQueryRequest req = null;
+ try {
+ req = req(params);
+ query = h.query(req);
} finally {
req.close();
}
+ return query;
}
+ private boolean isSortByCount(ModifiableSolrParams in) {
+ boolean sortIsCount;
+ String sortParam = in.get("facet.sort");
+ sortIsCount = "count".equals(sortParam) || (sortParam==null && in.getInt("facet.limit",100)>0);
+ return sortIsCount;
+ }
+
+ /*
+ * {
+ "response":{"numFound":6,"start":0,"docs":[]
+ },
+ "facet_counts":{
+ "facet_queries":{},
+ "facet_fields":{
+ "foo_i":[
+ "6",2,
+ "2",1,
+ "3",1]},
+ "facet_ranges":{},
+ "facet_intervals":{},
+ "facet_heatmaps":{}}}
+ * */
+ @SuppressWarnings({"rawtypes", "unchecked"})
+ private String capFacetCountsTo1(String expected) throws IOException {
+ return transformFacetFields(expected, e -> {
+ List<Object> facetValues = (List<Object>) e.getValue();
+ for (ListIterator iterator = facetValues.listIterator(); iterator.hasNext();) {
+ Object value = iterator.next();
+ Long count = (Long) iterator.next();
+ if (value!=null && count > 1) {
+ iterator.set(1);
+ }
+
+ }
+ });
+ }
+
+ private String transformFacetFields(String expected, Consumer<Map.Entry<Object,Object>> consumer) throws IOException {
+ Object json = ObjectBuilder.fromJSON(expected);
+ Map facet_fields = getFacetFieldMap(json);
+ Set entries = facet_fields.entrySet();
+ for (Object facetTuples : entries) { // there should be only one field here
+ Entry entry = (Entry)facetTuples;
+ consumer.accept(entry);
+ }
+ return JSONUtil.toJSON(json);
+ }
+
+ private Map getFacetFieldMap(Object json) {
+ Object facet_counts = ((Map)json).get("facet_counts");
+ Map facet_fields = (Map) ((Map)facet_counts).get("facet_fields");
+ return facet_fields;
+ }
}
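The validation helpers above work on Solr's flattened facet_fields shape, a list alternating label and count (see the JSON sample in the test). A minimal sketch of the capping step that capFacetCountsTo1 performs, on a plain list rather than the parsed response:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

final class CapCountsSketch {
  // caps every count in an alternating [label, count, label, count, ...] list at 1
  static List<Object> capToOne(List<Object> flat) {
    List<Object> out = new ArrayList<>(flat);
    for (int i = 1; i < out.size(); i += 2) {    // counts sit at odd indexes
      long count = ((Number) out.get(i)).longValue();
      if (out.get(i - 1) != null && count > 1) { // a null label is the missing count: left as-is
        out.set(i, 1L);
      }
    }
    return out;
  }

  public static void main(String[] args) {
    // mirrors the "foo_i" sample above: ["6",2, "2",1, "3",1]
    System.out.println(capToOne(Arrays.asList("6", 2L, "2", 1L, "3", 1L)));
    // prints [6, 1, 2, 1, 3, 1]
  }
}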
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ff69d148/solr/core/src/test/org/apache/solr/handler/component/DistributedFacetExistsSmallTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/component/DistributedFacetExistsSmallTest.java b/solr/core/src/test/org/apache/solr/handler/component/DistributedFacetExistsSmallTest.java
new file mode 100644
index 0000000..4a827be
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/handler/component/DistributedFacetExistsSmallTest.java
@@ -0,0 +1,227 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.handler.component;
+
+import static org.hamcrest.CoreMatchers.is;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.solr.BaseDistributedSearchTestCase;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.response.FacetField;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.junit.Before;
+
+public class DistributedFacetExistsSmallTest extends BaseDistributedSearchTestCase {
+
+ public static final String FLD = "t_s";
+ private int maxId;
+
+ public DistributedFacetExistsSmallTest() {
+ }
+
+ @Before
+ public void prepareIndex() throws Exception {
+ del("*:*");
+
+ final Random rnd = random();
+ index(id, maxId=rnd.nextInt(5), FLD, "AAA");
+ index(id, maxId+=1+rnd.nextInt(5), FLD, "B");
+ index(id, maxId+=1+rnd.nextInt(5), FLD, "BB");
+ index(id, maxId+=1+rnd.nextInt(5), FLD, "BB");
+ index(id, maxId+=1+rnd.nextInt(5), FLD, "BBB");
+ index(id, maxId+=1+rnd.nextInt(5), FLD, "BBB");
+ index(id, maxId+=1+rnd.nextInt(5), FLD, "BBB");
+ index(id, maxId+=1+rnd.nextInt(5), FLD, "CC");
+ index(id, maxId+=1+rnd.nextInt(5), FLD, "CC");
+ index(id, maxId+=1+rnd.nextInt(5), FLD, "CCC");
+ index(id, maxId+=1+rnd.nextInt(5), FLD, "CCC");
+ index(id, maxId+=1+rnd.nextInt(5), FLD, "CCC");
+
+ final SolrClient shard0 = clients.get(0);
+ // expectedly fails the test
+ //shard0.add(sdoc("id", 13, FLD, "DDD"));
+ commit();
+
+ handle.clear();
+ handle.put("QTime", SKIPVAL);
+ handle.put("timestamp", SKIPVAL);
+ handle.put("maxScore", SKIPVAL);
+ handle.put("_version_", SKIPVAL);
+ }
+
+ @ShardsFixed(num=4)
+ public void test() throws Exception{
+ checkBasicRequest();
+ checkWithMinCountEqOne();
+ checkWithSortCount();
+ checkWithMethodSetPerField();
+
+ {
+ // plain enum request without facet.exists, to check for NPE
+ final ModifiableSolrParams params = buildParams();
+ params.remove("facet.exists");
+ QueryResponse rsp = query(params);
+ }
+
+ checkRandomParams();
+
+ checkInvalidMincount();
+ }
+
+ private void checkRandomParams() throws Exception {
+ final ModifiableSolrParams params = buildParams();
+ Random rand = random();
+
+ if (rand.nextBoolean()) {
+ int from;
+ params.set("q", "["+(from = rand.nextInt(maxId/2))+
+ " TO "+((from-1)+(rand.nextInt(maxId)))+"]");
+ }
+
+ int offset = 0;
+ int indexSize = 6;
+ if (rand.nextInt(100) < 20) {
+ if (rand.nextBoolean()) {
+ offset = rand.nextInt(100) < 10 ? rand.nextInt(indexSize *2) : rand.nextInt(indexSize/3+1);
+ }
+ params.add("facet.offset", Integer.toString(offset));
+ }
+
+ int limit = 100;
+ if (rand.nextInt(100) < 20) {
+ if (rand.nextBoolean()) {
+ limit = rand.nextInt(100) < 10 ? rand.nextInt(indexSize/2+1) : rand.nextInt(indexSize*2);
+ }
+ params.add("facet.limit", Integer.toString(limit));
+ }
+
+ if (rand.nextBoolean()) {
+ params.add("facet.sort", rand.nextBoolean() ? "index" : "count");
+ }
+
+ if (rand.nextInt(100) < 20) {
+ final String[] prefixes = new String[] {"A","B","C"};
+ params.add("facet.prefix", prefixes[rand.nextInt(prefixes.length)]);
+ }
+
+ if (rand.nextInt(100) < 20) {
+ params.add("facet.missing", "true");
+ }
+
+ if (rand.nextInt(100) < 20) { // assign only valid values
+ params.add("facet.mincount", rand.nextBoolean() ? "0": "1" );
+ }
+
+ query(params);
+ }
+
+ private void checkInvalidMincount() throws SolrServerException, IOException {
+ final ModifiableSolrParams params = buildParams();
+ if (random().nextBoolean()) {
+ params.remove("facet.exists");
+ params.set("f."+FLD+".facet.exists","true");
+ }
+
+ if (random().nextBoolean()) {
+ params.set("facet.mincount", ""+(2+random().nextInt(100)) );
+ } else {
+ params.set("f."+FLD+".facet.mincount", ""+(2+random().nextInt(100)) );
+ }
+
+ try {
+ if (random().nextBoolean()) {
+ setDistributedParams(params);
+ queryServer(params);
+ } else {
+ params.set("distrib", "false");
+ controlClient.query(params);
+ }
+ fail();
+ } catch(SolrException e) { // check that distributed and single-index search fail the same way
+ assertEquals(e.code(), ErrorCode.BAD_REQUEST.code);
+ assertTrue(e.getMessage().contains("facet.exists"));
+ assertTrue(e.getMessage().contains("facet.mincount"));
+ assertTrue(e.getMessage().contains(FLD));
+ }
+ }
+
+ private void checkBasicRequest() throws Exception {
+ final ModifiableSolrParams params = buildParams();
+ QueryResponse rsp = query(params);
+ assertResponse(rsp);
+ }
+
+ private void checkWithMinCountEqOne() throws Exception {
+ final ModifiableSolrParams params = buildParams("facet.mincount","1");
+ QueryResponse rsp = query(params);
+ assertResponse(rsp);
+ }
+
+ private void checkWithSortCount() throws Exception {
+ final ModifiableSolrParams params = buildParams("facet.sort","count");
+ QueryResponse rsp = query(params);
+ assertResponse(rsp);
+ }
+
+ private void checkWithMethodSetPerField() throws Exception {
+ final ModifiableSolrParams params = buildParams("f." + FLD + ".facet.exists", "true");
+ params.remove("facet.exists");
+ QueryResponse rsp = query(params);
+ assertResponse(rsp);
+ }
+
+ private ModifiableSolrParams buildParams(String... additionalParams) {
+ final ModifiableSolrParams params = new ModifiableSolrParams();
+
+ params.add("q", "*:*");
+ params.add("rows", "0");
+ //params.add("debugQuery", "true");
+ params.add("facet", "true");
+ params.add("sort", "id asc");
+
+ if(random().nextBoolean()){
+ params.add("facet.method", "enum");
+ }
+
+ params.add("facet.exists", "true");
+ params.add("facet.field", FLD);
+ for(int i = 0; i < additionalParams.length;) {
+ params.add(additionalParams[i++], additionalParams[i++]);
+ }
+ return params;
+ }
+
+ private void assertResponse(QueryResponse rsp) {
+ final FacetField facetField = rsp.getFacetField(FLD);
+
+ assertThat(facetField.getValueCount(), is(6));
+ final List<FacetField.Count> counts = facetField.getValues();
+ for (FacetField.Count count : counts) {
+ assertThat("Count for: " + count.getName(), count.getCount(), is(1L));
+ }
+ assertThat(counts.get(0).getName(), is("AAA"));
+ assertThat(counts.get(1).getName(), is("B"));
+ assertThat(counts.get(2).getName(), is("BB"));
+ }
+}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ff69d148/solr/core/src/test/org/apache/solr/request/SimpleFacetsTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/request/SimpleFacetsTest.java b/solr/core/src/test/org/apache/solr/request/SimpleFacetsTest.java
index 0514918..85035b9 100644
--- a/solr/core/src/test/org/apache/solr/request/SimpleFacetsTest.java
+++ b/solr/core/src/test/org/apache/solr/request/SimpleFacetsTest.java
@@ -38,7 +38,6 @@ import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.util.TimeZoneUtils;
import org.junit.BeforeClass;
-import org.junit.Ignore;
import org.junit.Test;
import org.noggit.ObjectBuilder;
import org.slf4j.Logger;
@@ -494,11 +493,9 @@ public class SimpleFacetsTest extends SolrTestCaseJ4 {
ModifiableSolrParams params = params("q","*:*", "rows","0", "facet","true", "facet.field","{!key=myalias}"+field);
- String[] methods = {null, "fc","enum","fcs", "uif"
- };
+ String[] methods = {null, "fc","enum","fcs", "uif"};
if (sf.multiValued() || sf.getType().multiValuedFieldCache()) {
- methods = new String[]{null, "fc","enum", "uif"
- };
+ methods = new String[]{null, "fc","enum", "uif"};
}
prefixes = prefixes==null ? new String[]{null} : prefixes;
@@ -2017,6 +2014,49 @@ public class SimpleFacetsTest extends SolrTestCaseJ4 {
doFacetPrefix("t_s", null, "", "facet.method", "enum", "facet.enum.cache.minDf", "3");
doFacetPrefix("t_s", null, "", "facet.method", "enum", "facet.enum.cache.minDf", "100");
doFacetPrefix("t_s", null, "", "facet.method", "fc");
+ doFacetExistsPrefix("t_s", null, "");
+ doFacetExistsPrefix("t_s", null, "", "facet.enum.cache.minDf", "3");
+ doFacetExistsPrefix("t_s", null, "", "facet.enum.cache.minDf", "100");
+ }
+
+ @Test
+ public void testFacetExistsShouldThrowExceptionForMincountGreaterThanOne() throws Exception {
+ final String f = "t_s";
+ final List<String> msg = Arrays.asList("facet.mincount", "facet.exists", f);
+ Collections.shuffle(msg, random());
+ assertQEx("checking global method or per field", msg.get(0),
+ req("q", "id:[* TO *]"
+ ,"indent","on"
+ ,"facet","true"
+ , random().nextBoolean() ? "facet.exists": "f."+f+".facet.exists", "true"
+ ,"facet.field", f
+ , random().nextBoolean() ? "facet.mincount" : "f."+f+".facet.mincount" ,
+ "" + (2+random().nextInt(Integer.MAX_VALUE-2))
+ )
+ , ErrorCode.BAD_REQUEST);
+
+ assertQ("overriding per field",
+ req("q", "id:[* TO *]"
+ ,"indent","on"
+ ,"facet","true"
+ ,"facet.exists", "true"
+ ,"f."+f+".facet.exists", "false"
+ ,"facet.field", f
+ ,"facet.mincount",""+(2+random().nextInt(Integer.MAX_VALUE-2))
+ ),
+ "//lst[@name='facet_fields']/lst[@name='"+f+"']");
+
+ assertQ("overriding per field",
+ req("q", "id:[* TO *]"
+ ,"indent","on"
+ ,"facet","true"
+ ,"facet.exists", "true"
+ ,"facet.field", f
+ ,"facet.mincount",""+(2+random().nextInt(Integer.MAX_VALUE-2))
+ ,"f."+f+".facet.mincount", random().nextBoolean() ? "0":"1"
+ ),
+ "//lst[@name='facet_fields']/lst[@name='"+f+"']");
+
}
static void indexFacetPrefixSingleValued() {
@@ -2037,7 +2077,7 @@ public class SimpleFacetsTest extends SolrTestCaseJ4 {
}
@Test
- @Ignore("SOLR-8466 - facet.method=uif ignores facet.contains")
+ //@Ignore("SOLR-8466 - facet.method=uif ignores facet.contains")
public void testFacetContainsUif() {
doFacetContains("contains_s1", "contains_group_s1", "Astra", "BAst", "Ast", "facet.method", "uif");
doFacetPrefix("contains_s1", null, "Astra", "facet.method", "uif", "facet.contains", "Ast");
@@ -2063,6 +2103,7 @@ public class SimpleFacetsTest extends SolrTestCaseJ4 {
doFacetPrefix("contains_s1", null, "Astra", "facet.method", "enum", "facet.contains", "aSt", "facet.contains.ignoreCase", "true");
doFacetPrefix("contains_s1", null, "Astra", "facet.method", "fcs", "facet.contains", "asT", "facet.contains.ignoreCase", "true");
doFacetPrefix("contains_s1", null, "Astra", "facet.method", "fc", "facet.contains", "aST", "facet.contains.ignoreCase", "true");
+ doFacetExistsPrefix("contains_s1", null, "Astra", "facet.contains", "Ast");
}
static void indexFacetPrefix(String idPrefix, String f, String termSuffix, String g) {
@@ -2313,6 +2354,239 @@ public class SimpleFacetsTest extends SolrTestCaseJ4 {
);
}
+ public void doFacetExistsPrefix(String f, String local, String termSuffix, String... params) {
+ String indent="on";
+ String pre = "//lst[@name='"+f+"']";
+ String lf = local==null ? f : local+f;
+
+ assertQ("test field facet.method",
+ req(params, "q", "id:[* TO *]"
+ ,"indent", indent
+ ,"facet", "true"
+ ,"f."+lf+".facet.exists", "true"
+ ,"facet.field", lf
+ ,"facet.mincount", "0"
+ ,"facet.offset", "0"
+ ,"facet.limit", "100"
+ ,"facet.sort", "count"
+ ,"facet.prefix", "B"
+ )
+ ,"*[count(//lst[@name='facet_fields']/lst/int)=3]"
+ ,pre+"/int[1][@name='B"+termSuffix+"'][.='1']"
+ ,pre+"/int[2][@name='BB"+termSuffix+"'][.='1']"
+ ,pre+"/int[3][@name='BBB"+termSuffix+"'][.='1']"
+ );
+
+ assertQ("test facet.prefix middle, exact match first term",
+ req(params, "q", "id:[* TO *]"
+ ,"indent",indent
+ ,"facet","true"
+ ,"facet.exists", "true"
+ ,"facet.field", lf
+ ,"facet.mincount","0"
+ ,"facet.offset","0"
+ ,"facet.limit","100"
+ ,"facet.sort","count"
+ ,"facet.prefix","B"
+ )
+ ,"*[count(//lst[@name='facet_fields']/lst/int)=3]"
+ ,pre+"/int[1][@name='B"+termSuffix+"'][.='1']"
+ ,pre+"/int[2][@name='BB"+termSuffix+"'][.='1']"
+ ,pre+"/int[3][@name='BBB"+termSuffix+"'][.='1']"
+ );
+
+ assertQ("test facet.prefix middle, exact match first term, unsorted",
+ req(params, "q", "id:[* TO *]"
+ ,"indent",indent
+ ,"facet","true"
+ ,"facet.exists", "true"
+ ,"facet.field", lf
+ ,"facet.mincount","0"
+ ,"facet.offset","0"
+ ,"facet.limit","100"
+ ,"facet.sort","index"
+ ,"facet.prefix","B"
+ )
+ ,"*[count(//lst[@name='facet_fields']/lst/int)=3]"
+ ,pre+"/int[1][@name='B"+termSuffix+"'][.='1']"
+ ,pre+"/int[2][@name='BB"+termSuffix+"'][.='1']"
+ ,pre+"/int[3][@name='BBB"+termSuffix+"'][.='1']"
+ );
+
+ assertQ("test facet.prefix middle, paging",
+ req(params, "q", "id:[* TO *]"
+ ,"indent",indent
+ ,"facet","true"
+ ,"facet.exists", "true"
+ ,"facet.field", lf
+ ,"facet.mincount","0"
+ ,"facet.offset","1"
+ ,"facet.limit","100"
+ ,"facet.sort","count"
+ ,"facet.prefix","B"
+ )
+ ,"*[count(//lst[@name='facet_fields']/lst/int)=2]"
+ ,pre+"/int[1][@name='BB"+termSuffix+"'][.='1']"
+ ,pre+"/int[2][@name='BBB"+termSuffix+"'][.='1']"
+ );
+
+ assertQ("test facet.prefix middle, paging",
+ req(params, "q", "id:[* TO *]"
+ ,"indent",indent
+ ,"facet","true"
+ ,"facet.exists", "true"
+ ,"facet.field", lf
+ ,"facet.mincount","0"
+ ,"facet.offset","1"
+ ,"facet.limit","1"
+ ,"facet.sort","count"
+ ,"facet.prefix","B"
+ )
+ ,"*[count(//lst[@name='facet_fields']/lst/int)=1]"
+ ,pre+"/int[1][@name='BB"+termSuffix+"'][.='1']"
+ );
+
+ assertQ("test facet.prefix end, not exact match",
+ req(params, "q", "id:[* TO *]"
+ ,"indent",indent
+ ,"facet","true"
+ ,"facet.exists", "true"
+ ,"facet.field", lf
+ ,"facet.mincount","0"
+ ,"facet.offset","0"
+ ,"facet.limit","100"
+ ,"facet.sort","count"
+ ,"facet.prefix","C"
+ )
+ ,"*[count(//lst[@name='facet_fields']/lst/int)=2]"
+ ,pre+"/int[1][@name='CC"+termSuffix+"'][.='1']"
+ ,pre+"/int[2][@name='CCC"+termSuffix+"'][.='1']"
+ );
+
+ assertQ("test facet.prefix end, exact match",
+ req(params, "q", "id:[* TO *]"
+ ,"indent",indent
+ ,"facet","true"
+ ,"facet.exists", "true"
+ ,"facet.field", lf
+ ,"facet.mincount","0"
+ ,"facet.offset","0"
+ ,"facet.limit","100"
+ ,"facet.sort","count"
+ ,"facet.prefix","CC"
+ )
+ ,"*[count(//lst[@name='facet_fields']/lst/int)=2]"
+ ,pre+"/int[1][@name='CC"+termSuffix+"'][.='1']"
+ ,pre+"/int[2][@name='CCC"+termSuffix+"'][.='1']"
+ );
+
+ assertQ("test facet.prefix past end",
+ req(params, "q", "id:[* TO *]"
+ ,"indent",indent
+ ,"facet","true"
+ ,"facet.exists", "true"
+ ,"facet.field", lf
+ ,"facet.mincount","0"
+ ,"facet.offset","0"
+ ,"facet.limit","100"
+ ,"facet.sort","count"
+ ,"facet.prefix","X"
+ )
+ ,"*[count(//lst[@name='facet_fields']/lst/int)=0]"
+ );
+
+ assertQ("test facet.prefix past end",
+ req(params, "q", "id:[* TO *]"
+ ,"indent",indent
+ ,"facet","true"
+ ,"facet.exists", "true"
+ ,"facet.field", lf
+ ,"facet.mincount","0"
+ ,"facet.offset","1"
+ ,"facet.limit","-1"
+ ,"facet.sort","count"
+ ,"facet.prefix","X"
+ )
+ ,"*[count(//lst[@name='facet_fields']/lst/int)=0]"
+ );
+
+ assertQ("test facet.prefix at start, exact match",
+ req(params, "q", "id:[* TO *]"
+ ,"indent",indent
+ ,"facet","true"
+ ,"facet.exists", "true"
+ ,"facet.field", lf
+ ,"facet.mincount","0"
+ ,"facet.offset","0"
+ ,"facet.limit","100"
+ ,"facet.sort","count"
+ ,"facet.prefix","AAA"
+ )
+ ,"*[count(//lst[@name='facet_fields']/lst/int)=1]"
+ ,pre+"/int[1][@name='AAA"+termSuffix+"'][.='1']"
+ );
+ assertQ("test facet.prefix at Start, not exact match",
+ req(params, "q", "id:[* TO *]"
+ ,"indent",indent
+ ,"facet","true"
+ ,"facet.exists", "true"
+ ,"facet.field", lf
+ ,"facet.mincount","0"
+ ,"facet.offset","0"
+ ,"facet.limit","100"
+ ,"facet.sort","count"
+ ,"facet.prefix","AA"
+ )
+ ,"*[count(//lst[@name='facet_fields']/lst/int)=1]"
+ ,pre+"/int[1][@name='AAA"+termSuffix+"'][.='1']"
+ );
+ assertQ("test facet.prefix before start",
+ req(params, "q", "id:[* TO *]"
+ ,"indent",indent
+ ,"facet","true"
+ ,"facet.exists", "true"
+ ,"facet.field", lf
+ ,"facet.mincount","0"
+ ,"facet.offset","0"
+ ,"facet.limit","100"
+ ,"facet.sort","count"
+ ,"facet.prefix","999"
+ )
+ ,"*[count(//lst[@name='facet_fields']/lst/int)=0]"
+ );
+
+ assertQ("test facet.prefix before start",
+ req(params, "q", "id:[* TO *]"
+ ,"indent",indent
+ ,"facet","true"
+ ,"facet.exists", "true"
+ ,"facet.field", lf
+ ,"facet.mincount","0"
+ ,"facet.offset","2"
+ ,"facet.limit","100"
+ ,"facet.sort","count"
+ ,"facet.prefix","999"
+ )
+ ,"*[count(//lst[@name='facet_fields']/lst/int)=0]"
+ );
+
+ // test offset beyond what is collected internally in queue
+ assertQ(
+ req(params, "q", "id:[* TO *]"
+ ,"indent",indent
+ ,"facet","true"
+ ,"facet.exists", "true"
+ ,"facet.field", lf
+ ,"facet.mincount","1"
+ ,"facet.offset","5"
+ ,"facet.limit","10"
+ ,"facet.sort","count"
+ ,"facet.prefix","CC"
+ )
+ ,"*[count(//lst[@name='facet_fields']/lst/int)=0]"
+ );
+ }
+
public void doFacetContains(String f, String g, String termSuffix, String contains, String groupContains, String... params) {
String indent="on";
String pre = "//lst[@name='"+f+"']";
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ff69d148/solr/solrj/src/java/org/apache/solr/common/params/FacetParams.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/params/FacetParams.java b/solr/solrj/src/java/org/apache/solr/common/params/FacetParams.java
index e014c86..038fc6e 100644
--- a/solr/solrj/src/java/org/apache/solr/common/params/FacetParams.java
+++ b/solr/solrj/src/java/org/apache/solr/common/params/FacetParams.java
@@ -185,6 +185,14 @@ public interface FacetParams {
* only use the filterCache for terms with a df >= to this parameter.
*/
public static final String FACET_ENUM_CACHE_MINDF = FACET + ".enum.cache.minDf";
+
+ /**
+ * A boolean parameter that caps facet counts at 1.
+ * With this set, every returned count is either 0 or 1.
+ * For apps that only need to know whether a value exists, this can be an optimization.
+ */
+ public static final String FACET_EXISTS = FACET+".exists";
+
/**
* Any field whose terms the user wants to enumerate over for
* Facet Constraint Counts (multi-value)
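For reference, a query exercising the new parameter could look like the sketch below; host, port, collection ('techproducts'), and field ('cat') are placeholders, not values from this commit:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class FacetExistsQuerySketch {
  public static void main(String[] args) throws Exception {
    // facet.exists=true caps every returned facet count at 0 or 1
    URL url = new URL("http://localhost:8983/solr/techproducts/select"
        + "?q=*:*&rows=0&facet=true&facet.field=cat"
        + "&facet.method=enum&facet.exists=true&wt=json");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try (BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()))) {
      in.lines().forEach(System.out::println);
    }
  }
}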
[12/50] [abbrv] lucene-solr:apiv2: LUCENE-7425: change from Perl to
Python for our script that polls mirrors during the release process
Posted by no...@apache.org.
LUCENE-7425: change from Perl to Python for our script that polls mirrors during the release process
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/d44e7315
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/d44e7315
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/d44e7315
Branch: refs/heads/apiv2
Commit: d44e7315b460a79008bbb6e1d5d9ace8d9fa78b9
Parents: 8683da8
Author: Mike McCandless <mi...@apache.org>
Authored: Thu Aug 25 19:46:25 2016 -0400
Committer: Mike McCandless <mi...@apache.org>
Committed: Thu Aug 25 19:48:44 2016 -0400
----------------------------------------------------------------------
dev-tools/scripts/poll-mirrors.pl | 155 ---------------------------------
dev-tools/scripts/poll-mirrors.py | 153 ++++++++++++++++++++++++++++++++
2 files changed, 153 insertions(+), 155 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d44e7315/dev-tools/scripts/poll-mirrors.pl
----------------------------------------------------------------------
diff --git a/dev-tools/scripts/poll-mirrors.pl b/dev-tools/scripts/poll-mirrors.pl
deleted file mode 100755
index cdbd3bb..0000000
--- a/dev-tools/scripts/poll-mirrors.pl
+++ /dev/null
@@ -1,155 +0,0 @@
-#!/usr/bin/perl
-#
-# poll-mirrors.pl
-#
-# This script is designed to poll download sites after posting a release
-# and print out notice as each becomes available. The RM can use this
-# script to delay the release announcement until the release can be
-# downloaded.
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-use strict;
-use warnings;
-use Getopt::Long;
-use POSIX qw/strftime/;
-use LWP::UserAgent;
-
-my $rel_path;
-my $version;
-my $interval = 300;
-my $details = 0;
-
-my $result = GetOptions ("version=s" => \$version,
- "details!" => \$details,
- "path=s" => \$rel_path,
- "interval=i" => \$interval);
-
-my $usage = ""
- . "$0 -v version [ -i interval (seconds; default: 300) ] [ -details ]\n"
- . "$0 -p some/explicit/path [ -i interval (seconds; default: 300) ] [ -details ]\n"
- ;
-
-unless ($result) {
- print STDERR $usage;
- exit(1);
-}
-
-unless (defined($version) xor defined($rel_path)) {
- print STDERR "You must specify either -version or -path but not both\n$usage";
- exit(1);
-}
-
-my $label;
-my $apache_url_suffix;
-my $maven_url;
-
-if (defined($version)) {
- if ($version !~ /^\d+(?:\.\d+)+/) {
- print STDERR "You must specify the release version as a number.\n$usage";
- exit(1);
- }
- $label = $version;
- $apache_url_suffix = "lucene/java/$version/changes/Changes.html";
- $maven_url = "http://repo1.maven.org/maven2/org/apache/lucene/lucene-core/$version/lucene-core-$version.pom.asc";
-} else {
- # path based
- $apache_url_suffix = $label = $rel_path;
-}
-my $previously_selected = select STDOUT;
-$| = 1; # turn off buffering of STDOUT, so status is printed immediately
-select $previously_selected;
-
-my $apache_mirrors_list_url = "http://www.apache.org/mirrors/";
-
-my $agent = LWP::UserAgent->new();
-$agent->timeout(2);
-
-my $maven_available = defined($maven_url) ? 0 : -999;
-
-my @apache_mirrors = ();
-
-my $apache_mirrors_list_page = $agent->get($apache_mirrors_list_url)->decoded_content;
-if (defined($apache_mirrors_list_page)) {
- #<TR>
- # <TD ALIGN=RIGHT><A HREF="http://apache.dattatec.com/">apache.dattatec.com</A> <A HREF="http://apache.dattatec.com/">@</A></TD>
- #
- # <TD>http</TD>
- # <TD ALIGN=RIGHT>8 hours<BR><IMG BORDER=1 SRC="icons/mms14.gif" ALT=""></TD>
- # <TD ALIGN=RIGHT>5 hours<BR><IMG BORDER=1 SRC="icons/mms14.gif" ALT=""></TD>
- # <TD>ok</TD>
- #</TR>
- while ($apache_mirrors_list_page =~ m~<TR>(.*?)</TR>~gis) {
- my $mirror_entry = $1;
- next unless ($mirror_entry =~ m~<TD>\s*ok\s*</TD>\s*$~i); # skip mirrors with problems
- if ($mirror_entry =~ m~<A\s+HREF\s*=\s*"([^"]+)"\s*>~i) {
- my $mirror_url = $1;
- push @apache_mirrors, "${mirror_url}${apache_url_suffix}";
- }
- }
-} else {
- print STDERR "Error fetching Apache mirrors list $apache_mirrors_list_url";
- exit(1);
-}
-
-my $num_apache_mirrors = $#apache_mirrors;
-
-my $sleep_interval = 0;
-while (1) {
- print "\n", strftime('%d-%b-%Y %H:%M:%S', localtime);
- print "\nPolling $#apache_mirrors Apache Mirrors";
- print " and Maven Central" unless ($maven_available);
- print "...\n";
-
- my $start = time();
- $maven_available = (200 == $agent->head($maven_url)->code)
- unless ($maven_available);
- @apache_mirrors = &check_mirrors;
- my $stop = time();
- $sleep_interval = $interval - ($stop - $start);
-
- my $num_downloadable_apache_mirrors = $num_apache_mirrors - $#apache_mirrors;
- print "$label is ", ($maven_available ? "" : "not "),
- "downloadable from Maven Central.\n" if defined($maven_url);
- printf "$label is downloadable from %d/%d Apache Mirrors (%0.1f%%)\n",
- $num_downloadable_apache_mirrors, $num_apache_mirrors,
- ($num_downloadable_apache_mirrors*100/$num_apache_mirrors);
-
- last if ($maven_available && 0 == $#apache_mirrors);
-
- if ($sleep_interval > 0) {
- print "Sleeping for $sleep_interval seconds...\n";
- sleep($sleep_interval)
- }
-}
-
-sub check_mirrors {
- my @not_yet_downloadable_apache_mirrors;
- for my $mirror (@apache_mirrors) {
-
- ### print "\n$mirror\n";
- if (200 != $agent->head($mirror)->code) {
- push @not_yet_downloadable_apache_mirrors, $mirror;
- print $details ? "\nFAIL: $mirror\n" : "X";
- } else {
- print ".";
- }
- }
- print "\n";
- return @not_yet_downloadable_apache_mirrors;
-}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d44e7315/dev-tools/scripts/poll-mirrors.py
----------------------------------------------------------------------
diff --git a/dev-tools/scripts/poll-mirrors.py b/dev-tools/scripts/poll-mirrors.py
new file mode 100644
index 0000000..1ff7e54
--- /dev/null
+++ b/dev-tools/scripts/poll-mirrors.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+#
+# vim: softtabstop=2 shiftwidth=2 expandtab
+#
+# Python port of poll-mirrors.pl
+#
+# This script is designed to poll download sites after posting a release
+# and print out notice as each becomes available. The RM can use this
+# script to delay the release announcement until the release can be
+# downloaded.
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import argparse
+import datetime
+import ftplib
+import re
+import sys
+import time
+
+try:
+ from urllib.parse import urlparse
+except:
+ from urlparse import urlparse
+
+try:
+ import http.client as http
+except ImportError:
+ import httplib as http
+
+def p(s):
+ sys.stdout.write(s)
+ sys.stdout.flush()
+
+def mirror_contains_file(url):
+ url = urlparse(url)
+
+ if url.scheme == 'http':
+ return http_file_exists(url)
+ elif url.scheme == 'ftp':
+ return ftp_file_exists(url)
+
+def http_file_exists(url):
+ exists = False
+
+ try:
+ conn = http.HTTPConnection(url.netloc)
+ conn.request('HEAD', url.path)
+ response = conn.getresponse()
+
+ exists = response.status == 200
+ except:
+ pass
+
+ return exists
+
+def ftp_file_exists(url):
+ listing = []
+ try:
+ conn = ftplib.FTP(url.netloc)
+ conn.login()
+ listing = conn.nlst(url.path)
+ conn.quit()
+ except Exception as e:
+ pass
+
+ return len(listing) > 0
+
+def check_url_list(lst):
+ ret = []
+ for url in lst:
+ if mirror_contains_file(url):
+ p('.')
+ else:
+ p('X')
+ ret.append(url)
+
+ return ret
+
+parser = argparse.ArgumentParser(description='Checks that all Lucene mirrors contain a copy of a release')
+parser.add_argument('-version', '-v', help='Lucene version to check', required=True)
+parser.add_argument('-interval', '-i', help='seconds to wait before querying pending mirrors again', type=int, default=300)
+args = parser.parse_args()
+
+try:
+ conn = http.HTTPConnection('www.apache.org')
+ conn.request('GET', '/mirrors/')
+ response = conn.getresponse()
+ html = response.read()
+except Exception as e:
+ p('Unable to fetch the Apache mirrors list!\n')
+ sys.exit(1)
+
+apache_path = 'lucene/java/{}/changes/Changes.html'.format(args.version)
+maven_url = 'http://repo1.maven.org/maven2/' \
+ 'org/apache/lucene/lucene-core/{0}/lucene-core-{0}.pom.asc'.format(args.version)
+maven_available = False
+
+pending_mirrors = []
+for match in re.finditer('<TR>(.*?)</TR>', str(html), re.MULTILINE | re.IGNORECASE | re.DOTALL):
+ row = match.group(1)
+ if not '<TD>ok</TD>' in row:
+ # skip bad mirrors
+ continue
+
+ match = re.search('<A\s+HREF\s*=\s*"([^"]+)"\s*>', row, re.MULTILINE | re.IGNORECASE)
+ if match:
+ pending_mirrors.append(match.group(1) + apache_path)
+
+total_mirrors = len(pending_mirrors)
+
+while True:
+ p('\n' + str(datetime.datetime.now()))
+ p('\nPolling {} Apache Mirrors'.format(len(pending_mirrors)))
+ if not maven_available:
+ p(' and Maven Central')
+ p('...\n')
+
+ if not maven_available:
+ maven_available = mirror_contains_file(maven_url)
+
+ start = time.time()
+ pending_mirrors = check_url_list(pending_mirrors)
+ stop = time.time()
+ remaining = args.interval - (stop - start)
+
+ available_mirrors = total_mirrors - len(pending_mirrors)
+
+ p('\n\n{} is{}downloadable from Maven Central\n'.format(args.version, maven_available and ' ' or ' not '))
+ p('{} is downloadable from {}/{} Apache Mirrors ({:.2f}%)\n'.format(args.version, available_mirrors,
+ total_mirrors,
+ available_mirrors * 100 / total_mirrors))
+ if len(pending_mirrors) == 0:
+ break
+
+ if remaining > 0:
+ p('Sleeping for {} seconds...\n'.format(remaining))
+ time.sleep(remaining)
+
[39/50] [abbrv] lucene-solr:apiv2: SOLR-9319: DELETEREPLICA can
accept a 'count' and remove appropriate replicas
Posted by no...@apache.org.
SOLR-9319: DELETEREPLICA can accept a 'count' and remove appropriate replicas
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e203c9af
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e203c9af
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e203c9af
Branch: refs/heads/apiv2
Commit: e203c9af95461216d9ff39a108c86c5ce4308f5f
Parents: e13f7ae
Author: Noble Paul <no...@apache.org>
Authored: Fri Sep 2 09:27:43 2016 +0530
Committer: Noble Paul <no...@apache.org>
Committed: Fri Sep 2 09:27:43 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 2 +
.../org/apache/solr/cloud/DeleteNodeCmd.java | 1 -
.../org/apache/solr/cloud/DeleteReplicaCmd.java | 144 +++++++++++++++++--
.../org/apache/solr/cloud/ReplaceNodeCmd.java | 3 -
.../solr/handler/admin/CollectionsHandler.java | 14 +-
.../apache/solr/cloud/DeleteReplicaTest.java | 139 +++++++++++++++++-
.../solrj/request/CollectionAdminRequest.java | 12 +-
.../common/params/CollectionAdminParams.java | 9 +-
8 files changed, 292 insertions(+), 32 deletions(-)
----------------------------------------------------------------------
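A sketch of what such a Collections API call might look like over HTTP; host, port, and collection names are placeholders, while the count parameter is the one this commit introduces:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class DeleteReplicaByCountSketch {
  public static void main(String[] args) throws Exception {
    // Deletes 2 replicas from shard1 of 'mycollection'; per the commit, omitting
    // the shard parameter deletes 'count' replicas from every shard instead.
    URL url = new URL("http://localhost:8983/solr/admin/collections"
        + "?action=DELETEREPLICA&collection=mycollection&shard=shard1&count=2&wt=json");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try (BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()))) {
      in.lines().forEach(System.out::println);
    }
  }
}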
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e203c9af/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index adca63e..b22ea5d 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -103,6 +103,8 @@ Bug Fixes
* SOLR-9461: DELETENODE, REPLACENODE should pass down the 'async' param to subcommands (shalin, noble)
+* SOLR-9319: DELETEREPLICA can accept a 'count' and remove appropriate replicas (Nitin Sharma, noble)
+
Optimizations
----------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e203c9af/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java
index 0fd001a..afb95a2 100644
--- a/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java
@@ -27,7 +27,6 @@ import java.util.concurrent.TimeUnit;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.params.CommonAdminParams;
import org.apache.solr.common.util.NamedList;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e203c9af/solr/core/src/java/org/apache/solr/cloud/DeleteReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DeleteReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/DeleteReplicaCmd.java
index 6f5fc62..2311542 100644
--- a/solr/core/src/java/org/apache/solr/cloud/DeleteReplicaCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/DeleteReplicaCmd.java
@@ -18,8 +18,11 @@ package org.apache.solr.cloud;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.Map;
+import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicReference;
@@ -35,6 +38,7 @@ import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.common.util.Utils;
import org.apache.solr.handler.component.ShardHandler;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
@@ -44,6 +48,7 @@ import static org.apache.solr.cloud.OverseerCollectionMessageHandler.ONLY_IF_DOW
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CollectionAdminParams.COUNT_PROP;
import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
@@ -62,36 +67,156 @@ public class DeleteReplicaCmd implements Cmd {
deleteReplica(clusterState, message, results,null);
}
+
@SuppressWarnings("unchecked")
void deleteReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
- throws KeeperException, InterruptedException {
+ throws KeeperException, InterruptedException {
+ log.info("deleteReplica() : {}", Utils.toJSONString(message));
+ boolean parallel = message.getBool("parallel", false);
+
+ // If a count is specified, the strategy needs to be different
+ if (message.getStr(COUNT_PROP) != null) {
+ deleteReplicaBasedOnCount(clusterState, message, results, onComplete, parallel);
+ return;
+ }
+
+
ocmh.checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, REPLICA_PROP);
String collectionName = message.getStr(COLLECTION_PROP);
String shard = message.getStr(SHARD_ID_PROP);
String replicaName = message.getStr(REPLICA_PROP);
- boolean parallel = message.getBool("parallel", false);
DocCollection coll = clusterState.getCollection(collectionName);
Slice slice = coll.getSlice(shard);
if (slice == null) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
- "Invalid shard name : " + shard + " in collection : " + collectionName);
+ "Invalid shard name : " + shard + " in collection : " + collectionName);
}
+
+ deleteCore(slice, collectionName, replicaName, message, shard, results, onComplete, parallel);
+
+ }
+
+
+ /**
+ * Delete replicas based on count for a given collection. If a shard is passed, deletes from
+ * that shard only; otherwise deletes the given number of replicas from every shard of the collection.
+ */
+ void deleteReplicaBasedOnCount(ClusterState clusterState,
+ ZkNodeProps message,
+ NamedList results,
+ Runnable onComplete,
+ boolean parallel)
+ throws KeeperException, InterruptedException {
+ ocmh.checkRequired(message, COLLECTION_PROP, COUNT_PROP);
+ int count = Integer.parseInt(message.getStr(COUNT_PROP));
+ String collectionName = message.getStr(COLLECTION_PROP);
+ String shard = message.getStr(SHARD_ID_PROP);
+ DocCollection coll = clusterState.getCollection(collectionName);
+ Slice slice = null;
+ // Validate the shard if one is passed.
+ if (shard != null) {
+ slice = coll.getSlice(shard);
+ if (slice == null) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+ "Invalid shard name : " + shard + " in collection : " + collectionName);
+ }
+ }
+
+ Map<Slice, Set<String>> shardToReplicasMapping = new HashMap<Slice, Set<String>>();
+ if (slice != null) {
+ Set<String> replicasToBeDeleted = pickReplicasTobeDeleted(slice, shard, collectionName, count);
+ shardToReplicasMapping.put(slice,replicasToBeDeleted);
+ } else {
+
+ // No shard specified: pick replicas to delete from every slice, based on count.
+ Collection<Slice> allSlices = coll.getSlices();
+ for (Slice individualSlice : allSlices) {
+ Set<String> replicasToBeDeleted = pickReplicasTobeDeleted(individualSlice, individualSlice.getName(), collectionName, count);
+ shardToReplicasMapping.put(individualSlice, replicasToBeDeleted);
+ }
+ }
+
+ for (Slice shardSlice: shardToReplicasMapping.keySet()) {
+ String shardId = shardSlice.getName();
+ Set<String> replicas = shardToReplicasMapping.get(shardSlice);
+ // call deleteCore for every picked replica
+ for (String replica: replicas) {
+ log.info("Deleting replica {} for shard {} based on count {}", replica, shardId, count);
+ deleteCore(shardSlice, collectionName, replica, message, shard, results, onComplete, parallel);
+ }
+ results.add("shard_id", shardId);
+ results.add("replicas_deleted", replicas);
+ }
+
+ }
+
+
+ /**
+ * Pick replicas to be deleted. Avoid picking the leader.
+ */
+ private Set<String> pickReplicasTobeDeleted(Slice slice, String shard, String collectionName, int count) {
+ validateReplicaAvailability(slice, shard, collectionName, count);
+ Collection<Replica> allReplicas = slice.getReplicas();
+ Set<String> replicasToBeRemoved = new HashSet<String>();
+ Replica leader = slice.getLeader();
+ for (Replica replica: allReplicas) {
+ if (count == 0) {
+ break;
+ }
+ // Avoid picking the leader, to minimize activity on the cluster.
+ if (leader.getCoreName().equals(replica.getCoreName())) {
+ continue;
+ }
+ replicasToBeRemoved.add(replica.getName());
+ count--;
+ }
+ return replicasToBeRemoved;
+ }
+
+ /**
+ * Validate that there are enough replicas to satisfy the requested removal count. Also error out
+ * if only one replica is available.
+ */
+ private void validateReplicaAvailability(Slice slice, String shard, String collectionName, int count) {
+ // If a specific shard is passed, validate that it has any replicas, and more than just one
+ if (slice != null) {
+ Collection<Replica> allReplicasForShard = slice.getReplicas();
+ if (allReplicasForShard == null) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No replicas found in shard/collection: " +
+ shard + "/" + collectionName);
+ }
+
+
+ if (allReplicasForShard.size() == 1) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There is only one replica available in shard/collection: " +
+ shard + "/" + collectionName + ". Cannot delete that.");
+ }
+
+ if (allReplicasForShard.size() <= count) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There are lesser num replicas requested to be deleted than are available in shard/collection : " +
+ shard + "/" + collectionName + " Requested: " + count + " Available: " + allReplicasForShard.size() + ".");
+ }
+ }
+ }
+
+ void deleteCore(Slice slice, String collectionName, String replicaName,ZkNodeProps message, String shard, NamedList results, Runnable onComplete, boolean parallel) throws KeeperException, InterruptedException {
+
Replica replica = slice.getReplica(replicaName);
if (replica == null) {
ArrayList<String> l = new ArrayList<>();
for (Replica r : slice.getReplicas())
l.add(r.getName());
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Invalid replica : " + replicaName + " in shard/collection : "
- + shard + "/" + collectionName + " available replicas are " + StrUtils.join(l, ','));
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Invalid replica : " + replicaName + " in shard/collection : " +
+ shard + "/" + collectionName + " available replicas are " + StrUtils.join(l, ','));
}
// If users are being safe and only want to remove a shard if it is down, they can specify onlyIfDown=true
// on the command.
if (Boolean.parseBoolean(message.getStr(ONLY_IF_DOWN)) && replica.getState() != Replica.State.DOWN) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
- "Attempted to remove replica : " + collectionName + "/" + shard + "/" + replicaName
- + " with onlyIfDown='true', but state is '" + replica.getStr(ZkStateReader.STATE_PROP) + "'");
+ "Attempted to remove replica : " + collectionName + "/" + shard + "/" + replicaName +
+ " with onlyIfDown='true', but state is '" + replica.getStr(ZkStateReader.STATE_PROP) + "'");
}
ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
@@ -140,7 +265,7 @@ public class DeleteReplicaCmd implements Cmd {
try {
if (!callable.call())
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
- "Could not remove replica : " + collectionName + "/" + shard + "/" + replicaName);
+ "Could not remove replica : " + collectionName + "/" + shard + "/" + replicaName);
} catch (InterruptedException | KeeperException e) {
throw e;
} catch (Exception ex) {
@@ -150,6 +275,7 @@ public class DeleteReplicaCmd implements Cmd {
} else {
ocmh.tpe.submit(callable);
}
+
}
- }
+}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e203c9af/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
index ad02fc0..92c9afe 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
@@ -34,10 +34,8 @@ import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CommonAdminParams;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -45,7 +43,6 @@ import org.slf4j.LoggerFactory;
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.util.StrUtils.formatString;
public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e203c9af/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
index a9703f3..3e134d5 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
@@ -116,6 +116,7 @@ import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_VALUE_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CollectionAdminParams.COUNT_PROP;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.*;
import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
import static org.apache.solr.common.params.CommonParams.NAME;
@@ -491,16 +492,15 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
}),
DELETEREPLICA_OP(DELETEREPLICA, (req, rsp, h) -> {
Map<String, Object> map = req.getParams().required().getAll(null,
- COLLECTION_PROP,
- SHARD_ID_PROP,
- REPLICA_PROP);
+ COLLECTION_PROP);
- req.getParams().getAll(map,
+ return req.getParams().getAll(map,
DELETE_INDEX,
DELETE_DATA_DIR,
- DELETE_INSTANCE_DIR);
-
- return req.getParams().getAll(map, ONLY_IF_DOWN);
+ DELETE_INSTANCE_DIR,
+ COUNT_PROP, REPLICA_PROP,
+ SHARD_ID_PROP,
+ ONLY_IF_DOWN);
}),
MIGRATE_OP(MIGRATE, (req, rsp, h) -> {
Map<String, Object> map = req.getParams().required().getAll(null, COLLECTION_PROP, "split.key", "target.collection");
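With COUNT_PROP now accepted by DELETEREPLICA_OP above, a count-based delete can be issued through the Collections API. A minimal SolrJ sketch, assuming a connected CloudSolrClient and hypothetical collection/shard names; it mirrors tryRemoveReplicaByCountAndShard in the test diff further down.

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.QueryRequest;
    import org.apache.solr.common.params.CollectionParams;
    import org.apache.solr.common.params.ModifiableSolrParams;

    void deleteTwoNonLeaderReplicas(CloudSolrClient client) throws Exception {
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set("action", CollectionParams.CollectionAction.DELETEREPLICA.toLower());
      params.set("collection", "mycoll"); // hypothetical collection
      params.set("shard", "shard1");      // optional: omit to delete `count` replicas from every shard
      params.set("count", 2);
      QueryRequest request = new QueryRequest(params);
      request.setPath("/admin/collections");
      client.request(request);
    }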
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e203c9af/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java b/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
index 403f14b..e1eb027 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
@@ -19,6 +19,7 @@ package org.apache.solr.cloud;
import java.io.File;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
+import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -51,6 +52,9 @@ import static org.apache.solr.cloud.OverseerCollectionMessageHandler.ONLY_IF_DOW
import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
import static org.apache.solr.common.util.Utils.makeMap;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.REQUESTSTATUS;
+import org.apache.solr.client.solrj.response.RequestStatusState;
+
public class DeleteReplicaTest extends AbstractFullDistribZkTestBase {
@@ -120,6 +124,7 @@ public class DeleteReplicaTest extends AbstractFullDistribZkTestBase {
}
}
+
protected void tryToRemoveOnlyIfDown(String collectionName, CloudSolrClient client, Replica replica, String shard) throws IOException, SolrServerException {
Map m = makeMap("collection", collectionName,
"action", DELETEREPLICA.toLower(),
@@ -133,10 +138,10 @@ public class DeleteReplicaTest extends AbstractFullDistribZkTestBase {
}
static void removeAndWaitForReplicaGone(String COLL_NAME,
- CloudSolrClient client, Replica replica, String shard)
- throws SolrServerException, IOException, InterruptedException {
+ CloudSolrClient client, Replica replica, String shard)
+ throws SolrServerException, IOException, InterruptedException {
Map m = makeMap("collection", COLL_NAME, "action", DELETEREPLICA.toLower(), "shard",
- shard, "replica", replica.getName());
+ shard, "replica", replica.getName());
SolrParams params = new MapSolrParams(m);
SolrRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
@@ -146,11 +151,11 @@ public class DeleteReplicaTest extends AbstractFullDistribZkTestBase {
DocCollection testcoll = null;
while (! timeout.hasTimedOut()) {
testcoll = client.getZkStateReader()
- .getClusterState().getCollection(COLL_NAME);
+ .getClusterState().getCollection(COLL_NAME);
success = testcoll.getSlice(shard).getReplica(replica.getName()) == null;
if (success) {
log.info("replica cleaned up {}/{} core {}",
- shard + "/" + replica.getName(), replica.getStr("core"));
+ shard + "/" + replica.getName(), replica.getStr("core"));
log.info("current state {}", testcoll);
break;
}
@@ -159,6 +164,44 @@ public class DeleteReplicaTest extends AbstractFullDistribZkTestBase {
assertTrue("Replica not cleaned up", success);
}
+
+ protected void tryRemoveReplicaByCountAndShard(String collectionName, CloudSolrClient client, int count, String shard) throws IOException, SolrServerException {
+ Map m = makeMap("collection", collectionName,
+ "action", DELETEREPLICA.toLower(),
+ "shard", shard,
+ "count", count);
+ SolrParams params = new MapSolrParams(m);
+ SolrRequest request = new QueryRequest(params);
+ request.setPath("/admin/collections");
+ client.request(request);
+ }
+
+ protected void tryRemoveReplicaByCountAsync(String collectionName, CloudSolrClient client, int count, String requestid) throws IOException, SolrServerException {
+ Map m = makeMap("collection", collectionName,
+ "action", DELETEREPLICA.toLower(),
+ "count", count,
+ "async", requestid);
+ SolrParams params = new MapSolrParams(m);
+ SolrRequest request = new QueryRequest(params);
+ request.setPath("/admin/collections");
+ client.request(request);
+ }
+
+ protected String trackRequestStatus(CloudSolrClient client, String requestId) throws IOException, SolrServerException {
+ Map m = makeMap("action", REQUESTSTATUS.toLower(),
+ "requestid", requestId);
+ SolrParams params = new MapSolrParams(m);
+ SolrRequest request = new QueryRequest(params);
+ request.setPath("/admin/collections");
+ NamedList<Object> resultsList = client.request(request);
+ NamedList innerResponse = (NamedList) resultsList.get("status");
+ return (String) innerResponse.get("state");
+ }
+
protected void createCollection(String COLL_NAME, CloudSolrClient client) throws Exception {
int replicationFactor = 2;
int numShards = 2;
@@ -212,4 +255,90 @@ public class DeleteReplicaTest extends AbstractFullDistribZkTestBase {
assertFalse("Instance directory still exists", FileUtils.fileExists(instanceDir));
assertFalse("DataDirectory still exists", FileUtils.fileExists(dataDir));
}
+
+ @Test
+ @ShardsFixed(num = 4)
+ public void deleteReplicaByCount() throws Exception {
+ String collectionName = "deleteByCount";
+ try (CloudSolrClient client = createCloudClient(null)) {
+ createCollection(collectionName, 1, 3, 5);
+
+ waitForRecoveriesToFinish(collectionName, false);
+
+ DocCollection testcoll = getCommonCloudSolrClient().getZkStateReader()
+ .getClusterState().getCollection(collectionName);
+ Collection<Slice> slices = testcoll.getActiveSlices();
+ assertEquals(slices.size(), 1);
+ for (Slice individualShard: slices) {
+ assertEquals(individualShard.getReplicas().size(),3);
+ }
+
+ try {
+ // Delete 2 of the 3 replicas (non-leader ones) for the given shard; this is expected to succeed
+ tryRemoveReplicaByCountAndShard(collectionName, client, 2, "shard1");
+ testcoll = getCommonCloudSolrClient().getZkStateReader()
+ .getClusterState().getCollection(collectionName);
+ slices = testcoll.getActiveSlices();
+ assertEquals(slices.size(), 1);
+ for (Slice individualShard: slices) {
+ assertEquals(individualShard.getReplicas().size(),1);
+ }
+
+ } catch (SolrException se) {
+ fail("Should have been able to remove the replica successfully");
+ }
+
+ }
+ }
+
+ @Test
+ @ShardsFixed(num = 4)
+ public void deleteReplicaByCountForAllShards() throws Exception {
+ String collectionName = "deleteByCountNew";
+ try (CloudSolrClient client = createCloudClient(null)) {
+ createCollection(collectionName, 2, 2, 5);
+
+ waitForRecoveriesToFinish(collectionName, false);
+
+ DocCollection testcoll = getCommonCloudSolrClient().getZkStateReader()
+ .getClusterState().getCollection(collectionName);
+ Collection<Slice> slices = testcoll.getActiveSlices();
+ assertEquals(slices.size(), 2);
+ for (Slice individualShard: slices) {
+ assertEquals(individualShard.getReplicas().size(),2);
+ }
+
+ String requestIdAsync = "1000";
+
+ try {
+ // Delete 1 replica (a non-leader one) from every shard, asynchronously; this is expected to succeed
+ tryRemoveReplicaByCountAsync(collectionName, client, 1, requestIdAsync);
+
+ // Make sure the request completes
+ String requestStatus = trackRequestStatus(client, requestIdAsync);
+
+ while ((!requestStatus.equals(RequestStatusState.COMPLETED.getKey())) && (!requestStatus.equals(RequestStatusState.FAILED.getKey()))) {
+ requestStatus = trackRequestStatus(client, requestIdAsync);
+ }
+
+ testcoll = getCommonCloudSolrClient().getZkStateReader()
+ .getClusterState().getCollection(collectionName);
+ slices = testcoll.getActiveSlices();
+ assertEquals(slices.size(), 2);
+ for (Slice individualShard: slices) {
+ assertEquals(individualShard.getReplicas().size(),1);
+ }
+
+ } catch (SolrException se) {
+ fail("Should have been able to remove the replica successfully");
+ }
+
+ }
+
+ }
+
}
+
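One caveat on the async test above: the status loop calls trackRequestStatus back-to-back with no pause. A sketch of the same wait with a small sleep between polls; the 500 ms figure is illustrative, and the surrounding method would also need to handle InterruptedException.

    String state = trackRequestStatus(client, requestIdAsync);
    while (!state.equals(RequestStatusState.COMPLETED.getKey())
        && !state.equals(RequestStatusState.FAILED.getKey())) {
      Thread.sleep(500); // back off between polls instead of spinning
      state = trackRequestStatus(client, requestIdAsync);
    }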
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e203c9af/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
index 0a0a191..5402785 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
@@ -43,6 +43,8 @@ import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.ContentStream;
import org.apache.solr.common.util.NamedList;
+import static org.apache.solr.common.params.CollectionAdminParams.COUNT_PROP;
+
/**
* This class is experimental and subject to change.
*
@@ -1531,6 +1533,7 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
protected Boolean onlyIfDown;
private Boolean deleteDataDir;
private Boolean deleteInstanceDir;
+ private Integer count;
private Boolean deleteIndexDir;
/**
@@ -1579,10 +1582,8 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
return this;
}
- @Override
- @Deprecated
- public DeleteReplica setAsyncId(String id) {
- this.asyncId = id;
+ public DeleteReplica setCount(Integer count) {
+ this.count = count;
return this;
}
@@ -1603,6 +1604,9 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
if (deleteIndexDir != null) {
params.set(CoreAdminParams.DELETE_INDEX, deleteIndexDir);
}
+ if (count != null) {
+ params.set(COUNT_PROP, count);
+ }
return params;
}
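For SolrJ users, the new setter drives the same feature from the request object. A hedged usage sketch, assuming the usual collection/shard setters on DeleteReplica remain available in this version; only setCount is introduced by this patch, and the names below are illustrative.

    CollectionAdminRequest.DeleteReplica delete = new CollectionAdminRequest.DeleteReplica();
    delete.setCollectionName("mycoll"); // hypothetical collection
    delete.setShardName("shard1");
    delete.setCount(2);                 // serialized as the "count" request parameter
    delete.process(cloudClient);        // cloudClient: a connected SolrClient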
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e203c9af/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java b/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java
index e817dd9..a8686a1 100644
--- a/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java
+++ b/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java
@@ -16,11 +16,14 @@
*/
package org.apache.solr.common.params;
-public abstract class CollectionAdminParams {
+public interface CollectionAdminParams {
/* Param used by DELETESTATUS call to clear all stored responses */
- public static final String FLUSH = "flush";
+ String FLUSH = "flush";
+
+ String COLLECTION = "collection";
+
+ String COUNT_PROP = "count";
- public static final String COLLECTION = "collection";
}
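A small language note on the class-to-interface change above: fields declared in a Java interface are implicitly public static final, so dropping the explicit modifiers leaves the constants' visibility and binding unchanged.

    interface Example {
      String COUNT_PROP = "count";
      // equivalent to: public static final String COUNT_PROP = "count";
    }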
[50/50] [abbrv] lucene-solr:apiv2: SOLR-8029: Merge remote-tracking
branch 'remotes/origin/master' into apiv2
Posted by no...@apache.org.
SOLR-8029: Merge remote-tracking branch 'remotes/origin/master' into apiv2
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/b49d9027
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/b49d9027
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/b49d9027
Branch: refs/heads/apiv2
Commit: b49d9027b346e65853f6d1210f45d5c918760e70
Parents: 9241198 1a61fb6
Author: Noble Paul <no...@apache.org>
Authored: Wed Sep 7 15:26:10 2016 +0530
Committer: Noble Paul <no...@apache.org>
Committed: Wed Sep 7 15:26:10 2016 +0530
----------------------------------------------------------------------
.../lucene/spatial-extras/spatial-extras.iml | 1 +
.../idea/solr/contrib/analytics/analytics.iml | 1 +
dev-tools/idea/solr/core/src/java/solr-core.iml | 1 +
.../idea/solr/core/src/solr-core-tests.iml | 1 +
dev-tools/scripts/addVersion.py | 6 +-
dev-tools/scripts/buildAndPushRelease.py | 2 +-
dev-tools/scripts/poll-mirrors.pl | 155 --
dev-tools/scripts/poll-mirrors.py | 153 ++
dev-tools/scripts/smokeTestRelease.py | 17 +-
lucene/CHANGES.txt | 44 +-
.../analysis/miscellaneous/TestTrimFilter.java | 46 +-
.../apache/lucene/analysis/ja/util/CSVUtil.java | 2 +-
.../apache/lucene/analysis/ja/TestCSVUtil.java | 52 +
.../lucene/codecs/lucene50/Lucene50Codec.java | 170 --
.../lucene50/Lucene50DocValuesConsumer.java | 658 ------
.../lucene50/Lucene50DocValuesFormat.java | 115 -
.../lucene50/Lucene50DocValuesProducer.java | 1299 ------------
.../codecs/lucene50/Lucene50NormsFormat.java | 62 -
.../codecs/lucene50/Lucene50NormsProducer.java | 481 -----
.../lucene50/Lucene50SegmentInfoFormat.java | 21 +-
.../lucene/codecs/lucene53/Lucene53Codec.java | 176 --
.../apache/lucene/codecs/lucene53/package.html | 25 -
.../lucene/codecs/lucene54/Lucene54Codec.java | 178 --
.../apache/lucene/codecs/lucene54/package.html | 25 -
.../apache/lucene/legacy/LegacyDoubleField.java | 174 ++
.../org/apache/lucene/legacy/LegacyField.java | 90 +
.../apache/lucene/legacy/LegacyFieldType.java | 149 ++
.../apache/lucene/legacy/LegacyFloatField.java | 174 ++
.../apache/lucene/legacy/LegacyIntField.java | 175 ++
.../apache/lucene/legacy/LegacyLongField.java | 184 ++
.../lucene/legacy/LegacyNumericRangeQuery.java | 537 +++++
.../lucene/legacy/LegacyNumericTokenStream.java | 357 ++++
.../apache/lucene/legacy/LegacyNumericType.java | 34 +
.../lucene/legacy/LegacyNumericUtils.java | 510 +++++
.../org/apache/lucene/legacy/package-info.java | 21 +
.../services/org.apache.lucene.codecs.Codec | 3 -
.../org.apache.lucene.codecs.DocValuesFormat | 1 -
.../codecs/lucene50/Lucene50NormsConsumer.java | 403 ----
.../lucene/codecs/lucene50/Lucene50RWCodec.java | 41 -
.../codecs/lucene50/Lucene50RWNormsFormat.java | 36 -
.../lucene50/Lucene50RWSegmentInfoFormat.java | 21 +-
.../lucene50/TestLucene50DocValuesFormat.java | 281 ---
.../lucene50/TestLucene50NormsFormat.java | 130 --
.../index/TestBackwardsCompatibility.java | 12 +-
.../org/apache/lucene/index/index.6.2.0-cfs.zip | Bin 0 -> 15880 bytes
.../apache/lucene/index/index.6.2.0-nocfs.zip | Bin 0 -> 15867 bytes
.../apache/lucene/legacy/TestLegacyField.java | 196 ++
.../lucene/legacy/TestLegacyFieldReuse.java | 81 +
.../lucene/legacy/TestLegacyNumericUtils.java | 571 +++++
.../apache/lucene/legacy/TestLegacyTerms.java | 164 ++
.../TestMultiValuedNumericRangeQuery.java | 84 +
.../lucene/legacy/TestNumericRangeQuery32.java | 461 ++++
.../lucene/legacy/TestNumericRangeQuery64.java | 490 +++++
.../lucene/legacy/TestNumericTokenStream.java | 188 ++
.../analysis/LegacyNumericTokenStream.java | 357 ----
.../org/apache/lucene/analysis/TokenStream.java | 7 +-
.../PackedTokenAttributeImpl.java | 11 +
.../PositionIncrementAttributeImpl.java | 5 +
.../lucene/codecs/MutablePointsReader.java | 6 +-
.../lucene50/Lucene50FieldInfosFormat.java | 12 +-
.../java/org/apache/lucene/document/Field.java | 39 +-
.../org/apache/lucene/document/FieldType.java | 98 +-
.../lucene/document/LegacyDoubleField.java | 172 --
.../lucene/document/LegacyFloatField.java | 174 --
.../apache/lucene/document/LegacyIntField.java | 174 --
.../apache/lucene/document/LegacyLongField.java | 182 --
.../java/org/apache/lucene/geo/Rectangle.java | 29 +
.../index/DocumentsWriterDeleteQueue.java | 16 +-
.../org/apache/lucene/index/IndexWriter.java | 18 -
.../org/apache/lucene/index/LogMergePolicy.java | 6 +-
.../apache/lucene/index/PointValuesWriter.java | 14 +-
.../org/apache/lucene/index/SegmentInfos.java | 24 +-
.../org/apache/lucene/search/BooleanQuery.java | 42 +
.../org/apache/lucene/search/LRUQueryCache.java | 1 +
.../lucene/search/LegacyNumericRangeQuery.java | 536 -----
.../org/apache/lucene/search/PrefixQuery.java | 5 +-
.../apache/lucene/store/ByteBufferGuard.java | 136 ++
.../lucene/store/ByteBufferIndexInput.java | 104 +-
.../java/org/apache/lucene/store/DataInput.java | 33 -
.../org/apache/lucene/store/DataOutput.java | 45 -
.../org/apache/lucene/store/MMapDirectory.java | 8 +-
.../org/apache/lucene/util/AttributeImpl.java | 11 +
.../org/apache/lucene/util/AttributeSource.java | 10 +
.../org/apache/lucene/util/ByteBlockPool.java | 21 +
.../apache/lucene/util/InPlaceMergeSorter.java | 4 +-
.../org/apache/lucene/util/IntroSelector.java | 2 +
.../org/apache/lucene/util/IntroSorter.java | 13 +-
.../apache/lucene/util/LegacyNumericUtils.java | 508 -----
.../org/apache/lucene/util/MSBRadixSorter.java | 109 +-
.../org/apache/lucene/util/RadixSelector.java | 94 +-
.../src/java/org/apache/lucene/util/Sorter.java | 59 +-
.../java/org/apache/lucene/util/Version.java | 7 +
.../apache/lucene/util/automaton/Automaton.java | 4 +-
.../org/apache/lucene/util/bkd/BKDWriter.java | 56 +-
.../util/bkd/MutablePointsReaderUtils.java | 19 +-
.../lucene/analysis/TestNumericTokenStream.java | 169 --
.../TestGrowableByteArrayDataOutput.java | 2 +-
.../org/apache/lucene/document/TestField.java | 94 -
.../apache/lucene/document/TestFieldType.java | 9 -
.../org/apache/lucene/geo/TestGeoUtils.java | 11 +-
.../org/apache/lucene/geo/TestPolygon2D.java | 9 +-
.../index/TestAllFilesCheckIndexHeader.java | 7 +-
.../org/apache/lucene/index/TestFieldReuse.java | 53 +-
.../apache/lucene/index/TestIndexSorting.java | 2 +-
.../index/TestIndexingSequenceNumbers.java | 4 +
.../test/org/apache/lucene/index/TestTerms.java | 134 --
.../org/apache/lucene/search/TestBoolean2.java | 13 +-
.../lucene/search/TestBooleanRewrites.java | 87 +
.../TestMultiValuedNumericRangeQuery.java | 80 -
.../lucene/search/TestNumericRangeQuery32.java | 589 ------
.../lucene/search/TestNumericRangeQuery64.java | 623 ------
.../lucene/search/TestSearcherManager.java | 142 ++
.../TestSimpleExplanationsWithFillerDocs.java | 2 +
.../apache/lucene/store/TestMmapDirectory.java | 38 +
.../apache/lucene/util/TestByteBlockPool.java | 25 +-
.../lucene/util/TestLegacyNumericUtils.java | 564 -----
.../apache/lucene/util/TestMSBRadixSorter.java | 68 +
.../apache/lucene/util/TestRadixSelector.java | 31 +-
.../util/bkd/TestMutablePointsReaderUtils.java | 49 +-
.../CustomSeparatorBreakIterator.java | 4 +-
.../postingshighlight/WholeBreakIterator.java | 4 +-
.../search/join/DocValuesTermsCollector.java | 83 -
.../org/apache/lucene/search/join/JoinUtil.java | 45 -
.../search/join/TermsIncludingScoreQuery.java | 9 -
.../apache/lucene/search/join/TestJoinUtil.java | 23 +-
.../memory/TestMemoryIndexAgainstRAMDir.java | 7 -
.../search/TestDiversifiedTopDocsCollector.java | 5 +-
.../lucene/queries/mlt/TestMoreLikeThis.java | 1 +
.../flexible/standard/StandardQueryParser.java | 19 -
.../LegacyNumericRangeQueryNodeBuilder.java | 93 -
.../builders/StandardQueryTreeBuilder.java | 4 -
.../standard/config/LegacyNumericConfig.java | 166 --
.../LegacyNumericFieldConfigListener.java | 75 -
.../config/StandardQueryConfigHandler.java | 29 +-
.../standard/nodes/LegacyNumericQueryNode.java | 153 --
.../nodes/LegacyNumericRangeQueryNode.java | 153 --
.../LegacyNumericQueryNodeProcessor.java | 154 --
.../LegacyNumericRangeQueryNodeProcessor.java | 170 --
.../StandardQueryNodeProcessorPipeline.java | 2 -
.../lucene/queryparser/xml/CoreParser.java | 1 -
.../LegacyNumericRangeQueryBuilder.java | 135 --
.../standard/TestLegacyNumericQueryParser.java | 535 -----
.../xml/CoreParserTestIndexData.java | 2 -
.../queryparser/xml/LegacyNumericRangeQuery.xml | 31 -
.../LegacyNumericRangeQueryWithoutLowerTerm.xml | 31 -
.../xml/LegacyNumericRangeQueryWithoutRange.xml | 31 -
.../LegacyNumericRangeQueryWithoutUpperTerm.xml | 31 -
.../lucene/queryparser/xml/TestCoreParser.java | 20 -
.../builders/TestNumericRangeQueryBuilder.java | 179 --
.../lucene/replicator/nrt/FileMetaData.java | 5 +
.../lucene/replicator/nrt/SimpleCopyJob.java | 2 +
.../apache/lucene/document/FloatRangeField.java | 262 +++
.../lucene/document/InetAddressPoint.java | 2 +-
.../apache/lucene/document/IntRangeField.java | 262 +++
.../apache/lucene/document/LongRangeField.java | 260 +++
.../search/BaseRangeFieldQueryTestCase.java | 238 +--
.../search/TestDoubleRangeFieldQueries.java | 154 +-
.../search/TestFloatRangeFieldQueries.java | 240 +++
.../lucene/search/TestIntRangeFieldQueries.java | 240 +++
.../search/TestLongRangeFieldQueries.java | 240 +++
lucene/spatial-extras/build.xml | 8 +-
.../lucene/spatial/bbox/BBoxStrategy.java | 39 +-
.../prefix/BytesRefIteratorTokenStream.java | 2 +-
.../spatial/vector/PointVectorStrategy.java | 37 +-
.../lucene/spatial/bbox/TestBBoxStrategy.java | 8 +-
.../apache/lucene/spatial3d/geom/Bounds.java | 7 +
.../spatial3d/geom/GeoConcavePolygon.java | 63 +-
.../lucene/spatial3d/geom/GeoConvexPolygon.java | 59 +-
.../lucene/spatial3d/geom/LatLonBounds.java | 5 +
.../org/apache/lucene/spatial3d/geom/Plane.java | 101 +-
.../apache/lucene/spatial3d/geom/XYZBounds.java | 11 +
.../lucene/spatial3d/geom/GeoBBoxTest.java | 15 +
.../lucene/spatial3d/geom/GeoCircleTest.java | 14 +
.../lucene/spatial3d/geom/GeoPolygonTest.java | 66 +-
.../lucene/store/BaseDirectoryTestCase.java | 30 -
.../lucene/store/MockDirectoryWrapper.java | 4 +-
.../lucene/store/MockIndexInputWrapper.java | 47 +-
.../store/SlowClosingMockIndexInputWrapper.java | 2 +-
.../store/SlowOpeningMockIndexInputWrapper.java | 2 +-
.../java/org/apache/lucene/util/TestUtil.java | 13 +-
.../lucene/store/TestMockDirectoryWrapper.java | 36 +
lucene/tools/junit4/solr-tests.policy | 1 +
lucene/tools/junit4/tests.policy | 5 +-
solr/CHANGES.txt | 159 ++
.../accumulator/FacetingAccumulator.java | 2 +-
.../solr/analytics/util/AnalyticsParsers.java | 2 +-
.../util/valuesource/DateFieldSource.java | 2 +-
.../solr/handler/dataimport/JdbcDataSource.java | 6 +-
.../dataimport/SimplePropertiesWriter.java | 33 +-
.../handler/dataimport/TestJdbcDataSource.java | 39 +
.../handler/extraction/XLSXResponseWriter.java | 414 ++++
.../extraction/solr/collection1/conf/schema.xml | 2 +
.../extraction/TestXLSXResponseWriter.java | 257 +++
.../org/apache/solr/cloud/AddReplicaCmd.java | 192 ++
.../java/org/apache/solr/cloud/BackupCmd.java | 132 ++
.../org/apache/solr/cloud/CreateAliasCmd.java | 101 +
.../apache/solr/cloud/CreateCollectionCmd.java | 291 +++
.../org/apache/solr/cloud/CreateShardCmd.java | 120 ++
.../org/apache/solr/cloud/DeleteAliasCmd.java | 95 +
.../apache/solr/cloud/DeleteCollectionCmd.java | 121 ++
.../org/apache/solr/cloud/DeleteNodeCmd.java | 98 +
.../org/apache/solr/cloud/DeleteReplicaCmd.java | 281 +++
.../org/apache/solr/cloud/DeleteShardCmd.java | 180 ++
.../java/org/apache/solr/cloud/MigrateCmd.java | 333 +++
.../OverseerCollectionConfigSetProcessor.java | 22 +-
.../cloud/OverseerCollectionMessageHandler.java | 1979 ++----------------
.../org/apache/solr/cloud/OverseerRoleCmd.java | 102 +
.../apache/solr/cloud/OverseerStatusCmd.java | 122 ++
.../solr/cloud/OverseerTaskProcessor.java | 6 +-
.../org/apache/solr/cloud/ReplaceNodeCmd.java | 166 ++
.../java/org/apache/solr/cloud/RestoreCmd.java | 243 +++
.../org/apache/solr/cloud/SplitShardCmd.java | 472 +++++
.../apache/solr/cloud/rule/ImplicitSnitch.java | 10 +-
.../org/apache/solr/core/CoreContainer.java | 3 +-
.../java/org/apache/solr/core/CoreSorter.java | 12 +-
.../org/apache/solr/core/RequestParams.java | 7 +-
.../java/org/apache/solr/core/SolrConfig.java | 29 +-
.../src/java/org/apache/solr/core/SolrCore.java | 28 +-
.../apache/solr/core/backup/BackupManager.java | 34 +-
.../backup/repository/BackupRepository.java | 14 +-
.../backup/repository/HdfsBackupRepository.java | 29 +-
.../repository/LocalFileSystemRepository.java | 27 +-
.../org/apache/solr/handler/BlobHandler.java | 2 +-
.../org/apache/solr/handler/CdcrParams.java | 10 +-
.../org/apache/solr/handler/CdcrReplicator.java | 8 +-
.../solr/handler/CdcrReplicatorManager.java | 242 ++-
.../solr/handler/CdcrReplicatorScheduler.java | 6 +-
.../solr/handler/CdcrReplicatorState.java | 23 +
.../apache/solr/handler/CdcrRequestHandler.java | 233 ++-
.../org/apache/solr/handler/IndexFetcher.java | 10 +-
.../solr/handler/MoreLikeThisHandler.java | 2 +-
.../apache/solr/handler/ReplicationHandler.java | 29 +-
.../org/apache/solr/handler/RestoreCore.java | 6 +-
.../org/apache/solr/handler/SchemaHandler.java | 2 +-
.../org/apache/solr/handler/SnapShooter.java | 11 +-
.../apache/solr/handler/SolrConfigHandler.java | 25 +-
.../org/apache/solr/handler/StreamHandler.java | 2 +
.../solr/handler/admin/CollectionsHandler.java | 27 +-
.../solr/handler/admin/CoreAdminOperation.java | 7 +-
.../solr/handler/component/ExpandComponent.java | 4 +-
.../solr/handler/component/FacetComponent.java | 31 +-
.../solr/handler/component/QueryComponent.java | 7 +-
.../handler/component/RealTimeGetComponent.java | 67 +-
.../handler/component/SpellCheckComponent.java | 3 +-
.../solr/handler/component/StatsField.java | 17 +-
.../org/apache/solr/request/IntervalFacets.java | 2 +-
.../org/apache/solr/request/NumericFacets.java | 3 +-
.../org/apache/solr/request/SimpleFacets.java | 73 +-
.../solr/request/macro/MacroExpander.java | 12 +
.../transform/ChildDocTransformerFactory.java | 4 +-
.../transform/SubQueryAugmenterFactory.java | 17 +
.../solr/rest/ManagedResourceStorage.java | 9 +-
.../java/org/apache/solr/schema/BBoxField.java | 7 +-
.../java/org/apache/solr/schema/EnumField.java | 17 +-
.../java/org/apache/solr/schema/FieldType.java | 3 +-
.../org/apache/solr/schema/IndexSchema.java | 6 +-
.../schema/SpatialPointVectorFieldType.java | 9 +-
.../org/apache/solr/schema/TrieDoubleField.java | 2 +-
.../java/org/apache/solr/schema/TrieField.java | 44 +-
.../org/apache/solr/schema/TrieFloatField.java | 2 +-
.../org/apache/solr/schema/TrieIntField.java | 2 +-
.../org/apache/solr/schema/TrieLongField.java | 2 +-
.../org/apache/solr/search/CacheConfig.java | 24 +-
.../solr/search/CollapsingQParserPlugin.java | 10 -
.../java/org/apache/solr/search/DocSetUtil.java | 33 +
.../java/org/apache/solr/search/Grouping.java | 2 +-
.../solr/search/IGainTermsQParserPlugin.java | 240 +++
.../apache/solr/search/JoinQParserPlugin.java | 2 +-
.../search/LegacyNumericRangeQueryBuilder.java | 136 ++
.../java/org/apache/solr/search/QParser.java | 11 +
.../org/apache/solr/search/QParserPlugin.java | 12 +-
.../org/apache/solr/search/QueryParsing.java | 2 +-
.../apache/solr/search/QueryWrapperFilter.java | 2 +-
.../apache/solr/search/ReRankQParserPlugin.java | 119 +-
.../org/apache/solr/search/ReRankWeight.java | 48 +
.../org/apache/solr/search/ReturnFields.java | 7 +
.../org/apache/solr/search/SolrCoreParser.java | 2 +-
.../apache/solr/search/SolrFieldCacheMBean.java | 6 +-
.../apache/solr/search/SolrIndexSearcher.java | 92 +-
.../apache/solr/search/SolrReturnFields.java | 11 +
.../TextLogisticRegressionQParserPlugin.java | 283 +++
.../apache/solr/search/facet/FacetField.java | 1062 +---------
.../solr/search/facet/FacetFieldMerger.java | 211 ++
.../solr/search/facet/FacetFieldProcessor.java | 501 +++++
.../facet/FacetFieldProcessorByArray.java | 95 +
.../facet/FacetFieldProcessorByArrayDV.java | 294 +++
.../facet/FacetFieldProcessorByArrayUIF.java | 71 +
.../FacetFieldProcessorByEnumTermsStream.java | 356 ++++
.../facet/FacetFieldProcessorByHashDV.java | 442 ++++
.../search/facet/FacetFieldProcessorDV.java | 291 ---
.../facet/FacetFieldProcessorNumeric.java | 443 ----
.../apache/solr/search/facet/FacetMerger.java | 126 +-
.../apache/solr/search/facet/FacetModule.java | 550 ++---
.../solr/search/facet/FacetProcessor.java | 203 +-
.../apache/solr/search/facet/FacetQuery.java | 5 -
.../apache/solr/search/facet/FacetRange.java | 13 +-
.../solr/search/facet/FacetRangeMerger.java | 123 ++
.../apache/solr/search/facet/FacetRequest.java | 71 +-
.../search/facet/FacetRequestSortedMerger.java | 234 +++
.../org/apache/solr/search/facet/HLLAgg.java | 2 +-
.../apache/solr/search/facet/PercentileAgg.java | 2 +-
.../org/apache/solr/search/facet/SlotAcc.java | 15 +-
.../solr/search/facet/UnInvertedField.java | 6 +-
.../org/apache/solr/search/facet/UniqueAgg.java | 2 +-
.../distributed/command/QueryCommand.java | 2 +-
.../join/BlockJoinDocSetFacetComponent.java | 34 +-
.../search/join/BlockJoinFacetAccsHolder.java | 97 +
.../search/join/BlockJoinFacetCollector.java | 131 --
.../search/join/BlockJoinFacetComponent.java | 165 +-
.../join/BlockJoinFacetComponentSupport.java | 156 ++
.../search/join/ScoreJoinQParserPlugin.java | 2 +-
.../apache/solr/search/mlt/CloudMLTQParser.java | 2 +-
.../solr/search/mlt/SimpleMLTQParser.java | 2 +-
.../apache/solr/security/BasicAuthPlugin.java | 1 +
.../security/DelegationTokenKerberosFilter.java | 46 +-
.../apache/solr/security/KerberosPlugin.java | 205 +-
.../apache/solr/servlet/SolrDispatchFilter.java | 6 +
.../solr/spelling/suggest/SolrSuggester.java | 20 +-
.../org/apache/solr/uninverting/FieldCache.java | 20 +-
.../solr/uninverting/UninvertingReader.java | 20 +-
.../org/apache/solr/update/CdcrUpdateLog.java | 7 +-
.../solr/update/DefaultSolrCoreState.java | 6 +-
.../solr/update/DirectUpdateHandler2.java | 2 +-
.../apache/solr/update/HdfsTransactionLog.java | 141 +-
.../org/apache/solr/update/HdfsUpdateLog.java | 9 +-
.../java/org/apache/solr/update/PeerSync.java | 27 +-
.../org/apache/solr/update/SolrCoreState.java | 2 +
.../org/apache/solr/update/TransactionLog.java | 7 +-
.../java/org/apache/solr/update/UpdateLog.java | 9 +-
.../org/apache/solr/update/VersionInfo.java | 2 +-
.../update/processor/CdcrUpdateProcessor.java | 10 +-
.../processor/DistributedUpdateProcessor.java | 34 +-
.../org/apache/solr/util/SolrPluginUtils.java | 4 +-
.../org/apache/solr/util/TestInjection.java | 115 +-
.../collection1/conf/schema-psuedo-fields.xml | 3 +
.../solr/collection1/conf/solrconfig-tlog.xml | 2 +-
.../configsets/cdcr-source-disabled/schema.xml | 29 +
.../cdcr-source-disabled/solrconfig.xml | 60 +
.../solr/configsets/cdcr-source/schema.xml | 29 +
.../solr/configsets/cdcr-source/solrconfig.xml | 76 +
.../solr/configsets/cdcr-target/schema.xml | 29 +
.../solr/configsets/cdcr-target/solrconfig.xml | 63 +
.../org/apache/solr/TestRandomFaceting.java | 261 ++-
.../TestReversedWildcardFilterFactory.java | 2 +-
.../AbstractCloudBackupRestoreTestCase.java | 12 +-
.../solr/cloud/BaseCdcrDistributedZkTest.java | 25 +
.../apache/solr/cloud/CdcrBootstrapTest.java | 396 ++++
.../cloud/CdcrReplicationDistributedZkTest.java | 31 +
.../org/apache/solr/cloud/DeleteNodeTest.java | 75 +
.../apache/solr/cloud/DeleteReplicaTest.java | 139 +-
.../apache/solr/cloud/KerberosTestServices.java | 7 +-
.../solr/cloud/PeerSyncReplicationTest.java | 360 ++++
.../org/apache/solr/cloud/ReplaceNodeTest.java | 104 +
.../org/apache/solr/cloud/ShardSplitTest.java | 54 +
.../solr/cloud/TestCloudPseudoReturnFields.java | 91 +-
.../cloud/TestLocalFSCloudBackupRestore.java | 12 +-
.../apache/solr/cloud/TestRandomFlRTGCloud.java | 447 +++-
.../TestSolrCloudWithDelegationTokens.java | 9 +-
.../TestSolrCloudWithSecureImpersonation.java | 364 ++++
.../TestStressCloudBlindAtomicUpdates.java | 25 +-
.../org/apache/solr/cloud/rule/RulesTest.java | 7 +-
.../apache/solr/core/TestSolrConfigHandler.java | 68 +-
.../apache/solr/handler/TestReqParamsAPI.java | 73 +-
.../DistributedFacetExistsSmallTest.java | 236 +++
...DistributedQueryComponentCustomSortTest.java | 15 +-
.../apache/solr/request/SimpleFacetsTest.java | 286 ++-
.../solr/request/macro/TestMacroExpander.java | 116 +
.../apache/solr/search/QueryEqualityTest.java | 18 +
.../TestLegacyNumericRangeQueryBuilder.java | 179 ++
.../solr/search/TestMaxScoreQueryParser.java | 1 +
...OverriddenPrefixQueryForCustomFieldType.java | 2 +-
.../solr/search/TestPseudoReturnFields.java | 25 +-
.../apache/solr/search/TestRankQueryPlugin.java | 4 -
.../org/apache/solr/search/TestSearchPerf.java | 8 +-
.../apache/solr/search/TestSolrQueryParser.java | 4 +-
.../solr/search/TestStandardQParsers.java | 9 +
.../solr/search/facet/TestJsonFacets.java | 57 +-
.../solr/search/function/TestOrdValues.java | 4 +-
.../search/join/BlockJoinFacetSimpleTest.java | 24 +
.../search/join/TestScoreJoinQPNoScore.java | 4 +-
.../solr/search/join/TestScoreJoinQPScore.java | 2 +-
.../solr/security/BasicAuthIntegrationTest.java | 28 +-
...ramDelegationTokenAuthenticationHandler.java | 109 -
.../HttpParamDelegationTokenPlugin.java | 272 +++
.../solr/uninverting/TestDocTermOrds.java | 6 +-
.../TestFieldCacheSanityChecker.java | 8 +-
.../solr/uninverting/TestFieldCacheSort.java | 8 +-
.../solr/uninverting/TestLegacyFieldCache.java | 10 +-
.../solr/uninverting/TestNumericTerms32.java | 14 +-
.../solr/uninverting/TestNumericTerms64.java | 16 +-
.../solr/uninverting/TestUninvertingReader.java | 10 +-
.../org/apache/solr/util/TestTestInjection.java | 4 +
.../basic_configs/conf/managed-schema | 3 +-
.../conf/managed-schema | 3 +-
.../conf/managed-schema | 3 +-
solr/site/SYSTEM_REQUIREMENTS.mdtext | 2 +-
.../solr/client/solrj/impl/CloudSolrClient.java | 1 +
.../client/solrj/impl/LBHttpSolrClient.java | 22 +-
.../solrj/io/ClassificationEvaluation.java | 85 +
.../io/stream/FeaturesSelectionStream.java | 436 ++++
.../client/solrj/io/stream/TextLogitStream.java | 656 ++++++
.../solrj/io/stream/expr/Explanation.java | 1 +
.../solrj/request/CollectionAdminRequest.java | 64 +-
.../apache/solr/common/cloud/ZkNodeProps.java | 12 +
.../apache/solr/common/cloud/ZkStateReader.java | 28 +-
.../common/params/CollectionAdminParams.java | 9 +-
.../solr/common/params/CollectionParams.java | 3 +
.../apache/solr/common/params/CommonParams.java | 10 +
.../apache/solr/common/params/FacetParams.java | 8 +
.../solr/common/util/JsonRecordReader.java | 27 +-
.../solrj/solr/configsets/ml/conf/schema.xml | 77 +
.../solr/configsets/ml/conf/solrconfig.xml | 51 +
.../client/solrj/impl/CloudSolrClientTest.java | 68 +-
.../solrj/io/stream/StreamExpressionTest.java | 180 +-
.../stream/StreamExpressionToExpessionTest.java | 37 +-
.../StreamExpressionToExplanationTest.java | 1 -
.../cloud/TestCollectionStateWatchers.java | 51 +-
.../solr/common/util/TestJavaBinCodec.java | 104 +-
.../src/java/org/apache/solr/JSONTestUtil.java | 13 +
solr/webapp/web/css/angular/index.css | 12 +-
.../web/js/angular/controllers/collections.js | 7 +-
solr/webapp/web/js/angular/controllers/cores.js | 10 +-
.../web/js/angular/controllers/dataimport.js | 58 +-
solr/webapp/web/js/angular/controllers/files.js | 6 +-
.../webapp/web/js/angular/controllers/schema.js | 4 +
solr/webapp/web/js/angular/services.js | 6 +-
solr/webapp/web/partials/dataimport.html | 8 +-
427 files changed, 23472 insertions(+), 17433 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b49d9027/solr/core/src/java/org/apache/solr/core/CoreContainer.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b49d9027/solr/core/src/java/org/apache/solr/handler/BlobHandler.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b49d9027/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b49d9027/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b49d9027/solr/core/src/java/org/apache/solr/handler/SchemaHandler.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b49d9027/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b49d9027/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b49d9027/solr/core/src/java/org/apache/solr/security/BasicAuthPlugin.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b49d9027/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b49d9027/solr/core/src/test/org/apache/solr/cloud/rule/RulesTest.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b49d9027/solr/core/src/test/org/apache/solr/core/TestSolrConfigHandler.java
----------------------------------------------------------------------
diff --cc solr/core/src/test/org/apache/solr/core/TestSolrConfigHandler.java
index db7508a,c182495..b27a5a9
--- a/solr/core/src/test/org/apache/solr/core/TestSolrConfigHandler.java
+++ b/solr/core/src/test/org/apache/solr/core/TestSolrConfigHandler.java
@@@ -33,12 -34,14 +34,16 @@@ import com.google.common.collect.Immuta
import org.apache.commons.io.FileUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.common.util.PredicateWithErrMsg;
import org.apache.solr.common.util.StrUtils;
import org.apache.solr.common.util.Utils;
+ import org.apache.solr.handler.DumpRequestHandler;
import org.apache.solr.handler.TestBlobHandler;
import org.apache.solr.handler.TestSolrConfigHandlerConcurrent;
+import org.apache.solr.util.RESTfulServerProvider;
+ import org.apache.solr.request.SolrQueryRequest;
+ import org.apache.solr.response.SolrQueryResponse;
+ import org.apache.solr.search.SolrCache;
import org.apache.solr.util.RestTestBase;
import org.apache.solr.util.RestTestHarness;
import org.eclipse.jetty.servlet.ServletHolder;
@@@ -454,10 -451,59 +457,59 @@@ public class TestSolrConfigHandler exte
map = getRespMap("/dump100?wt=json&json.nl=arrmap&initArgs=true", writeHarness);
List initArgs = (List) map.get("initArgs");
- assertEquals(2, initArgs.size());
+ assertTrue(initArgs.size() >= 2);
assertTrue(((Map)initArgs.get(0)).containsKey("suggester"));
assertTrue(((Map)initArgs.get(1)).containsKey("suggester"));
- System.out.println(map);
+
+ payload = "{\n" +
+ "'add-requesthandler' : { 'name' : '/dump101', 'class': " +
+ "'" + CacheTest.class.getName() + "' " +
+ ", 'startup' : 'lazy'}\n" +
+ "}";
+ runConfigCommand(writeHarness, "/config?wt=json", payload);
+
+ testForResponseElement(writeHarness,
+ testServerBaseUrl,
+ "/config/overlay?wt=json",
+ cloudSolrClient,
+ Arrays.asList("overlay", "requestHandler", "/dump101", "startup"),
+ "lazy",
+ 10);
+
+ payload = "{\n" +
+ "'add-cache' : {name:'lfuCacheDecayFalse', class:'solr.search.LFUCache', size:10 ,initialSize:9 , timeDecay:false }," +
+ "'add-cache' : {name: 'perSegFilter', class: 'solr.search.LRUCache', size:10, initialSize:0 , autowarmCount:10}}";
+ runConfigCommand(writeHarness, "/config?wt=json", payload);
+
+ map = testForResponseElement(writeHarness,
+ testServerBaseUrl,
+ "/config/overlay?wt=json",
+ cloudSolrClient,
+ Arrays.asList("overlay", "cache", "lfuCacheDecayFalse", "class"),
+ "solr.search.LFUCache",
+ 10);
+ assertEquals("solr.search.LRUCache",getObjectByPath(map, true, ImmutableList.of("overlay", "cache", "perSegFilter", "class")));
+
+ map = getRespMap("/dump101?cacheNames=lfuCacheDecayFalse&cacheNames=perSegFilter&wt=json", writeHarness);
+ assertEquals("Actual output "+ Utils.toJSONString(map), "org.apache.solr.search.LRUCache",getObjectByPath(map, true, ImmutableList.of( "caches", "perSegFilter")));
+ assertEquals("Actual output "+ Utils.toJSONString(map), "org.apache.solr.search.LFUCache",getObjectByPath(map, true, ImmutableList.of( "caches", "lfuCacheDecayFalse")));
+
+ }
+
+ public static class CacheTest extends DumpRequestHandler {
+ @Override
+ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws IOException {
+ super.handleRequestBody(req, rsp);
+ String[] caches = req.getParams().getParams("cacheNames");
+ if(caches != null && caches.length>0){
+ HashMap m = new HashMap();
+ rsp.add("caches", m);
+ for (String c : caches) {
+ SolrCache cache = req.getSearcher().getCache(c);
+ if(cache != null) m.put(c, cache.getClass().getName());
+ }
+ }
+ }
}
public static Map testForResponseElement(RestTestHarness harness,
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b49d9027/solr/core/src/test/org/apache/solr/security/BasicAuthIntegrationTest.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b49d9027/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
----------------------------------------------------------------------
diff --cc solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
index 580cf66,958cf14..43524b4
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
@@@ -84,13 -83,9 +84,14 @@@ import org.apache.zookeeper.KeeperExcep
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
-
+ import static org.apache.solr.common.params.CommonParams.ADMIN_PATHS;
+import static org.apache.solr.common.params.CommonParams.AUTHC_PATH;
+import static org.apache.solr.common.params.CommonParams.AUTHZ_PATH;
+import static org.apache.solr.common.params.CommonParams.COLLECTIONS_HANDLER_PATH;
+import static org.apache.solr.common.params.CommonParams.CONFIGSETS_HANDLER_PATH;
+import static org.apache.solr.common.params.CommonParams.CORES_HANDLER_PATH;
+
/**
* SolrJ client class to communicate with SolrCloud.
* Instances of this class communicate with Zookeeper to discover
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b49d9027/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
----------------------------------------------------------------------
[08/50] [abbrv] lucene-solr:apiv2: remove unnecessary deprecated
classes
Posted by no...@apache.org.
remove unnecessary deprecated classes
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/ada71497
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/ada71497
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/ada71497
Branch: refs/heads/apiv2
Commit: ada714972cefae02ede192ec8b0819e2d42febde
Parents: 13acba8
Author: Robert Muir <rm...@apache.org>
Authored: Thu Aug 25 12:20:29 2016 -0400
Committer: Robert Muir <rm...@apache.org>
Committed: Thu Aug 25 12:20:29 2016 -0400
----------------------------------------------------------------------
.../lucene/codecs/lucene50/Lucene50Codec.java | 170 ---
.../lucene50/Lucene50DocValuesConsumer.java | 658 ---------
.../lucene50/Lucene50DocValuesFormat.java | 115 --
.../lucene50/Lucene50DocValuesProducer.java | 1299 ------------------
.../codecs/lucene50/Lucene50NormsFormat.java | 62 -
.../codecs/lucene50/Lucene50NormsProducer.java | 481 -------
.../lucene/codecs/lucene53/Lucene53Codec.java | 176 ---
.../apache/lucene/codecs/lucene53/package.html | 25 -
.../lucene/codecs/lucene54/Lucene54Codec.java | 178 ---
.../apache/lucene/codecs/lucene54/package.html | 25 -
.../services/org.apache.lucene.codecs.Codec | 3 -
.../org.apache.lucene.codecs.DocValuesFormat | 1 -
.../codecs/lucene50/Lucene50NormsConsumer.java | 403 ------
.../lucene/codecs/lucene50/Lucene50RWCodec.java | 41 -
.../codecs/lucene50/Lucene50RWNormsFormat.java | 36 -
.../lucene50/TestLucene50DocValuesFormat.java | 281 ----
.../lucene50/TestLucene50NormsFormat.java | 130 --
17 files changed, 4084 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ada71497/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50Codec.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50Codec.java b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50Codec.java
deleted file mode 100644
index 19d6e3b..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50Codec.java
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.codecs.lucene50;
-
-
-import java.util.Objects;
-
-import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.codecs.CompoundFormat;
-import org.apache.lucene.codecs.DocValuesFormat;
-import org.apache.lucene.codecs.FieldInfosFormat;
-import org.apache.lucene.codecs.FilterCodec;
-import org.apache.lucene.codecs.LiveDocsFormat;
-import org.apache.lucene.codecs.NormsFormat;
-import org.apache.lucene.codecs.PointsFormat;
-import org.apache.lucene.codecs.PostingsFormat;
-import org.apache.lucene.codecs.SegmentInfoFormat;
-import org.apache.lucene.codecs.StoredFieldsFormat;
-import org.apache.lucene.codecs.TermVectorsFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
-import org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat;
-import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat;
-
-/**
- * Implements the Lucene 5.0 index format, with configurable per-field postings
- * and docvalues formats.
- * <p>
- * If you want to reuse functionality of this codec in another codec, extend
- * {@link FilterCodec}.
- *
- * @see org.apache.lucene.codecs.lucene50 package documentation for file format details.
- * @deprecated Only for reading old 5.0-5.2 segments
- */
-@Deprecated
-public class Lucene50Codec extends Codec {
- private final TermVectorsFormat vectorsFormat = new Lucene50TermVectorsFormat();
- private final FieldInfosFormat fieldInfosFormat = new Lucene50FieldInfosFormat();
- private final SegmentInfoFormat segmentInfosFormat = new Lucene50SegmentInfoFormat();
- private final LiveDocsFormat liveDocsFormat = new Lucene50LiveDocsFormat();
- private final CompoundFormat compoundFormat = new Lucene50CompoundFormat();
-
- private final PostingsFormat postingsFormat = new PerFieldPostingsFormat() {
- @Override
- public PostingsFormat getPostingsFormatForField(String field) {
- return Lucene50Codec.this.getPostingsFormatForField(field);
- }
- };
-
- private final DocValuesFormat docValuesFormat = new PerFieldDocValuesFormat() {
- @Override
- public DocValuesFormat getDocValuesFormatForField(String field) {
- return Lucene50Codec.this.getDocValuesFormatForField(field);
- }
- };
-
- private final StoredFieldsFormat storedFieldsFormat;
-
- /**
- * Instantiates a new codec.
- */
- public Lucene50Codec() {
- this(Mode.BEST_SPEED);
- }
-
- /**
- * Instantiates a new codec, specifying the stored fields compression
- * mode to use.
- * @param mode stored fields compression mode to use for newly
- * flushed/merged segments.
- */
- public Lucene50Codec(Mode mode) {
- super("Lucene50");
- this.storedFieldsFormat = new Lucene50StoredFieldsFormat(Objects.requireNonNull(mode));
- }
-
- @Override
- public final StoredFieldsFormat storedFieldsFormat() {
- return storedFieldsFormat;
- }
-
- @Override
- public final TermVectorsFormat termVectorsFormat() {
- return vectorsFormat;
- }
-
- @Override
- public final PostingsFormat postingsFormat() {
- return postingsFormat;
- }
-
- @Override
- public final FieldInfosFormat fieldInfosFormat() {
- return fieldInfosFormat;
- }
-
- @Override
- public SegmentInfoFormat segmentInfoFormat() {
- return segmentInfosFormat;
- }
-
- @Override
- public final LiveDocsFormat liveDocsFormat() {
- return liveDocsFormat;
- }
-
- @Override
- public final CompoundFormat compoundFormat() {
- return compoundFormat;
- }
-
- /** Returns the postings format that should be used for writing
- * new segments of <code>field</code>.
- *
- * The default implementation always returns "Lucene50".
- * <p>
- * <b>WARNING:</b> if you subclass, you are responsible for index
- * backwards compatibility: future version of Lucene are only
- * guaranteed to be able to read the default implementation.
- */
- public PostingsFormat getPostingsFormatForField(String field) {
- return defaultFormat;
- }
-
- /** Returns the docvalues format that should be used for writing
- * new segments of <code>field</code>.
- *
- * The default implementation always returns "Lucene50".
- * <p>
- * <b>WARNING:</b> if you subclass, you are responsible for index
- * backwards compatibility: future version of Lucene are only
- * guaranteed to be able to read the default implementation.
- */
- public DocValuesFormat getDocValuesFormatForField(String field) {
- return defaultDVFormat;
- }
-
- @Override
- public final DocValuesFormat docValuesFormat() {
- return docValuesFormat;
- }
-
- @Override
- public final PointsFormat pointsFormat() {
- return PointsFormat.EMPTY;
- }
-
- private final PostingsFormat defaultFormat = PostingsFormat.forName("Lucene50");
- private final DocValuesFormat defaultDVFormat = DocValuesFormat.forName("Lucene50");
-
- private final NormsFormat normsFormat = new Lucene50NormsFormat();
-
- @Override
- public NormsFormat normsFormat() {
- return normsFormat;
- }
-}
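The javadoc of the deleted codec above points to FilterCodec as the reuse mechanism. For context, that pattern looks roughly like the following; MyCodec and MyNormsFormat are hypothetical names, and a real codec would also need SPI registration under META-INF/services/org.apache.lucene.codecs.Codec.

    import org.apache.lucene.codecs.Codec;
    import org.apache.lucene.codecs.FilterCodec;
    import org.apache.lucene.codecs.NormsFormat;

    public final class MyCodec extends FilterCodec {
      public MyCodec() {
        super("MyCodec", Codec.getDefault()); // delegate all formats by default
      }
      @Override
      public NormsFormat normsFormat() {
        return new MyNormsFormat(); // hypothetical custom norms format
      }
    }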
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ada71497/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50DocValuesConsumer.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50DocValuesConsumer.java b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50DocValuesConsumer.java
deleted file mode 100644
index 59ce73a..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50DocValuesConsumer.java
+++ /dev/null
@@ -1,658 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.codecs.lucene50;
-
-
-import java.io.Closeable; // javadocs
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.lucene.codecs.CodecUtil;
-import org.apache.lucene.codecs.DocValuesConsumer;
-import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.SegmentWriteState;
-import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.RAMOutputStream;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.LongsRef;
-import org.apache.lucene.util.MathUtil;
-import org.apache.lucene.util.PagedBytes;
-import org.apache.lucene.util.PagedBytes.PagedBytesDataInput;
-import org.apache.lucene.util.StringHelper;
-import org.apache.lucene.util.packed.DirectWriter;
-import org.apache.lucene.util.packed.MonotonicBlockPackedWriter;
-import org.apache.lucene.util.packed.PackedInts;
-
-import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesFormat.*;
-
-/** writer for {@link Lucene50DocValuesFormat} */
-class Lucene50DocValuesConsumer extends DocValuesConsumer implements Closeable {
-
- IndexOutput data, meta;
- final int maxDoc;
-
- /** expert: Creates a new writer */
- public Lucene50DocValuesConsumer(SegmentWriteState state, String dataCodec, String dataExtension, String metaCodec, String metaExtension) throws IOException {
- boolean success = false;
- try {
- String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension);
- data = state.directory.createOutput(dataName, state.context);
- CodecUtil.writeIndexHeader(data, dataCodec, Lucene50DocValuesFormat.VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
- String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, metaExtension);
- meta = state.directory.createOutput(metaName, state.context);
- CodecUtil.writeIndexHeader(meta, metaCodec, Lucene50DocValuesFormat.VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
- maxDoc = state.segmentInfo.maxDoc();
- success = true;
- } finally {
- if (!success) {
- IOUtils.closeWhileHandlingException(this);
- }
- }
- }
-
- @Override
- public void addNumericField(FieldInfo field, Iterable<Number> values) throws IOException {
- addNumericField(field, values, true);
- }
-
- void addNumericField(FieldInfo field, Iterable<Number> values, boolean optimizeStorage) throws IOException {
- long count = 0;
- long minValue = Long.MAX_VALUE;
- long maxValue = Long.MIN_VALUE;
- long gcd = 0;
- long missingCount = 0;
- long zeroCount = 0;
- // TODO: more efficient?
- HashSet<Long> uniqueValues = null;
- if (optimizeStorage) {
- uniqueValues = new HashSet<>();
-
- for (Number nv : values) {
- final long v;
- if (nv == null) {
- v = 0;
- missingCount++;
- zeroCount++;
- } else {
- v = nv.longValue();
- if (v == 0) {
- zeroCount++;
- }
- }
-
- if (gcd != 1) {
- if (v < Long.MIN_VALUE / 2 || v > Long.MAX_VALUE / 2) {
- // in that case v - minValue might overflow and make the GCD computation return
- // wrong results. Since these extreme values are unlikely, we just discard
- // GCD computation for them
- gcd = 1;
- } else if (count != 0) { // minValue needs to be set first
- gcd = MathUtil.gcd(gcd, v - minValue);
- }
- }
-
- minValue = Math.min(minValue, v);
- maxValue = Math.max(maxValue, v);
-
- if (uniqueValues != null) {
- if (uniqueValues.add(v)) {
- if (uniqueValues.size() > 256) {
- uniqueValues = null;
- }
- }
- }
-
- ++count;
- }
- } else {
- for (Number nv : values) {
- long v = nv.longValue();
- minValue = Math.min(minValue, v);
- maxValue = Math.max(maxValue, v);
- ++count;
- }
- }
-
- final long delta = maxValue - minValue;
- final int deltaBitsRequired = DirectWriter.unsignedBitsRequired(delta);
- final int tableBitsRequired = uniqueValues == null
- ? Integer.MAX_VALUE
- : DirectWriter.bitsRequired(uniqueValues.size() - 1);
-
- final int format;
- if (uniqueValues != null
- && count <= Integer.MAX_VALUE
- && (uniqueValues.size() == 1
- || (uniqueValues.size() == 2 && missingCount > 0 && zeroCount == missingCount))) {
- // either one unique value C or two unique values: "missing" and C
- format = CONST_COMPRESSED;
- } else if (uniqueValues != null && tableBitsRequired < deltaBitsRequired) {
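- // e.g. values drawn only from {-5, 17, 42}: write the 3-entry table once, then a 2-bit ord per value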
- format = TABLE_COMPRESSED;
- } else if (gcd != 0 && gcd != 1) {
- final long gcdDelta = (maxValue - minValue) / gcd;
- final long gcdBitsRequired = DirectWriter.unsignedBitsRequired(gcdDelta);
- format = gcdBitsRequired < deltaBitsRequired ? GCD_COMPRESSED : DELTA_COMPRESSED;
- } else {
- format = DELTA_COMPRESSED;
- }
- meta.writeVInt(field.number);
- meta.writeByte(Lucene50DocValuesFormat.NUMERIC);
- meta.writeVInt(format);
- if (missingCount == 0) {
- meta.writeLong(ALL_LIVE);
- } else if (missingCount == count) {
- meta.writeLong(ALL_MISSING);
- } else {
- meta.writeLong(data.getFilePointer());
- writeMissingBitset(values);
- }
- meta.writeLong(data.getFilePointer());
- meta.writeVLong(count);
-
- switch (format) {
- case CONST_COMPRESSED:
- // write the constant (nonzero value in the n=2 case, singleton value otherwise)
- meta.writeLong(minValue < 0 ? Collections.min(uniqueValues) : Collections.max(uniqueValues));
- break;
- case GCD_COMPRESSED:
- meta.writeLong(minValue);
- meta.writeLong(gcd);
- final long maxDelta = (maxValue - minValue) / gcd;
- final int bits = DirectWriter.unsignedBitsRequired(maxDelta);
- meta.writeVInt(bits);
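- // e.g. for values {100, 130, 190}: minValue=100, gcd=30, so the loop below writes quotients 0, 1 and 3 using 2 bits each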
- final DirectWriter quotientWriter = DirectWriter.getInstance(data, count, bits);
- for (Number nv : values) {
- long value = nv == null ? 0 : nv.longValue();
- quotientWriter.add((value - minValue) / gcd);
- }
- quotientWriter.finish();
- break;
- case DELTA_COMPRESSED:
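- // if maxValue - minValue overflowed (delta < 0), fall back to storing raw values rather than deltas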
- final long minDelta = delta < 0 ? 0 : minValue;
- meta.writeLong(minDelta);
- meta.writeVInt(deltaBitsRequired);
- final DirectWriter writer = DirectWriter.getInstance(data, count, deltaBitsRequired);
- for (Number nv : values) {
- long v = nv == null ? 0 : nv.longValue();
- writer.add(v - minDelta);
- }
- writer.finish();
- break;
- case TABLE_COMPRESSED:
- final Long[] decode = uniqueValues.toArray(new Long[uniqueValues.size()]);
- Arrays.sort(decode);
- final HashMap<Long,Integer> encode = new HashMap<>();
- meta.writeVInt(decode.length);
- for (int i = 0; i < decode.length; i++) {
- meta.writeLong(decode[i]);
- encode.put(decode[i], i);
- }
- meta.writeVInt(tableBitsRequired);
- final DirectWriter ordsWriter = DirectWriter.getInstance(data, count, tableBitsRequired);
- for (Number nv : values) {
- ordsWriter.add(encode.get(nv == null ? 0 : nv.longValue()));
- }
- ordsWriter.finish();
- break;
- default:
- throw new AssertionError();
- }
- meta.writeLong(data.getFilePointer());
- }
-
- // TODO: in some cases representing missing with minValue-1 wouldn't take up additional space and so on,
- // but this is very simple, and algorithms only check this for values of 0 anyway (doesn't slow down normal decode)
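- // e.g. for values {7, null, 3} a single byte 0b00000101 is written: docs 0 and 2 have a value, doc 1 is missing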
- void writeMissingBitset(Iterable<?> values) throws IOException {
- byte bits = 0;
- int count = 0;
- for (Object v : values) {
- if (count == 8) {
- data.writeByte(bits);
- count = 0;
- bits = 0;
- }
- if (v != null) {
- bits |= 1 << (count & 7);
- }
- count++;
- }
- if (count > 0) {
- data.writeByte(bits);
- }
- }
-
- @Override
- public void addBinaryField(FieldInfo field, Iterable<BytesRef> values) throws IOException {
- // write the byte[] data
- meta.writeVInt(field.number);
- meta.writeByte(Lucene50DocValuesFormat.BINARY);
- int minLength = Integer.MAX_VALUE;
- int maxLength = Integer.MIN_VALUE;
- final long startFP = data.getFilePointer();
- long count = 0;
- long missingCount = 0;
- for(BytesRef v : values) {
- final int length;
- if (v == null) {
- length = 0;
- missingCount++;
- } else {
- length = v.length;
- }
- minLength = Math.min(minLength, length);
- maxLength = Math.max(maxLength, length);
- if (v != null) {
- data.writeBytes(v.bytes, v.offset, v.length);
- }
- count++;
- }
- meta.writeVInt(minLength == maxLength ? BINARY_FIXED_UNCOMPRESSED : BINARY_VARIABLE_UNCOMPRESSED);
- if (missingCount == 0) {
- meta.writeLong(ALL_LIVE);
- } else if (missingCount == count) {
- meta.writeLong(ALL_MISSING);
- } else {
- meta.writeLong(data.getFilePointer());
- writeMissingBitset(values);
- }
- meta.writeVInt(minLength);
- meta.writeVInt(maxLength);
- meta.writeVLong(count);
- meta.writeLong(startFP);
-
- // if minLength == maxLength, it's a fixed-length byte[], we are done (the addresses are implicit)
- // otherwise, we need to record the length fields...
- if (minLength != maxLength) {
- meta.writeLong(data.getFilePointer());
- meta.writeVInt(PackedInts.VERSION_CURRENT);
- meta.writeVInt(MONOTONIC_BLOCK_SIZE);
-
- final MonotonicBlockPackedWriter writer = new MonotonicBlockPackedWriter(data, MONOTONIC_BLOCK_SIZE);
- long addr = 0;
- writer.add(addr);
- for (BytesRef v : values) {
- if (v != null) {
- addr += v.length;
- }
- writer.add(addr);
- }
- writer.finish();
- }
- }
-
- /** expert: writes a value dictionary for a sorted/sortedset field */
- private void addTermsDict(FieldInfo field, final Iterable<BytesRef> values) throws IOException {
- // first check if it's a "fixed-length" terms dict
- int minLength = Integer.MAX_VALUE;
- int maxLength = Integer.MIN_VALUE;
- long numValues = 0;
- for (BytesRef v : values) {
- minLength = Math.min(minLength, v.length);
- maxLength = Math.max(maxLength, v.length);
- numValues++;
- }
- if (minLength == maxLength) {
- // no index needed: values are fixed length, so addressing is a simple multiplication
- addBinaryField(field, values);
- } else if (numValues < REVERSE_INTERVAL_COUNT) {
- // low cardinality: wastes a few KB of RAM, but there are too few terms to benefit from a fancy index
- addBinaryField(field, values);
- } else {
- assert numValues > 0; // we don't have to handle the empty case
- // header
- meta.writeVInt(field.number);
- meta.writeByte(Lucene50DocValuesFormat.BINARY);
- meta.writeVInt(BINARY_PREFIX_COMPRESSED);
- meta.writeLong(-1L);
- // now write the bytes: sharing prefixes within a block
- final long startFP = data.getFilePointer();
- // currently, we have to store the delta from the expected address for every 1/nth term
- // we could avoid this, but the overhead is small and still less overall RAM than the previous approach!
- RAMOutputStream addressBuffer = new RAMOutputStream();
- MonotonicBlockPackedWriter termAddresses = new MonotonicBlockPackedWriter(addressBuffer, MONOTONIC_BLOCK_SIZE);
- // buffers up 16 terms
- RAMOutputStream bytesBuffer = new RAMOutputStream();
- // buffers up block header
- RAMOutputStream headerBuffer = new RAMOutputStream();
- BytesRefBuilder lastTerm = new BytesRefBuilder();
- lastTerm.grow(maxLength);
- long count = 0;
- int suffixDeltas[] = new int[INTERVAL_COUNT];
- for (BytesRef v : values) {
- int termPosition = (int) (count & INTERVAL_MASK);
- if (termPosition == 0) {
- termAddresses.add(data.getFilePointer() - startFP);
- // abs-encode first term
- headerBuffer.writeVInt(v.length);
- headerBuffer.writeBytes(v.bytes, v.offset, v.length);
- lastTerm.copyBytes(v);
- } else {
- // prefix-code: we only share at most 255 characters, to encode the length as a single
- // byte and have random access. Larger terms just get less compression.
- int sharedPrefix = Math.min(255, StringHelper.bytesDifference(lastTerm.get(), v));
- bytesBuffer.writeByte((byte) sharedPrefix);
- bytesBuffer.writeBytes(v.bytes, v.offset + sharedPrefix, v.length - sharedPrefix);
- // we can encode the suffix length one smaller, because terms are unique (the suffix is never empty).
- suffixDeltas[termPosition] = v.length - sharedPrefix - 1;
- }
-
- count++;
- // flush block
- if ((count & INTERVAL_MASK) == 0) {
- flushTermsDictBlock(headerBuffer, bytesBuffer, suffixDeltas);
- }
- }
- // flush any trailing, partially filled block
- int leftover = (int) (count & INTERVAL_MASK);
- if (leftover > 0) {
- Arrays.fill(suffixDeltas, leftover, suffixDeltas.length, 0);
- flushTermsDictBlock(headerBuffer, bytesBuffer, suffixDeltas);
- }
- final long indexStartFP = data.getFilePointer();
- // write addresses of indexed terms
- termAddresses.finish();
- addressBuffer.writeTo(data);
- addressBuffer = null;
- termAddresses = null;
- meta.writeVInt(minLength);
- meta.writeVInt(maxLength);
- meta.writeVLong(count);
- meta.writeLong(startFP);
- meta.writeLong(indexStartFP);
- meta.writeVInt(PackedInts.VERSION_CURRENT);
- meta.writeVInt(MONOTONIC_BLOCK_SIZE);
- addReverseTermIndex(field, values, maxLength);
- }
- }
-
- // writes one term dictionary "block":
- // the first term is encoded in full as vint length + bytes.
- // suffix-length deltas of the subsequent N terms are encoded as either N bytes or N shorts;
- // the two-byte case is signaled by a leading 255 (i.e. -1) byte.
- // each subsequent term itself is encoded as one shared-prefix-length byte + its suffix bytes.
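- // e.g. for a block beginning {boat, boating, ...}: "boat" is written in full, while "boating"
- // contributes suffix delta 2 to the header, and the prefix-length byte 4 plus "ing" to the bytes buffer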
- private void flushTermsDictBlock(RAMOutputStream headerBuffer, RAMOutputStream bytesBuffer, int suffixDeltas[]) throws IOException {
- boolean twoByte = false;
- for (int i = 1; i < suffixDeltas.length; i++) {
- if (suffixDeltas[i] > 254) {
- twoByte = true;
- }
- }
- if (twoByte) {
- headerBuffer.writeByte((byte)255);
- for (int i = 1; i < suffixDeltas.length; i++) {
- headerBuffer.writeShort((short) suffixDeltas[i]);
- }
- } else {
- for (int i = 1; i < suffixDeltas.length; i++) {
- headerBuffer.writeByte((byte) suffixDeltas[i]);
- }
- }
- headerBuffer.writeTo(data);
- headerBuffer.reset();
- bytesBuffer.writeTo(data);
- bytesBuffer.reset();
- }
-
- // writes the reverse term index: used to binary-search a term down to a range of 64 blocks.
- // for every 64 blocks (1024 terms) we store one term, trimming any suffix unnecessary for comparison.
- // terms are written as a contiguous byte[], but never spanning 2^15-byte boundaries.
- private void addReverseTermIndex(FieldInfo field, final Iterable<BytesRef> values, int maxLength) throws IOException {
- long count = 0;
- BytesRefBuilder priorTerm = new BytesRefBuilder();
- priorTerm.grow(maxLength);
- BytesRef indexTerm = new BytesRef();
- long startFP = data.getFilePointer();
- PagedBytes pagedBytes = new PagedBytes(15);
- MonotonicBlockPackedWriter addresses = new MonotonicBlockPackedWriter(data, MONOTONIC_BLOCK_SIZE);
-
- for (BytesRef b : values) {
- int termPosition = (int) (count & REVERSE_INTERVAL_MASK);
- if (termPosition == 0) {
- int len = StringHelper.sortKeyLength(priorTerm.get(), b);
- indexTerm.bytes = b.bytes;
- indexTerm.offset = b.offset;
- indexTerm.length = len;
- addresses.add(pagedBytes.copyUsingLengthPrefix(indexTerm));
- } else if (termPosition == REVERSE_INTERVAL_MASK) {
- priorTerm.copyBytes(b);
- }
- count++;
- }
- addresses.finish();
- long numBytes = pagedBytes.getPointer();
- pagedBytes.freeze(true);
- PagedBytesDataInput in = pagedBytes.getDataInput();
- meta.writeLong(startFP);
- data.writeVLong(numBytes);
- data.copyBytes(in, numBytes);
- }
-
- @Override
- public void addSortedField(FieldInfo field, Iterable<BytesRef> values, Iterable<Number> docToOrd) throws IOException {
- meta.writeVInt(field.number);
- meta.writeByte(Lucene50DocValuesFormat.SORTED);
- addTermsDict(field, values);
- addNumericField(field, docToOrd, false);
- }
-
- @Override
- public void addSortedNumericField(FieldInfo field, final Iterable<Number> docToValueCount, final Iterable<Number> values) throws IOException {
- meta.writeVInt(field.number);
- meta.writeByte(Lucene50DocValuesFormat.SORTED_NUMERIC);
- if (isSingleValued(docToValueCount)) {
- meta.writeVInt(SORTED_SINGLE_VALUED);
- // The field is single-valued, we can encode it as NUMERIC
- addNumericField(field, singletonView(docToValueCount, values, null));
- } else {
- final SortedSet<LongsRef> uniqueValueSets = uniqueValueSets(docToValueCount, values);
- if (uniqueValueSets != null) {
- meta.writeVInt(SORTED_SET_TABLE);
-
- // write the set_id -> values mapping
- writeDictionary(uniqueValueSets);
-
- // write the doc -> set_id as a numeric field
- addNumericField(field, docToSetId(uniqueValueSets, docToValueCount, values), false);
- } else {
- meta.writeVInt(SORTED_WITH_ADDRESSES);
- // write the stream of values as a numeric field
- addNumericField(field, values, true);
- // write the doc -> value count as an absolute index into the stream
- addAddresses(field, docToValueCount);
- }
- }
- }
-
- @Override
- public void addSortedSetField(FieldInfo field, Iterable<BytesRef> values, final Iterable<Number> docToOrdCount, final Iterable<Number> ords) throws IOException {
- meta.writeVInt(field.number);
- meta.writeByte(Lucene50DocValuesFormat.SORTED_SET);
-
- if (isSingleValued(docToOrdCount)) {
- meta.writeVInt(SORTED_SINGLE_VALUED);
- // The field is single-valued, we can encode it as SORTED
- addSortedField(field, values, singletonView(docToOrdCount, ords, -1L));
- } else {
- final SortedSet<LongsRef> uniqueValueSets = uniqueValueSets(docToOrdCount, ords);
- if (uniqueValueSets != null) {
- meta.writeVInt(SORTED_SET_TABLE);
-
- // write the set_id -> ords mapping
- writeDictionary(uniqueValueSets);
-
- // write the ord -> byte[] as a binary field
- addTermsDict(field, values);
-
- // write the doc -> set_id as a numeric field
- addNumericField(field, docToSetId(uniqueValueSets, docToOrdCount, ords), false);
- } else {
- meta.writeVInt(SORTED_WITH_ADDRESSES);
-
- // write the ord -> byte[] as a binary field
- addTermsDict(field, values);
-
- // write the stream of ords as a numeric field
- // NOTE: we could return an iterator that delta-encodes these within a doc
- addNumericField(field, ords, false);
-
- // write the doc -> ord count as an absolute index into the stream
- addAddresses(field, docToOrdCount);
- }
- }
- }
-
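- // collects the distinct per-document value sets, or returns null (caller falls back to
- // SORTED_WITH_ADDRESSES) once any single set or the total dictionary exceeds 256 values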
- private SortedSet<LongsRef> uniqueValueSets(Iterable<Number> docToValueCount, Iterable<Number> values) {
- Set<LongsRef> uniqueValueSet = new HashSet<>();
- LongsRef docValues = new LongsRef(256);
-
- Iterator<Number> valueCountIterator = docToValueCount.iterator();
- Iterator<Number> valueIterator = values.iterator();
- int totalDictSize = 0;
- while (valueCountIterator.hasNext()) {
- docValues.length = valueCountIterator.next().intValue();
- if (docValues.length > 256) {
- return null;
- }
- for (int i = 0; i < docValues.length; ++i) {
- docValues.longs[i] = valueIterator.next().longValue();
- }
- if (uniqueValueSet.contains(docValues)) {
- continue;
- }
- totalDictSize += docValues.length;
- if (totalDictSize > 256) {
- return null;
- }
- uniqueValueSet.add(new LongsRef(Arrays.copyOf(docValues.longs, docValues.length), 0, docValues.length));
- }
- assert valueIterator.hasNext() == false;
- return new TreeSet<>(uniqueValueSet);
- }
-
- private void writeDictionary(SortedSet<LongsRef> uniqueValueSets) throws IOException {
- int lengthSum = 0;
- for (LongsRef longs : uniqueValueSets) {
- lengthSum += longs.length;
- }
-
- meta.writeInt(lengthSum);
- for (LongsRef valueSet : uniqueValueSets) {
- for (int i = 0; i < valueSet.length; ++i) {
- meta.writeLong(valueSet.longs[valueSet.offset + i]);
- }
- }
-
- meta.writeInt(uniqueValueSets.size());
- for (LongsRef valueSet : uniqueValueSets) {
- meta.writeInt(valueSet.length);
- }
- }
-
- private Iterable<Number> docToSetId(SortedSet<LongsRef> uniqueValueSets, Iterable<Number> docToValueCount, Iterable<Number> values) {
- final Map<LongsRef, Integer> setIds = new HashMap<>();
- int i = 0;
- for (LongsRef set : uniqueValueSets) {
- setIds.put(set, i++);
- }
- assert i == uniqueValueSets.size();
-
- return new Iterable<Number>() {
-
- @Override
- public Iterator<Number> iterator() {
- final Iterator<Number> valueCountIterator = docToValueCount.iterator();
- final Iterator<Number> valueIterator = values.iterator();
- final LongsRef docValues = new LongsRef(256);
- return new Iterator<Number>() {
-
- @Override
- public boolean hasNext() {
- return valueCountIterator.hasNext();
- }
-
- @Override
- public Number next() {
- docValues.length = valueCountIterator.next().intValue();
- for (int i = 0; i < docValues.length; ++i) {
- docValues.longs[i] = valueIterator.next().longValue();
- }
- final Integer id = setIds.get(docValues);
- assert id != null;
- return id;
- }
-
- };
-
- }
- };
- }
-
- // writes addressing information as MONOTONIC_COMPRESSED integers
- private void addAddresses(FieldInfo field, Iterable<Number> values) throws IOException {
- meta.writeVInt(field.number);
- meta.writeByte(Lucene50DocValuesFormat.NUMERIC);
- meta.writeVInt(MONOTONIC_COMPRESSED);
- meta.writeLong(-1L);
- meta.writeLong(data.getFilePointer());
- meta.writeVLong(maxDoc);
- meta.writeVInt(PackedInts.VERSION_CURRENT);
- meta.writeVInt(MONOTONIC_BLOCK_SIZE);
-
- final MonotonicBlockPackedWriter writer = new MonotonicBlockPackedWriter(data, MONOTONIC_BLOCK_SIZE);
- long addr = 0;
- writer.add(addr);
- for (Number v : values) {
- addr += v.longValue();
- writer.add(addr);
- }
- writer.finish();
- meta.writeLong(data.getFilePointer());
- }
-
- @Override
- public void close() throws IOException {
- boolean success = false;
- try {
- if (meta != null) {
- meta.writeVInt(-1); // write EOF marker
- CodecUtil.writeFooter(meta); // write checksum
- }
- if (data != null) {
- CodecUtil.writeFooter(data); // write checksum
- }
- success = true;
- } finally {
- if (success) {
- IOUtils.close(data, meta);
- } else {
- IOUtils.closeWhileHandlingException(data, meta);
- }
- meta = data = null;
- }
- }
-}
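
The numeric writer above chooses among several encodings per field; GCD_COMPRESSED is
the least obvious one. Below is a minimal standalone sketch of the round trip it
performs, in plain Java (class and helper names here are illustrative only, not
Lucene API):

import java.math.BigInteger;

// Sketch of the GCD_COMPRESSED idea: store (value - min) / gcd in few bits,
// and decode as min + gcd * quotient.
public class GcdCompressionSketch {
  public static void main(String[] args) {
    long[] values = {100, 130, 190};

    long min = Long.MAX_VALUE;
    for (long v : values) {
      min = Math.min(min, v);
    }

    // gcd of all deltas from min; BigInteger.gcd saves us hand-rolling Euclid
    long gcd = 0;
    for (long v : values) {
      gcd = BigInteger.valueOf(gcd).gcd(BigInteger.valueOf(v - min)).longValue();
    }
    if (gcd == 0) {
      gcd = 1; // all values identical
    }

    // encode: quotients here are 0, 1, 3 -- 2 bits each instead of 7 for raw deltas
    long[] quotients = new long[values.length];
    for (int i = 0; i < values.length; i++) {
      quotients[i] = (values[i] - min) / gcd;
    }

    // decode and verify the round trip
    for (int i = 0; i < values.length; i++) {
      long decoded = min + gcd * quotients[i];
      if (decoded != values[i]) {
        throw new AssertionError("round trip failed at " + i);
      }
      System.out.println(decoded);
    }
  }
}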
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ada71497/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50DocValuesFormat.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50DocValuesFormat.java b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50DocValuesFormat.java
deleted file mode 100644
index 7258a71..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50DocValuesFormat.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.codecs.lucene50;
-
-
-import java.io.IOException;
-
-import org.apache.lucene.codecs.DocValuesConsumer;
-import org.apache.lucene.codecs.DocValuesFormat;
-import org.apache.lucene.codecs.DocValuesProducer;
-import org.apache.lucene.index.SegmentReadState;
-import org.apache.lucene.index.SegmentWriteState;
-
-/**
- * Lucene 5.0 Doc values format.
- * @deprecated Only for reading old 5.0-5.3 segments
- */
-@Deprecated
-public class Lucene50DocValuesFormat extends DocValuesFormat {
-
- /** Sole constructor. */
- public Lucene50DocValuesFormat() {
- super("Lucene50");
- }
-
- @Override
- public DocValuesConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
- return new Lucene50DocValuesConsumer(state, DATA_CODEC, DATA_EXTENSION, META_CODEC, META_EXTENSION);
- }
-
- @Override
- public DocValuesProducer fieldsProducer(SegmentReadState state) throws IOException {
- return new Lucene50DocValuesProducer(state, DATA_CODEC, DATA_EXTENSION, META_CODEC, META_EXTENSION);
- }
-
- static final String DATA_CODEC = "Lucene50DocValuesData";
- static final String DATA_EXTENSION = "dvd";
- static final String META_CODEC = "Lucene50DocValuesMetadata";
- static final String META_EXTENSION = "dvm";
- static final int VERSION_START = 0;
- static final int VERSION_SORTEDSET_TABLE = 1;
- static final int VERSION_CURRENT = VERSION_SORTEDSET_TABLE;
-
- // indicates docvalues type
- static final byte NUMERIC = 0;
- static final byte BINARY = 1;
- static final byte SORTED = 2;
- static final byte SORTED_SET = 3;
- static final byte SORTED_NUMERIC = 4;
-
- // address terms in blocks of 16 terms
- static final int INTERVAL_SHIFT = 4;
- static final int INTERVAL_COUNT = 1 << INTERVAL_SHIFT;
- static final int INTERVAL_MASK = INTERVAL_COUNT - 1;
-
- // build reverse index from every 1024th term
- static final int REVERSE_INTERVAL_SHIFT = 10;
- static final int REVERSE_INTERVAL_COUNT = 1 << REVERSE_INTERVAL_SHIFT;
- static final int REVERSE_INTERVAL_MASK = REVERSE_INTERVAL_COUNT - 1;
-
- // for conversion from reverse index to block
- static final int BLOCK_INTERVAL_SHIFT = REVERSE_INTERVAL_SHIFT - INTERVAL_SHIFT;
- static final int BLOCK_INTERVAL_COUNT = 1 << BLOCK_INTERVAL_SHIFT;
- static final int BLOCK_INTERVAL_MASK = BLOCK_INTERVAL_COUNT - 1;
-
- /** Compressed using packed blocks of ints. */
- static final int DELTA_COMPRESSED = 0;
- /** Compressed by computing the GCD. */
- static final int GCD_COMPRESSED = 1;
- /** Compressed by giving IDs to unique values. */
- static final int TABLE_COMPRESSED = 2;
- /** Compressed with monotonically increasing values */
- static final int MONOTONIC_COMPRESSED = 3;
- /** Compressed with constant value (uses only missing bitset) */
- static final int CONST_COMPRESSED = 4;
-
- /** Uncompressed binary, written directly (fixed length). */
- static final int BINARY_FIXED_UNCOMPRESSED = 0;
- /** Uncompressed binary, written directly (variable length). */
- static final int BINARY_VARIABLE_UNCOMPRESSED = 1;
- /** Compressed binary with shared prefixes */
- static final int BINARY_PREFIX_COMPRESSED = 2;
-
- /** Standard storage for sorted set values with 1 level of indirection:
- * {@code docId -> address -> ord}. */
- static final int SORTED_WITH_ADDRESSES = 0;
- /** Single-valued sorted set values, encoded as sorted values, so no level
- * of indirection: {@code docId -> ord}. */
- static final int SORTED_SINGLE_VALUED = 1;
- /** Compressed giving IDs to unique sets of values:
- * {@code docId -> setId -> ords} */
- static final int SORTED_SET_TABLE = 2;
-
- /** placeholder for missing offset that means there are no missing values */
- static final int ALL_LIVE = -1;
- /** placeholder for missing offset that means all values are missing */
- static final int ALL_MISSING = -2;
-
- // addressing uses 16k blocks
- static final int MONOTONIC_BLOCK_SIZE = 16384;
-}
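
The shift/mask constants above drive all block addressing in this format: a term
ordinal splits into a 16-term block id plus a position within that block, and the
reverse index samples every 1024th term the same way. A small sketch of that
arithmetic (the class is illustrative only, not Lucene API; constant values mirror
those above):

// Sketch: slicing a term ordinal with the interval constants.
public class TermBlockMathSketch {
  static final int INTERVAL_SHIFT = 4;                  // 16 terms per block
  static final int INTERVAL_MASK = (1 << INTERVAL_SHIFT) - 1;
  static final int REVERSE_INTERVAL_SHIFT = 10;         // reverse index every 1024 terms

  public static void main(String[] args) {
    long ord = 1000;                                    // global term ordinal
    long block = ord >>> INTERVAL_SHIFT;                // 62: terms 992..1007
    int posInBlock = (int) (ord & INTERVAL_MASK);       // 8: offset inside that block
    long reverseSlot = ord >>> REVERSE_INTERVAL_SHIFT;  // 0: first reverse-index entry
    System.out.println(block + " " + posInBlock + " " + reverseSlot);
  }
}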
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ada71497/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50DocValuesProducer.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50DocValuesProducer.java b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50DocValuesProducer.java
deleted file mode 100644
index 62c9477..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50DocValuesProducer.java
+++ /dev/null
@@ -1,1299 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.codecs.lucene50;
-
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.lucene.codecs.CodecUtil;
-import org.apache.lucene.codecs.DocValuesProducer;
-import org.apache.lucene.index.BinaryDocValues;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.NumericDocValues;
-import org.apache.lucene.index.RandomAccessOrds;
-import org.apache.lucene.index.SegmentReadState;
-import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.index.SortedNumericDocValues;
-import org.apache.lucene.index.SortedSetDocValues;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.store.ChecksumIndexInput;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.RandomAccessInput;
-import org.apache.lucene.util.Accountable;
-import org.apache.lucene.util.Accountables;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.LongValues;
-import org.apache.lucene.util.PagedBytes;
-import org.apache.lucene.util.RamUsageEstimator;
-import org.apache.lucene.util.packed.DirectReader;
-import org.apache.lucene.util.packed.MonotonicBlockPackedReader;
-
-import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesFormat.*;
-
-/** reader for {@link Lucene50DocValuesFormat} */
-class Lucene50DocValuesProducer extends DocValuesProducer implements Closeable {
- private final Map<String,NumericEntry> numerics = new HashMap<>();
- private final Map<String,BinaryEntry> binaries = new HashMap<>();
- private final Map<String,SortedSetEntry> sortedSets = new HashMap<>();
- private final Map<String,SortedSetEntry> sortedNumerics = new HashMap<>();
- private final Map<String,NumericEntry> ords = new HashMap<>();
- private final Map<String,NumericEntry> ordIndexes = new HashMap<>();
- private final int numFields;
- private final AtomicLong ramBytesUsed;
- private final IndexInput data;
- private final int maxDoc;
-
- // memory-resident structures
- private final Map<String,MonotonicBlockPackedReader> addressInstances = new HashMap<>();
- private final Map<String,MonotonicBlockPackedReader> ordIndexInstances = new HashMap<>();
- private final Map<String,ReverseTermsIndex> reverseIndexInstances = new HashMap<>();
-
- private final boolean merging;
-
- // clone for merge: when merging we don't do any instances.put()s
- Lucene50DocValuesProducer(Lucene50DocValuesProducer original) throws IOException {
- assert Thread.holdsLock(original);
- numerics.putAll(original.numerics);
- binaries.putAll(original.binaries);
- sortedSets.putAll(original.sortedSets);
- sortedNumerics.putAll(original.sortedNumerics);
- ords.putAll(original.ords);
- ordIndexes.putAll(original.ordIndexes);
- numFields = original.numFields;
- ramBytesUsed = new AtomicLong(original.ramBytesUsed.get());
- data = original.data.clone();
- maxDoc = original.maxDoc;
-
- addressInstances.putAll(original.addressInstances);
- ordIndexInstances.putAll(original.ordIndexInstances);
- reverseIndexInstances.putAll(original.reverseIndexInstances);
- merging = true;
- }
-
- /** expert: instantiates a new reader */
- Lucene50DocValuesProducer(SegmentReadState state, String dataCodec, String dataExtension, String metaCodec, String metaExtension) throws IOException {
- String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, metaExtension);
- this.maxDoc = state.segmentInfo.maxDoc();
- merging = false;
- ramBytesUsed = new AtomicLong(RamUsageEstimator.shallowSizeOfInstance(getClass()));
-
- int version = -1;
- int numFields = -1;
-
- // read in the entries from the metadata file.
- try (ChecksumIndexInput in = state.directory.openChecksumInput(metaName, state.context)) {
- Throwable priorE = null;
- try {
- version = CodecUtil.checkIndexHeader(in, metaCodec,
- Lucene50DocValuesFormat.VERSION_START,
- Lucene50DocValuesFormat.VERSION_CURRENT,
- state.segmentInfo.getId(),
- state.segmentSuffix);
- numFields = readFields(in, state.fieldInfos);
- } catch (Throwable exception) {
- priorE = exception;
- } finally {
- CodecUtil.checkFooter(in, priorE);
- }
- }
-
- this.numFields = numFields;
- String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension);
- this.data = state.directory.openInput(dataName, state.context);
- boolean success = false;
- try {
- final int version2 = CodecUtil.checkIndexHeader(data, dataCodec,
- Lucene50DocValuesFormat.VERSION_START,
- Lucene50DocValuesFormat.VERSION_CURRENT,
- state.segmentInfo.getId(),
- state.segmentSuffix);
- if (version != version2) {
- throw new CorruptIndexException("Format versions mismatch: meta=" + version + ", data=" + version2, data);
- }
-
- // NOTE: data file is too costly to verify checksum against all the bytes on open,
- // but for now we at least verify proper structure of the checksum footer: which looks
- // for FOOTER_MAGIC + algorithmID. This is cheap and can detect some forms of corruption
- // such as file truncation.
- CodecUtil.retrieveChecksum(data);
-
- success = true;
- } finally {
- if (!success) {
- IOUtils.closeWhileHandlingException(this.data);
- }
- }
- }
-
- private void readSortedField(FieldInfo info, IndexInput meta) throws IOException {
- // sorted = binary + numeric
- if (meta.readVInt() != info.number) {
- throw new CorruptIndexException("sorted entry for field: " + info.name + " is corrupt", meta);
- }
- if (meta.readByte() != Lucene50DocValuesFormat.BINARY) {
- throw new CorruptIndexException("sorted entry for field: " + info.name + " is corrupt", meta);
- }
- BinaryEntry b = readBinaryEntry(meta);
- binaries.put(info.name, b);
-
- if (meta.readVInt() != info.number) {
- throw new CorruptIndexException("sorted entry for field: " + info.name + " is corrupt", meta);
- }
- if (meta.readByte() != Lucene50DocValuesFormat.NUMERIC) {
- throw new CorruptIndexException("sorted entry for field: " + info.name + " is corrupt", meta);
- }
- NumericEntry n = readNumericEntry(meta);
- ords.put(info.name, n);
- }
-
- private void readSortedSetFieldWithAddresses(FieldInfo info, IndexInput meta) throws IOException {
- // sortedset = binary + numeric (addresses) + ordIndex
- if (meta.readVInt() != info.number) {
- throw new CorruptIndexException("sortedset entry for field: " + info.name + " is corrupt", meta);
- }
- if (meta.readByte() != Lucene50DocValuesFormat.BINARY) {
- throw new CorruptIndexException("sortedset entry for field: " + info.name + " is corrupt", meta);
- }
- BinaryEntry b = readBinaryEntry(meta);
- binaries.put(info.name, b);
-
- if (meta.readVInt() != info.number) {
- throw new CorruptIndexException("sortedset entry for field: " + info.name + " is corrupt", meta);
- }
- if (meta.readByte() != Lucene50DocValuesFormat.NUMERIC) {
- throw new CorruptIndexException("sortedset entry for field: " + info.name + " is corrupt", meta);
- }
- NumericEntry n1 = readNumericEntry(meta);
- ords.put(info.name, n1);
-
- if (meta.readVInt() != info.number) {
- throw new CorruptIndexException("sortedset entry for field: " + info.name + " is corrupt", meta);
- }
- if (meta.readByte() != Lucene50DocValuesFormat.NUMERIC) {
- throw new CorruptIndexException("sortedset entry for field: " + info.name + " is corrupt", meta);
- }
- NumericEntry n2 = readNumericEntry(meta);
- ordIndexes.put(info.name, n2);
- }
-
- private void readSortedSetFieldWithTable(FieldInfo info, IndexInput meta) throws IOException {
- // sortedset table = binary + ordset table + ordset index
- if (meta.readVInt() != info.number) {
- throw new CorruptIndexException("sortedset entry for field: " + info.name + " is corrupt", meta);
- }
- if (meta.readByte() != Lucene50DocValuesFormat.BINARY) {
- throw new CorruptIndexException("sortedset entry for field: " + info.name + " is corrupt", meta);
- }
-
- BinaryEntry b = readBinaryEntry(meta);
- binaries.put(info.name, b);
-
- if (meta.readVInt() != info.number) {
- throw new CorruptIndexException("sortedset entry for field: " + info.name + " is corrupt", meta);
- }
- if (meta.readByte() != Lucene50DocValuesFormat.NUMERIC) {
- throw new CorruptIndexException("sortedset entry for field: " + info.name + " is corrupt", meta);
- }
- NumericEntry n = readNumericEntry(meta);
- ords.put(info.name, n);
- }
-
- private int readFields(IndexInput meta, FieldInfos infos) throws IOException {
- int numFields = 0;
- int fieldNumber = meta.readVInt();
- while (fieldNumber != -1) {
- numFields++;
- FieldInfo info = infos.fieldInfo(fieldNumber);
- if (info == null) {
- // tricky to validate further, because we use multiple entries for "composite" types like sortedset, etc.
- throw new CorruptIndexException("Invalid field number: " + fieldNumber, meta);
- }
- byte type = meta.readByte();
- if (type == Lucene50DocValuesFormat.NUMERIC) {
- numerics.put(info.name, readNumericEntry(meta));
- } else if (type == Lucene50DocValuesFormat.BINARY) {
- BinaryEntry b = readBinaryEntry(meta);
- binaries.put(info.name, b);
- } else if (type == Lucene50DocValuesFormat.SORTED) {
- readSortedField(info, meta);
- } else if (type == Lucene50DocValuesFormat.SORTED_SET) {
- SortedSetEntry ss = readSortedSetEntry(meta);
- sortedSets.put(info.name, ss);
- if (ss.format == SORTED_WITH_ADDRESSES) {
- readSortedSetFieldWithAddresses(info, meta);
- } else if (ss.format == SORTED_SET_TABLE) {
- readSortedSetFieldWithTable(info, meta);
- } else if (ss.format == SORTED_SINGLE_VALUED) {
- if (meta.readVInt() != fieldNumber) {
- throw new CorruptIndexException("sortedset entry for field: " + info.name + " is corrupt", meta);
- }
- if (meta.readByte() != Lucene50DocValuesFormat.SORTED) {
- throw new CorruptIndexException("sortedset entry for field: " + info.name + " is corrupt", meta);
- }
- readSortedField(info, meta);
- } else {
- throw new AssertionError();
- }
- } else if (type == Lucene50DocValuesFormat.SORTED_NUMERIC) {
- SortedSetEntry ss = readSortedSetEntry(meta);
- sortedNumerics.put(info.name, ss);
- if (ss.format == SORTED_WITH_ADDRESSES) {
- if (meta.readVInt() != fieldNumber) {
- throw new CorruptIndexException("sortednumeric entry for field: " + info.name + " is corrupt", meta);
- }
- if (meta.readByte() != Lucene50DocValuesFormat.NUMERIC) {
- throw new CorruptIndexException("sortednumeric entry for field: " + info.name + " is corrupt", meta);
- }
- numerics.put(info.name, readNumericEntry(meta));
- if (meta.readVInt() != fieldNumber) {
- throw new CorruptIndexException("sortednumeric entry for field: " + info.name + " is corrupt", meta);
- }
- if (meta.readByte() != Lucene50DocValuesFormat.NUMERIC) {
- throw new CorruptIndexException("sortednumeric entry for field: " + info.name + " is corrupt", meta);
- }
- NumericEntry ordIndex = readNumericEntry(meta);
- ordIndexes.put(info.name, ordIndex);
- } else if (ss.format == SORTED_SET_TABLE) {
- if (meta.readVInt() != info.number) {
- throw new CorruptIndexException("sortednumeric entry for field: " + info.name + " is corrupt", meta);
- }
- if (meta.readByte() != Lucene50DocValuesFormat.NUMERIC) {
- throw new CorruptIndexException("sortednumeric entry for field: " + info.name + " is corrupt", meta);
- }
- NumericEntry n = readNumericEntry(meta);
- ords.put(info.name, n);
- } else if (ss.format == SORTED_SINGLE_VALUED) {
- if (meta.readVInt() != fieldNumber) {
- throw new CorruptIndexException("sortednumeric entry for field: " + info.name + " is corrupt", meta);
- }
- if (meta.readByte() != Lucene50DocValuesFormat.NUMERIC) {
- throw new CorruptIndexException("sortednumeric entry for field: " + info.name + " is corrupt", meta);
- }
- numerics.put(info.name, readNumericEntry(meta));
- } else {
- throw new AssertionError();
- }
- } else {
- throw new CorruptIndexException("invalid type: " + type, meta);
- }
- fieldNumber = meta.readVInt();
- }
- return numFields;
- }
-
- private NumericEntry readNumericEntry(IndexInput meta) throws IOException {
- NumericEntry entry = new NumericEntry();
- entry.format = meta.readVInt();
- entry.missingOffset = meta.readLong();
- entry.offset = meta.readLong();
- entry.count = meta.readVLong();
- switch(entry.format) {
- case CONST_COMPRESSED:
- entry.minValue = meta.readLong();
- if (entry.count > Integer.MAX_VALUE) {
- // currently just a limitation, e.g. of the Bits interface and so on.
- throw new CorruptIndexException("illegal CONST_COMPRESSED count: " + entry.count, meta);
- }
- break;
- case GCD_COMPRESSED:
- entry.minValue = meta.readLong();
- entry.gcd = meta.readLong();
- entry.bitsPerValue = meta.readVInt();
- break;
- case TABLE_COMPRESSED:
- final int uniqueValues = meta.readVInt();
- if (uniqueValues > 256) {
- throw new CorruptIndexException("TABLE_COMPRESSED cannot have more than 256 distinct values, got=" + uniqueValues, meta);
- }
- entry.table = new long[uniqueValues];
- for (int i = 0; i < uniqueValues; ++i) {
- entry.table[i] = meta.readLong();
- }
- ramBytesUsed.addAndGet(RamUsageEstimator.sizeOf(entry.table));
- entry.bitsPerValue = meta.readVInt();
- break;
- case DELTA_COMPRESSED:
- entry.minValue = meta.readLong();
- entry.bitsPerValue = meta.readVInt();
- break;
- case MONOTONIC_COMPRESSED:
- entry.packedIntsVersion = meta.readVInt();
- entry.blockSize = meta.readVInt();
- break;
- default:
- throw new CorruptIndexException("Unknown format: " + entry.format + ", input=", meta);
- }
- entry.endOffset = meta.readLong();
- return entry;
- }
-
- static BinaryEntry readBinaryEntry(IndexInput meta) throws IOException {
- BinaryEntry entry = new BinaryEntry();
- entry.format = meta.readVInt();
- entry.missingOffset = meta.readLong();
- entry.minLength = meta.readVInt();
- entry.maxLength = meta.readVInt();
- entry.count = meta.readVLong();
- entry.offset = meta.readLong();
- switch(entry.format) {
- case BINARY_FIXED_UNCOMPRESSED:
- break;
- case BINARY_PREFIX_COMPRESSED:
- entry.addressesOffset = meta.readLong();
- entry.packedIntsVersion = meta.readVInt();
- entry.blockSize = meta.readVInt();
- entry.reverseIndexOffset = meta.readLong();
- break;
- case BINARY_VARIABLE_UNCOMPRESSED:
- entry.addressesOffset = meta.readLong();
- entry.packedIntsVersion = meta.readVInt();
- entry.blockSize = meta.readVInt();
- break;
- default:
- throw new CorruptIndexException("Unknown format: " + entry.format, meta);
- }
- return entry;
- }
-
- SortedSetEntry readSortedSetEntry(IndexInput meta) throws IOException {
- SortedSetEntry entry = new SortedSetEntry();
- entry.format = meta.readVInt();
- if (entry.format == SORTED_SET_TABLE) {
- final int totalTableLength = meta.readInt();
- if (totalTableLength > 256) {
- throw new CorruptIndexException("SORTED_SET_TABLE cannot have more than 256 values in its dictionary, got=" + totalTableLength, meta);
- }
- entry.table = new long[totalTableLength];
- for (int i = 0; i < totalTableLength; ++i) {
- entry.table[i] = meta.readLong();
- }
- ramBytesUsed.addAndGet(RamUsageEstimator.sizeOf(entry.table));
- final int tableSize = meta.readInt();
- if (tableSize > totalTableLength + 1) { // +1 because of the empty set
- throw new CorruptIndexException("SORTED_SET_TABLE cannot have more set ids than ords in its dictionary, got " + totalTableLength + " ords and " + tableSize + " sets", meta);
- }
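- // tableOffsets is a prefix sum: set i spans table[tableOffsets[i]] .. table[tableOffsets[i+1] - 1]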
- entry.tableOffsets = new int[tableSize + 1];
- for (int i = 1; i < entry.tableOffsets.length; ++i) {
- entry.tableOffsets[i] = entry.tableOffsets[i - 1] + meta.readInt();
- }
- ramBytesUsed.addAndGet(RamUsageEstimator.sizeOf(entry.tableOffsets));
- } else if (entry.format != SORTED_SINGLE_VALUED && entry.format != SORTED_WITH_ADDRESSES) {
- throw new CorruptIndexException("Unknown format: " + entry.format, meta);
- }
- return entry;
- }
-
- @Override
- public NumericDocValues getNumeric(FieldInfo field) throws IOException {
- NumericEntry entry = numerics.get(field.name);
- return getNumeric(entry);
- }
-
- @Override
- public long ramBytesUsed() {
- return ramBytesUsed.get();
- }
-
- @Override
- public synchronized Collection<Accountable> getChildResources() {
- List<Accountable> resources = new ArrayList<>();
- resources.addAll(Accountables.namedAccountables("addresses field", addressInstances));
- resources.addAll(Accountables.namedAccountables("ord index field", ordIndexInstances));
- resources.addAll(Accountables.namedAccountables("reverse index field", reverseIndexInstances));
- return Collections.unmodifiableList(resources);
- }
-
- @Override
- public void checkIntegrity() throws IOException {
- CodecUtil.checksumEntireFile(data);
- }
-
- @Override
- public String toString() {
- return getClass().getSimpleName() + "(fields=" + numFields + ")";
- }
-
- LongValues getNumeric(NumericEntry entry) throws IOException {
- switch (entry.format) {
- case CONST_COMPRESSED: {
- final long constant = entry.minValue;
- final Bits live = getLiveBits(entry.missingOffset, (int)entry.count);
- return new LongValues() {
- @Override
- public long get(long index) {
- return live.get((int)index) ? constant : 0;
- }
- };
- }
- case DELTA_COMPRESSED: {
- RandomAccessInput slice = this.data.randomAccessSlice(entry.offset, entry.endOffset - entry.offset);
- final long delta = entry.minValue;
- final LongValues values = DirectReader.getInstance(slice, entry.bitsPerValue);
- return new LongValues() {
- @Override
- public long get(long id) {
- return delta + values.get(id);
- }
- };
- }
- case GCD_COMPRESSED: {
- RandomAccessInput slice = this.data.randomAccessSlice(entry.offset, entry.endOffset - entry.offset);
- final long min = entry.minValue;
- final long mult = entry.gcd;
- final LongValues quotientReader = DirectReader.getInstance(slice, entry.bitsPerValue);
- return new LongValues() {
- @Override
- public long get(long id) {
- return min + mult * quotientReader.get(id);
- }
- };
- }
- case TABLE_COMPRESSED: {
- RandomAccessInput slice = this.data.randomAccessSlice(entry.offset, entry.endOffset - entry.offset);
- final long table[] = entry.table;
- final LongValues ords = DirectReader.getInstance(slice, entry.bitsPerValue);
- return new LongValues() {
- @Override
- public long get(long id) {
- return table[(int) ords.get(id)];
- }
- };
- }
- default:
- throw new AssertionError();
- }
- }
-
- @Override
- public BinaryDocValues getBinary(FieldInfo field) throws IOException {
- BinaryEntry bytes = binaries.get(field.name);
- switch(bytes.format) {
- case BINARY_FIXED_UNCOMPRESSED:
- return getFixedBinary(field, bytes);
- case BINARY_VARIABLE_UNCOMPRESSED:
- return getVariableBinary(field, bytes);
- case BINARY_PREFIX_COMPRESSED:
- return getCompressedBinary(field, bytes);
- default:
- throw new AssertionError();
- }
- }
-
- private BinaryDocValues getFixedBinary(FieldInfo field, final BinaryEntry bytes) throws IOException {
- final IndexInput data = this.data.slice("fixed-binary", bytes.offset, bytes.count * bytes.maxLength);
-
- final BytesRef term = new BytesRef(bytes.maxLength);
- final byte[] buffer = term.bytes;
- final int length = term.length = bytes.maxLength;
-
- return new LongBinaryDocValues() {
- @Override
- public BytesRef get(long id) {
- try {
- data.seek(id * length);
- data.readBytes(buffer, 0, buffer.length);
- return term;
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- }
- };
- }
-
- /** returns an address instance for variable-length binary values. */
- private synchronized MonotonicBlockPackedReader getAddressInstance(FieldInfo field, BinaryEntry bytes) throws IOException {
- MonotonicBlockPackedReader addresses = addressInstances.get(field.name);
- if (addresses == null) {
- data.seek(bytes.addressesOffset);
- addresses = MonotonicBlockPackedReader.of(data, bytes.packedIntsVersion, bytes.blockSize, bytes.count+1, false);
- if (!merging) {
- addressInstances.put(field.name, addresses);
- ramBytesUsed.addAndGet(addresses.ramBytesUsed() + Integer.BYTES);
- }
- }
- return addresses;
- }
-
- private BinaryDocValues getVariableBinary(FieldInfo field, final BinaryEntry bytes) throws IOException {
- final MonotonicBlockPackedReader addresses = getAddressInstance(field, bytes);
-
- final IndexInput data = this.data.slice("var-binary", bytes.offset, bytes.addressesOffset - bytes.offset);
- final BytesRef term = new BytesRef(Math.max(0, bytes.maxLength));
- final byte buffer[] = term.bytes;
-
- return new LongBinaryDocValues() {
- @Override
- public BytesRef get(long id) {
- long startAddress = addresses.get(id);
- long endAddress = addresses.get(id+1);
- int length = (int) (endAddress - startAddress);
- try {
- data.seek(startAddress);
- data.readBytes(buffer, 0, length);
- term.length = length;
- return term;
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- }
- };
- }
-
- /** returns an address instance for prefix-compressed binary values. */
- private synchronized MonotonicBlockPackedReader getIntervalInstance(FieldInfo field, BinaryEntry bytes) throws IOException {
- MonotonicBlockPackedReader addresses = addressInstances.get(field.name);
- if (addresses == null) {
- data.seek(bytes.addressesOffset);
- final long size = (bytes.count + INTERVAL_MASK) >>> INTERVAL_SHIFT;
- addresses = MonotonicBlockPackedReader.of(data, bytes.packedIntsVersion, bytes.blockSize, size, false);
- if (!merging) {
- addressInstances.put(field.name, addresses);
- ramBytesUsed.addAndGet(addresses.ramBytesUsed() + Integer.BYTES);
- }
- }
- return addresses;
- }
-
- /** returns a reverse lookup instance for prefix-compressed binary values. */
- private synchronized ReverseTermsIndex getReverseIndexInstance(FieldInfo field, BinaryEntry bytes) throws IOException {
- ReverseTermsIndex index = reverseIndexInstances.get(field.name);
- if (index == null) {
- index = new ReverseTermsIndex();
- data.seek(bytes.reverseIndexOffset);
- long size = (bytes.count + REVERSE_INTERVAL_MASK) >>> REVERSE_INTERVAL_SHIFT;
- index.termAddresses = MonotonicBlockPackedReader.of(data, bytes.packedIntsVersion, bytes.blockSize, size, false);
- long dataSize = data.readVLong();
- PagedBytes pagedBytes = new PagedBytes(15);
- pagedBytes.copy(data, dataSize);
- index.terms = pagedBytes.freeze(true);
- if (!merging) {
- reverseIndexInstances.put(field.name, index);
- ramBytesUsed.addAndGet(index.ramBytesUsed());
- }
- }
- return index;
- }
-
- private BinaryDocValues getCompressedBinary(FieldInfo field, final BinaryEntry bytes) throws IOException {
- final MonotonicBlockPackedReader addresses = getIntervalInstance(field, bytes);
- final ReverseTermsIndex index = getReverseIndexInstance(field, bytes);
- assert addresses.size() > 0; // we don't have to handle the empty case
- IndexInput slice = data.slice("terms", bytes.offset, bytes.addressesOffset - bytes.offset);
- return new CompressedBinaryDocValues(bytes, addresses, index, slice);
- }
-
- @Override
- public SortedDocValues getSorted(FieldInfo field) throws IOException {
- final int valueCount = (int) binaries.get(field.name).count;
- final BinaryDocValues binary = getBinary(field);
- NumericEntry entry = ords.get(field.name);
- final LongValues ordinals = getNumeric(entry);
- return new SortedDocValues() {
-
- @Override
- public int getOrd(int docID) {
- return (int) ordinals.get(docID);
- }
-
- @Override
- public BytesRef lookupOrd(int ord) {
- return binary.get(ord);
- }
-
- @Override
- public int getValueCount() {
- return valueCount;
- }
-
- @Override
- public int lookupTerm(BytesRef key) {
- if (binary instanceof CompressedBinaryDocValues) {
- return (int) ((CompressedBinaryDocValues)binary).lookupTerm(key);
- } else {
- return super.lookupTerm(key);
- }
- }
-
- @Override
- public TermsEnum termsEnum() {
- if (binary instanceof CompressedBinaryDocValues) {
- return ((CompressedBinaryDocValues)binary).getTermsEnum();
- } else {
- return super.termsEnum();
- }
- }
- };
- }
-
- /** returns an address instance for sortedset ordinal lists */
- private synchronized MonotonicBlockPackedReader getOrdIndexInstance(FieldInfo field, NumericEntry entry) throws IOException {
- MonotonicBlockPackedReader instance = ordIndexInstances.get(field.name);
- if (instance == null) {
- data.seek(entry.offset);
- instance = MonotonicBlockPackedReader.of(data, entry.packedIntsVersion, entry.blockSize, entry.count+1, false);
- if (!merging) {
- ordIndexInstances.put(field.name, instance);
- ramBytesUsed.addAndGet(instance.ramBytesUsed() + Integer.BYTES);
- }
- }
- return instance;
- }
-
- @Override
- public SortedNumericDocValues getSortedNumeric(FieldInfo field) throws IOException {
- SortedSetEntry ss = sortedNumerics.get(field.name);
- if (ss.format == SORTED_SINGLE_VALUED) {
- NumericEntry numericEntry = numerics.get(field.name);
- final LongValues values = getNumeric(numericEntry);
- final Bits docsWithField = getLiveBits(numericEntry.missingOffset, maxDoc);
- return DocValues.singleton(values, docsWithField);
- } else if (ss.format == SORTED_WITH_ADDRESSES) {
- NumericEntry numericEntry = numerics.get(field.name);
- final LongValues values = getNumeric(numericEntry);
- final MonotonicBlockPackedReader ordIndex = getOrdIndexInstance(field, ordIndexes.get(field.name));
-
- return new SortedNumericDocValues() {
- long startOffset;
- long endOffset;
-
- @Override
- public void setDocument(int doc) {
- startOffset = ordIndex.get(doc);
- endOffset = ordIndex.get(doc+1L);
- }
-
- @Override
- public long valueAt(int index) {
- return values.get(startOffset + index);
- }
-
- @Override
- public int count() {
- return (int) (endOffset - startOffset);
- }
- };
- } else if (ss.format == SORTED_SET_TABLE) {
- NumericEntry entry = ords.get(field.name);
- final LongValues ordinals = getNumeric(entry);
-
- final long[] table = ss.table;
- final int[] offsets = ss.tableOffsets;
- return new SortedNumericDocValues() {
- int startOffset;
- int endOffset;
-
- @Override
- public void setDocument(int doc) {
- final int ord = (int) ordinals.get(doc);
- startOffset = offsets[ord];
- endOffset = offsets[ord + 1];
- }
-
- @Override
- public long valueAt(int index) {
- return table[startOffset + index];
- }
-
- @Override
- public int count() {
- return endOffset - startOffset;
- }
- };
- } else {
- throw new AssertionError();
- }
- }
-
- @Override
- public SortedSetDocValues getSortedSet(FieldInfo field) throws IOException {
- SortedSetEntry ss = sortedSets.get(field.name);
- switch (ss.format) {
- case SORTED_SINGLE_VALUED:
- final SortedDocValues values = getSorted(field);
- return DocValues.singleton(values);
- case SORTED_WITH_ADDRESSES:
- return getSortedSetWithAddresses(field);
- case SORTED_SET_TABLE:
- return getSortedSetTable(field, ss);
- default:
- throw new AssertionError();
- }
- }
-
- private SortedSetDocValues getSortedSetWithAddresses(FieldInfo field) throws IOException {
- final long valueCount = binaries.get(field.name).count;
- // we keep the byte[]s and list of ords on disk, since these could be large
- final LongBinaryDocValues binary = (LongBinaryDocValues) getBinary(field);
- final LongValues ordinals = getNumeric(ords.get(field.name));
- // but the addresses to the ord stream are in RAM
- final MonotonicBlockPackedReader ordIndex = getOrdIndexInstance(field, ordIndexes.get(field.name));
-
- return new RandomAccessOrds() {
- long startOffset;
- long offset;
- long endOffset;
-
- @Override
- public long nextOrd() {
- if (offset == endOffset) {
- return NO_MORE_ORDS;
- } else {
- long ord = ordinals.get(offset);
- offset++;
- return ord;
- }
- }
-
- @Override
- public void setDocument(int docID) {
- startOffset = offset = ordIndex.get(docID);
- endOffset = ordIndex.get(docID+1L);
- }
-
- @Override
- public BytesRef lookupOrd(long ord) {
- return binary.get(ord);
- }
-
- @Override
- public long getValueCount() {
- return valueCount;
- }
-
- @Override
- public long lookupTerm(BytesRef key) {
- if (binary instanceof CompressedBinaryDocValues) {
- return ((CompressedBinaryDocValues)binary).lookupTerm(key);
- } else {
- return super.lookupTerm(key);
- }
- }
-
- @Override
- public TermsEnum termsEnum() {
- if (binary instanceof CompressedBinaryDocValues) {
- return ((CompressedBinaryDocValues)binary).getTermsEnum();
- } else {
- return super.termsEnum();
- }
- }
-
- @Override
- public long ordAt(int index) {
- return ordinals.get(startOffset + index);
- }
-
- @Override
- public int cardinality() {
- return (int) (endOffset - startOffset);
- }
- };
- }
-
- private SortedSetDocValues getSortedSetTable(FieldInfo field, SortedSetEntry ss) throws IOException {
- final long valueCount = binaries.get(field.name).count;
- final LongBinaryDocValues binary = (LongBinaryDocValues) getBinary(field);
- final LongValues ordinals = getNumeric(ords.get(field.name));
-
- final long[] table = ss.table;
- final int[] offsets = ss.tableOffsets;
-
- return new RandomAccessOrds() {
-
- int offset, startOffset, endOffset;
-
- @Override
- public void setDocument(int docID) {
- final int ord = (int) ordinals.get(docID);
- offset = startOffset = offsets[ord];
- endOffset = offsets[ord + 1];
- }
-
- @Override
- public long ordAt(int index) {
- return table[startOffset + index];
- }
-
- @Override
- public long nextOrd() {
- if (offset == endOffset) {
- return NO_MORE_ORDS;
- } else {
- return table[offset++];
- }
- }
-
- @Override
- public int cardinality() {
- return endOffset - startOffset;
- }
-
- @Override
- public BytesRef lookupOrd(long ord) {
- return binary.get(ord);
- }
-
- @Override
- public long getValueCount() {
- return valueCount;
- }
-
- @Override
- public long lookupTerm(BytesRef key) {
- if (binary instanceof CompressedBinaryDocValues) {
- return ((CompressedBinaryDocValues) binary).lookupTerm(key);
- } else {
- return super.lookupTerm(key);
- }
- }
-
- @Override
- public TermsEnum termsEnum() {
- if (binary instanceof CompressedBinaryDocValues) {
- return ((CompressedBinaryDocValues) binary).getTermsEnum();
- } else {
- return super.termsEnum();
- }
- }
-
- };
- }
-
- private Bits getLiveBits(final long offset, final int count) throws IOException {
- if (offset == ALL_MISSING) {
- return new Bits.MatchNoBits(count);
- } else if (offset == ALL_LIVE) {
- return new Bits.MatchAllBits(count);
- } else {
- int length = (int) ((count + 7L) >>> 3);
- final RandomAccessInput in = data.randomAccessSlice(offset, length);
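- // one bit per document, packed little-endian within each byte (mirrors writeMissingBitset on the write side)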
- return new Bits() {
- @Override
- public boolean get(int index) {
- try {
- return (in.readByte(index >> 3) & (1 << (index & 7))) != 0;
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- }
-
- @Override
- public int length() {
- return count;
- }
- };
- }
- }
-
- @Override
- public Bits getDocsWithField(FieldInfo field) throws IOException {
- switch(field.getDocValuesType()) {
- case SORTED_SET:
- return DocValues.docsWithValue(getSortedSet(field), maxDoc);
- case SORTED_NUMERIC:
- return DocValues.docsWithValue(getSortedNumeric(field), maxDoc);
- case SORTED:
- return DocValues.docsWithValue(getSorted(field), maxDoc);
- case BINARY:
- BinaryEntry be = binaries.get(field.name);
- return getLiveBits(be.missingOffset, maxDoc);
- case NUMERIC:
- NumericEntry ne = numerics.get(field.name);
- return getLiveBits(ne.missingOffset, maxDoc);
- default:
- throw new AssertionError();
- }
- }
-
- @Override
- public synchronized DocValuesProducer getMergeInstance() throws IOException {
- return new Lucene50DocValuesProducer(this);
- }
-
- @Override
- public void close() throws IOException {
- data.close();
- }
-
- /** metadata entry for a numeric docvalues field */
- static class NumericEntry {
- private NumericEntry() {}
- /** offset to the bitset representing docsWithField, or -1 if no documents have missing values */
- long missingOffset;
- /** offset to the actual numeric values */
- public long offset;
- /** end offset to the actual numeric values */
- public long endOffset;
- /** bits per value used to pack the numeric values */
- public int bitsPerValue;
-
- int format;
- /** packed ints version used to encode these numerics */
- public int packedIntsVersion;
- /** count of values written */
- public long count;
- /** packed ints blocksize */
- public int blockSize;
-
- long minValue;
- long gcd;
- long table[];
- }
-
- /** metadata entry for a binary docvalues field */
- static class BinaryEntry {
- private BinaryEntry() {}
- /** offset to the bitset representing docsWithField, or -1 if no documents have missing values */
- long missingOffset;
- /** offset to the actual binary values */
- long offset;
-
- int format;
- /** count of values written */
- public long count;
- int minLength;
- int maxLength;
- /** offset to the addressing data that maps a value to its slice of the byte[] */
- public long addressesOffset;
- /** offset to the reverse index */
- public long reverseIndexOffset;
- /** packed ints version used to encode addressing information */
- public int packedIntsVersion;
- /** packed ints blocksize */
- public int blockSize;
- }
-
- /** metadata entry for a sorted-set docvalues field */
- static class SortedSetEntry {
- private SortedSetEntry() {}
- int format;
-
- long[] table;
- int[] tableOffsets;
- }
-
- // internally we compose complex dv (sorted/sortedset) from other ones
- static abstract class LongBinaryDocValues extends BinaryDocValues {
- @Override
- public final BytesRef get(int docID) {
- return get((long)docID);
- }
-
- abstract BytesRef get(long id);
- }
-
- // used for reverse lookup to a small range of blocks
- static class ReverseTermsIndex implements Accountable {
- public MonotonicBlockPackedReader termAddresses;
- public PagedBytes.Reader terms;
-
- @Override
- public long ramBytesUsed() {
- return termAddresses.ramBytesUsed() + terms.ramBytesUsed();
- }
-
- @Override
- public Collection<Accountable> getChildResources() {
- List<Accountable> resources = new ArrayList<>();
- resources.add(Accountables.namedAccountable("term bytes", terms));
- resources.add(Accountables.namedAccountable("term addresses", termAddresses));
- return Collections.unmodifiableList(resources);
- }
-
- @Override
- public String toString() {
- return getClass().getSimpleName() + "(size=" + termAddresses.size() + ")";
- }
- }
-
- //in the compressed case, we add a few additional operations for
- //more efficient reverse lookup and enumeration
- static final class CompressedBinaryDocValues extends LongBinaryDocValues {
- final long numValues;
- final long numIndexValues;
- final int maxTermLength;
- final MonotonicBlockPackedReader addresses;
- final IndexInput data;
- final CompressedBinaryTermsEnum termsEnum;
- final PagedBytes.Reader reverseTerms;
- final MonotonicBlockPackedReader reverseAddresses;
- final long numReverseIndexValues;
-
- public CompressedBinaryDocValues(BinaryEntry bytes, MonotonicBlockPackedReader addresses, ReverseTermsIndex index, IndexInput data) throws IOException {
- this.maxTermLength = bytes.maxLength;
- this.numValues = bytes.count;
- this.addresses = addresses;
- this.numIndexValues = addresses.size();
- this.data = data;
- this.reverseTerms = index.terms;
- this.reverseAddresses = index.termAddresses;
- this.numReverseIndexValues = reverseAddresses.size();
- this.termsEnum = getTermsEnum(data);
- }
-
- @Override
- public BytesRef get(long id) {
- try {
- termsEnum.seekExact(id);
- return termsEnum.term();
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- }
-
- long lookupTerm(BytesRef key) {
- try {
- switch (termsEnum.seekCeil(key)) {
- case FOUND: return termsEnum.ord();
- case NOT_FOUND: return -termsEnum.ord()-1;
- default: return -numValues-1;
- }
- } catch (IOException bogus) {
- throw new RuntimeException(bogus);
- }
- }
-
- TermsEnum getTermsEnum() {
- try {
- return getTermsEnum(data.clone());
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- }
-
- private CompressedBinaryTermsEnum getTermsEnum(IndexInput input) throws IOException {
- return new CompressedBinaryTermsEnum(input);
- }
-
- class CompressedBinaryTermsEnum extends TermsEnum {
- private long currentOrd = -1;
- // offset to the start of the current block
- private long currentBlockStart;
- private final IndexInput input;
- // delta from currentBlockStart to start of each term
- private final int offsets[] = new int[INTERVAL_COUNT];
- private final byte buffer[] = new byte[2*INTERVAL_COUNT-1];
-
- private final BytesRef term = new BytesRef(maxTermLength);
- private final BytesRef firstTerm = new BytesRef(maxTermLength);
- private final BytesRef scratch = new BytesRef();
-
- CompressedBinaryTermsEnum(IndexInput input) throws IOException {
- this.input = input;
- input.seek(0);
- }
-
- private void readHeader() throws IOException {
- firstTerm.length = input.readVInt();
- input.readBytes(firstTerm.bytes, 0, firstTerm.length);
- input.readBytes(buffer, 0, INTERVAL_COUNT-1);
- if (buffer[0] == -1) {
- readShortAddresses();
- } else {
- readByteAddresses();
- }
- currentBlockStart = input.getFilePointer();
- }
-
- // read single byte addresses: each is delta - 2
- // (shared prefix byte and length > 0 are both implicit)
- private void readByteAddresses() throws IOException {
- int addr = 0;
- for (int i = 1; i < offsets.length; i++) {
- addr += 2 + (buffer[i-1] & 0xFF);
- offsets[i] = addr;
- }
- }
-
- // read double byte addresses: each is delta - 2
- // (shared prefix byte and length > 0 are both implicit)
- private void readShortAddresses() throws IOException {
- input.readBytes(buffer, INTERVAL_COUNT-1, INTERVAL_COUNT);
- int addr = 0;
- for (int i = 1; i < offsets.length; i++) {
- int x = i<<1;
- addr += 2 + ((buffer[x-1] << 8) | (buffer[x] & 0xFF));
- offsets[i] = addr;
- }
- }
-
- // set term to the first term
- private void readFirstTerm() throws IOException {
- term.length = firstTerm.length;
- System.arraycopy(firstTerm.bytes, firstTerm.offset, term.bytes, 0, term.length);
- }
-
- // read term at offset, delta encoded from first term
- private void readTerm(int offset) throws IOException {
- int start = input.readByte() & 0xFF;
- System.arraycopy(firstTerm.bytes, firstTerm.offset, term.bytes, 0, start);
- int suffix = offsets[offset] - offsets[offset-1] - 1;
- input.readBytes(term.bytes, start, suffix);
- term.length = start + suffix;
- }
-
- @Override
- public BytesRef next() throws IOException {
- currentOrd++;
- if (currentOrd >= numValues) {
- return null;
- } else {
- int offset = (int) (currentOrd & INTERVAL_MASK);
- if (offset == 0) {
- // switch to next block
- readHeader();
- readFirstTerm();
- } else {
- readTerm(offset);
- }
- return term;
- }
- }
-
- // binary search reverse index to find smaller
- // range of blocks to search
- long binarySearchIndex(BytesRef text) throws IOException {
- long low = 0;
- long high = numReverseIndexValues - 1;
- while (low <= high) {
- long mid = (low + high) >>> 1;
- reverseTerms.fill(scratch, reverseAddresses.get(mid));
- int cmp = scratch.compareTo(text);
-
- if (cmp < 0) {
- low = mid + 1;
- } else if (cmp > 0) {
- high = mid - 1;
- } else {
- return mid;
- }
- }
- return high;
- }
-
- // binary search against first term in block range
- // to find term's block
- long binarySearchBlock(BytesRef text, long low, long high) throws IOException {
- while (low <= high) {
- long mid = (low + high) >>> 1;
- input.seek(addresses.get(mid));
- term.length = input.readVInt();
- input.readBytes(term.bytes, 0, term.length);
- int cmp = term.compareTo(text);
-
- if (cmp < 0) {
- low = mid + 1;
- } else if (cmp > 0) {
- high = mid - 1;
- } else {
- return mid;
- }
- }
- return high;
- }
-
- @Override
- public SeekStatus seekCeil(BytesRef text) throws IOException {
- // locate block: narrow to block range with index, then search blocks
- final long block;
- long indexPos = binarySearchIndex(text);
- if (indexPos < 0) {
- block = 0;
- } else {
- long low = indexPos << BLOCK_INTERVAL_SHIFT;
- long high = Math.min(numIndexValues - 1, low + BLOCK_INTERVAL_MASK);
- block = Math.max(low, binarySearchBlock(text, low, high));
- }
-
- // position before block, then scan to term.
- input.seek(addresses.get(block));
- currentOrd = (block << INTERVAL_SHIFT) - 1;
-
- while (next() != null) {
- int cmp = term.compareTo(text);
- if (cmp == 0) {
- return SeekStatus.FOUND;
- } else if (cmp > 0) {
- return SeekStatus.NOT_FOUND;
- }
- }
- return SeekStatus.END;
- }
-
- @Override
- public void seekExact(long ord) throws IOException {
- long block = ord >>> INTERVAL_SHIFT;
- if (block != currentOrd >>> INTERVAL_SHIFT) {
- // switch to different block
- input.seek(addresses.get(block));
- readHeader();
- }
-
- currentOrd = ord;
-
- int offset = (int) (ord & INTERVAL_MASK);
- if (offset == 0) {
- readFirstTerm();
- } else {
- input.seek(currentBlockStart + offsets[offset-1]);
- readTerm(offset);
- }
- }
-
- @Override
- public BytesRef term() throws IOException {
- return term;
- }
-
- @Override
- public long ord() throws IOException {
- return currentOrd;
- }
-
- @Override
- public int docFreq() throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public long totalTermFreq() throws IOException {
- return -1;
- }
-
- @Override
- public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- }
- }
-}
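The CompressedBinaryTermsEnum removed above performs a classic two-level seek: binary-search a small reverse index to narrow the block range, binary-search the block-leading terms, then scan at most one block. A minimal sketch of that idea follows, with an illustrative class name, block size, and plain String terms standing in for the removed prefix-compressed Lucene internals:

public class BlockedTermLookup {
    private static final int INTERVAL = 4;   // terms per block; illustrative only
    private final String[] terms;            // sorted terms, as if already block-decoded

    BlockedTermLookup(String[] sortedTerms) { this.terms = sortedTerms; }

    /** Ordinal of the smallest term >= key, or terms.length if none (a seekCeil). */
    long seekCeil(String key) {
        int numBlocks = (terms.length + INTERVAL - 1) / INTERVAL;
        // 1) binary search over the block-leading terms to pick the block
        int low = 0, high = numBlocks - 1, exact = -1;
        while (low <= high) {
            int mid = (low + high) >>> 1;
            int cmp = terms[mid * INTERVAL].compareTo(key);
            if (cmp < 0) low = mid + 1;
            else if (cmp > 0) high = mid - 1;
            else { exact = mid; break; }
        }
        int block = (exact >= 0) ? exact : Math.max(0, high);
        // 2) scan from the chosen block (at most INTERVAL comparisons when the key exists)
        for (long ord = (long) block * INTERVAL; ord < terms.length; ord++) {
            if (terms[(int) ord].compareTo(key) >= 0) return ord;
        }
        return terms.length;  // key sorts after the last term
    }

    public static void main(String[] args) {
        BlockedTermLookup t = new BlockedTermLookup(new String[] {
            "ant", "bee", "cat", "dog", "eel", "fox", "gnu", "hen"});
        System.out.println(t.seekCeil("cow"));  // prints 3 (ordinal of "dog")
    }
}

The point of the two levels is that the reverse index stays small and hot in memory, so only one block of terms is ever decoded per lookup.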
[41/50] [abbrv] lucene-solr:apiv2: SOLR-9381: Snitch for freedisk uses '/' instead of 'coreRootDirectory'
Posted by no...@apache.org.
SOLR-9381: Snitch for freedisk uses '/' instead of 'coreRootDirectory'
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/c56d8323
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/c56d8323
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/c56d8323
Branch: refs/heads/apiv2
Commit: c56d832349a32fc3f975d4a41a52e7348cc88111
Parents: d9c0f2c
Author: Noble Paul <no...@apache.org>
Authored: Fri Sep 2 17:53:02 2016 +0530
Committer: Noble Paul <no...@apache.org>
Committed: Fri Sep 2 17:53:02 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 3 +++
.../java/org/apache/solr/cloud/rule/ImplicitSnitch.java | 10 +++++-----
.../src/test/org/apache/solr/cloud/rule/RulesTest.java | 7 ++++---
3 files changed, 12 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c56d8323/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 3f1b8a5..3b220d2 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -107,6 +107,9 @@ Bug Fixes
* SOLR-9444: Fix path usage for cloud backup/restore. (Hrishikesh Gadre, Uwe Schindler, Varun Thacker)
+* SOLR-9381: Snitch for freedisk uses '/' instead of 'coreRootDirectory' (Tim Owen, noble)
+
+
Optimizations
----------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c56d8323/solr/core/src/java/org/apache/solr/cloud/rule/ImplicitSnitch.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/rule/ImplicitSnitch.java b/solr/core/src/java/org/apache/solr/cloud/rule/ImplicitSnitch.java
index ac1d7ad..09f8e2c 100644
--- a/solr/core/src/java/org/apache/solr/cloud/rule/ImplicitSnitch.java
+++ b/solr/core/src/java/org/apache/solr/cloud/rule/ImplicitSnitch.java
@@ -20,7 +20,7 @@ import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.net.InetAddress;
import java.nio.file.Files;
-import java.nio.file.Paths;
+import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
@@ -100,21 +100,21 @@ public class ImplicitSnitch extends Snitch implements CoreAdminHandler.Invocable
}
}
- static long getUsableSpaceInGB() throws IOException {
- long space = Files.getFileStore(Paths.get("/")).getUsableSpace();
+ static long getUsableSpaceInGB(Path path) throws IOException {
+ long space = Files.getFileStore(path).getUsableSpace();
long spaceInGB = space / 1024 / 1024 / 1024;
return spaceInGB;
}
public Map<String, Object> invoke(SolrQueryRequest req) {
Map<String, Object> result = new HashMap<>();
+ CoreContainer cc = (CoreContainer) req.getContext().get(CoreContainer.class.getName());
if (req.getParams().getInt(CORES, -1) == 1) {
- CoreContainer cc = (CoreContainer) req.getContext().get(CoreContainer.class.getName());
result.put(CORES, cc.getCoreNames().size());
}
if (req.getParams().getInt(DISK, -1) == 1) {
try {
- final long spaceInGB = getUsableSpaceInGB();
+ final long spaceInGB = getUsableSpaceInGB(cc.getCoreRootDirectory());
result.put(DISK, spaceInGB);
} catch (IOException e) {
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c56d8323/solr/core/src/test/org/apache/solr/cloud/rule/RulesTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/rule/RulesTest.java b/solr/core/src/test/org/apache/solr/cloud/rule/RulesTest.java
index f23d475..83f02b1 100644
--- a/solr/core/src/test/org/apache/solr/cloud/rule/RulesTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/rule/RulesTest.java
@@ -18,6 +18,7 @@ package org.apache.solr.cloud.rule;
import java.lang.invoke.MethodHandles;
+import java.nio.file.Paths;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
@@ -52,7 +53,7 @@ public class RulesTest extends AbstractFullDistribZkTestBase {
@ShardsFixed(num = 5)
public void doIntegrationTest() throws Exception {
final long minGB = (random().nextBoolean() ? 1 : 0);
- assumeTrue("doIntegrationTest needs minGB="+minGB+" usable disk space", ImplicitSnitch.getUsableSpaceInGB() > minGB);
+ assumeTrue("doIntegrationTest needs minGB="+minGB+" usable disk space", ImplicitSnitch.getUsableSpaceInGB(Paths.get("/")) > minGB);
String rulesColl = "rulesColl";
try (SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)))) {
CollectionAdminResponse rsp;
@@ -208,8 +209,8 @@ public class RulesTest extends AbstractFullDistribZkTestBase {
public void testModifyColl() throws Exception {
final long minGB1 = (random().nextBoolean() ? 1 : 0);
final long minGB2 = 5;
- assumeTrue("testModifyColl needs minGB1="+minGB1+" usable disk space", ImplicitSnitch.getUsableSpaceInGB() > minGB1);
- assumeTrue("testModifyColl needs minGB2="+minGB2+" usable disk space", ImplicitSnitch.getUsableSpaceInGB() > minGB2);
+ assumeTrue("testModifyColl needs minGB1="+minGB1+" usable disk space", ImplicitSnitch.getUsableSpaceInGB(Paths.get("/")) > minGB1);
+ assumeTrue("testModifyColl needs minGB2="+minGB2+" usable disk space", ImplicitSnitch.getUsableSpaceInGB(Paths.get("/")) > minGB2);
String rulesColl = "modifyColl";
try (SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)))) {
CollectionAdminResponse rsp;
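The whole fix pivots on one java.nio call: Files.getFileStore(path) resolves the file store that actually holds a given path, so free space is measured where the cores live instead of on the root partition. A standalone sketch of that API (the directory below is a placeholder, not Solr's real core root):

import java.io.IOException;
import java.nio.file.FileStore;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class FreeDiskCheck {
    /** Usable space, in whole GB, on the file store containing 'path'. */
    static long usableSpaceInGB(Path path) throws IOException {
        FileStore store = Files.getFileStore(path);  // store that holds this path
        return store.getUsableSpace() / 1024 / 1024 / 1024;
    }

    public static void main(String[] args) throws IOException {
        // The path must exist; the current working directory is a safe stand-in here.
        Path coreRoot = Paths.get(System.getProperty("user.dir"));
        System.out.println(usableSpaceInGB(coreRoot));
    }
}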
[44/50] [abbrv] lucene-solr:apiv2: SOLR-9460: Disable test that does not work with Windows
Posted by no...@apache.org.
SOLR-9460: Disable test that does not work with Windows
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/62f8b8d0
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/62f8b8d0
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/62f8b8d0
Branch: refs/heads/apiv2
Commit: 62f8b8d02a3060547105e7a24e680fac46e1bf39
Parents: ff69d14
Author: Uwe Schindler <us...@apache.org>
Authored: Sat Sep 3 10:48:01 2016 +0200
Committer: Uwe Schindler <us...@apache.org>
Committed: Sat Sep 3 10:48:01 2016 +0200
----------------------------------------------------------------------
.../apache/solr/cloud/TestSolrCloudWithSecureImpersonation.java | 3 +++
1 file changed, 3 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/62f8b8d0/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithSecureImpersonation.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithSecureImpersonation.java b/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithSecureImpersonation.java
index 71107ee..3727620 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithSecureImpersonation.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithSecureImpersonation.java
@@ -25,6 +25,7 @@ import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.conf.Configuration;
+import org.apache.lucene.util.Constants;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrRequest;
@@ -92,6 +93,8 @@ public class TestSolrCloudWithSecureImpersonation extends SolrTestCaseJ4 {
@BeforeClass
public static void startup() throws Exception {
+ assumeFalse("Hadoop does not work on Windows", Constants.WINDOWS);
+
System.setProperty("authenticationPlugin", HttpParamDelegationTokenPlugin.class.getName());
System.setProperty(KerberosPlugin.DELEGATION_TOKEN_ENABLED, "true");
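assumeFalse here is the standard JUnit mechanism: a failed assumption marks the tests as skipped rather than failed. The same pattern outside Solr's test hierarchy looks roughly like this (assuming JUnit 4.11+ for the message-taking overload; the real test uses Lucene's Constants.WINDOWS rather than parsing os.name):

import org.junit.Assume;
import org.junit.BeforeClass;
import org.junit.Test;

public class SkipOnWindowsTest {
    @BeforeClass
    public static void checkPlatform() {
        // A false assumption here skips every test in the class on Windows.
        Assume.assumeFalse("Hadoop does not work on Windows",
            System.getProperty("os.name").startsWith("Windows"));
    }

    @Test
    public void runsEverywhereElse() {
        // platform-sensitive test body would go here
    }
}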
[15/50] [abbrv] lucene-solr:apiv2: LUCENE-7427: in DocumentsWriterDeleteQueue remove unused private members and tweak javadoc.
Posted by no...@apache.org.
LUCENE-7427: in DocumentsWriterDeleteQueue remove unused private members and tweak javadoc.
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/4abbdbe7
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/4abbdbe7
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/4abbdbe7
Branch: refs/heads/apiv2
Commit: 4abbdbe78a3eb7a9f0d77851d5a0c4380d6b2dbd
Parents: 646b6bf
Author: Christine Poerschke <cp...@apache.org>
Authored: Fri Aug 26 13:44:25 2016 +0100
Committer: Christine Poerschke <cp...@apache.org>
Committed: Fri Aug 26 13:44:25 2016 +0100
----------------------------------------------------------------------
.../lucene/index/DocumentsWriterDeleteQueue.java | 16 ++--------------
1 file changed, 2 insertions(+), 14 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4abbdbe7/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterDeleteQueue.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterDeleteQueue.java b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterDeleteQueue.java
index dac2e4c..db0e571 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterDeleteQueue.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterDeleteQueue.java
@@ -18,7 +18,6 @@ package org.apache.lucene.index;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.lucene.index.DocValuesUpdate.BinaryDocValuesUpdate;
@@ -32,7 +31,8 @@ import org.apache.lucene.util.BytesRef;
* queue. In contrast to other queue implementation we only maintain the
* tail of the queue. A delete queue is always used in a context of a set of
* DWPTs and a global delete pool. Each of the DWPT and the global pool need to
- * maintain their 'own' head of the queue (as a DeleteSlice instance per DWPT).
+ * maintain their 'own' head of the queue (as a DeleteSlice instance per
+ * {@link DocumentsWriterPerThread}).
* The difference between the DWPT and the global pool is that the DWPT starts
* maintaining a head once it has added its first document since for its segments
* private deletes only the deletes after that document are relevant. The global
@@ -71,10 +71,6 @@ final class DocumentsWriterDeleteQueue implements Accountable {
// the current end (latest delete operation) in the delete queue:
private volatile Node<?> tail;
-
- @SuppressWarnings("rawtypes")
- private static final AtomicReferenceFieldUpdater<DocumentsWriterDeleteQueue,Node> tailUpdater = AtomicReferenceFieldUpdater
- .newUpdater(DocumentsWriterDeleteQueue.class, Node.class, "tail");
/** Used to record deletes against all prior (already written to disk) segments. Whenever any segment flushes, we bundle up this set of
* deletes and insert into the buffered updates stream before the newly flushed segment(s). */
@@ -322,17 +318,9 @@ final class DocumentsWriterDeleteQueue implements Accountable {
this.item = item;
}
- @SuppressWarnings("rawtypes")
- static final AtomicReferenceFieldUpdater<Node,Node> nextUpdater = AtomicReferenceFieldUpdater
- .newUpdater(Node.class, Node.class, "next");
-
void apply(BufferedUpdates bufferedDeletes, int docIDUpto) {
throw new IllegalStateException("sentinel item must never be applied");
}
-
- boolean casNext(Node<?> cmp, Node<?> val) {
- return nextUpdater.compareAndSet(this, cmp, val);
- }
}
private static final class TermNode extends Node<Term> {
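The deleted tailUpdater/casNext members implemented the classic lock-free tail advance for a singly linked queue. For reference, a minimal sketch of that pattern with AtomicReference and illustrative names (not the Lucene internals, which keep only a volatile tail after this change):

import java.util.concurrent.atomic.AtomicReference;

final class DeleteQueueSketch<T> {
    static final class Node<T> {
        final T item;
        final AtomicReference<Node<T>> next = new AtomicReference<>();
        Node(T item) { this.item = item; }
    }

    private final AtomicReference<Node<T>> tail =
        new AtomicReference<>(new Node<T>(null));  // sentinel node

    void add(T item) {
        Node<T> newNode = new Node<T>(item);
        while (true) {
            Node<T> currentTail = tail.get();
            // Try to link the new node behind the observed tail...
            if (currentTail.next.compareAndSet(null, newNode)) {
                // ...then swing the tail forward; losing this CAS is harmless,
                // since another thread will already have advanced it for us.
                tail.compareAndSet(currentTail, newNode);
                return;
            } else {
                // Another thread linked first: help advance the tail, then retry.
                tail.compareAndSet(currentTail, currentTail.next.get());
            }
        }
    }
}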
[36/50] [abbrv] lucene-solr:apiv2: SOLR-9142: rename FFPByHashNumeric to FFPByHashDV as it's not just for numerics anymore
Posted by no...@apache.org.
SOLR-9142: rename FFPByHashNumeric to FFPByHashDV as it's not just for numerics anymore
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/6a4184c6
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/6a4184c6
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/6a4184c6
Branch: refs/heads/apiv2
Commit: 6a4184c6742e4ef3764bfc2184015af6b95d31bb
Parents: 7b5df8a
Author: David Smiley <ds...@apache.org>
Authored: Wed Aug 31 16:56:21 2016 -0400
Committer: David Smiley <ds...@apache.org>
Committed: Wed Aug 31 16:56:21 2016 -0400
----------------------------------------------------------------------
.../apache/solr/search/facet/FacetField.java | 2 +-
.../facet/FacetFieldProcessorByHashDV.java | 442 ++++++++++++++++++
.../facet/FacetFieldProcessorByHashNumeric.java | 443 -------------------
.../solr/search/facet/TestJsonFacets.java | 6 +-
4 files changed, 446 insertions(+), 447 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6a4184c6/solr/core/src/java/org/apache/solr/search/facet/FacetField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/facet/FacetField.java b/solr/core/src/java/org/apache/solr/search/facet/FacetField.java
index 4d56513..3f8cb0b 100644
--- a/solr/core/src/java/org/apache/solr/search/facet/FacetField.java
+++ b/solr/core/src/java/org/apache/solr/search/facet/FacetField.java
@@ -126,7 +126,7 @@ public class FacetField extends FacetRequestSorted {
if (mincount > 0 && prefix == null && (ntype != null || method == FacetMethod.DVHASH)) {
// TODO can we auto-pick for strings when term cardinality is much greater than DocSet cardinality?
// or if we don't know cardinality but DocSet size is very small
- return new FacetFieldProcessorByHashNumeric(fcontext, this, sf);
+ return new FacetFieldProcessorByHashDV(fcontext, this, sf);
} else if (ntype == null) {
// single valued string...
return new FacetFieldProcessorByArrayDV(fcontext, this, sf);
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6a4184c6/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessorByHashDV.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessorByHashDV.java b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessorByHashDV.java
new file mode 100644
index 0000000..fb93417
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessorByHashDV.java
@@ -0,0 +1,442 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.search.facet;
+
+import java.io.IOException;
+import java.text.ParseException;
+import java.util.function.IntFunction;
+
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.MultiDocValues;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.search.SimpleCollector;
+import org.apache.lucene.util.BitUtil;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LongValues;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.util.SimpleOrderedMap;
+import org.apache.solr.schema.SchemaField;
+import org.apache.solr.search.DocSetUtil;
+
+/**
+ * Facets numbers into a hash table. The number is either a raw numeric DocValues value, or
+ * a term global ordinal integer.
+ * Limitations:
+ * <ul>
+ * <li>doesn't handle multiValued, but could easily be added</li>
+ * <li>doesn't handle prefix, but could easily be added</li>
+ * <li>doesn't handle mincount==0 -- you're better off with an array alg</li>
+ * </ul>
+ */
+class FacetFieldProcessorByHashDV extends FacetFieldProcessor {
+ static int MAXIMUM_STARTING_TABLE_SIZE=1024; // must be a power of two, non-final to support setting by tests
+
+ /** a hash table with long keys (what we're counting) and integer values (counts) */
+ private static class LongCounts {
+
+ static final float LOAD_FACTOR = 0.7f;
+
+ long[] vals;
+ int[] counts; // maintain the counts here since we need them to tell if there was actually a value anyway
+ int[] oldToNewMapping;
+
+ int cardinality;
+ int threshold;
+
+ /** sz must be a power of two */
+ LongCounts(int sz) {
+ vals = new long[sz];
+ counts = new int[sz];
+ threshold = (int) (sz * LOAD_FACTOR);
+ }
+
+ /** Current number of slots in the hash table */
+ int numSlots() {
+ return vals.length;
+ }
+
+ private int hash(long val) {
+ // For floats: exponent bits start at bit 23 for single precision,
+ // and bit 52 for double precision.
+ // Many values will only have significant bits just to the right of that,
+ // and the leftmost bits will all be zero.
+
+ // For now, let's just settle for getting the first 8 significant mantissa bits of the double or float into the lowest bits of our hash.
+ // The upper bits of our hash will be irrelevant.
+ int h = (int) (val + (val >>> 44) + (val >>> 15));
+ return h;
+ }
+
+ /** returns the slot */
+ int add(long val) {
+ if (cardinality >= threshold) {
+ rehash();
+ }
+
+ int h = hash(val);
+ for (int slot = h & (vals.length-1); ;slot = (slot + ((h>>7)|1)) & (vals.length-1)) {
+ int count = counts[slot];
+ if (count == 0) {
+ counts[slot] = 1;
+ vals[slot] = val;
+ cardinality++;
+ return slot;
+ } else if (vals[slot] == val) {
+ // val is already in the set
+ counts[slot] = count + 1;
+ return slot;
+ }
+ }
+ }
+
+ protected void rehash() {
+ long[] oldVals = vals;
+ int[] oldCounts = counts; // after retrieving the count, this array is reused as a mapping to new array
+ int newCapacity = vals.length << 1;
+ vals = new long[newCapacity];
+ counts = new int[newCapacity];
+ threshold = (int) (newCapacity * LOAD_FACTOR);
+
+ for (int i=0; i<oldVals.length; i++) {
+ int count = oldCounts[i];
+ if (count == 0) {
+ oldCounts[i] = -1;
+ continue;
+ }
+
+ long val = oldVals[i];
+
+ int h = hash(val);
+ int slot = h & (vals.length-1);
+ while (counts[slot] != 0) {
+ slot = (slot + ((h>>7)|1)) & (vals.length-1);
+ }
+ counts[slot] = count;
+ vals[slot] = val;
+ oldCounts[i] = slot;
+ }
+
+ oldToNewMapping = oldCounts;
+ }
+
+ int cardinality() {
+ return cardinality;
+ }
+
+ }
+
+ /** A hack instance of Calc for Term ordinals in DocValues. */
+ // TODO consider making FacetRangeProcessor.Calc facet top level; then less of a hack?
+ private class TermOrdCalc extends FacetRangeProcessor.Calc {
+
+ IntFunction<BytesRef> lookupOrdFunction; // set in collectDocs()!
+
+ TermOrdCalc() throws IOException {
+ super(sf);
+ }
+
+ @Override
+ public long bitsToSortableBits(long globalOrd) {
+ return globalOrd;
+ }
+
+ /** To be returned in "buckets"/"val" */
+ @Override
+ public Comparable bitsToValue(long globalOrd) {
+ BytesRef bytesRef = lookupOrdFunction.apply((int) globalOrd);
+ // note FacetFieldProcessorByArray.findTopSlots also calls SchemaFieldType.toObject
+ return sf.getType().toObject(sf, bytesRef).toString();
+ }
+
+ @Override
+ public String formatValue(Comparable val) {
+ return (String) val;
+ }
+
+ @Override
+ protected Comparable parseStr(String rawval) throws ParseException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ protected Comparable parseAndAddGap(Comparable value, String gap) throws ParseException {
+ throw new UnsupportedOperationException();
+ }
+
+ }
+
+ FacetRangeProcessor.Calc calc;
+ LongCounts table;
+ int allBucketsSlot = -1;
+
+ FacetFieldProcessorByHashDV(FacetContext fcontext, FacetField freq, SchemaField sf) {
+ super(fcontext, freq, sf);
+ if (freq.mincount == 0) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+ getClass()+" doesn't support mincount=0");
+ }
+ if (freq.prefix != null) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+ getClass()+" doesn't support prefix"); // yet, but it could
+ }
+ FieldInfo fieldInfo = fcontext.searcher.getLeafReader().getFieldInfos().fieldInfo(sf.getName());
+ if (fieldInfo != null &&
+ fieldInfo.getDocValuesType() != DocValuesType.NUMERIC &&
+ fieldInfo.getDocValuesType() != DocValuesType.SORTED) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+ getClass()+" only support single valued number/string with docValues");
+ }
+ }
+
+ @Override
+ public void process() throws IOException {
+ super.process();
+ response = calcFacets();
+ table = null;//gc
+ }
+
+ private SimpleOrderedMap<Object> calcFacets() throws IOException {
+
+ if (sf.getType().getNumericType() != null) {
+ calc = FacetRangeProcessor.getNumericCalc(sf);
+ } else {
+ calc = new TermOrdCalc(); // kind of a hack
+ }
+
+ // TODO: Use the number of indexed terms, if present, as an estimate!
+ // Even for NumericDocValues, we could check for a terms index for an estimate.
+ // Our estimation should aim high to avoid expensive rehashes.
+
+ int possibleValues = fcontext.base.size();
+ // size smaller tables so that no resize will be necessary
+ int currHashSize = BitUtil.nextHighestPowerOfTwo((int) (possibleValues * (1 / LongCounts.LOAD_FACTOR) + 1));
+ currHashSize = Math.min(currHashSize, MAXIMUM_STARTING_TABLE_SIZE);
+ table = new LongCounts(currHashSize) {
+ @Override
+ protected void rehash() {
+ super.rehash();
+ doRehash(this);
+ oldToNewMapping = null; // allow for gc
+ }
+ };
+
+ // note: these methods/phases align with FacetFieldProcessorByArray's
+
+ createCollectAcc();
+
+ collectDocs();
+
+ return super.findTopSlots(table.numSlots(), table.cardinality(),
+ slotNum -> calc.bitsToValue(table.vals[slotNum]), // getBucketValFromSlotNum
+ val -> calc.formatValue(val)); // getFieldQueryVal
+ }
+
+ private void createCollectAcc() throws IOException {
+ int numSlots = table.numSlots();
+
+ if (freq.allBuckets) {
+ allBucketsSlot = numSlots++;
+ }
+
+ indexOrderAcc = new SlotAcc(fcontext) {
+ @Override
+ public void collect(int doc, int slot) throws IOException {
+ }
+
+ @Override
+ public int compare(int slotA, int slotB) {
+ long s1 = calc.bitsToSortableBits(table.vals[slotA]);
+ long s2 = calc.bitsToSortableBits(table.vals[slotB]);
+ return Long.compare(s1, s2);
+ }
+
+ @Override
+ public Object getValue(int slotNum) throws IOException {
+ return null;
+ }
+
+ @Override
+ public void reset() {
+ }
+
+ @Override
+ public void resize(Resizer resizer) {
+ }
+ };
+
+ countAcc = new CountSlotAcc(fcontext) {
+ @Override
+ public void incrementCount(int slot, int count) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int getCount(int slot) {
+ return table.counts[slot];
+ }
+
+ @Override
+ public Object getValue(int slotNum) {
+ return getCount(slotNum);
+ }
+
+ @Override
+ public void reset() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void collect(int doc, int slot) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int compare(int slotA, int slotB) {
+ return Integer.compare( table.counts[slotA], table.counts[slotB] );
+ }
+
+ @Override
+ public void resize(Resizer resizer) {
+ throw new UnsupportedOperationException();
+ }
+ };
+
+ // we set the countAcc & indexAcc first so generic ones won't be created for us.
+ super.createCollectAcc(fcontext.base.size(), numSlots);
+
+ if (freq.allBuckets) {
+ allBucketsAcc = new SpecialSlotAcc(fcontext, collectAcc, allBucketsSlot, otherAccs, 0);
+ }
+ }
+
+ private void collectDocs() throws IOException {
+ if (calc instanceof TermOrdCalc) { // Strings
+
+ // TODO support SortedSetDocValues
+ SortedDocValues globalDocValues = FieldUtil.getSortedDocValues(fcontext.qcontext, sf, null);
+ ((TermOrdCalc)calc).lookupOrdFunction = globalDocValues::lookupOrd;
+
+ DocSetUtil.collectSortedDocSet(fcontext.base, fcontext.searcher.getIndexReader(), new SimpleCollector() {
+ SortedDocValues docValues = globalDocValues; // this segment/leaf. NN
+ LongValues toGlobal = LongValues.IDENTITY; // this segment to global ordinal. NN
+
+ @Override public boolean needsScores() { return false; }
+
+ @Override
+ protected void doSetNextReader(LeafReaderContext ctx) throws IOException {
+ setNextReaderFirstPhase(ctx);
+ if (globalDocValues instanceof MultiDocValues.MultiSortedDocValues) {
+ MultiDocValues.MultiSortedDocValues multiDocValues = (MultiDocValues.MultiSortedDocValues) globalDocValues;
+ docValues = multiDocValues.values[ctx.ord];
+ toGlobal = multiDocValues.mapping.getGlobalOrds(ctx.ord);
+ }
+ }
+
+ @Override
+ public void collect(int segDoc) throws IOException {
+ long ord = docValues.getOrd(segDoc);
+ if (ord != -1) {
+ long val = toGlobal.get(ord);
+ collectValFirstPhase(segDoc, val);
+ }
+ }
+ });
+
+ } else { // Numeric:
+
+ // TODO support SortedNumericDocValues
+ DocSetUtil.collectSortedDocSet(fcontext.base, fcontext.searcher.getIndexReader(), new SimpleCollector() {
+ NumericDocValues values = null; //NN
+ Bits docsWithField = null; //NN
+
+ @Override public boolean needsScores() { return false; }
+
+ @Override
+ protected void doSetNextReader(LeafReaderContext ctx) throws IOException {
+ setNextReaderFirstPhase(ctx);
+ values = DocValues.getNumeric(ctx.reader(), sf.getName());
+ docsWithField = DocValues.getDocsWithField(ctx.reader(), sf.getName());
+ }
+
+ @Override
+ public void collect(int segDoc) throws IOException {
+ long val = values.get(segDoc);
+ if (val != 0 || docsWithField.get(segDoc)) {
+ collectValFirstPhase(segDoc, val);
+ }
+ }
+ });
+ }
+ }
+
+ private void collectValFirstPhase(int segDoc, long val) throws IOException {
+ int slot = table.add(val); // this can trigger a rehash
+
+ // Our countAcc is virtual, so this is not needed:
+ // countAcc.incrementCount(slot, 1);
+
+ super.collectFirstPhase(segDoc, slot);
+ }
+
+ private void doRehash(LongCounts table) {
+ if (collectAcc == null && allBucketsAcc == null) return;
+
+ // Our "count" acc is backed by the hash table and will already be rehashed
+ // otherAccs don't need to be rehashed
+
+ int newTableSize = table.numSlots();
+ int numSlots = newTableSize;
+ final int oldAllBucketsSlot = allBucketsSlot;
+ if (oldAllBucketsSlot >= 0) {
+ allBucketsSlot = numSlots++;
+ }
+
+ final int finalNumSlots = numSlots;
+ final int[] mapping = table.oldToNewMapping;
+
+ SlotAcc.Resizer resizer = new SlotAcc.Resizer() {
+ @Override
+ public int getNewSize() {
+ return finalNumSlots;
+ }
+
+ @Override
+ public int getNewSlot(int oldSlot) {
+ if (oldSlot < mapping.length) {
+ return mapping[oldSlot];
+ }
+ if (oldSlot == oldAllBucketsSlot) {
+ return allBucketsSlot;
+ }
+ return -1;
+ }
+ };
+
+ // NOTE: resizing isn't strictly necessary for missing/allBuckets... we could just set the new slot directly
+ if (collectAcc != null) {
+ collectAcc.resize(resizer);
+ }
+ if (allBucketsAcc != null) {
+ allBucketsAcc.resize(resizer);
+ }
+ }
+}
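The LongCounts table above deserves a note: the table length is a power of two and the probe step ((h>>7)|1) is forced odd, hence co-prime with the table length, so the probe visits every slot before cycling. A stripped-down sketch of just the insert path (rehashing omitted; the real class doubles the table at 0.7 load, so this sketch assumes the table never fills):

public class LongCountsSketch {
    long[] vals = new long[8];   // table length must stay a power of two
    int[] counts = new int[8];   // a count of 0 doubles as the "empty" marker
    int cardinality;

    /** Adds one occurrence of val; returns its slot. No rehash in this sketch. */
    int add(long val) {
        int h = (int) (val + (val >>> 44) + (val >>> 15));  // same mixing as above
        int mask = vals.length - 1;
        for (int slot = h & mask; ; slot = (slot + ((h >> 7) | 1)) & mask) {
            if (counts[slot] == 0) {         // empty slot: claim it
                vals[slot] = val;
                counts[slot] = 1;
                cardinality++;
                return slot;
            } else if (vals[slot] == val) {  // same key: bump its count
                counts[slot]++;
                return slot;
            }
            // otherwise the slot holds a different key: keep probing
        }
    }

    public static void main(String[] args) {
        LongCountsSketch t = new LongCountsSketch();
        t.add(42); t.add(42); t.add(7);
        System.out.println(t.cardinality);   // 2 distinct keys
    }
}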
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6a4184c6/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessorByHashNumeric.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessorByHashNumeric.java b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessorByHashNumeric.java
deleted file mode 100644
index 6d5aec5..0000000
--- a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessorByHashNumeric.java
+++ /dev/null
@@ -1,443 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.search.facet;
-
-import java.io.IOException;
-import java.text.ParseException;
-import java.util.function.IntFunction;
-
-import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.MultiDocValues;
-import org.apache.lucene.index.NumericDocValues;
-import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.search.SimpleCollector;
-import org.apache.lucene.util.BitUtil;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LongValues;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.schema.SchemaField;
-import org.apache.solr.search.DocSetUtil;
-
-/**
- * Facets numbers into a hash table. The number is either a raw numeric DocValues value, or
- * a term global ordinal integer.
- * Limitations:
- * <ul>
- * <li>doesn't handle multiValued, but could easily be added</li>
- * <li>doesn't handle prefix, but could easily be added</li>
- * <li>doesn't handle mincount==0 -- you're better off with an array alg</li>
- * </ul>
- */
-// TODO rename: FacetFieldProcessorByHashDV
-class FacetFieldProcessorByHashNumeric extends FacetFieldProcessor {
- static int MAXIMUM_STARTING_TABLE_SIZE=1024; // must be a power of two, non-final to support setting by tests
-
- /** a hash table with long keys (what we're counting) and integer values (counts) */
- private static class LongCounts {
-
- static final float LOAD_FACTOR = 0.7f;
-
- long[] vals;
- int[] counts; // maintain the counts here since we need them to tell if there was actually a value anyway
- int[] oldToNewMapping;
-
- int cardinality;
- int threshold;
-
- /** sz must be a power of two */
- LongCounts(int sz) {
- vals = new long[sz];
- counts = new int[sz];
- threshold = (int) (sz * LOAD_FACTOR);
- }
-
- /** Current number of slots in the hash table */
- int numSlots() {
- return vals.length;
- }
-
- private int hash(long val) {
- // For floats: exponent bits start at bit 23 for single precision,
- // and bit 52 for double precision.
- // Many values will only have significant bits just to the right of that,
- // and the leftmost bits will all be zero.
-
- // For now, lets just settle to get first 8 significant mantissa bits of double or float in the lowest bits of our hash
- // The upper bits of our hash will be irrelevant.
- int h = (int) (val + (val >>> 44) + (val >>> 15));
- return h;
- }
-
- /** returns the slot */
- int add(long val) {
- if (cardinality >= threshold) {
- rehash();
- }
-
- int h = hash(val);
- for (int slot = h & (vals.length-1); ;slot = (slot + ((h>>7)|1)) & (vals.length-1)) {
- int count = counts[slot];
- if (count == 0) {
- counts[slot] = 1;
- vals[slot] = val;
- cardinality++;
- return slot;
- } else if (vals[slot] == val) {
- // val is already in the set
- counts[slot] = count + 1;
- return slot;
- }
- }
- }
-
- protected void rehash() {
- long[] oldVals = vals;
- int[] oldCounts = counts; // after retrieving the count, this array is reused as a mapping to new array
- int newCapacity = vals.length << 1;
- vals = new long[newCapacity];
- counts = new int[newCapacity];
- threshold = (int) (newCapacity * LOAD_FACTOR);
-
- for (int i=0; i<oldVals.length; i++) {
- int count = oldCounts[i];
- if (count == 0) {
- oldCounts[i] = -1;
- continue;
- }
-
- long val = oldVals[i];
-
- int h = hash(val);
- int slot = h & (vals.length-1);
- while (counts[slot] != 0) {
- slot = (slot + ((h>>7)|1)) & (vals.length-1);
- }
- counts[slot] = count;
- vals[slot] = val;
- oldCounts[i] = slot;
- }
-
- oldToNewMapping = oldCounts;
- }
-
- int cardinality() {
- return cardinality;
- }
-
- }
-
- /** A hack instance of Calc for Term ordinals in DocValues. */
- // TODO consider making FacetRangeProcessor.Calc facet top level; then less of a hack?
- private class TermOrdCalc extends FacetRangeProcessor.Calc {
-
- IntFunction<BytesRef> lookupOrdFunction; // set in collectDocs()!
-
- TermOrdCalc() throws IOException {
- super(sf);
- }
-
- @Override
- public long bitsToSortableBits(long globalOrd) {
- return globalOrd;
- }
-
- /** To be returned in "buckets"/"val" */
- @Override
- public Comparable bitsToValue(long globalOrd) {
- BytesRef bytesRef = lookupOrdFunction.apply((int) globalOrd);
- // note FacetFieldProcessorByArray.findTopSlots also calls SchemaFieldType.toObject
- return sf.getType().toObject(sf, bytesRef).toString();
- }
-
- @Override
- public String formatValue(Comparable val) {
- return (String) val;
- }
-
- @Override
- protected Comparable parseStr(String rawval) throws ParseException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- protected Comparable parseAndAddGap(Comparable value, String gap) throws ParseException {
- throw new UnsupportedOperationException();
- }
-
- }
-
- FacetRangeProcessor.Calc calc;
- LongCounts table;
- int allBucketsSlot = -1;
-
- FacetFieldProcessorByHashNumeric(FacetContext fcontext, FacetField freq, SchemaField sf) {
- super(fcontext, freq, sf);
- if (freq.mincount == 0) {
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
- getClass()+" doesn't support mincount=0");
- }
- if (freq.prefix != null) {
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
- getClass()+" doesn't support prefix"); // yet, but it could
- }
- FieldInfo fieldInfo = fcontext.searcher.getLeafReader().getFieldInfos().fieldInfo(sf.getName());
- if (fieldInfo != null &&
- fieldInfo.getDocValuesType() != DocValuesType.NUMERIC &&
- fieldInfo.getDocValuesType() != DocValuesType.SORTED) {
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
- getClass()+" only support single valued number/string with docValues");
- }
- }
-
- @Override
- public void process() throws IOException {
- super.process();
- response = calcFacets();
- table = null;//gc
- }
-
- private SimpleOrderedMap<Object> calcFacets() throws IOException {
-
- if (sf.getType().getNumericType() != null) {
- calc = FacetRangeProcessor.getNumericCalc(sf);
- } else {
- calc = new TermOrdCalc(); // kind of a hack
- }
-
- // TODO: Use the number of indexed terms, if present, as an estimate!
- // Even for NumericDocValues, we could check for a terms index for an estimate.
- // Our estimation should aim high to avoid expensive rehashes.
-
- int possibleValues = fcontext.base.size();
- // size smaller tables so that no resize will be necessary
- int currHashSize = BitUtil.nextHighestPowerOfTwo((int) (possibleValues * (1 / LongCounts.LOAD_FACTOR) + 1));
- currHashSize = Math.min(currHashSize, MAXIMUM_STARTING_TABLE_SIZE);
- table = new LongCounts(currHashSize) {
- @Override
- protected void rehash() {
- super.rehash();
- doRehash(this);
- oldToNewMapping = null; // allow for gc
- }
- };
-
- // note: these methods/phases align with FacetFieldProcessorByArray's
-
- createCollectAcc();
-
- collectDocs();
-
- return super.findTopSlots(table.numSlots(), table.cardinality(),
- slotNum -> calc.bitsToValue(table.vals[slotNum]), // getBucketValFromSlotNum
- val -> calc.formatValue(val)); // getFieldQueryVal
- }
-
- private void createCollectAcc() throws IOException {
- int numSlots = table.numSlots();
-
- if (freq.allBuckets) {
- allBucketsSlot = numSlots++;
- }
-
- indexOrderAcc = new SlotAcc(fcontext) {
- @Override
- public void collect(int doc, int slot) throws IOException {
- }
-
- @Override
- public int compare(int slotA, int slotB) {
- long s1 = calc.bitsToSortableBits(table.vals[slotA]);
- long s2 = calc.bitsToSortableBits(table.vals[slotB]);
- return Long.compare(s1, s2);
- }
-
- @Override
- public Object getValue(int slotNum) throws IOException {
- return null;
- }
-
- @Override
- public void reset() {
- }
-
- @Override
- public void resize(Resizer resizer) {
- }
- };
-
- countAcc = new CountSlotAcc(fcontext) {
- @Override
- public void incrementCount(int slot, int count) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public int getCount(int slot) {
- return table.counts[slot];
- }
-
- @Override
- public Object getValue(int slotNum) {
- return getCount(slotNum);
- }
-
- @Override
- public void reset() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void collect(int doc, int slot) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public int compare(int slotA, int slotB) {
- return Integer.compare( table.counts[slotA], table.counts[slotB] );
- }
-
- @Override
- public void resize(Resizer resizer) {
- throw new UnsupportedOperationException();
- }
- };
-
- // we set the countAcc & indexAcc first so generic ones won't be created for us.
- super.createCollectAcc(fcontext.base.size(), numSlots);
-
- if (freq.allBuckets) {
- allBucketsAcc = new SpecialSlotAcc(fcontext, collectAcc, allBucketsSlot, otherAccs, 0);
- }
- }
-
- private void collectDocs() throws IOException {
- if (calc instanceof TermOrdCalc) { // Strings
-
- // TODO support SortedSetDocValues
- SortedDocValues globalDocValues = FieldUtil.getSortedDocValues(fcontext.qcontext, sf, null);
- ((TermOrdCalc)calc).lookupOrdFunction = globalDocValues::lookupOrd;
-
- DocSetUtil.collectSortedDocSet(fcontext.base, fcontext.searcher.getIndexReader(), new SimpleCollector() {
- SortedDocValues docValues = globalDocValues; // this segment/leaf. NN
- LongValues toGlobal = LongValues.IDENTITY; // this segment to global ordinal. NN
-
- @Override public boolean needsScores() { return false; }
-
- @Override
- protected void doSetNextReader(LeafReaderContext ctx) throws IOException {
- setNextReaderFirstPhase(ctx);
- if (globalDocValues instanceof MultiDocValues.MultiSortedDocValues) {
- MultiDocValues.MultiSortedDocValues multiDocValues = (MultiDocValues.MultiSortedDocValues) globalDocValues;
- docValues = multiDocValues.values[ctx.ord];
- toGlobal = multiDocValues.mapping.getGlobalOrds(ctx.ord);
- }
- }
-
- @Override
- public void collect(int segDoc) throws IOException {
- long ord = docValues.getOrd(segDoc);
- if (ord != -1) {
- long val = toGlobal.get(ord);
- collectValFirstPhase(segDoc, val);
- }
- }
- });
-
- } else { // Numeric:
-
- // TODO support SortedNumericDocValues
- DocSetUtil.collectSortedDocSet(fcontext.base, fcontext.searcher.getIndexReader(), new SimpleCollector() {
- NumericDocValues values = null; //NN
- Bits docsWithField = null; //NN
-
- @Override public boolean needsScores() { return false; }
-
- @Override
- protected void doSetNextReader(LeafReaderContext ctx) throws IOException {
- setNextReaderFirstPhase(ctx);
- values = DocValues.getNumeric(ctx.reader(), sf.getName());
- docsWithField = DocValues.getDocsWithField(ctx.reader(), sf.getName());
- }
-
- @Override
- public void collect(int segDoc) throws IOException {
- long val = values.get(segDoc);
- if (val != 0 || docsWithField.get(segDoc)) {
- collectValFirstPhase(segDoc, val);
- }
- }
- });
- }
- }
-
- private void collectValFirstPhase(int segDoc, long val) throws IOException {
- int slot = table.add(val); // this can trigger a rehash
-
- // Our countAcc is virtual, so this is not needed:
- // countAcc.incrementCount(slot, 1);
-
- super.collectFirstPhase(segDoc, slot);
- }
-
- private void doRehash(LongCounts table) {
- if (collectAcc == null && allBucketsAcc == null) return;
-
- // Our "count" acc is backed by the hash table and will already be rehashed
- // otherAccs don't need to be rehashed
-
- int newTableSize = table.numSlots();
- int numSlots = newTableSize;
- final int oldAllBucketsSlot = allBucketsSlot;
- if (oldAllBucketsSlot >= 0) {
- allBucketsSlot = numSlots++;
- }
-
- final int finalNumSlots = numSlots;
- final int[] mapping = table.oldToNewMapping;
-
- SlotAcc.Resizer resizer = new SlotAcc.Resizer() {
- @Override
- public int getNewSize() {
- return finalNumSlots;
- }
-
- @Override
- public int getNewSlot(int oldSlot) {
- if (oldSlot < mapping.length) {
- return mapping[oldSlot];
- }
- if (oldSlot == oldAllBucketsSlot) {
- return allBucketsSlot;
- }
- return -1;
- }
- };
-
- // NOTE: resizing isn't strictly necessary for missing/allBuckets... we could just set the new slot directly
- if (collectAcc != null) {
- collectAcc.resize(resizer);
- }
- if (allBucketsAcc != null) {
- allBucketsAcc.resize(resizer);
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6a4184c6/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java b/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java
index 6ab25bb..c83d308 100644
--- a/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java
+++ b/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java
@@ -50,8 +50,8 @@ public class TestJsonFacets extends SolrTestCaseHS {
public static void beforeTests() throws Exception {
JSONTestUtil.failRepeatedKeys = true;
- origTableSize = FacetFieldProcessorByHashNumeric.MAXIMUM_STARTING_TABLE_SIZE;
- FacetFieldProcessorByHashNumeric.MAXIMUM_STARTING_TABLE_SIZE=2; // stress test resizing
+ origTableSize = FacetFieldProcessorByHashDV.MAXIMUM_STARTING_TABLE_SIZE;
+ FacetFieldProcessorByHashDV.MAXIMUM_STARTING_TABLE_SIZE=2; // stress test resizing
origDefaultFacetMethod = FacetField.FacetMethod.DEFAULT_METHOD;
// instead of the following, see the constructor
@@ -69,7 +69,7 @@ public class TestJsonFacets extends SolrTestCaseHS {
@AfterClass
public static void afterTests() throws Exception {
JSONTestUtil.failRepeatedKeys = false;
- FacetFieldProcessorByHashNumeric.MAXIMUM_STARTING_TABLE_SIZE=origTableSize;
+ FacetFieldProcessorByHashDV.MAXIMUM_STARTING_TABLE_SIZE=origTableSize;
FacetField.FacetMethod.DEFAULT_METHOD = origDefaultFacetMethod;
if (servers != null) {
servers.stop();
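The test change above leans on MAXIMUM_STARTING_TABLE_SIZE being deliberately non-final: shrinking it to 2 forces a rehash on nearly every add. The save/override/restore shape of that, as a self-contained sketch with a stand-in tunable rather than the actual Solr classes:

import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class ResizeStressPattern {
    // Stand-in for FacetFieldProcessorByHashDV.MAXIMUM_STARTING_TABLE_SIZE
    static class Tunables { static int MAXIMUM_STARTING_TABLE_SIZE = 1024; }

    static int origTableSize;

    @BeforeClass
    public static void beforeTests() {
        origTableSize = Tunables.MAXIMUM_STARTING_TABLE_SIZE;
        Tunables.MAXIMUM_STARTING_TABLE_SIZE = 2;  // force a rehash on almost every add
    }

    @AfterClass
    public static void afterTests() {
        // restore so later test classes in the same JVM see the default
        Tunables.MAXIMUM_STARTING_TABLE_SIZE = origTableSize;
    }

    @Test
    public void facetingStillCorrectUnderConstantResizing() {
        // a real test would run faceting here and compare against default-size results
    }
}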
[16/50] [abbrv] lucene-solr:apiv2: SOLR-9436: remove no longer used acceptsDocsOutOfOrder methods (LUCENE-6179 removed out-of-order scoring)
Posted by no...@apache.org.
SOLR-9436: remove no longer used acceptsDocsOutOfOrder methods (LUCENE-6179 removed out-of-order scoring)
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/7f3d8652
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/7f3d8652
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/7f3d8652
Branch: refs/heads/apiv2
Commit: 7f3d86524d0fc5cdf5a517eb266b68b49db81be0
Parents: 4abbdbe
Author: Christine Poerschke <cp...@apache.org>
Authored: Fri Aug 26 13:45:36 2016 +0100
Committer: Christine Poerschke <cp...@apache.org>
Committed: Fri Aug 26 13:45:36 2016 +0100
----------------------------------------------------------------------
.../org/apache/solr/search/CollapsingQParserPlugin.java | 10 ----------
.../test/org/apache/solr/search/TestRankQueryPlugin.java | 4 ----
2 files changed, 14 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7f3d8652/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java b/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
index b9d292e..896387e 100644
--- a/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
+++ b/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
@@ -909,11 +909,6 @@ public class CollapsingQParserPlugin extends QParserPlugin {
}
}
- public boolean acceptsDocsOutOfOrder() {
- //Documents must be sent in order to this collector.
- return false;
- }
-
public void setScorer(Scorer scorer) {
this.collapseStrategy.setScorer(scorer);
}
@@ -1074,11 +1069,6 @@ public class CollapsingQParserPlugin extends QParserPlugin {
}
}
- public boolean acceptsDocsOutOfOrder() {
- //Documents must be sent in order to this collector.
- return false;
- }
-
public void setScorer(Scorer scorer) {
this.collapseStrategy.setScorer(scorer);
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7f3d8652/solr/core/src/test/org/apache/solr/search/TestRankQueryPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestRankQueryPlugin.java b/solr/core/src/test/org/apache/solr/search/TestRankQueryPlugin.java
index d895697..e3afed3 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRankQueryPlugin.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRankQueryPlugin.java
@@ -691,10 +691,6 @@ public class TestRankQueryPlugin extends QParserPlugin {
@Override
public void setScorer(Scorer scorer) throws IOException {}
- public boolean acceptsDocsOutOfOrder() {
- return false;
- }
-
public void collect(int doc) {
list.add(new ScoreDoc(doc+base, (float)values.get(doc)));
}
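Since LUCENE-6179, Lucene delivers hits to a collector in increasing docID order within each segment, so acceptsDocsOutOfOrder() had nothing left to report. A minimal collector against this branch's API, as a sketch rather than the Solr classes above:

import java.io.IOException;
import org.apache.lucene.search.SimpleCollector;

public class InOrderCountingCollector extends SimpleCollector {
    private int count;

    @Override
    public void collect(int doc) throws IOException {
        count++;  // doc IDs arrive in increasing order within each segment
    }

    @Override
    public boolean needsScores() {
        return false;  // counting only; no Scorer required
    }

    public int getCount() { return count; }
}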
[48/50] [abbrv] lucene-solr:apiv2: Merge remote-tracking branch 'origin/master'
Posted by no...@apache.org.
Merge remote-tracking branch 'origin/master'
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/ecbb588f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/ecbb588f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/ecbb588f
Branch: refs/heads/apiv2
Commit: ecbb588f978152ba5871d1463ec1d692103794e8
Parents: 6ca9aeb 9ac5c1c
Author: Noble Paul <no...@apache.org>
Authored: Tue Sep 6 11:39:32 2016 +0530
Committer: Noble Paul <no...@apache.org>
Committed: Tue Sep 6 11:39:32 2016 +0530
----------------------------------------------------------------------
.../org/apache/lucene/index/LogMergePolicy.java | 6 +-
solr/CHANGES.txt | 32 +-
.../java/org/apache/solr/cloud/BackupCmd.java | 6 +-
.../org/apache/solr/cloud/DeleteNodeCmd.java | 15 +-
.../org/apache/solr/cloud/DeleteReplicaCmd.java | 144 +++++-
.../org/apache/solr/cloud/DeleteShardCmd.java | 92 +++-
.../org/apache/solr/cloud/ReplaceNodeCmd.java | 5 +-
.../java/org/apache/solr/cloud/RestoreCmd.java | 6 +-
.../org/apache/solr/cloud/SplitShardCmd.java | 24 +-
.../apache/solr/cloud/rule/ImplicitSnitch.java | 10 +-
.../org/apache/solr/core/CoreContainer.java | 4 +-
.../apache/solr/core/backup/BackupManager.java | 34 +-
.../backup/repository/BackupRepository.java | 14 +-
.../backup/repository/HdfsBackupRepository.java | 29 +-
.../repository/LocalFileSystemRepository.java | 36 +-
.../apache/solr/handler/ReplicationHandler.java | 12 +-
.../org/apache/solr/handler/RestoreCore.java | 6 +-
.../org/apache/solr/handler/SnapShooter.java | 11 +-
.../solr/handler/admin/CollectionsHandler.java | 14 +-
.../solr/handler/admin/CoreAdminOperation.java | 7 +-
.../solr/handler/component/FacetComponent.java | 31 +-
.../org/apache/solr/request/SimpleFacets.java | 71 ++-
.../java/org/apache/solr/search/DocSetUtil.java | 33 ++
.../apache/solr/search/SolrIndexSearcher.java | 5 +
.../apache/solr/search/facet/FacetField.java | 70 ++-
.../solr/search/facet/FacetFieldProcessor.java | 150 ++++++-
.../facet/FacetFieldProcessorByArray.java | 144 +-----
.../facet/FacetFieldProcessorByHashDV.java | 442 +++++++++++++++++++
.../facet/FacetFieldProcessorByHashNumeric.java | 439 ------------------
.../org/apache/solr/search/facet/SlotAcc.java | 15 +-
.../processor/DistributedUpdateProcessor.java | 22 +-
.../org/apache/solr/TestRandomFaceting.java | 261 +++++++++--
.../apache/solr/cloud/DeleteReplicaTest.java | 139 +++++-
.../cloud/TestLocalFSCloudBackupRestore.java | 10 +-
.../TestSolrCloudWithSecureImpersonation.java | 25 +-
.../org/apache/solr/cloud/rule/RulesTest.java | 7 +-
.../DistributedFacetExistsSmallTest.java | 236 ++++++++++
.../apache/solr/request/SimpleFacetsTest.java | 286 +++++++++++-
.../solr/search/facet/TestJsonFacets.java | 53 ++-
.../solrj/request/CollectionAdminRequest.java | 12 +-
.../common/params/CollectionAdminParams.java | 9 +-
.../apache/solr/common/params/FacetParams.java | 8 +
.../solr/common/util/JsonRecordReader.java | 27 +-
43 files changed, 2145 insertions(+), 857 deletions(-)
----------------------------------------------------------------------
[22/50] [abbrv] lucene-solr:apiv2: SOLR-9430: Fix locale lookup in
DIH to use BCP47 language tags to be consistent with other
places in Solr. Language names still work for backwards compatibility
Posted by no...@apache.org.
SOLR-9430: Fix locale lookup in DIH <propertyWriter/> to use BCP47 language tags to be consistent with other places in Solr. Language names still work for backwards compatibility
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e99d9706
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e99d9706
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e99d9706
Branch: refs/heads/apiv2
Commit: e99d9706741edb27b5ba373ad6fd84150a076705
Parents: 7d2f42e
Author: Uwe Schindler <us...@apache.org>
Authored: Sat Aug 27 08:38:42 2016 +0200
Committer: Uwe Schindler <us...@apache.org>
Committed: Sat Aug 27 08:38:42 2016 +0200
----------------------------------------------------------------------
solr/CHANGES.txt | 4 +++
.../dataimport/SimplePropertiesWriter.java | 33 +++++++++++++-------
2 files changed, 26 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e99d9706/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 62c6d5f..824cdae 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -83,6 +83,10 @@ Bug Fixes
* SOLR-9439: Shard split clean up logic for older failed splits is faulty. (shalin)
+* SOLR-9430: Fix locale lookup in DIH <propertyWriter/> to use BCP47 language tags
+ to be consistent with other places in Solr. Language names still work for backwards
+ compatibility. (Uwe Schindler, Boris Steiner)
+
Optimizations
----------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e99d9706/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SimplePropertiesWriter.java
----------------------------------------------------------------------
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SimplePropertiesWriter.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SimplePropertiesWriter.java
index 92527bb..1ee18ef 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SimplePropertiesWriter.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SimplePropertiesWriter.java
@@ -30,11 +30,13 @@ import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashMap;
+import java.util.IllformedLocaleException;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import org.apache.lucene.util.IOUtils;
+import org.apache.solr.common.util.SuppressForbidden;
import org.apache.solr.core.SolrCore;
import org.apache.solr.core.SolrResourceLoader;
import org.slf4j.Logger;
@@ -90,16 +92,7 @@ public class SimplePropertiesWriter extends DIHProperties {
}
findDirectory(dataImporter, params);
if(params.get(LOCALE) != null) {
- String localeStr = params.get(LOCALE);
- for (Locale l : Locale.getAvailableLocales()) {
- if(localeStr.equals(l.getDisplayName(Locale.ROOT))) {
- locale = l;
- break;
- }
- }
- if(locale==null) {
- throw new DataImportHandlerException(SEVERE, "Unsupported locale for PropertWriter: " + localeStr);
- }
+ locale = getLocale(params.get(LOCALE));
} else {
locale = Locale.ROOT;
}
@@ -108,7 +101,25 @@ public class SimplePropertiesWriter extends DIHProperties {
} else {
dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", locale);
}
- }
+ }
+
+ @SuppressForbidden(reason = "Usage of outdated locale parsing with Locale#toString() because of backwards compatibility")
+ private Locale getLocale(String name) {
+ if (name == null) {
+ return Locale.ROOT;
+ }
+ for (final Locale l : Locale.getAvailableLocales()) {
+ if(name.equals(l.toString()) || name.equals(l.getDisplayName(Locale.ROOT))) {
+ return l;
+ }
+ }
+ try {
+ return new Locale.Builder().setLanguageTag(name).build();
+ } catch (IllformedLocaleException ex) {
+ throw new DataImportHandlerException(SEVERE, "Unsupported locale for PropertyWriter: " + name);
+ }
+ }
+
protected void findDirectory(DataImporter dataImporter, Map<String, String> params) {
if(params.get(DIRECTORY) != null) {
configDir = params.get(DIRECTORY);
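
The new lookup above tries the legacy forms first (Locale#toString() and root-locale display names) and only then parses the value as a BCP47 language tag. A standalone sketch of that order (class name and the IllegalArgumentException are illustrative; the real code throws DataImportHandlerException):

import java.util.IllformedLocaleException;
import java.util.Locale;

// Sketch only: legacy names first for backwards compatibility, then BCP47.
public class LocaleLookupDemo {
  static Locale lookup(String name) {
    for (Locale l : Locale.getAvailableLocales()) {
      if (name.equals(l.toString()) || name.equals(l.getDisplayName(Locale.ROOT))) {
        return l;
      }
    }
    try {
      return new Locale.Builder().setLanguageTag(name).build();
    } catch (IllformedLocaleException ex) {
      throw new IllegalArgumentException("Unsupported locale: " + name, ex);
    }
  }

  public static void main(String[] args) {
    System.out.println(lookup("de_DE"));  // legacy Locale#toString() form
    System.out.println(lookup("German")); // legacy display name, still accepted
    System.out.println(lookup("de-DE"));  // BCP47 language tag
  }
}
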
[09/50] [abbrv] lucene-solr:apiv2: remove deprecated
IndexWriter.isLocked()
Posted by no...@apache.org.
remove deprecated IndexWriter.isLocked()
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/d489b8c0
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/d489b8c0
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/d489b8c0
Branch: refs/heads/apiv2
Commit: d489b8c05ddbd9c5b4f0a2a0b20439a9c5b64736
Parents: ada7149
Author: Robert Muir <rm...@apache.org>
Authored: Thu Aug 25 12:34:38 2016 -0400
Committer: Robert Muir <rm...@apache.org>
Committed: Thu Aug 25 12:34:38 2016 -0400
----------------------------------------------------------------------
.../org/apache/lucene/index/IndexWriter.java | 18 ------------------
.../src/java/org/apache/solr/core/SolrCore.java | 20 +++++++++++++++++++-
2 files changed, 19 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d489b8c0/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
index 0fb23d9..7abf681 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
@@ -4685,24 +4685,6 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
testPoint("finishStartCommit");
}
- /**
- * Returns <code>true</code> iff the index in the named directory is
- * currently locked.
- * @param directory the directory to check for a lock
- * @throws IOException if there is a low-level IO error
- * @deprecated Use of this method can only lead to race conditions. Try
- * to actually obtain a lock instead.
- */
- @Deprecated
- public static boolean isLocked(Directory directory) throws IOException {
- try {
- directory.obtainLock(WRITE_LOCK_NAME).close();
- return false;
- } catch (LockObtainFailedException failed) {
- return true;
- }
- }
-
/** If {@link DirectoryReader#open(IndexWriter)} has
* been called (ie, this writer is in near real-time
* mode), then after a merge completes, this class can be
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d489b8c0/solr/core/src/java/org/apache/solr/core/SolrCore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrCore.java b/solr/core/src/java/org/apache/solr/core/SolrCore.java
index 2704e4a..90bcd34 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrCore.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrCore.java
@@ -549,6 +549,24 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
// protect via synchronized(SolrCore.class)
private static Set<String> dirs = new HashSet<>();
+ /**
+ * Returns <code>true</code> iff the index in the named directory is
+ * currently locked.
+ * @param directory the directory to check for a lock
+ * @throws IOException if there is a low-level IO error
+ * @deprecated Use of this method can only lead to race conditions. Try
+ * to actually obtain a lock instead.
+ */
+ @Deprecated
+ private static boolean isWriterLocked(Directory directory) throws IOException {
+ try {
+ directory.obtainLock(IndexWriter.WRITE_LOCK_NAME).close();
+ return false;
+ } catch (LockObtainFailedException failed) {
+ return true;
+ }
+ }
+
void initIndex(boolean reload) throws IOException {
String indexDir = getNewIndexDir();
@@ -564,7 +582,7 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
final String lockType = getSolrConfig().indexConfig.lockType;
Directory dir = directoryFactory.get(indexDir, DirContext.DEFAULT, lockType);
try {
- if (IndexWriter.isLocked(dir)) {
+ if (isWriterLocked(dir)) {
log.error(logid + "Solr index directory '{}' is locked (lockType={}). Throwing exception.",
indexDir, lockType);
throw new LockObtainFailedException
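
The deprecation note states the rationale: probing a lock and then acting on the answer is inherently racy, so callers should simply obtain the lock and hold it while they work. A minimal sketch of that pattern (index path illustrative, assuming Lucene 6.x store APIs):

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;

// Sketch only: the race-free alternative the deprecation message suggests.
public class ObtainLockDemo {
  public static void main(String[] args) throws IOException {
    try (Directory dir = FSDirectory.open(Paths.get("/tmp/index"))) { // illustrative path
      try (Lock lock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        // The write lock is exclusively ours until this block exits.
        System.out.println("acquired " + lock);
      } catch (LockObtainFailedException e) {
        System.out.println("another writer holds the lock: " + e.getMessage());
      }
    }
  }
}
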
[40/50] [abbrv] lucene-solr:apiv2: SOLR-9444: Fix path usage for
cloud backup/restore Merge branch 'SOLR-9444_fix' of
https://github.com/hgadre/lucene-solr This closes #74
Posted by no...@apache.org.
SOLR-9444: Fix path usage for cloud backup/restore
Merge branch 'SOLR-9444_fix' of https://github.com/hgadre/lucene-solr
This closes #74
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/d9c0f2c6
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/d9c0f2c6
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/d9c0f2c6
Branch: refs/heads/apiv2
Commit: d9c0f2c6b91bd97d7e17a0b6abf16cb9d0f71b52
Parents: e203c9a e138462
Author: Uwe Schindler <us...@apache.org>
Authored: Fri Sep 2 14:15:09 2016 +0200
Committer: Uwe Schindler <us...@apache.org>
Committed: Fri Sep 2 14:15:09 2016 +0200
----------------------------------------------------------------------
solr/CHANGES.txt | 2 ++
.../java/org/apache/solr/cloud/BackupCmd.java | 6 ++--
.../java/org/apache/solr/cloud/RestoreCmd.java | 6 ++--
.../apache/solr/core/backup/BackupManager.java | 34 +++++++++---------
.../backup/repository/BackupRepository.java | 14 ++++++--
.../backup/repository/HdfsBackupRepository.java | 29 +++++++++++++---
.../repository/LocalFileSystemRepository.java | 36 ++++++++++++--------
.../apache/solr/handler/ReplicationHandler.java | 12 ++++---
.../org/apache/solr/handler/RestoreCore.java | 6 ++--
.../org/apache/solr/handler/SnapShooter.java | 11 +++---
.../solr/handler/admin/CoreAdminOperation.java | 7 ++--
.../cloud/TestLocalFSCloudBackupRestore.java | 10 +++++-
12 files changed, 114 insertions(+), 59 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d9c0f2c6/solr/CHANGES.txt
----------------------------------------------------------------------
diff --cc solr/CHANGES.txt
index b22ea5d,adca63e..3f1b8a5
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@@ -103,8 -103,6 +103,10 @@@ Bug Fixes
* SOLR-9461: DELETENODE, REPLACENODE should pass down the 'async' param to subcommands (shalin, noble)
+* SOLR-9319: DELETEREPLICA can accept a 'count' and remove appropriate replicas (Nitin Sharma, noble )
+
++* SOLR-9444: Fix path usage for cloud backup/restore. (Hrishikesh Gadre, Uwe Schindler, Varun Thacker)
++
Optimizations
----------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d9c0f2c6/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java
----------------------------------------------------------------------
diff --cc solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java
index db68913,da8e767..c0db46e
--- a/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java
@@@ -30,6 -31,13 +31,13 @@@ public class TestLocalFSCloudBackupRestore
configureCluster(NUM_SHARDS)// nodes
.addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
.configure();
+
+ boolean whitespacesInPath = random().nextBoolean();
+ if (whitespacesInPath) {
- backupLocation = createTempDir("my backup").toFile().getAbsolutePath();
++ backupLocation = createTempDir("my backup").toAbsolutePath().toString();
+ } else {
- backupLocation = createTempDir("mybackup").toFile().getAbsolutePath();
++ backupLocation = createTempDir("mybackup").toAbsolutePath().toString();
+ }
}
@Override
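
The randomized whitespace above is the point of the fix: a raw path such as "/backups/my backup" is not a valid URI, so locations must be normalized once and serialized with toASCIIString() to survive the round trip. A standalone sketch of that round trip (class name illustrative, Unix-style paths assumed):

import java.net.URI;
import java.net.URISyntaxException;
import java.nio.file.Paths;

// Sketch only: new URI(...) rejects raw spaces, while Path#toUri()
// percent-encodes them; this mirrors the repository fallback logic.
public class BackupUriDemo {
  static URI createURI(String location) {
    try {
      URI uri = new URI(location);
      return uri.isAbsolute() ? uri : Paths.get(location).toUri();
    } catch (URISyntaxException e) {
      return Paths.get(location).toUri();
    }
  }

  public static void main(String[] args) {
    URI uri = createURI("/backups/my backup");
    System.out.println(uri.toASCIIString()); // file:///backups/my%20backup
    System.out.println(Paths.get(uri));      // /backups/my backup
  }
}
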
[37/50] [abbrv] lucene-solr:apiv2: SOLR-9461: DELETENODE,
REPLACENODE should pass down the 'async' param to subcommands
Posted by no...@apache.org.
SOLR-9461: DELETENODE, REPLACENODE should pass down the 'async' param to subcommands
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e13f7aea
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e13f7aea
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e13f7aea
Branch: refs/heads/apiv2
Commit: e13f7aeafadb56bbf138213865e0d2bf4cd423b2
Parents: 6a4184c
Author: Noble Paul <no...@apache.org>
Authored: Thu Sep 1 18:03:59 2016 +0530
Committer: Noble Paul <no...@apache.org>
Committed: Thu Sep 1 18:03:59 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 2 ++
.../java/org/apache/solr/cloud/DeleteNodeCmd.java | 16 ++++++++++++----
.../java/org/apache/solr/cloud/ReplaceNodeCmd.java | 8 +++++++-
3 files changed, 21 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e13f7aea/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index cc28449..adca63e 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -101,6 +101,8 @@ Bug Fixes
* SOLR-9455: Deleting a sub-shard in recovery state can mark parent shard as inactive. (shalin)
+* SOLR-9461: DELETENODE, REPLACENODE should pass down the 'async' param to subcommands (shalin, noble)
+
Optimizations
----------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e13f7aea/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java
index b3c5055..0fd001a 100644
--- a/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java
@@ -27,6 +27,7 @@ import java.util.concurrent.TimeUnit;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.params.CommonAdminParams;
import org.apache.solr.common.util.NamedList;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
@@ -35,6 +36,7 @@ import org.slf4j.LoggerFactory;
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
public class DeleteNodeCmd implements OverseerCollectionMessageHandler.Cmd {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@@ -53,24 +55,30 @@ public class DeleteNodeCmd implements OverseerCollectionMessageHandler.Cmd {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Source Node: " + node + " is not live");
}
List<ZkNodeProps> sourceReplicas = ReplaceNodeCmd.getReplicasOfNode(node, state);
- cleanupReplicas(results, state, sourceReplicas, ocmh, node);
+ cleanupReplicas(results, state, sourceReplicas, ocmh, node, message.getStr(ASYNC));
}
static void cleanupReplicas(NamedList results,
ClusterState clusterState,
List<ZkNodeProps> sourceReplicas,
- OverseerCollectionMessageHandler ocmh, String node) throws InterruptedException {
+ OverseerCollectionMessageHandler ocmh,
+ String node,
+ String async) throws InterruptedException {
CountDownLatch cleanupLatch = new CountDownLatch(sourceReplicas.size());
for (ZkNodeProps sourceReplica : sourceReplicas) {
- log.info("Deleting replica for collection={} shard={} on node={}", sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), node);
+ String coll = sourceReplica.getStr(COLLECTION_PROP);
+ String shard = sourceReplica.getStr(SHARD_ID_PROP);
+ log.info("Deleting replica for collection={} shard={} on node={}", coll, shard, node);
NamedList deleteResult = new NamedList();
try {
+ if (async != null) sourceReplica = sourceReplica.plus(ASYNC, async);
((DeleteReplicaCmd)ocmh.commandMap.get(DELETEREPLICA)).deleteReplica(clusterState, sourceReplica.plus("parallel", "true"), deleteResult, () -> {
cleanupLatch.countDown();
if (deleteResult.get("failure") != null) {
synchronized (results) {
+
results.add("failure", String.format(Locale.ROOT, "Failed to delete replica for collection=%s shard=%s" +
- " on node=%s", sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), node));
+ " on node=%s", coll, shard, node));
}
}
});
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e13f7aea/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
index aad9cc7..ad02fc0 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
@@ -34,14 +34,18 @@ import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CommonAdminParams;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.StrUtils;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.util.StrUtils.formatString;
public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@@ -58,6 +62,7 @@ public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
ocmh.checkRequired(message, "source", "target");
String source = message.getStr("source");
String target = message.getStr("target");
+ String async = message.getStr("async");
boolean parallel = message.getBool("parallel", false);
ClusterState clusterState = zkStateReader.getClusterState();
@@ -78,6 +83,7 @@ public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
NamedList nl = new NamedList();
log.info("Going to create replica for collection={} shard={} on node={}", sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), target);
ZkNodeProps msg = sourceReplica.plus("parallel", String.valueOf(parallel)).plus(CoreAdminParams.NODE, target);
+ if(async!=null) msg.getProperties().put(ASYNC, async);
final ZkNodeProps addedReplica = ocmh.addReplica(clusterState,
msg, nl, () -> {
countDownLatch.countDown();
@@ -136,7 +142,7 @@ public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
// we have reached this far means all replicas could be recreated
//now cleanup the replicas in the source node
- DeleteNodeCmd.cleanupReplicas(results, state, sourceReplicas, ocmh, source);
+ DeleteNodeCmd.cleanupReplicas(results, state, sourceReplicas, ocmh, source, async);
results.add("success", "REPLACENODE action completed successfully from : " + source + " to : " + target);
}
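
The pattern above threads the parent request's async id into every subcommand message so their responses remain trackable. A minimal sketch of it (class name illustrative; ZkNodeProps is immutable, so plus() returns a copy):

import org.apache.solr.common.cloud.ZkNodeProps;

import static org.apache.solr.common.params.CommonAdminParams.ASYNC;

// Sketch only: carry the parent's async id down into a subcommand message.
public class AsyncPropagationDemo {
  static ZkNodeProps withAsync(ZkNodeProps subCommand, String async) {
    return async == null ? subCommand : subCommand.plus(ASYNC, async);
  }
}
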
[23/50] [abbrv] lucene-solr:apiv2: SOLR-9188: blockUnknown property
makes inter-node communication impossible
Posted by no...@apache.org.
SOLR-9188: blockUnknown property makes inter-node communication impossible
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/44c30f05
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/44c30f05
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/44c30f05
Branch: refs/heads/apiv2
Commit: 44c30f0535ceed5f2ad08aa8a9f974d4973774e0
Parents: e99d970
Author: Noble Paul <no...@gmail.com>
Authored: Sun Aug 28 00:36:18 2016 +0530
Committer: Noble Paul <no...@gmail.com>
Committed: Sun Aug 28 00:36:18 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 2 ++
.../apache/solr/security/BasicAuthPlugin.java | 3 ++-
.../apache/solr/servlet/SolrDispatchFilter.java | 3 ++-
.../solr/security/BasicAuthIntegrationTest.java | 28 ++++++++++++++++----
4 files changed, 29 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/44c30f05/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 824cdae..a4f918c 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -87,6 +87,8 @@ Bug Fixes
to be consistent with other places in Solr. Language names still work for backwards
compatibility. (Uwe Schindler, Boris Steiner)
+* SOLR-9188: blockUnknown property makes inter-node communication impossible (noble)
+
Optimizations
----------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/44c30f05/solr/core/src/java/org/apache/solr/security/BasicAuthPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/security/BasicAuthPlugin.java b/solr/core/src/java/org/apache/solr/security/BasicAuthPlugin.java
index e3f53a2..9dc34e7 100644
--- a/solr/core/src/java/org/apache/solr/security/BasicAuthPlugin.java
+++ b/solr/core/src/java/org/apache/solr/security/BasicAuthPlugin.java
@@ -71,6 +71,7 @@ public class BasicAuthPlugin extends AuthenticationPlugin implements ConfigEdita
for (Map.Entry<String, Object> e : command.getDataMap().entrySet()) {
if (PROPS.contains(e.getKey())) {
latestConf.put(e.getKey(), e.getValue());
+ return latestConf;
} else {
command.addError("Unknown property " + e.getKey());
}
@@ -140,7 +141,7 @@ public class BasicAuthPlugin extends AuthenticationPlugin implements ConfigEdita
}
} else {
if (blockUnknown) {
- authenticationFailure(response, "require authentication");
+ authenticationFailure(response, "require authentication for pathinfo :"+ request.getPathInfo());
} else {
request.setAttribute(AuthenticationPlugin.class.getName(), zkAuthentication.getPromptHeaders());
filterChain.doFilter(request, response);
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/44c30f05/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java b/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
index 4a680e5..8c792e9 100644
--- a/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
+++ b/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
@@ -299,7 +299,8 @@ public class SolrDispatchFilter extends BaseSolrFilter {
boolean requestContinues = false;
final AtomicBoolean isAuthenticated = new AtomicBoolean(false);
AuthenticationPlugin authenticationPlugin = cores.getAuthenticationPlugin();
- if (authenticationPlugin == null) {
+ if (authenticationPlugin == null ||
+ PKIAuthenticationPlugin.PATH.equals(((HttpServletRequest)request).getPathInfo())) {
return true;
} else {
//special case when solr is securing inter-node requests
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/44c30f05/solr/core/src/test/org/apache/solr/security/BasicAuthIntegrationTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/security/BasicAuthIntegrationTest.java b/solr/core/src/test/org/apache/solr/security/BasicAuthIntegrationTest.java
index 8a5483a..6070cf6 100644
--- a/solr/core/src/test/org/apache/solr/security/BasicAuthIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/security/BasicAuthIntegrationTest.java
@@ -193,7 +193,10 @@ public class BasicAuthIntegrationTest extends TestMiniSolrCloudClusterBase {
cloudSolrClient.request(update);
- executeCommand(baseUrl + authzPrefix, cl, "{set-property : { blockUnknown: true}}", "harry", "HarryIsUberCool");
+ executeCommand(baseUrl + authcPrefix, cl, "{set-property : { blockUnknown: true}}", "harry", "HarryIsUberCool");
+ verifySecurityStatus(cl, baseUrl + authcPrefix, "authentication/blockUnknown", "true", 20, "harry", "HarryIsUberCool");
+ verifySecurityStatus(cl, baseUrl + PKIAuthenticationPlugin.PATH + "?wt=json", "key", NOT_NULL_PREDICATE, 20);
+
String[] toolArgs = new String[]{
"status", "-solr", baseUrl};
ByteArrayOutputStream baos = new ByteArrayOutputStream();
@@ -211,7 +214,7 @@ public class BasicAuthIntegrationTest extends TestMiniSolrCloudClusterBase {
log.error("RunExampleTool failed due to: " + e +
"; stdout from tool prior to failure: " + baos.toString(StandardCharsets.UTF_8.name()));
}
- executeCommand(baseUrl + authzPrefix, cl, "{set-property : { blockUnknown: false}}", "harry", "HarryIsUberCool");
+ executeCommand(baseUrl + authcPrefix, cl, "{set-property : { blockUnknown: false}}", "harry", "HarryIsUberCool");
} finally {
if (cl != null) {
HttpClientUtil.close(cl);
@@ -219,7 +222,8 @@ public class BasicAuthIntegrationTest extends TestMiniSolrCloudClusterBase {
}
}
- public static void executeCommand(String url, HttpClient cl, String payload, String user, String pwd) throws IOException {
+ public static void executeCommand(String url, HttpClient cl, String payload, String user, String pwd)
+ throws IOException {
HttpPost httpPost;
HttpResponse r;
httpPost = new HttpPost(url);
@@ -231,15 +235,29 @@ public class BasicAuthIntegrationTest extends TestMiniSolrCloudClusterBase {
Utils.consumeFully(r.getEntity());
}
- public static void verifySecurityStatus(HttpClient cl, String url, String objPath, Object expected, int count) throws Exception {
+ public static void verifySecurityStatus(HttpClient cl, String url, String objPath,
+ Object expected, int count) throws Exception {
+ verifySecurityStatus(cl, url, objPath, expected, count, null, null);
+ }
+
+
+ public static void verifySecurityStatus(HttpClient cl, String url, String objPath,
+ Object expected, int count, String user, String pwd)
+ throws Exception {
boolean success = false;
String s = null;
List<String> hierarchy = StrUtils.splitSmart(objPath, '/');
for (int i = 0; i < count; i++) {
HttpGet get = new HttpGet(url);
+ if (user != null) setBasicAuthHeader(get, user, pwd);
HttpResponse rsp = cl.execute(get);
s = EntityUtils.toString(rsp.getEntity());
- Map m = (Map) Utils.fromJSONString(s);
+ Map m = null;
+ try {
+ m = (Map) Utils.fromJSONString(s);
+ } catch (Exception e) {
+ fail("Invalid json " + s);
+ }
Utils.consumeFully(rsp.getEntity());
Object actual = Utils.getObjectByPath(m, true, hierarchy);
if (expected instanceof Predicate) {
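
The test now flips blockUnknown via the authentication endpoint (authcPrefix) rather than the authorization one, and verifies the PKI handshake path stays reachable. A hedged sketch of the equivalent admin call (URL, user, and password are illustrative defaults, not values from this test):

import java.nio.charset.StandardCharsets;
import java.util.Base64;

import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;

// Sketch only: POST a set-property command to /admin/authentication
// as an authenticated user.
public class BlockUnknownDemo {
  public static void main(String[] args) throws Exception {
    try (CloseableHttpClient client = HttpClients.createDefault()) {
      HttpPost post = new HttpPost("http://localhost:8983/solr/admin/authentication");
      post.setHeader("Content-Type", "application/json");
      post.setHeader("Authorization", "Basic " + Base64.getEncoder()
          .encodeToString("solr:SolrRocks".getBytes(StandardCharsets.UTF_8)));
      post.setEntity(new ByteArrayEntity(
          "{\"set-property\":{\"blockUnknown\":true}}".getBytes(StandardCharsets.UTF_8)));
      HttpResponse rsp = client.execute(post);
      System.out.println(EntityUtils.toString(rsp.getEntity()));
    }
  }
}
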
[02/50] [abbrv] lucene-solr:apiv2: Add 6.2.0 back compat test indices
Posted by no...@apache.org.
Add 6.2.0 back compat test indices
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/98118028
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/98118028
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/98118028
Branch: refs/heads/apiv2
Commit: 981180284db64daa6102e439ff42b72c964b52df
Parents: 312f456
Author: Mike McCandless <mi...@apache.org>
Authored: Wed Aug 24 18:03:03 2016 -0400
Committer: Mike McCandless <mi...@apache.org>
Committed: Wed Aug 24 18:03:03 2016 -0400
----------------------------------------------------------------------
.../lucene/index/TestBackwardsCompatibility.java | 4 +++-
.../org/apache/lucene/index/index.6.2.0-cfs.zip | Bin 0 -> 15880 bytes
.../org/apache/lucene/index/index.6.2.0-nocfs.zip | Bin 0 -> 15867 bytes
3 files changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/98118028/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
index 03480d7..22b79b4 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
@@ -226,7 +226,9 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
"6.0.1-cfs",
"6.0.1-nocfs",
"6.1.0-cfs",
- "6.1.0-nocfs"
+ "6.1.0-nocfs",
+ "6.2.0-cfs",
+ "6.2.0-nocfs"
};
final String[] unsupportedNames = {
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/98118028/lucene/backward-codecs/src/test/org/apache/lucene/index/index.6.2.0-cfs.zip
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/index.6.2.0-cfs.zip b/lucene/backward-codecs/src/test/org/apache/lucene/index/index.6.2.0-cfs.zip
new file mode 100644
index 0000000..36b6d83
Binary files /dev/null and b/lucene/backward-codecs/src/test/org/apache/lucene/index/index.6.2.0-cfs.zip differ
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/98118028/lucene/backward-codecs/src/test/org/apache/lucene/index/index.6.2.0-nocfs.zip
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/index.6.2.0-nocfs.zip b/lucene/backward-codecs/src/test/org/apache/lucene/index/index.6.2.0-nocfs.zip
new file mode 100644
index 0000000..95ae26c
Binary files /dev/null and b/lucene/backward-codecs/src/test/org/apache/lucene/index/index.6.2.0-nocfs.zip differ
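
The back-compat test unzips each archived index and verifies that the current release can still read it. A minimal sketch of the underlying check (path illustrative, assuming Lucene 6.x):

import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

// Sketch only: an index written by 6.2.0 must still open under current codecs.
public class OpenOldIndexDemo {
  public static void main(String[] args) throws Exception {
    try (Directory dir = FSDirectory.open(Paths.get("/tmp/index-6.2.0"));
         DirectoryReader reader = DirectoryReader.open(dir)) {
      System.out.println("maxDoc=" + reader.maxDoc());
    }
  }
}
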
[38/50] [abbrv] lucene-solr:apiv2: [SOLR-9444] Fix path usage for
cloud backup/restore
Posted by no...@apache.org.
[SOLR-9444] Fix path usage for cloud backup/restore
- Refactored code using the URI.getPath() API to use URI instances uniformly.
- During serialization of URI instances, used the toASCIIString() API to
generate the appropriate String representation.
- Updated the unit test to use whitespace in the backup directory path.
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e138462a
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e138462a
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e138462a
Branch: refs/heads/apiv2
Commit: e138462a82800be3811017062868051c14e560e6
Parents: e13f7ae
Author: Hrishikesh Gadre <hg...@cloudera.com>
Authored: Sun Aug 28 14:48:24 2016 -0700
Committer: Hrishikesh Gadre <hg...@cloudera.com>
Committed: Thu Sep 1 16:28:19 2016 -0700
----------------------------------------------------------------------
.../java/org/apache/solr/cloud/BackupCmd.java | 6 ++--
.../java/org/apache/solr/cloud/RestoreCmd.java | 6 ++--
.../apache/solr/core/backup/BackupManager.java | 34 +++++++++---------
.../backup/repository/BackupRepository.java | 14 ++++++--
.../backup/repository/HdfsBackupRepository.java | 29 +++++++++++++---
.../repository/LocalFileSystemRepository.java | 36 ++++++++++++--------
.../apache/solr/handler/ReplicationHandler.java | 12 ++++---
.../org/apache/solr/handler/RestoreCore.java | 6 ++--
.../org/apache/solr/handler/SnapShooter.java | 11 +++---
.../solr/handler/admin/CoreAdminOperation.java | 7 ++--
.../cloud/TestLocalFSCloudBackupRestore.java | 10 +++++-
11 files changed, 112 insertions(+), 59 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e138462a/solr/core/src/java/org/apache/solr/cloud/BackupCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/BackupCmd.java b/solr/core/src/java/org/apache/solr/cloud/BackupCmd.java
index 679cb07..648eee8 100644
--- a/solr/core/src/java/org/apache/solr/cloud/BackupCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/BackupCmd.java
@@ -62,7 +62,6 @@ public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
String asyncId = message.getStr(ASYNC);
String repo = message.getStr(CoreAdminParams.BACKUP_REPOSITORY);
- String location = message.getStr(CoreAdminParams.BACKUP_LOCATION);
Map<String, String> requestMap = new HashMap<>();
Instant startTime = Instant.now();
@@ -72,7 +71,8 @@ public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
BackupManager backupMgr = new BackupManager(repository, ocmh.zkStateReader, collectionName);
// Backup location
- URI backupPath = repository.createURI(location, backupName);
+ URI location = repository.createURI(message.getStr(CoreAdminParams.BACKUP_LOCATION));
+ URI backupPath = repository.resolve(location, backupName);
//Validating if the directory already exists.
if (repository.exists(backupPath)) {
@@ -94,7 +94,7 @@ public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString());
params.set(NAME, slice.getName());
params.set(CoreAdminParams.BACKUP_REPOSITORY, repo);
- params.set(CoreAdminParams.BACKUP_LOCATION, backupPath.getPath()); // note: index dir will be here then the "snapshot." + slice name
+ params.set(CoreAdminParams.BACKUP_LOCATION, backupPath.toASCIIString()); // note: index dir will be here then the "snapshot." + slice name
params.set(CORE_NAME_PROP, coreName);
ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap);
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e138462a/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java b/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java
index af2215c..63d5686 100644
--- a/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java
@@ -79,13 +79,13 @@ public class RestoreCmd implements OverseerCollectionMessageHandler.Cmd {
ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
String asyncId = message.getStr(ASYNC);
String repo = message.getStr(CoreAdminParams.BACKUP_REPOSITORY);
- String location = message.getStr(CoreAdminParams.BACKUP_LOCATION);
Map<String, String> requestMap = new HashMap<>();
CoreContainer cc = ocmh.overseer.getZkController().getCoreContainer();
BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));
- URI backupPath = repository.createURI(location, backupName);
+ URI location = repository.createURI(message.getStr(CoreAdminParams.BACKUP_LOCATION));
+ URI backupPath = repository.resolve(location, backupName);
ZkStateReader zkStateReader = ocmh.zkStateReader;
BackupManager backupMgr = new BackupManager(repository, zkStateReader, restoreCollectionName);
@@ -195,7 +195,7 @@ public class RestoreCmd implements OverseerCollectionMessageHandler.Cmd {
ModifiableSolrParams params = new ModifiableSolrParams();
params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.RESTORECORE.toString());
params.set(NAME, "snapshot." + slice.getName());
- params.set(CoreAdminParams.BACKUP_LOCATION, backupPath.getPath());
+ params.set(CoreAdminParams.BACKUP_LOCATION, backupPath.toASCIIString());
params.set(CoreAdminParams.BACKUP_REPOSITORY, repo);
ocmh.sliceCmd(clusterState, params, null, slice, shardHandler, asyncId, requestMap);
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e138462a/solr/core/src/java/org/apache/solr/core/backup/BackupManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/backup/BackupManager.java b/solr/core/src/java/org/apache/solr/core/backup/BackupManager.java
index 51227e8..e650553 100644
--- a/solr/core/src/java/org/apache/solr/core/backup/BackupManager.java
+++ b/solr/core/src/java/org/apache/solr/core/backup/BackupManager.java
@@ -87,12 +87,12 @@ public class BackupManager {
* @return the configuration parameters for the specified backup.
* @throws IOException In case of errors.
*/
- public Properties readBackupProperties(String backupLoc, String backupId) throws IOException {
+ public Properties readBackupProperties(URI backupLoc, String backupId) throws IOException {
Preconditions.checkNotNull(backupLoc);
Preconditions.checkNotNull(backupId);
// Backup location
- URI backupPath = repository.createURI(backupLoc, backupId);
+ URI backupPath = repository.resolve(backupLoc, backupId);
if (!repository.exists(backupPath)) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Couldn't restore since doesn't exist: " + backupPath);
}
@@ -113,8 +113,8 @@ public class BackupManager {
* @param props The backup properties
* @throws IOException in case of I/O error
*/
- public void writeBackupProperties(String backupLoc, String backupId, Properties props) throws IOException {
- URI dest = repository.createURI(backupLoc, backupId, BACKUP_PROPS_FILE);
+ public void writeBackupProperties(URI backupLoc, String backupId, Properties props) throws IOException {
+ URI dest = repository.resolve(backupLoc, backupId, BACKUP_PROPS_FILE);
try (Writer propsWriter = new OutputStreamWriter(repository.createOutput(dest), StandardCharsets.UTF_8)) {
props.store(propsWriter, "Backup properties file");
}
@@ -128,10 +128,10 @@ public class BackupManager {
* @return the meta-data information for the backed-up collection.
* @throws IOException in case of errors.
*/
- public DocCollection readCollectionState(String backupLoc, String backupId, String collectionName) throws IOException {
+ public DocCollection readCollectionState(URI backupLoc, String backupId, String collectionName) throws IOException {
Preconditions.checkNotNull(collectionName);
- URI zkStateDir = repository.createURI(backupLoc, backupId, ZK_STATE_DIR);
+ URI zkStateDir = repository.resolve(backupLoc, backupId, ZK_STATE_DIR);
try (IndexInput is = repository.openInput(zkStateDir, COLLECTION_PROPS_FILE, IOContext.DEFAULT)) {
byte[] arr = new byte[(int) is.length()]; // probably ok since the json file should be small.
is.readBytes(arr, 0, (int) is.length());
@@ -149,9 +149,9 @@ public class BackupManager {
* @param collectionState The collection meta-data to be stored.
* @throws IOException in case of I/O errors.
*/
- public void writeCollectionState(String backupLoc, String backupId, String collectionName,
+ public void writeCollectionState(URI backupLoc, String backupId, String collectionName,
DocCollection collectionState) throws IOException {
- URI dest = repository.createURI(backupLoc, backupId, ZK_STATE_DIR, COLLECTION_PROPS_FILE);
+ URI dest = repository.resolve(backupLoc, backupId, ZK_STATE_DIR, COLLECTION_PROPS_FILE);
try (OutputStream collectionStateOs = repository.createOutput(dest)) {
collectionStateOs.write(Utils.toJSON(Collections.singletonMap(collectionName, collectionState)));
}
@@ -166,9 +166,9 @@ public class BackupManager {
* @param targetConfigName The name of the config to be created.
* @throws IOException in case of I/O errors.
*/
- public void uploadConfigDir(String backupLoc, String backupId, String sourceConfigName, String targetConfigName)
+ public void uploadConfigDir(URI backupLoc, String backupId, String sourceConfigName, String targetConfigName)
throws IOException {
- URI source = repository.createURI(backupLoc, backupId, ZK_STATE_DIR, CONFIG_STATE_DIR, sourceConfigName);
+ URI source = repository.resolve(backupLoc, backupId, ZK_STATE_DIR, CONFIG_STATE_DIR, sourceConfigName);
String zkPath = ZkConfigManager.CONFIGS_ZKNODE + "/" + targetConfigName;
uploadToZk(zkStateReader.getZkClient(), source, zkPath);
}
@@ -181,10 +181,10 @@ public class BackupManager {
* @param configName The name of the config to be saved.
* @throws IOException in case of I/O errors.
*/
- public void downloadConfigDir(String backupLoc, String backupId, String configName) throws IOException {
- URI dest = repository.createURI(backupLoc, backupId, ZK_STATE_DIR, CONFIG_STATE_DIR, configName);
- repository.createDirectory(repository.createURI(backupLoc, backupId, ZK_STATE_DIR));
- repository.createDirectory(repository.createURI(backupLoc, backupId, ZK_STATE_DIR, CONFIG_STATE_DIR));
+ public void downloadConfigDir(URI backupLoc, String backupId, String configName) throws IOException {
+ URI dest = repository.resolve(backupLoc, backupId, ZK_STATE_DIR, CONFIG_STATE_DIR, configName);
+ repository.createDirectory(repository.resolve(backupLoc, backupId, ZK_STATE_DIR));
+ repository.createDirectory(repository.resolve(backupLoc, backupId, ZK_STATE_DIR, CONFIG_STATE_DIR));
repository.createDirectory(dest);
downloadFromZK(zkStateReader.getZkClient(), ZkConfigManager.CONFIGS_ZKNODE + "/" + configName, dest);
@@ -201,11 +201,11 @@ public class BackupManager {
if (children.size() == 0) {
log.info("Writing file {}", file);
byte[] data = zkClient.getData(zkPath + "/" + file, null, null, true);
- try (OutputStream os = repository.createOutput(repository.createURI(dir.getPath(), file))) {
+ try (OutputStream os = repository.createOutput(repository.resolve(dir, file))) {
os.write(data);
}
} else {
- downloadFromZK(zkClient, zkPath + "/" + file, repository.createURI(dir.getPath(), file));
+ downloadFromZK(zkClient, zkPath + "/" + file, repository.resolve(dir, file));
}
}
} catch (KeeperException | InterruptedException e) {
@@ -221,7 +221,7 @@ public class BackupManager {
for (String file : repository.listAll(sourceDir)) {
String zkNodePath = destZkPath + "/" + file;
- URI path = repository.createURI(sourceDir.getPath(), file);
+ URI path = repository.resolve(sourceDir, file);
PathType t = repository.getPathType(path);
switch (t) {
case FILE: {
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e138462a/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepository.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepository.java b/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepository.java
index 8950ce7..875be18 100644
--- a/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepository.java
+++ b/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepository.java
@@ -57,13 +57,23 @@ public interface BackupRepository extends NamedListInitializedPlugin, Closeable
<T> T getConfigProperty(String name);
/**
- * This method creates a URI using the specified path components (as method arguments).
+ * This method returns the URI representation for the specified path.
+ * Note - the specified path could be a fully qualified URI OR a relative path for a file-system.
*
+ * @param path The path specified by the user.
+ * @return the URI representation of the user supplied value
+ */
+ URI createURI(String path);
+
+ /**
+ * This method resolves a URI using the specified path components (as method arguments).
+ *
+ * @param baseUri The base URI to use for creating the path
* @param pathComponents
* The directory (or file-name) to be included in the URI.
* @return A URI containing absolute path
*/
- URI createURI(String... pathComponents);
+ URI resolve(URI baseUri, String... pathComponents);
/**
* This method checks if the specified path exists in this repository.
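
The interface change above splits path handling in two: createURI() normalizes the user-supplied location once, and resolve() appends components to an absolute base. A small sketch of the intended call pattern (method names are from the diff; the LocalFileSystemRepository choice is illustrative):

import java.net.URI;

import org.apache.solr.core.backup.repository.BackupRepository;
import org.apache.solr.core.backup.repository.LocalFileSystemRepository;

// Sketch only: the two-step contract introduced above.
public class RepositoryPathDemo {
  static URI snapshotPath(BackupRepository repo, String location, String backupName) {
    URI base = repo.createURI(location);   // absolute URI, or local-path fallback
    return repo.resolve(base, backupName); // e.g. <location>/<backupName>
  }

  public static void main(String[] args) {
    BackupRepository repo = new LocalFileSystemRepository(); // illustrative choice
    System.out.println(snapshotPath(repo, "/backups/my backup", "snapshot.demo"));
  }
}
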
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e138462a/solr/core/src/java/org/apache/solr/core/backup/repository/HdfsBackupRepository.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/backup/repository/HdfsBackupRepository.java b/solr/core/src/java/org/apache/solr/core/backup/repository/HdfsBackupRepository.java
index bb148de..f12d9fd 100644
--- a/solr/core/src/java/org/apache/solr/core/backup/repository/HdfsBackupRepository.java
+++ b/solr/core/src/java/org/apache/solr/core/backup/repository/HdfsBackupRepository.java
@@ -20,6 +20,7 @@ package org.apache.solr.core.backup.repository;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
+import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
@@ -88,11 +89,31 @@ public class HdfsBackupRepository implements BackupRepository {
}
@Override
- public URI createURI(String... pathComponents) {
- Path result = baseHdfsPath;
- for (String p : pathComponents) {
- result = new Path(result, p);
+ public URI createURI(String location) {
+ Preconditions.checkNotNull(location);
+
+ URI result = null;
+ try {
+ result = new URI(location);
+ if (!result.isAbsolute()) {
+ result = resolve(this.baseHdfsPath.toUri(), location);
+ }
+ } catch (URISyntaxException ex) {
+ result = resolve(this.baseHdfsPath.toUri(), location);
+ }
+
+ return result;
+ }
+
+ @Override
+ public URI resolve(URI baseUri, String... pathComponents) {
+ Preconditions.checkArgument(baseUri.isAbsolute());
+
+ Path result = new Path(baseUri);
+ for (String path : pathComponents) {
+ result = new Path(result, path);
}
+
return result.toUri();
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e138462a/solr/core/src/java/org/apache/solr/core/backup/repository/LocalFileSystemRepository.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/backup/repository/LocalFileSystemRepository.java b/solr/core/src/java/org/apache/solr/core/backup/repository/LocalFileSystemRepository.java
index 86c4110..4ac2558 100644
--- a/solr/core/src/java/org/apache/solr/core/backup/repository/LocalFileSystemRepository.java
+++ b/solr/core/src/java/org/apache/solr/core/backup/repository/LocalFileSystemRepository.java
@@ -20,19 +20,20 @@ package org.apache.solr.core.backup.repository;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
+import java.net.URISyntaxException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
+
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.SimpleFSDirectory;
-import org.apache.lucene.util.Constants;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.DirectoryFactory;
@@ -58,21 +59,28 @@ public class LocalFileSystemRepository implements BackupRepository {
}
@Override
- public URI createURI(String... pathComponents) {
- Preconditions.checkArgument(pathComponents.length > 0);
-
- String basePath = Preconditions.checkNotNull(pathComponents[0]);
- // Note the URI.getPath() invocation on Windows platform generates an invalid URI.
- // Refer to http://stackoverflow.com/questions/9834776/java-nio-file-path-issue
- // Since the caller may have used this method to generate the string representation
- // for the pathComponents, we implement a work-around specifically for Windows platform
- // to remove the leading '/' character.
- if (Constants.WINDOWS) {
- basePath = basePath.replaceFirst("^/(.:/)", "$1");
+ public URI createURI(String location) {
+ Preconditions.checkNotNull(location);
+
+ URI result = null;
+ try {
+ result = new URI(location);
+ if (!result.isAbsolute()) {
+ result = Paths.get(location).toUri();
+ }
+ } catch (URISyntaxException ex) {
+ result = Paths.get(location).toUri();
}
- Path result = Paths.get(basePath);
- for (int i = 1; i < pathComponents.length; i++) {
+ return result;
+ }
+
+ @Override
+ public URI resolve(URI baseUri, String... pathComponents) {
+ Preconditions.checkArgument(pathComponents.length > 0);
+
+ Path result = Paths.get(baseUri);
+ for (int i = 0; i < pathComponents.length; i++) {
result = result.resolve(pathComponents[i]);
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e138462a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
index aee3b97..84e1ba2 100644
--- a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
@@ -443,14 +443,15 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
location = core.getDataDir();
}
+ URI locationUri = repo.createURI(location);
+
//If name is not provided then look for the last unnamed( the ones with the snapshot.timestamp format)
//snapshot folder since we allow snapshots to be taken without providing a name. Pick the latest timestamp.
if (name == null) {
- URI basePath = repo.createURI(location);
- String[] filePaths = repo.listAll(basePath);
+ String[] filePaths = repo.listAll(locationUri);
List<OldBackupDirectory> dirs = new ArrayList<>();
for (String f : filePaths) {
- OldBackupDirectory obd = new OldBackupDirectory(basePath, f);
+ OldBackupDirectory obd = new OldBackupDirectory(locationUri, f);
if (obd.getTimestamp().isPresent()) {
dirs.add(obd);
}
@@ -465,7 +466,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
name = "snapshot." + name;
}
- RestoreCore restoreCore = new RestoreCore(repo, core, location, name);
+ RestoreCore restoreCore = new RestoreCore(repo, core, locationUri, name);
try {
MDC.put("RestoreCore.core", core.getName());
MDC.put("RestoreCore.backupLocation", location);
@@ -561,7 +562,8 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
}
// small race here before the commit point is saved
- SnapShooter snapShooter = new SnapShooter(repo, core, location, params.get(NAME), commitName);
+ URI locationUri = repo.createURI(location);
+ SnapShooter snapShooter = new SnapShooter(repo, core, locationUri, params.get(NAME), commitName);
snapShooter.validateCreateSnapshot();
snapShooter.createSnapAsync(indexCommit, numberToKeep, (nl) -> snapShootDetails = nl);
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e138462a/solr/core/src/java/org/apache/solr/handler/RestoreCore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/RestoreCore.java b/solr/core/src/java/org/apache/solr/handler/RestoreCore.java
index 6aef35c..62cb93f 100644
--- a/solr/core/src/java/org/apache/solr/handler/RestoreCore.java
+++ b/solr/core/src/java/org/apache/solr/handler/RestoreCore.java
@@ -44,11 +44,11 @@ public class RestoreCore implements Callable<Boolean> {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private final String backupName;
- private final String backupLocation;
+ private final URI backupLocation;
private final SolrCore core;
private final BackupRepository backupRepo;
- public RestoreCore(BackupRepository backupRepo, SolrCore core, String location, String name) {
+ public RestoreCore(BackupRepository backupRepo, SolrCore core, URI location, String name) {
this.backupRepo = backupRepo;
this.core = core;
this.backupLocation = location;
@@ -62,7 +62,7 @@ public class RestoreCore implements Callable<Boolean> {
public boolean doRestore() throws Exception {
- URI backupPath = backupRepo.createURI(backupLocation, backupName);
+ URI backupPath = backupRepo.resolve(backupLocation, backupName);
SimpleDateFormat dateFormat = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT);
String restoreIndexName = "restore." + dateFormat.format(new Date());
String restoreIndexPath = core.getDataDir() + restoreIndexName;
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e138462a/solr/core/src/java/org/apache/solr/handler/SnapShooter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/SnapShooter.java b/solr/core/src/java/org/apache/solr/handler/SnapShooter.java
index e12649d..52f4889 100644
--- a/solr/core/src/java/org/apache/solr/handler/SnapShooter.java
+++ b/solr/core/src/java/org/apache/solr/handler/SnapShooter.java
@@ -19,6 +19,7 @@ package org.apache.solr.handler;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.net.URI;
+import java.nio.file.Paths;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collection;
@@ -75,17 +76,17 @@ public class SnapShooter {
} else {
snapDirStr = core.getCoreDescriptor().getInstanceDir().resolve(location).normalize().toString();
}
- initialize(new LocalFileSystemRepository(), core, snapDirStr, snapshotName, null);
+ initialize(new LocalFileSystemRepository(), core, Paths.get(snapDirStr).toUri(), snapshotName, null);
}
- public SnapShooter(BackupRepository backupRepo, SolrCore core, String location, String snapshotName, String commitName) {
+ public SnapShooter(BackupRepository backupRepo, SolrCore core, URI location, String snapshotName, String commitName) {
initialize(backupRepo, core, location, snapshotName, commitName);
}
- private void initialize(BackupRepository backupRepo, SolrCore core, String location, String snapshotName, String commitName) {
+ private void initialize(BackupRepository backupRepo, SolrCore core, URI location, String snapshotName, String commitName) {
this.solrCore = Preconditions.checkNotNull(core);
this.backupRepo = Preconditions.checkNotNull(backupRepo);
- this.baseSnapDirPath = backupRepo.createURI(Preconditions.checkNotNull(location)).normalize();
+ this.baseSnapDirPath = location;
this.snapshotName = snapshotName;
if (snapshotName != null) {
directoryName = "snapshot." + snapshotName;
@@ -93,7 +94,7 @@ public class SnapShooter {
SimpleDateFormat fmt = new SimpleDateFormat(DATE_FMT, Locale.ROOT);
directoryName = "snapshot." + fmt.format(new Date());
}
- this.snapshotDirPath = backupRepo.createURI(location, directoryName);
+ this.snapshotDirPath = backupRepo.resolve(location, directoryName);
this.commitName = commitName;
}
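The createURI-to-resolve rename concerns exactly this kind of base-plus-segment resolution. For context, a minimal JDK-only sketch of the classic java.net.URI pitfall (illustrative; not Solr's BackupRepository implementation):

    import java.net.URI;

    public class UriResolveDemo {
      public static void main(String[] args) {
        URI withSlash = URI.create("file:///backups/");
        URI noSlash   = URI.create("file:///backups");

        // URI.resolve drops the last path segment of a base without a trailing
        // slash, which is why a repository-level resolve helper is safer than
        // ad-hoc string concatenation:
        System.out.println(withSlash.resolve("snapshot.1")); // file:///backups/snapshot.1
        System.out.println(noSlash.resolve("snapshot.1"));   // file:///snapshot.1
      }
    }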
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e138462a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java
index e4103c5..dfc7a6f 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java
@@ -18,6 +18,7 @@ package org.apache.solr.handler.admin;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
+import java.net.URI;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
@@ -803,8 +804,9 @@ enum CoreAdminOperation implements CoreAdminOp {
// parameter is not supplied, the latest index commit is backed-up.
String commitName = params.get(CoreAdminParams.COMMIT_NAME);
+ URI locationUri = repository.createURI(location);
try (SolrCore core = it.handler.coreContainer.getCore(cname)) {
- SnapShooter snapShooter = new SnapShooter(repository, core, location, name, commitName);
+ SnapShooter snapShooter = new SnapShooter(repository, core, locationUri, name, commitName);
// validateCreateSnapshot will create parent dirs instead of throw; that choice is dubious.
// But we want to throw. One reason is that
// this dir really should, in fact must, already exist here if triggered via a collection backup on a shared
@@ -847,8 +849,9 @@ enum CoreAdminOperation implements CoreAdminOp {
+ " parameter or as a default repository property");
}
+ URI locationUri = repository.createURI(location);
try (SolrCore core = it.handler.coreContainer.getCore(cname)) {
- RestoreCore restoreCore = new RestoreCore(repository, core, location, name);
+ RestoreCore restoreCore = new RestoreCore(repository, core, locationUri, name);
boolean success = restoreCore.doRestore();
if (!success) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to restore core=" + core.getName());
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e138462a/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java b/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java
index db68913..da8e767 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java
@@ -24,12 +24,20 @@ import org.junit.BeforeClass;
* such file-system would be exposed via local file-system API.
*/
public class TestLocalFSCloudBackupRestore extends AbstractCloudBackupRestoreTestCase {
+ private static String backupLocation;
@BeforeClass
public static void setupClass() throws Exception {
configureCluster(NUM_SHARDS)// nodes
.addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
.configure();
+
+ boolean whitespacesInPath = random().nextBoolean();
+ if (whitespacesInPath) {
+ backupLocation = createTempDir("my backup").toFile().getAbsolutePath();
+ } else {
+ backupLocation = createTempDir("mybackup").toFile().getAbsolutePath();
+ }
}
@Override
@@ -44,6 +52,6 @@ public class TestLocalFSCloudBackupRestore extends AbstractCloudBackupRestoreTes
@Override
public String getBackupLocation() {
- return createTempDir().toFile().getAbsolutePath();
+ return backupLocation;
}
}
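The randomized "my backup" directory above exercises backup locations containing whitespace, which is precisely where hand-converting a filesystem path to a URI goes wrong. A small JDK-only sketch of the difference (the path is made up):

    import java.net.URI;
    import java.net.URISyntaxException;
    import java.nio.file.Paths;

    public class PathToUriDemo {
      public static void main(String[] args) {
        String location = "/var/backups/my backup"; // embedded space

        // new URI(String) parses per RFC 2396 and rejects a raw space:
        try {
          new URI(location);
        } catch (URISyntaxException expected) {
          System.out.println("rejected: " + expected.getMessage());
        }

        // Paths.get(...).toUri() percent-encodes as needed, which is what the
        // Paths.get(snapDirStr).toUri() call in SnapShooter relies on:
        System.out.println(Paths.get(location).toUri()); // file:///var/backups/my%20backup
      }
    }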
[14/50] [abbrv] lucene-solr:apiv2: LUCENE-7416: Simplify MatchNoDocs
rewrite in BQ (using Java 8 streams);
add another special case: MUST_NOT with MatchAllDocsQuery also produces no
results
Posted by no...@apache.org.
LUCENE-7416: Simplify MatchNoDocs rewrite in BQ (using Java 8 streams); add another special case: MUST_NOT with MatchAllDocsQuery also produces no results
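The rewrite composes two set-membership tests with Predicate.or and short-circuits via anyMatch, as the BooleanQuery.java hunk below shows. The idiom in isolation (a generic Java 8 sketch, not Lucene code):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;
    import java.util.function.Predicate;
    import java.util.stream.Stream;

    public class PredicateOrDemo {
      public static void main(String[] args) {
        Set<String> must   = new HashSet<>(Arrays.asList("a", "b"));
        Set<String> filter = new HashSet<>(Arrays.asList("c"));

        // One membership predicate per clause set, OR-ed together; anyMatch
        // stops at the first element found in either set:
        Predicate<String> p = must::contains;
        boolean overlap = Stream.of("x", "c").anyMatch(p.or(filter::contains));

        System.out.println(overlap); // true: "c" is in the second set
      }
    }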
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/646b6bfd
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/646b6bfd
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/646b6bfd
Branch: refs/heads/apiv2
Commit: 646b6bfd2c23df36f911a99fd2807b85a961a36b
Parents: 63b2e80
Author: Uwe Schindler <us...@apache.org>
Authored: Fri Aug 26 09:28:49 2016 +0200
Committer: Uwe Schindler <us...@apache.org>
Committed: Fri Aug 26 09:28:49 2016 +0200
----------------------------------------------------------------------
lucene/CHANGES.txt | 2 +-
.../org/apache/lucene/search/BooleanQuery.java | 14 +++++-----
.../lucene/search/TestBooleanRewrites.java | 28 ++++++++++++++++++++
3 files changed, 37 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/646b6bfd/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 00de5a5..214badc 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -26,7 +26,7 @@ Optimizations
* LUCENE-7416: BooleanQuery optimizes queries that have queries that occur both
in the sets of SHOULD and FILTER clauses, or both in MUST/FILTER and MUST_NOT
- clauses. (Spyros Kapnissis via Adrien Grand)
+ clauses. (Spyros Kapnissis via Adrien Grand, Uwe Schindler)
Other
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/646b6bfd/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java b/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
index b2477e8..e67d7f4 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
@@ -29,6 +29,7 @@ import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
+import java.util.function.Predicate;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.BooleanClause.Occur;
@@ -273,14 +274,15 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
}
// Check whether some clauses are both required and excluded
- if (clauseSets.get(Occur.MUST_NOT).size() > 0) {
- final Set<Query> reqAndExclQueries = new HashSet<Query>(clauseSets.get(Occur.FILTER));
- reqAndExclQueries.addAll(clauseSets.get(Occur.MUST));
- reqAndExclQueries.retainAll(clauseSets.get(Occur.MUST_NOT));
-
- if (reqAndExclQueries.isEmpty() == false) {
+ final Collection<Query> mustNotClauses = clauseSets.get(Occur.MUST_NOT);
+ if (!mustNotClauses.isEmpty()) {
+ final Predicate<Query> p = clauseSets.get(Occur.MUST)::contains;
+ if (mustNotClauses.stream().anyMatch(p.or(clauseSets.get(Occur.FILTER)::contains))) {
return new MatchNoDocsQuery("FILTER or MUST clause also in MUST_NOT");
}
+ if (mustNotClauses.contains(new MatchAllDocsQuery())) {
+ return new MatchNoDocsQuery("MUST_NOT clause is MatchAllDocsQuery");
+ }
}
// remove FILTER clauses that are also MUST clauses
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/646b6bfd/lucene/core/src/test/org/apache/lucene/search/TestBooleanRewrites.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanRewrites.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanRewrites.java
index 4470841..3ec2dd3 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanRewrites.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanRewrites.java
@@ -265,6 +265,34 @@ public class TestBooleanRewrites extends LuceneTestCase {
assertEquals(new MatchNoDocsQuery(), searcher.rewrite(bq2));
}
+ // MatchAllQuery as MUST_NOT clause cannot return anything
+ public void testMatchAllMustNot() throws IOException {
+ IndexSearcher searcher = newSearcher(new MultiReader());
+
+ // Test Must with MatchAll MustNot
+ BooleanQuery bq = new BooleanQuery.Builder()
+ .add(new TermQuery(new Term("foo", "bar")), Occur.MUST)
+ .add(new TermQuery(new Term("foo", "baz")), Occur.FILTER)
+ .add(new TermQuery(new Term("foo", "bad")), Occur.SHOULD)
+ //
+ .add(new MatchAllDocsQuery(), Occur.MUST_NOT)
+ .build();
+
+ assertEquals(new MatchNoDocsQuery(), searcher.rewrite(bq));
+
+ // Test Must with MatchAll MustNot and other MustNot
+ BooleanQuery bq2 = new BooleanQuery.Builder()
+ .add(new TermQuery(new Term("foo", "bar")), Occur.MUST)
+ .add(new TermQuery(new Term("foo", "baz")), Occur.FILTER)
+ .add(new TermQuery(new Term("foo", "bad")), Occur.SHOULD)
+ //
+ .add(new TermQuery(new Term("foo", "bor")), Occur.MUST_NOT)
+ .add(new MatchAllDocsQuery(), Occur.MUST_NOT)
+ .build();
+
+ assertEquals(new MatchNoDocsQuery(), searcher.rewrite(bq2));
+ }
+
public void testRemoveMatchAllFilter() throws IOException {
IndexSearcher searcher = newSearcher(new MultiReader());
[28/50] [abbrv] lucene-solr:apiv2: SOLR-9188: Trying to revert a change
and fix the unexpected IOException in the jenkins failure.
Posted by no...@apache.org.
SOLR-9188: Trying to revert a change and fix the unexpected IOException in the jenkins failure.
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/0ed8c2a7
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/0ed8c2a7
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/0ed8c2a7
Branch: refs/heads/apiv2
Commit: 0ed8c2a7ad7038f99bff3322b06edf948a61dfe0
Parents: 738d527
Author: Noble Paul <no...@apache.org>
Authored: Mon Aug 29 12:24:42 2016 +0530
Committer: Noble Paul <no...@apache.org>
Committed: Mon Aug 29 12:24:42 2016 +0530
----------------------------------------------------------------------
solr/core/src/java/org/apache/solr/security/BasicAuthPlugin.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ed8c2a7/solr/core/src/java/org/apache/solr/security/BasicAuthPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/security/BasicAuthPlugin.java b/solr/core/src/java/org/apache/solr/security/BasicAuthPlugin.java
index 9dc34e7..49c02d7 100644
--- a/solr/core/src/java/org/apache/solr/security/BasicAuthPlugin.java
+++ b/solr/core/src/java/org/apache/solr/security/BasicAuthPlugin.java
@@ -141,7 +141,7 @@ public class BasicAuthPlugin extends AuthenticationPlugin implements ConfigEdita
}
} else {
if (blockUnknown) {
- authenticationFailure(response, "require authentication for pathinfo :"+ request.getPathInfo());
+ authenticationFailure(response, "require authentication");
} else {
request.setAttribute(AuthenticationPlugin.class.getName(), zkAuthentication.getPromptHeaders());
filterChain.doFilter(request, response);
[49/50] [abbrv] lucene-solr:apiv2: SOLR-9127: Excel workbook (.xlsx)
response writer. Use 'wt=xlsx'.
Posted by no...@apache.org.
SOLR-9127: Excel workbook (.xlsx) response writer. Use 'wt=xlsx'.
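Usage is a plain HTTP request with wt=xlsx; the response body is a binary workbook, so save it rather than printing it. A minimal client sketch (the host and collection name are assumptions, not part of this commit):

    import java.io.InputStream;
    import java.net.URL;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.nio.file.StandardCopyOption;

    public class FetchXlsx {
      public static void main(String[] args) throws Exception {
        // Hypothetical host/collection; adjust for the target deployment.
        URL url = new URL("http://localhost:8983/solr/techproducts/select?q=*:*&wt=xlsx");
        try (InputStream in = url.openStream()) {
          Files.copy(in, Paths.get("results.xlsx"), StandardCopyOption.REPLACE_EXISTING);
        }
      }
    }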
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/1a61fb68
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/1a61fb68
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/1a61fb68
Branch: refs/heads/apiv2
Commit: 1a61fb68588d489dfab966dc3e3a894cae7d5b5f
Parents: ecbb588
Author: Noble Paul <no...@apache.org>
Authored: Tue Sep 6 13:22:18 2016 +0530
Committer: Noble Paul <no...@apache.org>
Committed: Tue Sep 6 13:22:18 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 4 +-
.../handler/extraction/XLSXResponseWriter.java | 414 +++++++++++++++++++
.../extraction/solr/collection1/conf/schema.xml | 2 +
.../extraction/TestXLSXResponseWriter.java | 257 ++++++++++++
.../src/java/org/apache/solr/core/SolrCore.java | 8 +-
5 files changed, 683 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a61fb68/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 0d507e3..43f1e95 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -78,7 +78,9 @@ prefix, then you will now get an error as these options are incompatible with nu
New Features
----------------------
* SOLR-5725: facet.method=enum can bypass exact counts calculation with facet.exists=true, it just returns 1 for
- terms which exists in result docset. (Alexey Kozhemiakin, Sebastian Koziel, Radoslaw Zielinski via Mikhail Khludnev)
+ terms which exists in result docset. (Alexey Kozhemiakin, Sebastian Koziel, Radoslaw Zielinski via Mikhail Khludnev)
+
+* SOLR-9127: Excel workbook (.xlsx) response writer. use 'wt=xlsx' (Tony Moriarty, noble)
Bug Fixes
----------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a61fb68/solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/XLSXResponseWriter.java
----------------------------------------------------------------------
diff --git a/solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/XLSXResponseWriter.java b/solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/XLSXResponseWriter.java
new file mode 100644
index 0000000..27a30d1
--- /dev/null
+++ b/solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/XLSXResponseWriter.java
@@ -0,0 +1,414 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.handler.extraction;
+
+import java.io.CharArrayWriter;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.Writer;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Date;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Sets;
+import org.apache.lucene.index.IndexableField;
+import org.apache.poi.ss.usermodel.Cell;
+import org.apache.poi.ss.usermodel.Font;
+import org.apache.poi.ss.usermodel.IndexedColors;
+import org.apache.poi.ss.usermodel.Row;
+import org.apache.poi.ss.usermodel.Sheet;
+import org.apache.poi.xssf.streaming.SXSSFWorkbook;
+import org.apache.poi.xssf.usermodel.XSSFCellStyle;
+import org.apache.solr.common.SolrDocument;
+import org.apache.solr.common.SolrDocumentList;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.response.BasicResultContext;
+import org.apache.solr.response.RawResponseWriter;
+import org.apache.solr.response.ResultContext;
+import org.apache.solr.response.SolrQueryResponse;
+import org.apache.solr.response.TextResponseWriter;
+import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.SchemaField;
+import org.apache.solr.schema.StrField;
+import org.apache.solr.search.DocList;
+import org.apache.solr.search.ReturnFields;
+
+public class XLSXResponseWriter extends RawResponseWriter {
+
+ @Override
+ public void write(OutputStream out, SolrQueryRequest req, SolrQueryResponse rsp) throws IOException {
+ // throw away arraywriter just to satisfy super requirements; we're grabbing
+ // all writes before they go to it anyway
+ XLSXWriter w = new XLSXWriter(new CharArrayWriter(), req, rsp);
+
+ LinkedHashMap<String,String> reqNamesMap = new LinkedHashMap<>();
+ LinkedHashMap<String,Integer> reqWidthsMap = new LinkedHashMap<>();
+
+ Iterator<String> paramNamesIter = req.getParams().getParameterNamesIterator();
+ while (paramNamesIter.hasNext()) {
+ String nextParam = paramNamesIter.next();
+ if (nextParam.startsWith("colname.")) {
+ String field = nextParam.substring("colname.".length());
+ reqNamesMap.put(field, req.getParams().get(nextParam));
+ } else if (nextParam.startsWith("colwidth.")) {
+ String field = nextParam.substring("colwidth.".length());
+ reqWidthsMap.put(field, req.getParams().getInt(nextParam));
+ }
+ }
+
+ try {
+ w.writeResponse(out, reqNamesMap, reqWidthsMap);
+ } finally {
+ w.close();
+ }
+ }
+
+ @Override
+ public String getContentType(SolrQueryRequest request, SolrQueryResponse response) {
+ return "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet";
+ }
+}
+
+class XLSXWriter extends TextResponseWriter {
+
+ SolrQueryRequest req;
+ SolrQueryResponse rsp;
+
+ class SerialWriteWorkbook {
+ SXSSFWorkbook swb;
+ Sheet sh;
+
+ XSSFCellStyle headerStyle;
+ int rowIndex;
+ Row curRow;
+ int cellIndex;
+
+ SerialWriteWorkbook() {
+ this.swb = new SXSSFWorkbook(100);
+ this.sh = this.swb.createSheet();
+
+ this.rowIndex = 0;
+
+ this.headerStyle = (XSSFCellStyle)swb.createCellStyle();
+ this.headerStyle.setFillBackgroundColor(IndexedColors.BLACK.getIndex());
+ //solid fill
+ this.headerStyle.setFillPattern((short)1);
+ Font headerFont = swb.createFont();
+ headerFont.setFontHeightInPoints((short)14);
+ headerFont.setBoldweight(Font.BOLDWEIGHT_BOLD);
+ headerFont.setColor(IndexedColors.WHITE.getIndex());
+ this.headerStyle.setFont(headerFont);
+ }
+
+ void addRow() {
+ curRow = sh.createRow(rowIndex++);
+ cellIndex = 0;
+ }
+
+ void setHeaderRow() {
+ curRow.setHeightInPoints((short)21);
+ }
+
+ //sets last created cell to have header style
+ void setHeaderCell() {
+ curRow.getCell(cellIndex - 1).setCellStyle(this.headerStyle);
+ }
+
+ //set the width of the most recently created column
+ void setColWidth(int charWidth) {
+ //width in poi is units of 1/256th of a character width for some reason
+ this.sh.setColumnWidth(cellIndex - 1, 256*charWidth);
+ }
+
+ void writeCell(String value) {
+ Cell cell = curRow.createCell(cellIndex++);
+ cell.setCellValue(value);
+ }
+
+ void flush(OutputStream out) {
+ try {
+ swb.write(out);
+ } catch (IOException e) {
+ // don't swallow the failure; surface it to the caller instead of
+ // capturing the stack trace into an unused local
+ throw new RuntimeException("failed to write xlsx workbook to output stream", e);
+ } finally {
+ swb.dispose();
+ }
+ }
+ }
+
+ private SerialWriteWorkbook wb = new SerialWriteWorkbook();
+
+ static class XLField {
+ String name;
+ SchemaField sf;
+ }
+
+ private Map<String,XLField> xlFields = new LinkedHashMap<String,XLField>();
+
+ public XLSXWriter(Writer writer, SolrQueryRequest req, SolrQueryResponse rsp){
+ super(writer, req, rsp);
+ this.req = req;
+ this.rsp = rsp;
+ }
+
+ public void writeResponse(OutputStream out, LinkedHashMap<String, String> colNamesMap,
+ LinkedHashMap<String, Integer> colWidthsMap) throws IOException {
+ SolrParams params = req.getParams();
+
+ Collection<String> fields = returnFields.getRequestedFieldNames();
+ Object responseObj = rsp.getValues().get("response");
+ boolean returnOnlyStored = false;
+ if (fields==null||returnFields.hasPatternMatching()) {
+ if (responseObj instanceof SolrDocumentList) {
+ // get the list of fields from the SolrDocumentList
+ if(fields==null) {
+ fields = new LinkedHashSet<String>();
+ }
+ for (SolrDocument sdoc: (SolrDocumentList)responseObj) {
+ fields.addAll(sdoc.getFieldNames());
+ }
+ } else {
+ // get the list of fields from the index
+ Iterable<String> all = req.getSearcher().getFieldNames();
+ if (fields == null) {
+ fields = Sets.newHashSet(all);
+ } else {
+ Iterables.addAll(fields, all);
+ }
+ }
+ if (returnFields.wantsScore()) {
+ fields.add("score");
+ } else {
+ fields.remove("score");
+ }
+ returnOnlyStored = true;
+ }
+
+ for (String field : fields) {
+ if (!returnFields.wantsField(field)) {
+ continue;
+ }
+ if (field.equals("score")) {
+ XLField xlField = new XLField();
+ xlField.name = "score";
+ xlFields.put("score", xlField);
+ continue;
+ }
+
+ SchemaField sf = schema.getFieldOrNull(field);
+ if (sf == null) {
+ FieldType ft = new StrField();
+ sf = new SchemaField(field, ft);
+ }
+
+ // Return only stored fields, unless an explicit field list is specified
+ if (returnOnlyStored && sf != null && !sf.stored()) {
+ continue;
+ }
+
+ XLField xlField = new XLField();
+ xlField.name = field;
+ xlField.sf = sf;
+ xlFields.put(field, xlField);
+ }
+
+
+
+ wb.addRow();
+ //write header
+ for (XLField xlField : xlFields.values()) {
+ String printName = xlField.name;
+ int colWidth = 14;
+
+ String niceName = colNamesMap.get(xlField.name);
+ if (niceName != null) {
+ printName = niceName;
+ }
+
+ Integer niceWidth = colWidthsMap.get(xlField.name);
+ if (niceWidth != null) {
+ colWidth = niceWidth.intValue();
+ }
+
+ writeStr(xlField.name, printName, false);
+ wb.setColWidth(colWidth);
+ wb.setHeaderCell();
+ }
+ wb.setHeaderRow();
+ wb.addRow();
+
+ if (responseObj instanceof ResultContext) {
+ writeDocuments(null, (ResultContext)responseObj );
+ }
+ else if (responseObj instanceof DocList) {
+ ResultContext ctx = new BasicResultContext((DocList)responseObj, returnFields, null, null, req);
+ writeDocuments(null, ctx );
+ } else if (responseObj instanceof SolrDocumentList) {
+ writeSolrDocumentList(null, (SolrDocumentList)responseObj, returnFields );
+ }
+
+ wb.flush(out);
+ wb = null;
+ }
+
+ @Override
+ public void close() throws IOException {
+ super.close();
+ }
+
+ @Override
+ public void writeNamedList(String name, NamedList val) throws IOException {
+ }
+
+ @Override
+ public void writeStartDocumentList(String name,
+ long start, int size, long numFound, Float maxScore) throws IOException
+ {
+ // nothing
+ }
+
+ @Override
+ public void writeEndDocumentList() throws IOException
+ {
+ // nothing
+ }
+
+ //NOTE: a document cannot currently contain another document
+ List tmpList;
+ @Override
+ public void writeSolrDocument(String name, SolrDocument doc, ReturnFields returnFields, int idx ) throws IOException {
+ if (tmpList == null) {
+ tmpList = new ArrayList(1);
+ tmpList.add(null);
+ }
+
+ for (XLField xlField : xlFields.values()) {
+ Object val = doc.getFieldValue(xlField.name);
+ int nVals = val instanceof Collection ? ((Collection)val).size() : (val==null ? 0 : 1);
+ if (nVals == 0) {
+ writeNull(xlField.name);
+ continue;
+ }
+
+ if ((xlField.sf != null && xlField.sf.multiValued()) || nVals > 1) {
+ Collection values;
+ // normalize to a collection
+ if (val instanceof Collection) {
+ values = (Collection)val;
+ } else {
+ tmpList.set(0, val);
+ values = tmpList;
+ }
+
+ writeArray(xlField.name, values.iterator());
+
+ } else {
+ // normalize to first value
+ if (val instanceof Collection) {
+ Collection values = (Collection)val;
+ val = values.iterator().next();
+ }
+ writeVal(xlField.name, val);
+ }
+ }
+ wb.addRow();
+ }
+
+ @Override
+ public void writeStr(String name, String val, boolean needsEscaping) throws IOException {
+ wb.writeCell(val);
+ }
+
+ @Override
+ public void writeMap(String name, Map val, boolean excludeOuter, boolean isFirstVal) throws IOException {
+ }
+
+ @Override
+ public void writeArray(String name, Iterator val) throws IOException {
+ StringBuffer output = new StringBuffer();
+ while (val.hasNext()) {
+ Object v = val.next();
+ if (v instanceof IndexableField) {
+ IndexableField f = (IndexableField)v;
+ if (v instanceof Date) {
+ output.append(((Date) v).toInstant().toString() + "; "); // cast the element, not the iterator
+ } else {
+ output.append(f.stringValue() + "; ");
+ }
+ } else {
+ output.append(v.toString() + "; ");
+ }
+ }
+ if (output.length() > 0) {
+ output.deleteCharAt(output.length()-1);
+ output.deleteCharAt(output.length()-1);
+ }
+ writeStr(name, output.toString(), false);
+ }
+
+ @Override
+ public void writeNull(String name) throws IOException {
+ wb.writeCell("");
+ }
+
+ @Override
+ public void writeInt(String name, String val) throws IOException {
+ wb.writeCell(val);
+ }
+
+ @Override
+ public void writeLong(String name, String val) throws IOException {
+ wb.writeCell(val);
+ }
+
+ @Override
+ public void writeBool(String name, String val) throws IOException {
+ wb.writeCell(val);
+ }
+
+ @Override
+ public void writeFloat(String name, String val) throws IOException {
+ wb.writeCell(val);
+ }
+
+ @Override
+ public void writeDouble(String name, String val) throws IOException {
+ wb.writeCell(val);
+ }
+
+ @Override
+ public void writeDate(String name, Date val) throws IOException {
+ writeDate(name, val.toInstant().toString());
+ }
+
+ @Override
+ public void writeDate(String name, String val) throws IOException {
+ wb.writeCell(val);
+ }
+}
\ No newline at end of file
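Beyond wt=xlsx, the write() method above recognizes per-field colname.<field> and colwidth.<field> parameters for spreadsheet header labels and column widths; the tests below exercise both. A sketch of assembling such a request with SolrJ's ModifiableSolrParams (the field names are illustrative):

    import org.apache.solr.common.params.ModifiableSolrParams;

    public class XlsxParams {
      public static void main(String[] args) {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set("q", "*:*");
        params.set("wt", "xlsx");
        params.set("fl", "id,foo_s");
        params.set("colname.id", "I.D."); // header label for the 'id' column
        params.set("colwidth.id", "10");  // width in characters (POI stores 1/256ths)
        System.out.println(params);       // renders in query-string form
      }
    }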
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a61fb68/solr/contrib/extraction/src/test-files/extraction/solr/collection1/conf/schema.xml
----------------------------------------------------------------------
diff --git a/solr/contrib/extraction/src/test-files/extraction/solr/collection1/conf/schema.xml b/solr/contrib/extraction/src/test-files/extraction/solr/collection1/conf/schema.xml
index 35d123f..bd9adbe 100644
--- a/solr/contrib/extraction/src/test-files/extraction/solr/collection1/conf/schema.xml
+++ b/solr/contrib/extraction/src/test-files/extraction/solr/collection1/conf/schema.xml
@@ -415,6 +415,7 @@
-->
<dynamicField name="*_i" type="int" indexed="true" stored="true"/>
<dynamicField name="*_s" type="string" indexed="true" stored="true"/>
+ <dynamicField name="*_ss" type="string" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_s1" type="string" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="*_l" type="long" indexed="true" stored="true"/>
<dynamicField name="*_t" type="text" indexed="true" stored="true"/>
@@ -422,6 +423,7 @@
<dynamicField name="*_f" type="float" indexed="true" stored="true"/>
<dynamicField name="*_d" type="double" indexed="true" stored="true"/>
<dynamicField name="*_dt" type="date" indexed="true" stored="true"/>
+ <dynamicField name="*_dt1" type="date" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="*_sI" type="string" indexed="true" stored="false"/>
<dynamicField name="*_sS" type="string" indexed="false" stored="true"/>
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a61fb68/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/TestXLSXResponseWriter.java
----------------------------------------------------------------------
diff --git a/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/TestXLSXResponseWriter.java b/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/TestXLSXResponseWriter.java
new file mode 100644
index 0000000..fd4e63d
--- /dev/null
+++ b/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/TestXLSXResponseWriter.java
@@ -0,0 +1,257 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.handler.extraction;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.time.Instant;
+import java.util.Date;
+
+import org.apache.poi.ss.usermodel.Cell;
+import org.apache.poi.ss.usermodel.Row;
+import org.apache.poi.xssf.usermodel.XSSFWorkbook;
+import org.apache.poi.xssf.usermodel.XSSFSheet;
+
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.common.SolrDocument;
+import org.apache.solr.common.SolrDocumentList;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.response.QueryResponseWriter;
+import org.apache.solr.response.SolrQueryResponse;
+import org.apache.solr.response.RawResponseWriter;
+import org.apache.solr.search.SolrReturnFields;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestXLSXResponseWriter extends SolrTestCaseJ4 {
+
+ private static XLSXResponseWriter writerXlsx;
+
+ @BeforeClass
+ public static void beforeClass() throws Exception {
+ System.setProperty("enable.update.log", "false");
+ initCore("solrconfig.xml","schema.xml",getFile("extraction/solr").getAbsolutePath());
+ createIndex();
+ //find a reference to the default response writer so we can redirect its output later
+ SolrCore testCore = h.getCore();
+ QueryResponseWriter writer = testCore.getQueryResponseWriter("xlsx");
+ if (writer instanceof XLSXResponseWriter) {
+ writerXlsx = (XLSXResponseWriter) testCore.getQueryResponseWriter("xlsx");
+ } else {
+ throw new Exception("XLSXResponseWriter not registered with solr core");
+ }
+ }
+
+ public static void createIndex() {
+ assertU(adoc("id","1", "foo_i","-1", "foo_s","hi", "foo_l","12345678987654321", "foo_b","false", "foo_f","1.414","foo_d","-1.0E300","foo_dt1","2000-01-02T03:04:05Z"));
+ assertU(adoc("id","2", "v_ss","hi", "v_ss","there", "v2_ss","nice", "v2_ss","output", "shouldbeunstored","foo"));
+ assertU(adoc("id","3", "shouldbeunstored","foo"));
+ assertU(adoc("id","4", "foo_s1","foo"));
+ assertU(commit());
+ }
+
+ @AfterClass
+ public static void cleanupWriter() throws Exception {
+ writerXlsx = null;
+ }
+
+ @Test
+ public void testStructuredDataViaBaseWriters() throws IOException, Exception {
+ SolrQueryResponse rsp = new SolrQueryResponse();
+ // Don't send a ContentStream back, this will fall back to the configured base writer.
+ // But abuse the CONTENT key to ensure writer is also checking type
+ rsp.add(RawResponseWriter.CONTENT, "test");
+ rsp.add("foo", "bar");
+
+ SolrQueryRequest r = req();
+
+ // check Content-Type
+ assertEquals("application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", writerXlsx.getContentType(r, rsp));
+
+ // test our basic types,and that fields come back in the requested order
+ XSSFSheet resultSheet = getWSResultForQuery(req("q","id:1", "wt","xlsx", "fl","id,foo_s,foo_i,foo_l,foo_b,foo_f,foo_d,foo_dt1"));
+
+ assertEquals("id,foo_s,foo_i,foo_l,foo_b,foo_f,foo_d,foo_dt1\n1,hi,-1,12345678987654321,F,1.414,-1.0E300,2000-01-02T03:04:05Z\n"
+ , getStringFromSheet(resultSheet));
+
+ resultSheet = getWSResultForQuery(req("q","id:1^0", "wt","xlsx", "fl","id,score,foo_s"));
+ // test retrieving score
+ assertEquals("id,score,foo_s\n1,0.0,hi\n", getStringFromSheet(resultSheet));
+
+ resultSheet = getWSResultForQuery(req("q","id:1^0", "wt","xlsx", "colname.id", "I.D.", "colwidth.id", "10",
+ "fl","id,score,foo_s"));
+ // test override colname/width
+ assertEquals("I.D.,score,foo_s\n1,0.0,hi\n", getStringFromSheet(resultSheet));
+ // test colwidth (value returned is in 256ths of a character as per excel standard)
+ assertEquals(10*256, resultSheet.getColumnWidth(0));
+
+ resultSheet = getWSResultForQuery(req("q","id:2", "wt","xlsx", "fl","id,v_ss"));
+ // test multivalued
+ assertEquals("id,v_ss\n2,hi; there\n", getStringFromSheet(resultSheet));
+
+ // test retrieving fields from index
+ resultSheet = getWSResultForQuery(req("q","*:*", "wt","xslx", "fl","*,score"));
+ String result = getStringFromSheet(resultSheet);
+ for (String field : "id,foo_s,foo_i,foo_l,foo_b,foo_f,foo_d,foo_dt1,v_ss,v2_ss,score".split(",")) {
+ assertTrue(result.indexOf(field) >= 0);
+ }
+
+ // test null values
+ resultSheet = getWSResultForQuery(req("q","id:2", "wt","xlsx", "fl","id,foo_s,v_ss"));
+ assertEquals("id,foo_s,v_ss\n2,,hi; there\n", getStringFromSheet(resultSheet));
+
+ // now test SolrDocumentList
+ SolrDocument d = new SolrDocument();
+ SolrDocument d1 = d;
+ d.addField("id","1");
+ d.addField("foo_i",-1);
+ d.addField("foo_s","hi");
+ d.addField("foo_l","12345678987654321L");
+ d.addField("foo_b",false);
+ d.addField("foo_f",1.414f);
+ d.addField("foo_d",-1.0E300);
+ d.addField("foo_dt1", new Date(Instant.parse("2000-01-02T03:04:05Z").toEpochMilli()));
+ d.addField("score", "2.718");
+
+ d = new SolrDocument();
+ SolrDocument d2 = d;
+ d.addField("id","2");
+ d.addField("v_ss","hi");
+ d.addField("v_ss","there");
+ d.addField("v2_ss","nice");
+ d.addField("v2_ss","output");
+ d.addField("score", "89.83");
+ d.addField("shouldbeunstored","foo");
+
+ SolrDocumentList sdl = new SolrDocumentList();
+ sdl.add(d1);
+ sdl.add(d2);
+
+ SolrQueryRequest req = req("q","*:*");
+ rsp = new SolrQueryResponse();
+ rsp.addResponse(sdl);
+
+ rsp.setReturnFields( new SolrReturnFields("id,foo_s", req) );
+
+ resultSheet = getWSResultForQuery(req, rsp);
+ assertEquals("id,foo_s\n1,hi\n2,\n", getStringFromSheet(resultSheet));
+
+ // try scores
+ rsp.setReturnFields( new SolrReturnFields("id,score,foo_s", req) );
+
+ resultSheet = getWSResultForQuery(req, rsp);
+ assertEquals("id,score,foo_s\n1,2.718,hi\n2,89.83,\n", getStringFromSheet(resultSheet));
+
+ // get field values from docs... should be ordered and not include score unless requested
+ rsp.setReturnFields( new SolrReturnFields("*", req) );
+
+ resultSheet = getWSResultForQuery(req, rsp);
+ assertEquals("id,foo_i,foo_s,foo_l,foo_b,foo_f,foo_d,foo_dt1,v_ss,v2_ss\n" +
+ "1,-1,hi,12345678987654321L,false,1.414,-1.0E300,2000-01-02T03:04:05Z,,\n" +
+ "2,,,,,,,,hi; there,nice; output\n", getStringFromSheet(resultSheet));
+
+ // get field values and scores - just check that the scores are there... we don't guarantee where
+ rsp.setReturnFields( new SolrReturnFields("*,score", req) );
+ resultSheet = getWSResultForQuery(req, rsp);
+ String s = getStringFromSheet(resultSheet);
+ assertTrue(s.indexOf("score") >=0 && s.indexOf("2.718") > 0 && s.indexOf("89.83") > 0 );
+
+ // Test field globs
+ rsp.setReturnFields( new SolrReturnFields("id,foo*", req) );
+ resultSheet = getWSResultForQuery(req, rsp);
+ assertEquals("id,foo_i,foo_s,foo_l,foo_b,foo_f,foo_d,foo_dt1\n" +
+ "1,-1,hi,12345678987654321L,false,1.414,-1.0E300,2000-01-02T03:04:05Z\n" +
+ "2,,,,,,,\n", getStringFromSheet(resultSheet));
+
+ rsp.setReturnFields( new SolrReturnFields("id,*_d*", req) );
+ resultSheet = getWSResultForQuery(req, rsp);
+ assertEquals("id,foo_d,foo_dt1\n" +
+ "1,-1.0E300,2000-01-02T03:04:05Z\n" +
+ "2,,\n", getStringFromSheet(resultSheet));
+
+ // Test function queries
+ rsp.setReturnFields( new SolrReturnFields("sum(1,1),id,exists(foo_s1),div(9,1),foo_f", req) );
+ resultSheet = getWSResultForQuery(req, rsp);
+ assertEquals("sum(1,1),id,exists(foo_s1),div(9,1),foo_f\n" +
+ ",1,,,1.414\n" +
+ ",2,,,\n", getStringFromSheet(resultSheet));
+
+ // Test transformers
+ rsp.setReturnFields( new SolrReturnFields("mydocid:[docid],[explain]", req) );
+ resultSheet = getWSResultForQuery(req, rsp);
+ assertEquals("mydocid,[explain]\n" +
+ ",\n" +
+ ",\n", getStringFromSheet(resultSheet));
+
+ req.close();
+ }
+
+
+ @Test
+ public void testPseudoFields() throws Exception {
+ // Use Pseudo Field
+ SolrQueryRequest req = req("q","id:1", "wt","xlsx", "fl","XXX:id,foo_s");
+ XSSFSheet resultSheet = getWSResultForQuery(req);
+ assertEquals("XXX,foo_s\n1,hi\n", getStringFromSheet(resultSheet));
+
+ String txt = getStringFromSheet(getWSResultForQuery(req("q","id:1", "wt","xlsx", "fl","XXX:id,YYY:[docid],FOO:foo_s")));
+ String[] lines = txt.split("\n");
+ assertEquals(2, lines.length);
+ assertEquals("XXX,YYY,FOO", lines[0] );
+ assertEquals("1,0,hi", lines[1] );
+
+ //assertions specific to multiple pseudofields functions like abs, div, exists, etc.. (SOLR-5423)
+ String funcText = getStringFromSheet(getWSResultForQuery(req("q","*", "wt","xlsx", "fl","XXX:id,YYY:exists(foo_s1)")));
+ String[] funcLines = funcText.split("\n");
+ assertEquals(5, funcLines.length);
+ assertEquals("XXX,YYY", funcLines[0] );
+ assertEquals("1,false", funcLines[1] );
+ assertEquals("3,false", funcLines[3] );
+ }
+
+ // returns first worksheet as XLSXResponseWriter only returns one sheet
+ private XSSFSheet getWSResultForQuery(SolrQueryRequest req) throws IOException, Exception {
+ SolrQueryResponse rsp = h.queryAndResponse("standard", req);
+ return getWSResultForQuery(req, rsp);
+ }
+
+ private XSSFSheet getWSResultForQuery(SolrQueryRequest req, SolrQueryResponse rsp) throws IOException, Exception {
+ ByteArrayOutputStream xmlBout = new ByteArrayOutputStream();
+ writerXlsx.write(xmlBout, req, rsp);
+ XSSFWorkbook output = new XSSFWorkbook(new ByteArrayInputStream(xmlBout.toByteArray()));
+ XSSFSheet sheet = output.getSheetAt(0);
+ req.close();
+ output.close();
+ return sheet;
+ }
+
+ private String getStringFromSheet(XSSFSheet sheet) {
+ StringBuilder output = new StringBuilder();
+ for (Row row: sheet) {
+ for (Cell cell: row) {
+ output.append(cell.getStringCellValue());
+ output.append(",");
+ }
+ output.setLength(output.length() - 1);
+ output.append("\n");
+ }
+ return output.toString();
+ }
+}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a61fb68/solr/core/src/java/org/apache/solr/core/SolrCore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrCore.java b/solr/core/src/java/org/apache/solr/core/SolrCore.java
index 90bcd34..c837fba 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrCore.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrCore.java
@@ -2215,6 +2215,12 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
m.put("smile", new SmileResponseWriter());
m.put(ReplicationHandler.FILE_STREAM, getFileStreamWriter());
DEFAULT_RESPONSE_WRITERS = Collections.unmodifiableMap(m);
+ try {
+ m.put("xlsx",
+ (QueryResponseWriter) Class.forName("org.apache.solr.handler.extraction.XLSXResponseWriter").newInstance());
+ } catch (Exception e) {
+ //don't worry; solrcell contrib not in class path
+ }
}
private static BinaryResponseWriter getFileStreamWriter() {
@@ -2237,7 +2243,7 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
}
public interface RawWriter {
- public void write(OutputStream os) throws IOException ;
+ void write(OutputStream os) throws IOException ;
}
/** Configure the query response writers. There will always be a default writer; additional
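The xlsx hunk above registers the writer via Class.forName so solr-core picks it up when the extraction contrib is on the classpath without taking a compile-time dependency; the put on the backing map m still takes effect after the unmodifiableMap call because that wrapper is a view, not a copy. The reflective pattern in isolation (the class name is made up):

    import java.util.HashMap;
    import java.util.Map;

    public class OptionalRegistration {
      public static void main(String[] args) {
        Map<String, Object> registry = new HashMap<>();
        try {
          // Load the class only if it is present on the classpath.
          Class<?> clazz = Class.forName("com.example.OptionalPlugin");
          registry.put("optional", clazz.newInstance());
        } catch (ReflectiveOperationException e) {
          // jar absent; continue without the optional entry
        }
        System.out.println("registered: " + registry.keySet());
      }
    }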
[06/50] [abbrv] lucene-solr:apiv2: simplify test to use
CannedTokenStream
Posted by no...@apache.org.
simplify test to use CannedTokenStream
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/13acba8b
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/13acba8b
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/13acba8b
Branch: refs/heads/apiv2
Commit: 13acba8b4e712222a35f61412ed5b1f45cbec3ee
Parents: f6253d5
Author: Robert Muir <rm...@apache.org>
Authored: Thu Aug 25 12:07:53 2016 -0400
Committer: Robert Muir <rm...@apache.org>
Committed: Thu Aug 25 12:07:53 2016 -0400
----------------------------------------------------------------------
.../analysis/miscellaneous/TestTrimFilter.java | 46 +-------------------
1 file changed, 2 insertions(+), 44 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/13acba8b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java
index 75a6e74..380f52a 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java
@@ -20,17 +20,12 @@ import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.CannedTokenStream;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
/**
*/
@@ -43,7 +38,7 @@ public class TestTrimFilter extends BaseTokenStreamTestCase {
char[] whitespace = " ".toCharArray();
char[] empty = "".toCharArray();
- TokenStream ts = new IterTokenStream(new Token(new String(a, 0, a.length), 1, 5),
+ TokenStream ts = new CannedTokenStream(new Token(new String(a, 0, a.length), 1, 5),
new Token(new String(b, 0, b.length), 6, 10),
new Token(new String(ccc, 0, ccc.length), 11, 15),
new Token(new String(whitespace, 0, whitespace.length), 16, 20),
@@ -53,43 +48,6 @@ public class TestTrimFilter extends BaseTokenStreamTestCase {
assertTokenStreamContents(ts, new String[] { "a", "b", "cCc", "", ""});
}
- /**
- * @deprecated (3.0) does not support custom attributes
- */
- @Deprecated
- private static class IterTokenStream extends TokenStream {
- final Token tokens[];
- int index = 0;
- CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
- OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
- PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
- FlagsAttribute flagsAtt = addAttribute(FlagsAttribute.class);
- TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
- PayloadAttribute payloadAtt = addAttribute(PayloadAttribute.class);
-
- public IterTokenStream(Token... tokens) {
- super();
- this.tokens = tokens;
- }
-
- @Override
- public boolean incrementToken() throws IOException {
- if (index >= tokens.length)
- return false;
- else {
- clearAttributes();
- Token token = tokens[index++];
- termAtt.setEmpty().append(token);
- offsetAtt.setOffset(token.startOffset(), token.endOffset());
- posIncAtt.setPositionIncrement(token.getPositionIncrement());
- flagsAtt.setFlags(token.getFlags());
- typeAtt.setType(token.type());
- payloadAtt.setPayload(token.getPayload());
- return true;
- }
- }
- }
-
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
Analyzer a = new Analyzer() {