You are viewing a plain text version of this content; the hyperlink to the canonical version was lost in text extraction.
Posted to commits@lucene.apache.org by sh...@apache.org on 2016/09/12 09:33:38 UTC
[01/15] lucene-solr:branch_6_2: SOLR-6744: Consider uniqueKey rename
when handling shard responses in distributed search
Repository: lucene-solr
Updated Branches:
refs/heads/branch_6_2 272ceb7c0 -> b34f9b6fe
SOLR-6744: Consider uniqueKey rename when handling shard responses in distributed search
(cherry picked from commit 43d0343)
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/11f2f006
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/11f2f006
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/11f2f006
Branch: refs/heads/branch_6_2
Commit: 11f2f0066ed770e5f938cb5c5d714c6e3c3029c3
Parents: 272ceb7
Author: Tomas Fernandez Lobbe <tf...@apache.org>
Authored: Sat Aug 27 04:40:48 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Mon Sep 12 09:57:27 2016 +0530
----------------------------------------------------------------------
.../solr/handler/component/QueryComponent.java | 5 ++++-
.../java/org/apache/solr/search/ReturnFields.java | 7 +++++++
.../org/apache/solr/search/SolrReturnFields.java | 11 +++++++++++
.../DistributedQueryComponentCustomSortTest.java | 15 +++++++++++++--
4 files changed, 35 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/11f2f006/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
index 722241a..fe4bceb 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
@@ -1306,6 +1306,10 @@ public class QueryComponent extends SearchComponent
String keyFieldName = rb.req.getSchema().getUniqueKeyField().getName();
boolean removeKeyField = !rb.rsp.getReturnFields().wantsField(keyFieldName);
+ if (rb.rsp.getReturnFields().getFieldRenames().get(keyFieldName) != null) {
+ // if id was renamed we need to use the new name
+ keyFieldName = rb.rsp.getReturnFields().getFieldRenames().get(keyFieldName);
+ }
for (ShardResponse srsp : sreq.responses) {
if (srsp.getException() != null) {
@@ -1331,7 +1335,6 @@ public class QueryComponent extends SearchComponent
continue;
}
SolrDocumentList docs = (SolrDocumentList) srsp.getSolrResponse().getResponse().get("response");
-
for (SolrDocument doc : docs) {
Object id = doc.getFieldValue(keyFieldName);
ShardDoc sdoc = rb.resultIds.get(id.toString());
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/11f2f006/solr/core/src/java/org/apache/solr/search/ReturnFields.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/ReturnFields.java b/solr/core/src/java/org/apache/solr/search/ReturnFields.java
index dabfdd6..ec2b878 100644
--- a/solr/core/src/java/org/apache/solr/search/ReturnFields.java
+++ b/solr/core/src/java/org/apache/solr/search/ReturnFields.java
@@ -16,6 +16,7 @@
*/
package org.apache.solr.search;
+import java.util.Map;
import java.util.Set;
import org.apache.solr.response.transform.DocTransformer;
@@ -53,6 +54,12 @@ public abstract class ReturnFields {
*/
public abstract Set<String> getRequestedFieldNames();
+ /**
+ * Get the fields which have been renamed
+ * @return a mapping of renamed fields
+ */
+ public abstract Map<String,String> getFieldRenames();
+
/** Returns <code>true</code> if the specified field should be returned. */
public abstract boolean wantsField(String name);
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/11f2f006/solr/core/src/java/org/apache/solr/search/SolrReturnFields.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/SolrReturnFields.java b/solr/core/src/java/org/apache/solr/search/SolrReturnFields.java
index 6382f45..2b1b303 100644
--- a/solr/core/src/java/org/apache/solr/search/SolrReturnFields.java
+++ b/solr/core/src/java/org/apache/solr/search/SolrReturnFields.java
@@ -35,9 +35,11 @@ import org.apache.solr.response.transform.TransformerFactory;
import org.apache.solr.response.transform.ValueSourceAugmenter;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
+import java.util.Map;
import java.util.Set;
/**
@@ -64,6 +66,7 @@ public class SolrReturnFields extends ReturnFields {
protected DocTransformer transformer;
protected boolean _wantsScore = false;
protected boolean _wantsAllFields = false;
+ protected Map<String,String> renameFields = Collections.emptyMap();
public SolrReturnFields() {
_wantsAllFields = true;
@@ -129,6 +132,9 @@ public class SolrReturnFields extends ReturnFields {
}
augmenters.addTransformer( new RenameFieldTransformer( from, to, copy ) );
}
+ if (rename.size() > 0 ) {
+ renameFields = rename.asShallowMap();
+ }
if( !_wantsAllFields && !globs.isEmpty() ) {
// TODO??? need to fill up the fields with matching field names in the index
// and add them to okFieldNames?
@@ -145,6 +151,11 @@ public class SolrReturnFields extends ReturnFields {
}
}
+ @Override
+ public Map<String,String> getFieldRenames() {
+ return renameFields;
+ }
+
// like getId, but also accepts dashes for legacy fields
public static String getFieldName(StrParser sp) {
sp.eatws();
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/11f2f006/solr/core/src/test/org/apache/solr/handler/component/DistributedQueryComponentCustomSortTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/component/DistributedQueryComponentCustomSortTest.java b/solr/core/src/test/org/apache/solr/handler/component/DistributedQueryComponentCustomSortTest.java
index 9e9401b..4b3e92a 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/DistributedQueryComponentCustomSortTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/DistributedQueryComponentCustomSortTest.java
@@ -60,12 +60,19 @@ public class DistributedQueryComponentCustomSortTest extends BaseDistributedSear
index(id, "12", "text", "d", "payload", ByteBuffer.wrap(new byte[] { 0x34, (byte)0xdd, 0x4d })); // 7
index(id, "13", "text", "d", "payload", ByteBuffer.wrap(new byte[] { (byte)0x80, 0x11, 0x33 })); // 12
commit();
-
+
QueryResponse rsp;
+
rsp = query("q", "*:*", "fl", "id", "sort", "payload asc", "rows", "20");
assertFieldValues(rsp.getResults(), id, 7, 1, 6, 4, 2, 10, 12, 3, 5, 9, 8, 13, 11);
rsp = query("q", "*:*", "fl", "id", "sort", "payload desc", "rows", "20");
assertFieldValues(rsp.getResults(), id, 11, 13, 8, 9, 5, 3, 12, 10, 2, 4, 6, 1, 7);
+
+ // SOLR-6744
+ rsp = query("q", "*:*", "fl", "key:id", "sort", "payload asc", "rows", "20");
+ assertFieldValues(rsp.getResults(), "key", 7, 1, 6, 4, 2, 10, 12, 3, 5, 9, 8, 13, 11);
+ rsp = query("q", "*:*", "fl", "key:id,id:text", "sort", "payload asc", "rows", "20");
+ assertFieldValues(rsp.getResults(), "key", 7, 1, 6, 4, 2, 10, 12, 3, 5, 9, 8, 13, 11);
rsp = query("q", "text:a", "fl", "id", "sort", "payload asc", "rows", "20");
assertFieldValues(rsp.getResults(), id, 1, 3, 5, 9);
@@ -76,7 +83,11 @@ public class DistributedQueryComponentCustomSortTest extends BaseDistributedSear
assertFieldValues(rsp.getResults(), id, 4, 2, 10);
rsp = query("q", "text:b", "fl", "id", "sort", "payload desc", "rows", "20");
assertFieldValues(rsp.getResults(), id, 10, 2, 4);
-
+
+ // SOLR-6744
+ rsp = query("q", "text:b", "fl", "key:id", "sort", "payload asc", "rows", "20");
+ assertFieldValues(rsp.getResults(), id, null, null, null);
+
rsp = query("q", "text:c", "fl", "id", "sort", "payload asc", "rows", "20");
assertFieldValues(rsp.getResults(), id, 7, 6, 8);
rsp = query("q", "text:c", "fl", "id", "sort", "payload desc", "rows", "20");
[11/15] lucene-solr:branch_6_2: SOLR-9381: Snitch for freedisk uses
'/' instead of 'coreRootDirectory'
Posted by sh...@apache.org.
SOLR-9381: Snitch for freedisk uses '/' instead of 'coreRootDirectory'
(cherry picked from commit ced96c5)
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/f4673ca4
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/f4673ca4
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/f4673ca4
Branch: refs/heads/branch_6_2
Commit: f4673ca4763964950eaedf2e172f2f5015669da0
Parents: 9d3a2a0
Author: Noble Paul <no...@apache.org>
Authored: Fri Sep 2 17:53:02 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Mon Sep 12 10:10:34 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 2 ++
.../java/org/apache/solr/cloud/rule/ImplicitSnitch.java | 10 +++++-----
.../src/test/org/apache/solr/cloud/rule/RulesTest.java | 7 ++++---
3 files changed, 11 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f4673ca4/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 8028b01..cb04779 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -46,6 +46,8 @@ Bug Fixes
* SOLR-9461: DELETENODE, REPLACENODE should pass down the 'async' param to subcommands (shalin, noble)
* SOLR-9444: Fix path usage for cloud backup/restore. (Hrishikesh Gadre, Uwe Schindler, Varun Thacker)
+
+* SOLR-9381: Snitch for freedisk uses '/' instead of 'coreRootDirectory' (Tim Owen, noble)
================== 6.2.0 ==================
Versions of Major Components
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f4673ca4/solr/core/src/java/org/apache/solr/cloud/rule/ImplicitSnitch.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/rule/ImplicitSnitch.java b/solr/core/src/java/org/apache/solr/cloud/rule/ImplicitSnitch.java
index ac1d7ad..09f8e2c 100644
--- a/solr/core/src/java/org/apache/solr/cloud/rule/ImplicitSnitch.java
+++ b/solr/core/src/java/org/apache/solr/cloud/rule/ImplicitSnitch.java
@@ -20,7 +20,7 @@ import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.net.InetAddress;
import java.nio.file.Files;
-import java.nio.file.Paths;
+import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
@@ -100,21 +100,21 @@ public class ImplicitSnitch extends Snitch implements CoreAdminHandler.Invocable
}
}
- static long getUsableSpaceInGB() throws IOException {
- long space = Files.getFileStore(Paths.get("/")).getUsableSpace();
+ static long getUsableSpaceInGB(Path path) throws IOException {
+ long space = Files.getFileStore(path).getUsableSpace();
long spaceInGB = space / 1024 / 1024 / 1024;
return spaceInGB;
}
public Map<String, Object> invoke(SolrQueryRequest req) {
Map<String, Object> result = new HashMap<>();
+ CoreContainer cc = (CoreContainer) req.getContext().get(CoreContainer.class.getName());
if (req.getParams().getInt(CORES, -1) == 1) {
- CoreContainer cc = (CoreContainer) req.getContext().get(CoreContainer.class.getName());
result.put(CORES, cc.getCoreNames().size());
}
if (req.getParams().getInt(DISK, -1) == 1) {
try {
- final long spaceInGB = getUsableSpaceInGB();
+ final long spaceInGB = getUsableSpaceInGB(cc.getCoreRootDirectory());
result.put(DISK, spaceInGB);
} catch (IOException e) {
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f4673ca4/solr/core/src/test/org/apache/solr/cloud/rule/RulesTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/rule/RulesTest.java b/solr/core/src/test/org/apache/solr/cloud/rule/RulesTest.java
index f23d475..83f02b1 100644
--- a/solr/core/src/test/org/apache/solr/cloud/rule/RulesTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/rule/RulesTest.java
@@ -18,6 +18,7 @@ package org.apache.solr.cloud.rule;
import java.lang.invoke.MethodHandles;
+import java.nio.file.Paths;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
@@ -52,7 +53,7 @@ public class RulesTest extends AbstractFullDistribZkTestBase {
@ShardsFixed(num = 5)
public void doIntegrationTest() throws Exception {
final long minGB = (random().nextBoolean() ? 1 : 0);
- assumeTrue("doIntegrationTest needs minGB="+minGB+" usable disk space", ImplicitSnitch.getUsableSpaceInGB() > minGB);
+ assumeTrue("doIntegrationTest needs minGB="+minGB+" usable disk space", ImplicitSnitch.getUsableSpaceInGB(Paths.get("/")) > minGB);
String rulesColl = "rulesColl";
try (SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)))) {
CollectionAdminResponse rsp;
@@ -208,8 +209,8 @@ public class RulesTest extends AbstractFullDistribZkTestBase {
public void testModifyColl() throws Exception {
final long minGB1 = (random().nextBoolean() ? 1 : 0);
final long minGB2 = 5;
- assumeTrue("testModifyColl needs minGB1="+minGB1+" usable disk space", ImplicitSnitch.getUsableSpaceInGB() > minGB1);
- assumeTrue("testModifyColl needs minGB2="+minGB2+" usable disk space", ImplicitSnitch.getUsableSpaceInGB() > minGB2);
+ assumeTrue("testModifyColl needs minGB1="+minGB1+" usable disk space", ImplicitSnitch.getUsableSpaceInGB(Paths.get("/")) > minGB1);
+ assumeTrue("testModifyColl needs minGB2="+minGB2+" usable disk space", ImplicitSnitch.getUsableSpaceInGB(Paths.get("/")) > minGB2);
String rulesColl = "modifyColl";
try (SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)))) {
CollectionAdminResponse rsp;
[07/15] lucene-solr:branch_6_2: SOLR-9188: blockUnknown property
makes inter-node communication impossible
Posted by sh...@apache.org.
SOLR-9188: blockUnknown property makes inter-node communication impossible
(cherry picked from commit b3526c5)
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/988c2149
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/988c2149
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/988c2149
Branch: refs/heads/branch_6_2
Commit: 988c2149802285a9be9f8036bf803ca610e27cad
Parents: 83cd8c1
Author: Noble Paul <no...@apache.org>
Authored: Mon Aug 29 18:38:56 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Mon Sep 12 10:06:04 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 2 ++
.../org/apache/solr/security/BasicAuthPlugin.java | 1 +
.../org/apache/solr/servlet/SolrDispatchFilter.java | 1 +
.../solr/security/BasicAuthIntegrationTest.java | 15 ++++++++++++---
4 files changed, 16 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/988c2149/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 6d99b02..4856048 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -38,6 +38,8 @@ Bug Fixes
compatibility. (Uwe Schindler, Boris Steiner)
* SOLR-9389: HDFS Transaction logs stay open for writes which leaks Xceivers. (Tim Owen via Mark Miller)
+
+* SOLR-9188: blockUnknown property makes inter-node communication impossible (noble)
================== 6.2.0 ==================
Versions of Major Components
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/988c2149/solr/core/src/java/org/apache/solr/security/BasicAuthPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/security/BasicAuthPlugin.java b/solr/core/src/java/org/apache/solr/security/BasicAuthPlugin.java
index e3f53a2..49c02d7 100644
--- a/solr/core/src/java/org/apache/solr/security/BasicAuthPlugin.java
+++ b/solr/core/src/java/org/apache/solr/security/BasicAuthPlugin.java
@@ -71,6 +71,7 @@ public class BasicAuthPlugin extends AuthenticationPlugin implements ConfigEdita
for (Map.Entry<String, Object> e : command.getDataMap().entrySet()) {
if (PROPS.contains(e.getKey())) {
latestConf.put(e.getKey(), e.getValue());
+ return latestConf;
} else {
command.addError("Unknown property " + e.getKey());
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/988c2149/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java b/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
index 4a680e5..1b8c386 100644
--- a/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
+++ b/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
@@ -302,6 +302,7 @@ public class SolrDispatchFilter extends BaseSolrFilter {
if (authenticationPlugin == null) {
return true;
} else {
+ if( PKIAuthenticationPlugin.PATH.equals(((HttpServletRequest) request).getPathInfo()) ) return true;
//special case when solr is securing inter-node requests
String header = ((HttpServletRequest) request).getHeader(PKIAuthenticationPlugin.HEADER);
if (header != null && cores.getPkiAuthenticationPlugin() != null)
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/988c2149/solr/core/src/test/org/apache/solr/security/BasicAuthIntegrationTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/security/BasicAuthIntegrationTest.java b/solr/core/src/test/org/apache/solr/security/BasicAuthIntegrationTest.java
index 5cb322f..f0d8b60 100644
--- a/solr/core/src/test/org/apache/solr/security/BasicAuthIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/security/BasicAuthIntegrationTest.java
@@ -194,7 +194,7 @@ public class BasicAuthIntegrationTest extends TestMiniSolrCloudClusterBase {
cloudSolrClient.request(update);
- executeCommand(baseUrl + authzPrefix, cl, "{set-property : { blockUnknown: true}}", "harry", "HarryIsUberCool");
+ executeCommand(baseUrl + authcPrefix, cl, "{set-property : { blockUnknown: true}}", "harry", "HarryIsUberCool");
String[] toolArgs = new String[]{
"status", "-solr", baseUrl};
ByteArrayOutputStream baos = new ByteArrayOutputStream();
@@ -212,7 +212,7 @@ public class BasicAuthIntegrationTest extends TestMiniSolrCloudClusterBase {
log.error("RunExampleTool failed due to: " + e +
"; stdout from tool prior to failure: " + baos.toString(StandardCharsets.UTF_8.name()));
}
- executeCommand(baseUrl + authzPrefix, cl, "{set-property : { blockUnknown: false}}", "harry", "HarryIsUberCool");
+ executeCommand(baseUrl + authcPrefix, cl, "{set-property : { blockUnknown: false}}", "harry", "HarryIsUberCool");
} finally {
if (cl != null) {
HttpClientUtil.close(cl);
@@ -232,12 +232,21 @@ public class BasicAuthIntegrationTest extends TestMiniSolrCloudClusterBase {
Utils.consumeFully(r.getEntity());
}
- public static void verifySecurityStatus(HttpClient cl, String url, String objPath, Object expected, int count) throws Exception {
+ public static void verifySecurityStatus(HttpClient cl, String url, String objPath,
+ Object expected, int count) throws Exception {
+ verifySecurityStatus(cl, url, objPath, expected, count, null, null);
+ }
+
+
+ public static void verifySecurityStatus(HttpClient cl, String url, String objPath,
+ Object expected, int count, String user, String pwd)
+ throws Exception {
boolean success = false;
String s = null;
List<String> hierarchy = StrUtils.splitSmart(objPath, '/');
for (int i = 0; i < count; i++) {
HttpGet get = new HttpGet(url);
+ if (user != null) setBasicAuthHeader(get, user, pwd);
HttpResponse rsp = cl.execute(get);
s = EntityUtils.toString(rsp.getEntity());
Map m = (Map) Utils.fromJSONString(s);
[12/15] lucene-solr:branch_6_2: SOLR-9488: Shard split can fail to
write commit data on shutdown/restart causing replicas to recover without
replicating the index (cherry picked from commit 0c5c0df)
Posted by sh...@apache.org.
SOLR-9488: Shard split can fail to write commit data on shutdown/restart causing replicas to recover without replicating the index
(cherry picked from commit 0c5c0df)
(cherry picked from commit 7370407)
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/0af3d4e8
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/0af3d4e8
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/0af3d4e8
Branch: refs/heads/branch_6_2
Commit: 0af3d4e8e6ff1fa38d59739f30cfffea65a8033c
Parents: f4673ca
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Wed Sep 7 21:06:50 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Mon Sep 12 10:11:20 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 3 +
.../org/apache/solr/handler/IndexFetcher.java | 6 +-
.../solr/update/DirectUpdateHandler2.java | 3 +
.../apache/solr/update/SolrIndexSplitter.java | 16 +-
.../org/apache/solr/cloud/ShardSplitTest.java | 152 +++++++++++++++++++
5 files changed, 177 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0af3d4e8/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index cb04779..64a0b9d 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -48,6 +48,9 @@ Bug Fixes
* SOLR-9444: Fix path usage for cloud backup/restore. (Hrishikesh Gadre, Uwe Schindler, Varun Thacker)
* SOLR-9381: Snitch for freedisk uses '/' instead of 'coreRootDirectory' (Tim Owen, noble)
+
+* SOLR-9488: Shard split can fail to write commit data on shutdown/restart causing replicas to recover
+ without replicating the index. This can cause data loss. (shalin)
================== 6.2.0 ==================
Versions of Major Components
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0af3d4e8/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
index 634a9e0..c66b3ab 100644
--- a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
+++ b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
@@ -294,6 +294,9 @@ public class IndexFetcher {
long latestVersion = (Long) response.get(CMD_INDEX_VERSION);
long latestGeneration = (Long) response.get(GENERATION);
+ LOG.info("Master's generation: " + latestGeneration);
+ LOG.info("Master's version: " + latestVersion);
+
// TODO: make sure that getLatestCommit only returns commit points for the main index (i.e. no side-car indexes)
IndexCommit commit = solrCore.getDeletionPolicy().getLatestCommit();
if (commit == null) {
@@ -312,6 +315,7 @@ public class IndexFetcher {
}
}
+ LOG.info("Slave's generation: " + commit.getGeneration());
if (latestVersion == 0L) {
if (forceReplication && commit.getGeneration() != 0) {
@@ -339,8 +343,6 @@ public class IndexFetcher {
successfulInstall = true;
return true;
}
- LOG.info("Master's generation: " + latestGeneration);
- LOG.info("Slave's generation: " + commit.getGeneration());
LOG.info("Starting replication process");
// get the list of files first
fetchFileList(latestGeneration);
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0af3d4e8/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java b/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
index 0bdefa7..12552cd 100644
--- a/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
+++ b/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
@@ -519,6 +519,9 @@ public class DirectUpdateHandler2 extends UpdateHandler implements SolrCoreState
@SuppressForbidden(reason = "Need currentTimeMillis, commit time should be used only for debugging purposes, " +
" but currently suspiciously used for replication as well")
+ /*
+ Also see SolrIndexSplitter.setCommitData
+ */
private void setCommitData(IndexWriter iw) {
final Map<String,String> commitData = new HashMap<>();
commitData.put(SolrIndexWriter.COMMIT_TIME_MSEC_KEY,
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0af3d4e8/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java b/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
index 5f1ea0e..14e7063 100644
--- a/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
+++ b/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
@@ -19,7 +19,9 @@ package org.apache.solr.update;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import org.apache.lucene.index.CodecReader;
import org.apache.lucene.index.FilterCodecReader;
@@ -40,6 +42,7 @@ import org.apache.lucene.util.IOUtils;
import org.apache.solr.common.cloud.CompositeIdRouter;
import org.apache.solr.common.cloud.DocRouter;
import org.apache.solr.common.cloud.HashBasedRouter;
+import org.apache.solr.common.util.SuppressForbidden;
import org.apache.solr.core.SolrCore;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.search.BitsFilteredPostingsEnum;
@@ -134,6 +137,11 @@ public class SolrIndexSplitter {
CodecReader subReader = SlowCodecReaderWrapper.wrap(leaves.get(segmentNumber).reader());
iw.addIndexes(new LiveDocsReader(subReader, segmentDocSets.get(segmentNumber)[partitionNumber]));
}
+ // we commit explicitly instead of sending a CommitUpdateCommand through the processor chain
+ // because the sub-shard cores will just ignore such a commit because the update log is not
+ // in active state at this time.
+ setCommitData(iw);
+ iw.commit();
success = true;
} finally {
if (iwRef != null) {
@@ -151,7 +159,13 @@ public class SolrIndexSplitter {
}
-
+ @SuppressForbidden(reason = "Need currentTimeMillis, commit time should be used only for debugging purposes, " +
+ " but currently suspiciously used for replication as well")
+ private void setCommitData(IndexWriter iw) {
+ final Map<String,String> commitData = new HashMap<>();
+ commitData.put(SolrIndexWriter.COMMIT_TIME_MSEC_KEY, String.valueOf(System.currentTimeMillis()));
+ iw.setLiveCommitData(commitData.entrySet());
+ }
FixedBitSet[] split(LeafReaderContext readerContext) throws IOException {
LeafReader reader = readerContext.reader();
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0af3d4e8/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
index 389660f..0000e7b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
@@ -25,6 +25,8 @@ import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
import org.apache.http.params.CoreConnectionPNames;
import org.apache.lucene.util.LuceneTestCase.Slow;
@@ -32,14 +34,17 @@ import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;
import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.client.solrj.response.RequestStatusState;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.CollectionStateWatcher;
import org.apache.solr.common.cloud.CompositeIdRouter;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.DocRouter;
@@ -57,6 +62,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
+import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
@@ -72,6 +78,12 @@ public class ShardSplitTest extends BasicDistributedZkTest {
schemaString = "schema15.xml"; // we need a string id
}
+ @Override
+ public void distribSetUp() throws Exception {
+ super.distribSetUp();
+ useFactory(null);
+ }
+
@Test
public void test() throws Exception {
@@ -92,6 +104,146 @@ public class ShardSplitTest extends BasicDistributedZkTest {
//waitForThingsToLevelOut(15);
}
+ /*
+ Creates a collection with replicationFactor=1, splits a shard. Restarts the sub-shard leader node.
+ Add a replica. Ensure count matches in leader and replica.
+ */
+ public void testSplitStaticIndexReplication() throws Exception {
+ waitForThingsToLevelOut(15);
+
+ // Pick an arbitrary replica of the default collection just to learn a concrete node name;
+ // the new collection's single leader will be pinned to that node so we know which jetty to restart.
+ DocCollection defCol = cloudClient.getZkStateReader().getClusterState().getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+ Replica replica = defCol.getReplicas().get(0);
+ String nodeName = replica.getNodeName();
+
+ String collectionName = "testSplitStaticIndexReplication";
+ CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 1);
+ create.setMaxShardsPerNode(5); // some high number so we can create replicas without hindrance
+ create.setCreateNodeSet(nodeName); // we want to create the leader on a fixed node so that we know which one to restart later
+ create.process(cloudClient);
+ try (CloudSolrClient client = getCloudSolrClient(zkServer.getZkAddress(), true, cloudClient.getLbClient().getHttpClient())) {
+ client.setDefaultCollection(collectionName);
+ StoppableIndexingThread thread = new StoppableIndexingThread(controlClient, client, "i1", true);
+ try {
+ // index a small batch of documents, then stop so the index is static before the split
+ thread.start();
+ Thread.sleep(1000); // give the indexer some time to do its work
+ thread.safeStop();
+ thread.join();
+ client.commit();
+ controlClient.commit();
+
+ // split SHARD1 asynchronously and poll the request status (up to 120s) for completion
+ CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(collectionName);
+ splitShard.setShardName(SHARD1);
+ String asyncId = splitShard.processAsync(client);
+ RequestStatusState state = CollectionAdminRequest.requestStatus(asyncId).waitFor(client, 120);
+ if (state == RequestStatusState.COMPLETED) {
+ waitForRecoveriesToFinish(collectionName, true);
+ // let's wait to see the parent shard become inactive and both sub-shards become active
+ CountDownLatch latch = new CountDownLatch(1);
+ client.getZkStateReader().registerCollectionStateWatcher(collectionName, new CollectionStateWatcher() {
+ @Override
+ public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
+ Slice parent = collectionState.getSlice(SHARD1);
+ Slice slice10 = collectionState.getSlice(SHARD1_0);
+ Slice slice11 = collectionState.getSlice(SHARD1_1);
+ if (slice10 != null && slice11 != null &&
+ parent.getState() == Slice.State.INACTIVE &&
+ slice10.getState() == Slice.State.ACTIVE &&
+ slice11.getState() == Slice.State.ACTIVE) {
+ latch.countDown();
+ return true; // removes the watch
+ }
+ return false;
+ }
+ });
+ latch.await(1, TimeUnit.MINUTES);
+ if (latch.getCount() != 0) {
+ // sanity check
+ fail("Sub-shards did not become active even after waiting for 1 minute");
+ }
+
+ // snapshot the live-node count before the restart so the watcher below can tell
+ // when the restarted node has rejoined the cluster
+ int liveNodeCount = client.getZkStateReader().getClusterState().getLiveNodes().size();
+
+ // restart the sub-shard leader node (matched by the port embedded in the replica's base URL)
+ boolean restarted = false;
+ for (JettySolrRunner jetty : jettys) {
+ int port = jetty.getBaseUrl().getPort();
+ if (replica.getStr(BASE_URL_PROP).contains(":" + port)) {
+ ChaosMonkey.kill(jetty);
+ ChaosMonkey.start(jetty);
+ restarted = true;
+ break;
+ }
+ }
+ if (!restarted) {
+ // sanity check
+ fail("We could not find a jetty to kill for replica: " + replica.getCoreUrl());
+ }
+
+ // add a new replica for the sub-shard
+ CollectionAdminRequest.AddReplica addReplica = CollectionAdminRequest.addReplicaToShard(collectionName, SHARD1_0);
+ // use control client because there is less chance of it being the node being restarted;
+ // this is to avoid flakiness of the test because of NoHttpResponseExceptions
+ String control_collection = client.getZkStateReader().getClusterState().getCollection("control_collection").getReplicas().get(0).getStr(BASE_URL_PROP);
+ try (HttpSolrClient control = new HttpSolrClient.Builder(control_collection).withHttpClient(client.getLbClient().getHttpClient()).build()) {
+ state = addReplica.processAndWait(control, 30);
+ }
+ if (state == RequestStatusState.COMPLETED) {
+ // wait until the restarted node is live again AND both sub-shard replicas are out of recovery
+ CountDownLatch newReplicaLatch = new CountDownLatch(1);
+ client.getZkStateReader().registerCollectionStateWatcher(collectionName, new CollectionStateWatcher() {
+ @Override
+ public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
+ if (liveNodes.size() != liveNodeCount) {
+ return false;
+ }
+ Slice slice = collectionState.getSlice(SHARD1_0);
+ if (slice.getReplicas().size() == 2) {
+ if (!slice.getReplicas().stream().anyMatch(r -> r.getState() == Replica.State.RECOVERING)) {
+ // we see both replicas and none of them are recovering
+ newReplicaLatch.countDown();
+ return true;
+ }
+ }
+ return false;
+ }
+ });
+ newReplicaLatch.await(30, TimeUnit.SECONDS);
+ // check consistency of the sub-shard replicas explicitly because the checkShardConsistency
+ // method doesn't handle new shards/replicas well.
+ ClusterState clusterState = client.getZkStateReader().getClusterState();
+ DocCollection collection = clusterState.getCollection(collectionName);
+ int numReplicasChecked = assertConsistentReplicas(collection.getSlice(SHARD1_0));
+ assertEquals("We should have checked consistency for exactly 2 replicas of shard1_0", 2, numReplicasChecked);
+ } else {
+ fail("Adding a replica to sub-shard did not complete even after waiting for 30 seconds!. Saw state = " + state.getKey());
+ }
+ } else {
+ fail("We expected shard split to succeed on a static index but it didn't. Found state = " + state.getKey());
+ }
+ } finally {
+ thread.safeStop();
+ thread.join();
+ }
+ }
+ }
+
+ /**
+ * Asserts that all replicas of the given shard report the same document count when
+ * queried directly (distrib=false) against each replica's core URL.
+ *
+ * @param shard the slice whose replicas are to be compared
+ * @return the number of replicas that were checked
+ * @throws SolrServerException if a query to a replica fails
+ * @throws IOException if closing a client or communicating with a replica fails
+ */
+ private int assertConsistentReplicas(Slice shard) throws SolrServerException, IOException {
+ long numFound = Long.MIN_VALUE; // sentinel: no replica queried yet
+ int count = 0;
+ for (Replica replica : shard.getReplicas()) {
+ // try-with-resources: the original leaked the client; SolrClient is Closeable and
+ // must be closed to release its connection resources
+ try (HttpSolrClient client = new HttpSolrClient.Builder(replica.getCoreUrl())
+ .withHttpClient(cloudClient.getLbClient().getHttpClient()).build()) {
+ QueryResponse response = client.query(new SolrQuery("q", "*:*", "distrib", "false"));
+ log.info("Found numFound={} on replica: {}", response.getResults().getNumFound(), replica.getCoreUrl());
+ if (numFound == Long.MIN_VALUE) {
+ // first replica establishes the expected count
+ numFound = response.getResults().getNumFound();
+ } else {
+ assertEquals("Shard " + shard.getName() + " replicas do not have same number of documents", numFound, response.getResults().getNumFound());
+ }
+ count++;
+ }
+ }
+ return count;
+ }
+
/**
* Used to test that we can split a shard when a previous split event
* left sub-shards in construction or recovery state.
[14/15] lucene-solr:branch_6_2: SOLR-9461: Remove unused imports
Posted by sh...@apache.org.
SOLR-9461: Remove unused imports
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e5677475
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e5677475
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e5677475
Branch: refs/heads/branch_6_2
Commit: e56774757ece0c11afea16b12b5014d383263f41
Parents: d5e75a4
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Mon Sep 12 13:34:44 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Mon Sep 12 13:34:44 2016 +0530
----------------------------------------------------------------------
solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java | 1 -
solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java | 3 ---
2 files changed, 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e5677475/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java
index 0fd001a..afb95a2 100644
--- a/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java
@@ -27,7 +27,6 @@ import java.util.concurrent.TimeUnit;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.params.CommonAdminParams;
import org.apache.solr.common.util.NamedList;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e5677475/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
index ad02fc0..92c9afe 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
@@ -34,10 +34,8 @@ import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CommonAdminParams;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -45,7 +43,6 @@ import org.slf4j.LoggerFactory;
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.util.StrUtils.formatString;
public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
[02/15] lucene-solr:branch_6_2: SOLR-6744: Consider uniqueKey rename
when handling shard responses in distributed search
Posted by sh...@apache.org.
SOLR-6744: Consider uniqueKey rename when handling shard responses in distributed search
(cherry picked from commit 43d0343)
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/49993513
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/49993513
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/49993513
Branch: refs/heads/branch_6_2
Commit: 49993513a602b1072611120443a83a636972aaf1
Parents: 11f2f00
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Mon Sep 12 09:58:06 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Mon Sep 12 09:58:06 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 2 ++
1 file changed, 2 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/49993513/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 2ba1bf2..12c5d2a 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -25,6 +25,8 @@ Bug Fixes
* SOLR-9494: Use of {!collapse} sometimes doesn't correctly return true for Collector.needsScores(), especially when the
query was cached. This can cause an exception when 'q' is a SpanQuery or potentially others. (David Smiley)
+* SOLR-6744: fl renaming / alias of uniqueKey field generates null pointer exception in SolrCloud configuration
+ (Mike Drob via Tomás Fernández Lóbbe)
================== 6.2.0 ==================
[10/15] lucene-solr:branch_6_2: SOLR-9444: Fix path usage for cloud
backup/restore Merge branch 'SOLR-9444_fix' of
https://github.com/hgadre/lucene-solr This closes #74
Posted by sh...@apache.org.
SOLR-9444: Fix path usage for cloud backup/restore
Merge branch 'SOLR-9444_fix' of https://github.com/hgadre/lucene-solr
This closes #74
(cherry picked from commit 8347944)
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/9d3a2a08
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/9d3a2a08
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/9d3a2a08
Branch: refs/heads/branch_6_2
Commit: 9d3a2a0879be200cc3e4a4e3c2aa0f705ee08c12
Parents: f546ef8
Author: Uwe Schindler <us...@apache.org>
Authored: Fri Sep 2 17:45:09 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Mon Sep 12 10:09:48 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 2 ++
.../java/org/apache/solr/cloud/BackupCmd.java | 6 ++--
.../java/org/apache/solr/cloud/RestoreCmd.java | 6 ++--
.../apache/solr/core/backup/BackupManager.java | 34 +++++++++---------
.../backup/repository/BackupRepository.java | 14 ++++++--
.../backup/repository/HdfsBackupRepository.java | 29 +++++++++++++---
.../repository/LocalFileSystemRepository.java | 36 ++++++++++++--------
.../apache/solr/handler/ReplicationHandler.java | 12 ++++---
.../org/apache/solr/handler/RestoreCore.java | 6 ++--
.../org/apache/solr/handler/SnapShooter.java | 11 +++---
.../solr/handler/admin/CoreAdminOperation.java | 7 ++--
.../cloud/TestLocalFSCloudBackupRestore.java | 10 +++++-
12 files changed, 114 insertions(+), 59 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9d3a2a08/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 6d1bd96..8028b01 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -44,6 +44,8 @@ Bug Fixes
* SOLR-9455: Deleting a sub-shard in recovery state can mark parent shard as inactive. (shalin)
* SOLR-9461: DELETENODE, REPLACENODE should pass down the 'async' param to subcommands (shalin, noble)
+
+* SOLR-9444: Fix path usage for cloud backup/restore. (Hrishikesh Gadre, Uwe Schindler, Varun Thacker)
================== 6.2.0 ==================
Versions of Major Components
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9d3a2a08/solr/core/src/java/org/apache/solr/cloud/BackupCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/BackupCmd.java b/solr/core/src/java/org/apache/solr/cloud/BackupCmd.java
index 679cb07..648eee8 100644
--- a/solr/core/src/java/org/apache/solr/cloud/BackupCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/BackupCmd.java
@@ -62,7 +62,6 @@ public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
String asyncId = message.getStr(ASYNC);
String repo = message.getStr(CoreAdminParams.BACKUP_REPOSITORY);
- String location = message.getStr(CoreAdminParams.BACKUP_LOCATION);
Map<String, String> requestMap = new HashMap<>();
Instant startTime = Instant.now();
@@ -72,7 +71,8 @@ public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
BackupManager backupMgr = new BackupManager(repository, ocmh.zkStateReader, collectionName);
// Backup location
- URI backupPath = repository.createURI(location, backupName);
+ URI location = repository.createURI(message.getStr(CoreAdminParams.BACKUP_LOCATION));
+ URI backupPath = repository.resolve(location, backupName);
//Validating if the directory already exists.
if (repository.exists(backupPath)) {
@@ -94,7 +94,7 @@ public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString());
params.set(NAME, slice.getName());
params.set(CoreAdminParams.BACKUP_REPOSITORY, repo);
- params.set(CoreAdminParams.BACKUP_LOCATION, backupPath.getPath()); // note: index dir will be here then the "snapshot." + slice name
+ params.set(CoreAdminParams.BACKUP_LOCATION, backupPath.toASCIIString()); // note: index dir will be here then the "snapshot." + slice name
params.set(CORE_NAME_PROP, coreName);
ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap);
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9d3a2a08/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java b/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java
index af2215c..63d5686 100644
--- a/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java
@@ -79,13 +79,13 @@ public class RestoreCmd implements OverseerCollectionMessageHandler.Cmd {
ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
String asyncId = message.getStr(ASYNC);
String repo = message.getStr(CoreAdminParams.BACKUP_REPOSITORY);
- String location = message.getStr(CoreAdminParams.BACKUP_LOCATION);
Map<String, String> requestMap = new HashMap<>();
CoreContainer cc = ocmh.overseer.getZkController().getCoreContainer();
BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));
- URI backupPath = repository.createURI(location, backupName);
+ URI location = repository.createURI(message.getStr(CoreAdminParams.BACKUP_LOCATION));
+ URI backupPath = repository.resolve(location, backupName);
ZkStateReader zkStateReader = ocmh.zkStateReader;
BackupManager backupMgr = new BackupManager(repository, zkStateReader, restoreCollectionName);
@@ -195,7 +195,7 @@ public class RestoreCmd implements OverseerCollectionMessageHandler.Cmd {
ModifiableSolrParams params = new ModifiableSolrParams();
params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.RESTORECORE.toString());
params.set(NAME, "snapshot." + slice.getName());
- params.set(CoreAdminParams.BACKUP_LOCATION, backupPath.getPath());
+ params.set(CoreAdminParams.BACKUP_LOCATION, backupPath.toASCIIString());
params.set(CoreAdminParams.BACKUP_REPOSITORY, repo);
ocmh.sliceCmd(clusterState, params, null, slice, shardHandler, asyncId, requestMap);
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9d3a2a08/solr/core/src/java/org/apache/solr/core/backup/BackupManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/backup/BackupManager.java b/solr/core/src/java/org/apache/solr/core/backup/BackupManager.java
index 51227e8..e650553 100644
--- a/solr/core/src/java/org/apache/solr/core/backup/BackupManager.java
+++ b/solr/core/src/java/org/apache/solr/core/backup/BackupManager.java
@@ -87,12 +87,12 @@ public class BackupManager {
* @return the configuration parameters for the specified backup.
* @throws IOException In case of errors.
*/
- public Properties readBackupProperties(String backupLoc, String backupId) throws IOException {
+ public Properties readBackupProperties(URI backupLoc, String backupId) throws IOException {
Preconditions.checkNotNull(backupLoc);
Preconditions.checkNotNull(backupId);
// Backup location
- URI backupPath = repository.createURI(backupLoc, backupId);
+ URI backupPath = repository.resolve(backupLoc, backupId);
if (!repository.exists(backupPath)) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Couldn't restore since doesn't exist: " + backupPath);
}
@@ -113,8 +113,8 @@ public class BackupManager {
* @param props The backup properties
* @throws IOException in case of I/O error
*/
- public void writeBackupProperties(String backupLoc, String backupId, Properties props) throws IOException {
- URI dest = repository.createURI(backupLoc, backupId, BACKUP_PROPS_FILE);
+ public void writeBackupProperties(URI backupLoc, String backupId, Properties props) throws IOException {
+ URI dest = repository.resolve(backupLoc, backupId, BACKUP_PROPS_FILE);
try (Writer propsWriter = new OutputStreamWriter(repository.createOutput(dest), StandardCharsets.UTF_8)) {
props.store(propsWriter, "Backup properties file");
}
@@ -128,10 +128,10 @@ public class BackupManager {
* @return the meta-data information for the backed-up collection.
* @throws IOException in case of errors.
*/
- public DocCollection readCollectionState(String backupLoc, String backupId, String collectionName) throws IOException {
+ public DocCollection readCollectionState(URI backupLoc, String backupId, String collectionName) throws IOException {
Preconditions.checkNotNull(collectionName);
- URI zkStateDir = repository.createURI(backupLoc, backupId, ZK_STATE_DIR);
+ URI zkStateDir = repository.resolve(backupLoc, backupId, ZK_STATE_DIR);
try (IndexInput is = repository.openInput(zkStateDir, COLLECTION_PROPS_FILE, IOContext.DEFAULT)) {
byte[] arr = new byte[(int) is.length()]; // probably ok since the json file should be small.
is.readBytes(arr, 0, (int) is.length());
@@ -149,9 +149,9 @@ public class BackupManager {
* @param collectionState The collection meta-data to be stored.
* @throws IOException in case of I/O errors.
*/
- public void writeCollectionState(String backupLoc, String backupId, String collectionName,
+ public void writeCollectionState(URI backupLoc, String backupId, String collectionName,
DocCollection collectionState) throws IOException {
- URI dest = repository.createURI(backupLoc, backupId, ZK_STATE_DIR, COLLECTION_PROPS_FILE);
+ URI dest = repository.resolve(backupLoc, backupId, ZK_STATE_DIR, COLLECTION_PROPS_FILE);
try (OutputStream collectionStateOs = repository.createOutput(dest)) {
collectionStateOs.write(Utils.toJSON(Collections.singletonMap(collectionName, collectionState)));
}
@@ -166,9 +166,9 @@ public class BackupManager {
* @param targetConfigName The name of the config to be created.
* @throws IOException in case of I/O errors.
*/
- public void uploadConfigDir(String backupLoc, String backupId, String sourceConfigName, String targetConfigName)
+ public void uploadConfigDir(URI backupLoc, String backupId, String sourceConfigName, String targetConfigName)
throws IOException {
- URI source = repository.createURI(backupLoc, backupId, ZK_STATE_DIR, CONFIG_STATE_DIR, sourceConfigName);
+ URI source = repository.resolve(backupLoc, backupId, ZK_STATE_DIR, CONFIG_STATE_DIR, sourceConfigName);
String zkPath = ZkConfigManager.CONFIGS_ZKNODE + "/" + targetConfigName;
uploadToZk(zkStateReader.getZkClient(), source, zkPath);
}
@@ -181,10 +181,10 @@ public class BackupManager {
* @param configName The name of the config to be saved.
* @throws IOException in case of I/O errors.
*/
- public void downloadConfigDir(String backupLoc, String backupId, String configName) throws IOException {
- URI dest = repository.createURI(backupLoc, backupId, ZK_STATE_DIR, CONFIG_STATE_DIR, configName);
- repository.createDirectory(repository.createURI(backupLoc, backupId, ZK_STATE_DIR));
- repository.createDirectory(repository.createURI(backupLoc, backupId, ZK_STATE_DIR, CONFIG_STATE_DIR));
+ public void downloadConfigDir(URI backupLoc, String backupId, String configName) throws IOException {
+ URI dest = repository.resolve(backupLoc, backupId, ZK_STATE_DIR, CONFIG_STATE_DIR, configName);
+ repository.createDirectory(repository.resolve(backupLoc, backupId, ZK_STATE_DIR));
+ repository.createDirectory(repository.resolve(backupLoc, backupId, ZK_STATE_DIR, CONFIG_STATE_DIR));
repository.createDirectory(dest);
downloadFromZK(zkStateReader.getZkClient(), ZkConfigManager.CONFIGS_ZKNODE + "/" + configName, dest);
@@ -201,11 +201,11 @@ public class BackupManager {
if (children.size() == 0) {
log.info("Writing file {}", file);
byte[] data = zkClient.getData(zkPath + "/" + file, null, null, true);
- try (OutputStream os = repository.createOutput(repository.createURI(dir.getPath(), file))) {
+ try (OutputStream os = repository.createOutput(repository.resolve(dir, file))) {
os.write(data);
}
} else {
- downloadFromZK(zkClient, zkPath + "/" + file, repository.createURI(dir.getPath(), file));
+ downloadFromZK(zkClient, zkPath + "/" + file, repository.resolve(dir, file));
}
}
} catch (KeeperException | InterruptedException e) {
@@ -221,7 +221,7 @@ public class BackupManager {
for (String file : repository.listAll(sourceDir)) {
String zkNodePath = destZkPath + "/" + file;
- URI path = repository.createURI(sourceDir.getPath(), file);
+ URI path = repository.resolve(sourceDir, file);
PathType t = repository.getPathType(path);
switch (t) {
case FILE: {
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9d3a2a08/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepository.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepository.java b/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepository.java
index 8950ce7..875be18 100644
--- a/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepository.java
+++ b/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepository.java
@@ -57,13 +57,23 @@ public interface BackupRepository extends NamedListInitializedPlugin, Closeable
<T> T getConfigProperty(String name);
/**
- * This method creates a URI using the specified path components (as method arguments).
+ * This method returns the URI representation for the specified path.
+ * Note - the specified path could be a fully qualified URI OR a relative path for a file-system.
*
+ * @param path The path specified by the user.
+ * @return the URI representation of the user supplied value
+ */
+ URI createURI(String path);
+
+ /**
+ * This method resolves a URI using the specified path components (as method arguments).
+ *
+ * @param baseUri The base URI to use for creating the path
* @param pathComponents
* The directory (or file-name) to be included in the URI.
* @return A URI containing absolute path
*/
- URI createURI(String... pathComponents);
+ URI resolve(URI baseUri, String... pathComponents);
/**
* This method checks if the specified path exists in this repository.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9d3a2a08/solr/core/src/java/org/apache/solr/core/backup/repository/HdfsBackupRepository.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/backup/repository/HdfsBackupRepository.java b/solr/core/src/java/org/apache/solr/core/backup/repository/HdfsBackupRepository.java
index bb148de..f12d9fd 100644
--- a/solr/core/src/java/org/apache/solr/core/backup/repository/HdfsBackupRepository.java
+++ b/solr/core/src/java/org/apache/solr/core/backup/repository/HdfsBackupRepository.java
@@ -20,6 +20,7 @@ package org.apache.solr.core.backup.repository;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
+import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
@@ -88,11 +89,31 @@ public class HdfsBackupRepository implements BackupRepository {
}
@Override
- public URI createURI(String... pathComponents) {
- Path result = baseHdfsPath;
- for (String p : pathComponents) {
- result = new Path(result, p);
+ public URI createURI(String location) {
+ Preconditions.checkNotNull(location);
+
+ URI result = null;
+ try {
+ result = new URI(location);
+ if (!result.isAbsolute()) {
+ result = resolve(this.baseHdfsPath.toUri(), location);
+ }
+ } catch (URISyntaxException ex) {
+ result = resolve(this.baseHdfsPath.toUri(), location);
+ }
+
+ return result;
+ }
+
+ @Override
+ public URI resolve(URI baseUri, String... pathComponents) {
+ Preconditions.checkArgument(baseUri.isAbsolute());
+
+ Path result = new Path(baseUri);
+ for (String path : pathComponents) {
+ result = new Path(result, path);
}
+
return result.toUri();
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9d3a2a08/solr/core/src/java/org/apache/solr/core/backup/repository/LocalFileSystemRepository.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/backup/repository/LocalFileSystemRepository.java b/solr/core/src/java/org/apache/solr/core/backup/repository/LocalFileSystemRepository.java
index 86c4110..4ac2558 100644
--- a/solr/core/src/java/org/apache/solr/core/backup/repository/LocalFileSystemRepository.java
+++ b/solr/core/src/java/org/apache/solr/core/backup/repository/LocalFileSystemRepository.java
@@ -20,19 +20,20 @@ package org.apache.solr.core.backup.repository;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
+import java.net.URISyntaxException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
+
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.SimpleFSDirectory;
-import org.apache.lucene.util.Constants;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.DirectoryFactory;
@@ -58,21 +59,28 @@ public class LocalFileSystemRepository implements BackupRepository {
}
@Override
- public URI createURI(String... pathComponents) {
- Preconditions.checkArgument(pathComponents.length > 0);
-
- String basePath = Preconditions.checkNotNull(pathComponents[0]);
- // Note the URI.getPath() invocation on Windows platform generates an invalid URI.
- // Refer to http://stackoverflow.com/questions/9834776/java-nio-file-path-issue
- // Since the caller may have used this method to generate the string representation
- // for the pathComponents, we implement a work-around specifically for Windows platform
- // to remove the leading '/' character.
- if (Constants.WINDOWS) {
- basePath = basePath.replaceFirst("^/(.:/)", "$1");
+ public URI createURI(String location) {
+ Preconditions.checkNotNull(location);
+
+ URI result = null;
+ try {
+ result = new URI(location);
+ if (!result.isAbsolute()) {
+ result = Paths.get(location).toUri();
+ }
+ } catch (URISyntaxException ex) {
+ result = Paths.get(location).toUri();
}
- Path result = Paths.get(basePath);
- for (int i = 1; i < pathComponents.length; i++) {
+ return result;
+ }
+
+ @Override
+ public URI resolve(URI baseUri, String... pathComponents) {
+ Preconditions.checkArgument(pathComponents.length > 0);
+
+ Path result = Paths.get(baseUri);
+ for (int i = 0; i < pathComponents.length; i++) {
result = result.resolve(pathComponents[i]);
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9d3a2a08/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
index 139489a..759b7c6 100644
--- a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
@@ -436,14 +436,15 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
location = core.getDataDir();
}
+ URI locationUri = repo.createURI(location);
+
//If name is not provided then look for the last unnamed( the ones with the snapshot.timestamp format)
//snapshot folder since we allow snapshots to be taken without providing a name. Pick the latest timestamp.
if (name == null) {
- URI basePath = repo.createURI(location);
- String[] filePaths = repo.listAll(basePath);
+ String[] filePaths = repo.listAll(locationUri);
List<OldBackupDirectory> dirs = new ArrayList<>();
for (String f : filePaths) {
- OldBackupDirectory obd = new OldBackupDirectory(basePath, f);
+ OldBackupDirectory obd = new OldBackupDirectory(locationUri, f);
if (obd.getTimestamp().isPresent()) {
dirs.add(obd);
}
@@ -458,7 +459,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
name = "snapshot." + name;
}
- RestoreCore restoreCore = new RestoreCore(repo, core, location, name);
+ RestoreCore restoreCore = new RestoreCore(repo, core, locationUri, name);
try {
MDC.put("RestoreCore.core", core.getName());
MDC.put("RestoreCore.backupLocation", location);
@@ -554,7 +555,8 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
}
// small race here before the commit point is saved
- SnapShooter snapShooter = new SnapShooter(repo, core, location, params.get(NAME), commitName);
+ URI locationUri = repo.createURI(location);
+ SnapShooter snapShooter = new SnapShooter(repo, core, locationUri, params.get(NAME), commitName);
snapShooter.validateCreateSnapshot();
snapShooter.createSnapAsync(indexCommit, numberToKeep, (nl) -> snapShootDetails = nl);
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9d3a2a08/solr/core/src/java/org/apache/solr/handler/RestoreCore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/RestoreCore.java b/solr/core/src/java/org/apache/solr/handler/RestoreCore.java
index 6aef35c..62cb93f 100644
--- a/solr/core/src/java/org/apache/solr/handler/RestoreCore.java
+++ b/solr/core/src/java/org/apache/solr/handler/RestoreCore.java
@@ -44,11 +44,11 @@ public class RestoreCore implements Callable<Boolean> {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private final String backupName;
- private final String backupLocation;
+ private final URI backupLocation;
private final SolrCore core;
private final BackupRepository backupRepo;
- public RestoreCore(BackupRepository backupRepo, SolrCore core, String location, String name) {
+ public RestoreCore(BackupRepository backupRepo, SolrCore core, URI location, String name) {
this.backupRepo = backupRepo;
this.core = core;
this.backupLocation = location;
@@ -62,7 +62,7 @@ public class RestoreCore implements Callable<Boolean> {
public boolean doRestore() throws Exception {
- URI backupPath = backupRepo.createURI(backupLocation, backupName);
+ URI backupPath = backupRepo.resolve(backupLocation, backupName);
SimpleDateFormat dateFormat = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT);
String restoreIndexName = "restore." + dateFormat.format(new Date());
String restoreIndexPath = core.getDataDir() + restoreIndexName;
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9d3a2a08/solr/core/src/java/org/apache/solr/handler/SnapShooter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/SnapShooter.java b/solr/core/src/java/org/apache/solr/handler/SnapShooter.java
index e12649d..52f4889 100644
--- a/solr/core/src/java/org/apache/solr/handler/SnapShooter.java
+++ b/solr/core/src/java/org/apache/solr/handler/SnapShooter.java
@@ -19,6 +19,7 @@ package org.apache.solr.handler;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.net.URI;
+import java.nio.file.Paths;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collection;
@@ -75,17 +76,17 @@ public class SnapShooter {
} else {
snapDirStr = core.getCoreDescriptor().getInstanceDir().resolve(location).normalize().toString();
}
- initialize(new LocalFileSystemRepository(), core, snapDirStr, snapshotName, null);
+ initialize(new LocalFileSystemRepository(), core, Paths.get(snapDirStr).toUri(), snapshotName, null);
}
- public SnapShooter(BackupRepository backupRepo, SolrCore core, String location, String snapshotName, String commitName) {
+ public SnapShooter(BackupRepository backupRepo, SolrCore core, URI location, String snapshotName, String commitName) {
initialize(backupRepo, core, location, snapshotName, commitName);
}
- private void initialize(BackupRepository backupRepo, SolrCore core, String location, String snapshotName, String commitName) {
+ private void initialize(BackupRepository backupRepo, SolrCore core, URI location, String snapshotName, String commitName) {
this.solrCore = Preconditions.checkNotNull(core);
this.backupRepo = Preconditions.checkNotNull(backupRepo);
- this.baseSnapDirPath = backupRepo.createURI(Preconditions.checkNotNull(location)).normalize();
+ this.baseSnapDirPath = location;
this.snapshotName = snapshotName;
if (snapshotName != null) {
directoryName = "snapshot." + snapshotName;
@@ -93,7 +94,7 @@ public class SnapShooter {
SimpleDateFormat fmt = new SimpleDateFormat(DATE_FMT, Locale.ROOT);
directoryName = "snapshot." + fmt.format(new Date());
}
- this.snapshotDirPath = backupRepo.createURI(location, directoryName);
+ this.snapshotDirPath = backupRepo.resolve(location, directoryName);
this.commitName = commitName;
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9d3a2a08/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java
index e4103c5..dfc7a6f 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java
@@ -18,6 +18,7 @@ package org.apache.solr.handler.admin;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
+import java.net.URI;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
@@ -803,8 +804,9 @@ enum CoreAdminOperation implements CoreAdminOp {
// parameter is not supplied, the latest index commit is backed-up.
String commitName = params.get(CoreAdminParams.COMMIT_NAME);
+ URI locationUri = repository.createURI(location);
try (SolrCore core = it.handler.coreContainer.getCore(cname)) {
- SnapShooter snapShooter = new SnapShooter(repository, core, location, name, commitName);
+ SnapShooter snapShooter = new SnapShooter(repository, core, locationUri, name, commitName);
// validateCreateSnapshot will create parent dirs instead of throw; that choice is dubious.
// But we want to throw. One reason is that
// this dir really should, in fact must, already exist here if triggered via a collection backup on a shared
@@ -847,8 +849,9 @@ enum CoreAdminOperation implements CoreAdminOp {
+ " parameter or as a default repository property");
}
+ URI locationUri = repository.createURI(location);
try (SolrCore core = it.handler.coreContainer.getCore(cname)) {
- RestoreCore restoreCore = new RestoreCore(repository, core, location, name);
+ RestoreCore restoreCore = new RestoreCore(repository, core, locationUri, name);
boolean success = restoreCore.doRestore();
if (!success) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to restore core=" + core.getName());
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9d3a2a08/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java b/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java
index db68913..c0db46e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java
@@ -24,12 +24,20 @@ import org.junit.BeforeClass;
* such file-system would be exposed via local file-system API.
*/
public class TestLocalFSCloudBackupRestore extends AbstractCloudBackupRestoreTestCase {
+ private static String backupLocation;
@BeforeClass
public static void setupClass() throws Exception {
configureCluster(NUM_SHARDS)// nodes
.addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
.configure();
+
+ boolean whitespacesInPath = random().nextBoolean();
+ if (whitespacesInPath) {
+ backupLocation = createTempDir("my backup").toAbsolutePath().toString();
+ } else {
+ backupLocation = createTempDir("mybackup").toAbsolutePath().toString();
+ }
}
@Override
@@ -44,6 +52,6 @@ public class TestLocalFSCloudBackupRestore extends AbstractCloudBackupRestoreTes
@Override
public String getBackupLocation() {
- return createTempDir().toFile().getAbsolutePath();
+ return backupLocation;
}
}
[09/15] lucene-solr:branch_6_2: SOLR-9461: DELETENODE,
REPLACENODE should pass down the 'async' param to subcommands
Posted by sh...@apache.org.
SOLR-9461: DELETENODE, REPLACENODE should pass down the 'async' param to subcommands
(cherry picked from commit e0e72e6)
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/f546ef88
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/f546ef88
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/f546ef88
Branch: refs/heads/branch_6_2
Commit: f546ef885ecfefadc8fa508928bde539dca4562a
Parents: 2e2ef47
Author: Noble Paul <no...@apache.org>
Authored: Thu Sep 1 18:03:59 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Mon Sep 12 10:08:52 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 2 ++
.../java/org/apache/solr/cloud/DeleteNodeCmd.java | 16 ++++++++++++----
.../java/org/apache/solr/cloud/ReplaceNodeCmd.java | 8 +++++++-
3 files changed, 21 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f546ef88/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index d9c77c3..6d1bd96 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -42,6 +42,8 @@ Bug Fixes
* SOLR-9188: blockUnknown property makes inter-node communication impossible (noble)
* SOLR-9455: Deleting a sub-shard in recovery state can mark parent shard as inactive. (shalin)
+
+* SOLR-9461: DELETENODE, REPLACENODE should pass down the 'async' param to subcommands (shalin, noble)
================== 6.2.0 ==================
Versions of Major Components
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f546ef88/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java
index b3c5055..0fd001a 100644
--- a/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java
@@ -27,6 +27,7 @@ import java.util.concurrent.TimeUnit;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.params.CommonAdminParams;
import org.apache.solr.common.util.NamedList;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
@@ -35,6 +36,7 @@ import org.slf4j.LoggerFactory;
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
public class DeleteNodeCmd implements OverseerCollectionMessageHandler.Cmd {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@@ -53,24 +55,30 @@ public class DeleteNodeCmd implements OverseerCollectionMessageHandler.Cmd {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Source Node: " + node + " is not live");
}
List<ZkNodeProps> sourceReplicas = ReplaceNodeCmd.getReplicasOfNode(node, state);
- cleanupReplicas(results, state, sourceReplicas, ocmh, node);
+ cleanupReplicas(results, state, sourceReplicas, ocmh, node, message.getStr(ASYNC));
}
static void cleanupReplicas(NamedList results,
ClusterState clusterState,
List<ZkNodeProps> sourceReplicas,
- OverseerCollectionMessageHandler ocmh, String node) throws InterruptedException {
+ OverseerCollectionMessageHandler ocmh,
+ String node,
+ String async) throws InterruptedException {
CountDownLatch cleanupLatch = new CountDownLatch(sourceReplicas.size());
for (ZkNodeProps sourceReplica : sourceReplicas) {
- log.info("Deleting replica for collection={} shard={} on node={}", sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), node);
+ String coll = sourceReplica.getStr(COLLECTION_PROP);
+ String shard = sourceReplica.getStr(SHARD_ID_PROP);
+ log.info("Deleting replica for collection={} shard={} on node={}", coll, shard, node);
NamedList deleteResult = new NamedList();
try {
+ if (async != null) sourceReplica = sourceReplica.plus(ASYNC, async);
((DeleteReplicaCmd)ocmh.commandMap.get(DELETEREPLICA)).deleteReplica(clusterState, sourceReplica.plus("parallel", "true"), deleteResult, () -> {
cleanupLatch.countDown();
if (deleteResult.get("failure") != null) {
synchronized (results) {
+
results.add("failure", String.format(Locale.ROOT, "Failed to delete replica for collection=%s shard=%s" +
- " on node=%s", sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), node));
+ " on node=%s", coll, shard, node));
}
}
});
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f546ef88/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
index aad9cc7..ad02fc0 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
@@ -34,14 +34,18 @@ import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CommonAdminParams;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.StrUtils;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.util.StrUtils.formatString;
public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@@ -58,6 +62,7 @@ public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
ocmh.checkRequired(message, "source", "target");
String source = message.getStr("source");
String target = message.getStr("target");
+ String async = message.getStr("async");
boolean parallel = message.getBool("parallel", false);
ClusterState clusterState = zkStateReader.getClusterState();
@@ -78,6 +83,7 @@ public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
NamedList nl = new NamedList();
log.info("Going to create replica for collection={} shard={} on node={}", sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), target);
ZkNodeProps msg = sourceReplica.plus("parallel", String.valueOf(parallel)).plus(CoreAdminParams.NODE, target);
+ if(async!=null) msg.getProperties().put(ASYNC, async);
final ZkNodeProps addedReplica = ocmh.addReplica(clusterState,
msg, nl, () -> {
countDownLatch.countDown();
@@ -136,7 +142,7 @@ public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
// we have reached this far means all replicas could be recreated
//now cleanup the replicas in the source node
- DeleteNodeCmd.cleanupReplicas(results, state, sourceReplicas, ocmh, source);
+ DeleteNodeCmd.cleanupReplicas(results, state, sourceReplicas, ocmh, source, async);
results.add("success", "REPLACENODE action completed successfully from : " + source + " to : " + target);
}
[13/15] lucene-solr:branch_6_2: SOLR-9490: Fixed bugs in BoolField
that caused it to erroneously return "false" for all docs depending on usage
Posted by sh...@apache.org.
SOLR-9490: Fixed bugs in BoolField that caused it to erroneously return "false" for all docs depending on usage
(cherry picked from commit 60ce8d7c549ef90cd6aaa9297bf31aeb3dd3417e)
(cherry picked from commit d59715f)
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/d5e75a4d
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/d5e75a4d
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/d5e75a4d
Branch: refs/heads/branch_6_2
Commit: d5e75a4d4165dd39febdc373258cae12ca0eedae
Parents: 0af3d4e
Author: Chris Hostetter <ho...@apache.org>
Authored: Sat Sep 10 00:15:09 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Mon Sep 12 10:12:26 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 3 +++
solr/core/src/java/org/apache/solr/schema/BoolField.java | 10 ++++++----
.../org/apache/solr/client/solrj/SolrExampleTests.java | 7 +++++--
3 files changed, 14 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d5e75a4d/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 64a0b9d..3c3038b 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -51,6 +51,9 @@ Bug Fixes
* SOLR-9488: Shard split can fail to write commit data on shutdown/restart causing replicas to recover
without replicating the index. This can cause data loss. (shalin)
+
+* SOLR-9490: Fixed bugs in BoolField that caused it to erroneously return "false" for all docs depending
+ on usage (Colvin Cowie, Dan Fox, hossman)
================== 6.2.0 ==================
Versions of Major Components
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d5e75a4d/solr/core/src/java/org/apache/solr/schema/BoolField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/BoolField.java b/solr/core/src/java/org/apache/solr/schema/BoolField.java
index 1ecdb59..a9acfc5 100644
--- a/solr/core/src/java/org/apache/solr/schema/BoolField.java
+++ b/solr/core/src/java/org/apache/solr/schema/BoolField.java
@@ -128,11 +128,13 @@ public class BoolField extends PrimitiveFieldType {
@Override
public String toExternal(IndexableField f) {
- if (f.binaryValue() == null) {
- return null;
+ if (null != f.binaryValue()) {
+ return indexedToReadable(f.binaryValue().utf8ToString());
}
-
- return indexedToReadable(f.binaryValue().utf8ToString());
+ if (null != f.stringValue()) {
+ return indexedToReadable(f.stringValue());
+ }
+ return null;
}
@Override
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d5e75a4d/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java b/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java
index 1dd074e..4f3f83d 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java
@@ -182,12 +182,15 @@ abstract public class SolrExampleTests extends SolrExampleTestsBase
// test a second query, test making a copy of the main query
SolrQuery query2 = query.getCopy();
query2.addFilterQuery("inStock:true");
+ Assert.assertFalse(query.getFilterQueries() == query2.getFilterQueries());
response = client.query( query2 );
Assert.assertEquals(1, query2.getFilterQueries().length);
Assert.assertEquals(0, response.getStatus());
Assert.assertEquals(2, response.getResults().getNumFound() );
- Assert.assertFalse(query.getFilterQueries() == query2.getFilterQueries());
-
+ for (SolrDocument outDoc : response.getResults()) {
+ assertEquals(true, outDoc.getFieldValue("inStock"));
+ }
+
// sanity check round tripping of params...
query = new SolrQuery("foo");
query.addFilterQuery("{!field f=inStock}true");
[15/15] lucene-solr:branch_6_2: SOLR-9445: Fix failures in
TestLocalFSCloudBackupRestore due to changed code path which return
SolrExceptions instead of SolrServerExceptions (cherry picked from commit
df9a642)
Posted by sh...@apache.org.
SOLR-9445: Fix failures in TestLocalFSCloudBackupRestore due to changed code path which return SolrExceptions instead of SolrServerExceptions
(cherry picked from commit df9a642)
(cherry picked from commit 7cf7bfd)
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/b34f9b6f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/b34f9b6f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/b34f9b6f
Branch: refs/heads/branch_6_2
Commit: b34f9b6fe590a46aef7792e0313f88721f1caded
Parents: e567747
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Sun Aug 28 00:38:53 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Mon Sep 12 14:30:00 2016 +0530
----------------------------------------------------------------------
.../solr/cloud/AbstractCloudBackupRestoreTestCase.java | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b34f9b6f/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java b/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
index fd74eaf..8e7a4b0 100644
--- a/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
+++ b/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
@@ -33,6 +33,7 @@ import org.apache.solr.client.solrj.impl.HttpSolrClient.RemoteSolrException;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.CollectionAdminRequest.ClusterProp;
import org.apache.solr.client.solrj.response.RequestStatusState;
+import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.cloud.DocCollection;
@@ -147,9 +148,8 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
try {
backup.process(solrClient);
fail("This request should have failed since the cluster property value for backup location property is invalid.");
- } catch (SolrServerException ex) {
- assertTrue(ex.getCause() instanceof RemoteSolrException);
- assertEquals(ErrorCode.SERVER_ERROR.code, ((RemoteSolrException)ex.getCause()).code());
+ } catch (SolrException ex) {
+ assertEquals(ErrorCode.SERVER_ERROR.code, ex.code());
}
String restoreCollectionName = collectionName + "_invalidrequest";
@@ -158,9 +158,8 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
try {
restore.process(solrClient);
fail("This request should have failed since the cluster property value for backup location property is invalid.");
- } catch (SolrServerException ex) {
- assertTrue(ex.getCause() instanceof RemoteSolrException);
- assertEquals(ErrorCode.SERVER_ERROR.code, ((RemoteSolrException)ex.getCause()).code());
+ } catch (SolrException ex) {
+ assertEquals(ErrorCode.SERVER_ERROR.code, ex.code());
}
}
[03/15] lucene-solr:branch_6_2: SOLR-9445: Admin requests are retried
by CloudSolrClient and LBHttpSolrClient on failure (cherry picked from commit
ae40929)
Posted by sh...@apache.org.
SOLR-9445: Admin requests are retried by CloudSolrClient and LBHttpSolrClient on failure
(cherry picked from commit ae40929)
(cherry picked from commit 5556a9b)
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/9920ac97
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/9920ac97
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/9920ac97
Branch: refs/heads/branch_6_2
Commit: 9920ac97a45b9dda0587886d832483faa9aaa39b
Parents: 4999351
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Sat Aug 27 09:08:02 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Mon Sep 12 09:59:51 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 2 +
.../solr/client/solrj/impl/CloudSolrClient.java | 13 +---
.../client/solrj/impl/LBHttpSolrClient.java | 20 +++---
.../apache/solr/common/params/CommonParams.java | 10 +++
.../client/solrj/impl/CloudSolrClientTest.java | 68 ++++++++++++++++++--
5 files changed, 87 insertions(+), 26 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9920ac97/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 12c5d2a..f9c0373 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -28,6 +28,8 @@ Bug Fixes
* SOLR-6744: fl renaming / alias of uniqueKey field generates null pointer exception in SolrCloud configuration
(Mike Drob via Tomás Fernández Löbbe)
+* SOLR-9445: Admin requests are retried by CloudSolrClient and LBHttpSolrClient on failure. (shalin)
+
================== 6.2.0 ==================
Versions of Major Components
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9920ac97/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
index 4bce970..cf2b5a7 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
@@ -22,7 +22,6 @@ import java.net.ConnectException;
import java.net.SocketException;
import java.nio.file.Path;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
@@ -85,11 +84,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
-import static org.apache.solr.common.params.CommonParams.AUTHC_PATH;
-import static org.apache.solr.common.params.CommonParams.AUTHZ_PATH;
-import static org.apache.solr.common.params.CommonParams.COLLECTIONS_HANDLER_PATH;
-import static org.apache.solr.common.params.CommonParams.CONFIGSETS_HANDLER_PATH;
-import static org.apache.solr.common.params.CommonParams.CORES_HANDLER_PATH;
+import static org.apache.solr.common.params.CommonParams.ADMIN_PATHS;
/**
* SolrJ client class to communicate with SolrCloud.
@@ -996,12 +991,6 @@ public class CloudSolrClient extends SolrClient {
collection = (reqParams != null) ? reqParams.get("collection", getDefaultCollection()) : getDefaultCollection();
return requestWithRetryOnStaleState(request, 0, collection);
}
- private static final Set<String> ADMIN_PATHS = new HashSet<>(Arrays.asList(
- CORES_HANDLER_PATH,
- COLLECTIONS_HANDLER_PATH,
- CONFIGSETS_HANDLER_PATH,
- AUTHC_PATH,
- AUTHZ_PATH));
/**
* As this class doesn't watch external collections on the client side,
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9920ac97/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java
index 9b7d3fe..9daa408 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java
@@ -53,6 +53,8 @@ import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SolrjNamedThreadFactory;
import org.slf4j.MDC;
+import static org.apache.solr.common.params.CommonParams.ADMIN_PATHS;
+
/**
* LBHttpSolrClient or "LoadBalanced HttpSolrClient" is a load balancing wrapper around
* {@link HttpSolrClient}. This is useful when you
@@ -321,7 +323,7 @@ public class LBHttpSolrClient extends SolrClient {
public Rsp request(Req req) throws SolrServerException, IOException {
Rsp rsp = new Rsp();
Exception ex = null;
- boolean isUpdate = req.request instanceof IsUpdateRequest;
+ boolean isNonRetryable = req.request instanceof IsUpdateRequest || ADMIN_PATHS.contains(req.request.getPath());
List<ServerWrapper> skipped = null;
long timeAllowedNano = getTimeAllowedInNanos(req.getRequest());
@@ -352,7 +354,7 @@ public class LBHttpSolrClient extends SolrClient {
MDC.put("LBHttpSolrClient.url", serverStr);
HttpSolrClient client = makeSolrClient(serverStr);
- ex = doRequest(client, req, rsp, isUpdate, false, null);
+ ex = doRequest(client, req, rsp, isNonRetryable, false, null);
if (ex == null) {
return rsp; // SUCCESS
}
@@ -368,7 +370,7 @@ public class LBHttpSolrClient extends SolrClient {
break;
}
- ex = doRequest(wrapper.client, req, rsp, isUpdate, true, wrapper.getKey());
+ ex = doRequest(wrapper.client, req, rsp, isNonRetryable, true, wrapper.getKey());
if (ex == null) {
return rsp; // SUCCESS
}
@@ -395,7 +397,7 @@ public class LBHttpSolrClient extends SolrClient {
return e;
}
- protected Exception doRequest(HttpSolrClient client, Req req, Rsp rsp, boolean isUpdate,
+ protected Exception doRequest(HttpSolrClient client, Req req, Rsp rsp, boolean isNonRetryable,
boolean isZombie, String zombieKey) throws SolrServerException, IOException {
Exception ex = null;
try {
@@ -407,7 +409,7 @@ public class LBHttpSolrClient extends SolrClient {
} catch (SolrException e) {
// we retry on 404 or 403 or 503 or 500
// unless it's an update - then we only retry on connect exception
- if (!isUpdate && RETRY_CODES.contains(e.code())) {
+ if (!isNonRetryable && RETRY_CODES.contains(e.code())) {
ex = (!isZombie) ? addZombie(client, e) : e;
} else {
// Server is alive but the request was likely malformed or invalid
@@ -417,22 +419,22 @@ public class LBHttpSolrClient extends SolrClient {
throw e;
}
} catch (SocketException e) {
- if (!isUpdate || e instanceof ConnectException) {
+ if (!isNonRetryable || e instanceof ConnectException) {
ex = (!isZombie) ? addZombie(client, e) : e;
} else {
throw e;
}
} catch (SocketTimeoutException e) {
- if (!isUpdate) {
+ if (!isNonRetryable) {
ex = (!isZombie) ? addZombie(client, e) : e;
} else {
throw e;
}
} catch (SolrServerException e) {
Throwable rootCause = e.getRootCause();
- if (!isUpdate && rootCause instanceof IOException) {
+ if (!isNonRetryable && rootCause instanceof IOException) {
ex = (!isZombie) ? addZombie(client, e) : e;
- } else if (isUpdate && rootCause instanceof ConnectException) {
+ } else if (isNonRetryable && rootCause instanceof ConnectException) {
ex = (!isZombie) ? addZombie(client, e) : e;
} else {
throw e;
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9920ac97/solr/solrj/src/java/org/apache/solr/common/params/CommonParams.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/params/CommonParams.java b/solr/solrj/src/java/org/apache/solr/common/params/CommonParams.java
index 5ccd70f..b830b41 100644
--- a/solr/solrj/src/java/org/apache/solr/common/params/CommonParams.java
+++ b/solr/solrj/src/java/org/apache/solr/common/params/CommonParams.java
@@ -16,7 +16,10 @@
*/
package org.apache.solr.common.params;
+import java.util.Arrays;
+import java.util.HashSet;
import java.util.Locale;
+import java.util.Set;
/**
@@ -178,6 +181,13 @@ public interface CommonParams {
public static final String AUTHC_PATH = "/admin/authentication";
public static final String ZK_PATH = "/admin/zookeeper";
+ public static final Set<String> ADMIN_PATHS = new HashSet<>(Arrays.asList(
+ CORES_HANDLER_PATH,
+ COLLECTIONS_HANDLER_PATH,
+ CONFIGSETS_HANDLER_PATH,
+ AUTHC_PATH,
+ AUTHZ_PATH));
+
/** valid values for: <code>echoParams</code> */
public enum EchoParamStyle {
EXPLICIT,
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9920ac97/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
index 4e8a403..a16e38e 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
@@ -22,6 +22,7 @@ import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
@@ -38,11 +39,13 @@ import org.apache.lucene.util.TestUtil;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.client.solrj.response.RequestStatusState;
import org.apache.solr.client.solrj.response.UpdateResponse;
import org.apache.solr.cloud.AbstractDistribZkTestBase;
import org.apache.solr.cloud.SolrCloudTestCase;
@@ -60,6 +63,9 @@ import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.ShardParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
+import org.apache.solr.handler.admin.CollectionsHandler;
+import org.apache.solr.handler.admin.ConfigSetsHandler;
+import org.apache.solr.handler.admin.CoreAdminHandler;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
@@ -80,10 +86,11 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
private static final String id = "id";
private static final int TIMEOUT = 30;
+ private static final int NODE_COUNT = 3;
@BeforeClass
public static void setupCluster() throws Exception {
- configureCluster(3)
+ configureCluster(NODE_COUNT)
.addConfig("conf", getFile("solrj").toPath().resolve("solr").resolve("configsets").resolve("streaming").resolve("conf"))
.configure();
@@ -384,6 +391,11 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
private Long getNumRequests(String baseUrl, String collectionName) throws
SolrServerException, IOException {
+ return getNumRequests(baseUrl, collectionName, "QUERYHANDLER", "standard", false);
+ }
+
+ private Long getNumRequests(String baseUrl, String collectionName, String category, String key, boolean returnNumErrors) throws
+ SolrServerException, IOException {
NamedList<Object> resp;
try (HttpSolrClient client = getHttpSolrClient(baseUrl + "/"+ collectionName)) {
@@ -392,14 +404,60 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("qt", "/admin/mbeans");
params.set("stats", "true");
- params.set("key", "standard");
- params.set("cat", "QUERYHANDLER");
+ params.set("key", key);
+ params.set("cat", category);
// use generic request to avoid extra processing of queries
QueryRequest req = new QueryRequest(params);
resp = client.request(req);
}
- return (Long) resp.findRecursive("solr-mbeans", "QUERYHANDLER",
- "standard", "stats", "requests");
+ return (Long) resp.findRecursive("solr-mbeans", category, key, "stats", returnNumErrors ? "errors" : "requests");
+ }
+
+ @Test
+ public void testNonRetryableRequests() throws Exception {
+ try (CloudSolrClient client = getCloudSolrClient(cluster.getZkServer().getZkAddress())) {
+ // important to have one replica on each node
+ RequestStatusState state = CollectionAdminRequest.createCollection("foo", "conf", 1, NODE_COUNT).processAndWait(client, 60);
+ if (state == RequestStatusState.COMPLETED) {
+ AbstractDistribZkTestBase.waitForRecoveriesToFinish("foo", client.getZkStateReader(), true, true, TIMEOUT);
+ client.setDefaultCollection("foo");
+
+ Map<String, String> adminPathToMbean = new HashMap<>(CommonParams.ADMIN_PATHS.size());
+ adminPathToMbean.put(CommonParams.COLLECTIONS_HANDLER_PATH, CollectionsHandler.class.getName());
+ adminPathToMbean.put(CommonParams.CORES_HANDLER_PATH, CoreAdminHandler.class.getName());
+ adminPathToMbean.put(CommonParams.CONFIGSETS_HANDLER_PATH, ConfigSetsHandler.class.getName());
+ // we do not add the authc/authz handlers because they do not currently expose any mbeans
+
+ for (String adminPath : adminPathToMbean.keySet()) {
+ long errorsBefore = 0;
+ for (JettySolrRunner runner : cluster.getJettySolrRunners()) {
+ Long numRequests = getNumRequests(runner.getBaseUrl().toString(), "foo", "QUERYHANDLER", adminPathToMbean.get(adminPath), true);
+ errorsBefore += numRequests;
+ log.info("Found {} requests to {} on {}", numRequests, adminPath, runner.getBaseUrl());
+ }
+
+ ModifiableSolrParams params = new ModifiableSolrParams();
+ params.set("qt", adminPath);
+ params.set("action", "foobar"); // this should cause an error
+ QueryRequest req = new QueryRequest(params);
+ try {
+ NamedList<Object> resp = client.request(req);
+ fail("call to foo for admin path " + adminPath + " should have failed");
+ } catch (Exception e) {
+ // expected
+ }
+ long errorsAfter = 0;
+ for (JettySolrRunner runner : cluster.getJettySolrRunners()) {
+ Long numRequests = getNumRequests(runner.getBaseUrl().toString(), "foo", "QUERYHANDLER", adminPathToMbean.get(adminPath), true);
+ errorsAfter += numRequests;
+ log.info("Found {} requests to {} on {}", numRequests, adminPath, runner.getBaseUrl());
+ }
+ assertEquals(errorsBefore + 1, errorsAfter);
+ }
+ } else {
+ fail("Collection could not be created within 60 seconds");
+ }
+ }
}
@Test
[05/15] lucene-solr:branch_6_2: SOLR-9430: Fix locale lookup in DIH
<propertyWriter/> to use BCP47 language tags to be consistent with other
places in Solr. Language names still work for backwards compatibility
Posted by sh...@apache.org.
SOLR-9430: Fix locale lookup in DIH <propertyWriter/> to use BCP47 language tags to be consistent with other places in Solr. Language names still work for backwards compatibility
(cherry picked from commit 545ce38)
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/ebff9d6b
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/ebff9d6b
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/ebff9d6b
Branch: refs/heads/branch_6_2
Commit: ebff9d6b8553897acde758ee4943508e64ffd5fe
Parents: 348b3e8
Author: Uwe Schindler <us...@apache.org>
Authored: Sat Aug 27 12:08:42 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Mon Sep 12 10:03:14 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 4 +++
.../dataimport/SimplePropertiesWriter.java | 33 +++++++++++++-------
2 files changed, 26 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ebff9d6b/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index d5024b8..26441a0 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -32,6 +32,10 @@ Bug Fixes
* SOLR-9439: Shard split clean up logic for older failed splits is faulty. The delete shard API
has also been made more resilient against failures resulting from non-existent cores. (shalin)
+
+* SOLR-9430: Fix locale lookup in DIH <propertyWriter/> to use BCP47 language tags
+ to be consistent with other places in Solr. Language names still work for backwards
+ compatibility. (Uwe Schindler, Boris Steiner)
================== 6.2.0 ==================
Versions of Major Components
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ebff9d6b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SimplePropertiesWriter.java
----------------------------------------------------------------------
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SimplePropertiesWriter.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SimplePropertiesWriter.java
index 92527bb..1ee18ef 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SimplePropertiesWriter.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SimplePropertiesWriter.java
@@ -30,11 +30,13 @@ import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashMap;
+import java.util.IllformedLocaleException;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import org.apache.lucene.util.IOUtils;
+import org.apache.solr.common.util.SuppressForbidden;
import org.apache.solr.core.SolrCore;
import org.apache.solr.core.SolrResourceLoader;
import org.slf4j.Logger;
@@ -90,16 +92,7 @@ public class SimplePropertiesWriter extends DIHProperties {
}
findDirectory(dataImporter, params);
if(params.get(LOCALE) != null) {
- String localeStr = params.get(LOCALE);
- for (Locale l : Locale.getAvailableLocales()) {
- if(localeStr.equals(l.getDisplayName(Locale.ROOT))) {
- locale = l;
- break;
- }
- }
- if(locale==null) {
- throw new DataImportHandlerException(SEVERE, "Unsupported locale for PropertWriter: " + localeStr);
- }
+ locale = getLocale(params.get(LOCALE));
} else {
locale = Locale.ROOT;
}
@@ -108,7 +101,25 @@ public class SimplePropertiesWriter extends DIHProperties {
} else {
dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", locale);
}
- }
+ }
+
+ @SuppressForbidden(reason = "Usage of outdated locale parsing with Locale#toString() because of backwards compatibility")
+ private Locale getLocale(String name) {
+ if (name == null) {
+ return Locale.ROOT;
+ }
+ for (final Locale l : Locale.getAvailableLocales()) {
+ if(name.equals(l.toString()) || name.equals(l.getDisplayName(Locale.ROOT))) {
+ return locale;
+ }
+ }
+ try {
+ return new Locale.Builder().setLanguageTag(name).build();
+ } catch (IllformedLocaleException ex) {
+ throw new DataImportHandlerException(SEVERE, "Unsupported locale for PropertyWriter: " + name);
+ }
+ }
+
protected void findDirectory(DataImporter dataImporter, Map<String, String> params) {
if(params.get(DIRECTORY) != null) {
configDir = params.get(DIRECTORY);
[08/15] lucene-solr:branch_6_2: SOLR-9455: Deleting a sub-shard in
recovery state can mark parent shard as inactive (cherry picked from commit
2700b95)
Posted by sh...@apache.org.
SOLR-9455: Deleting a sub-shard in recovery state can mark parent shard as inactive
(cherry picked from commit 2700b95)
(cherry picked from commit 937439a)
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/2e2ef473
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/2e2ef473
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/2e2ef473
Branch: refs/heads/branch_6_2
Commit: 2e2ef4738cdfbb96a9502920c4ae2ea388be509b
Parents: 988c214
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Tue Aug 30 00:01:17 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Mon Sep 12 10:06:45 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 2 ++
.../java/org/apache/solr/cloud/DeleteShardCmd.java | 14 ++++++++++++++
2 files changed, 16 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2e2ef473/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 4856048..d9c77c3 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -40,6 +40,8 @@ Bug Fixes
* SOLR-9389: HDFS Transaction logs stay open for writes which leaks Xceivers. (Tim Owen via Mark Miller)
* SOLR-9188: blockUnknown property makes inter-node communication impossible (noble)
+
+* SOLR-9455: Deleting a sub-shard in recovery state can mark parent shard as inactive. (shalin)
================== 6.2.0 ==================
Versions of Major Components
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2e2ef473/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java
index f2ae5ca..a7f6d5b 100644
--- a/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java
@@ -23,6 +23,7 @@ import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
+import org.apache.solr.cloud.overseer.OverseerAction;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
@@ -73,6 +74,19 @@ public class DeleteShardCmd implements Cmd {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The slice: " + slice.getName() + " is currently " + state
+ ". Only non-active (or custom-hashed) slices can be deleted.");
}
+
+ if (state == Slice.State.RECOVERY) {
+ // mark the slice as 'construction' and only then try to delete the cores
+ // see SOLR-9455
+ DistributedQueue inQueue = Overseer.getStateUpdateQueue(ocmh.zkStateReader.getZkClient());
+ Map<String, Object> propMap = new HashMap<>();
+ propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
+ propMap.put(sliceId, Slice.State.CONSTRUCTION.toString());
+ propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
+ ZkNodeProps m = new ZkNodeProps(propMap);
+ inQueue.offer(Utils.toJSON(m));
+ }
+
ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
String asyncId = message.getStr(ASYNC);
[06/15] lucene-solr:branch_6_2: SOLR-9389: HDFS Transaction logs stay
open for writes which leaks Xceivers.
Posted by sh...@apache.org.
SOLR-9389: HDFS Transaction logs stay open for writes which leaks Xceivers.
(cherry picked from commit aaee4c8)
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/83cd8c12
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/83cd8c12
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/83cd8c12
Branch: refs/heads/branch_6_2
Commit: 83cd8c12ecd53198eea51ff418c7e1901b4a9dde
Parents: ebff9d6
Author: markrmiller <ma...@apache.org>
Authored: Fri Aug 26 23:09:59 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Mon Sep 12 10:04:20 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 2 +
.../apache/solr/update/HdfsTransactionLog.java | 141 ++++++++++---------
.../org/apache/solr/update/HdfsUpdateLog.java | 9 +-
.../org/apache/solr/update/TransactionLog.java | 5 +
.../java/org/apache/solr/update/UpdateLog.java | 2 +
5 files changed, 93 insertions(+), 66 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/83cd8c12/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 26441a0..6d99b02 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -36,6 +36,8 @@ Bug Fixes
* SOLR-9430: Fix locale lookup in DIH <propertyWriter/> to use BCP47 language tags
to be consistent with other places in Solr. Language names still work for backwards
compatibility. (Uwe Schindler, Boris Steiner)
+
+* SOLR-9389: HDFS Transaction logs stay open for writes which leaks Xceivers. (Tim Owen via Mark Miller)
================== 6.2.0 ==================
Versions of Major Components
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/83cd8c12/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java b/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java
index 7ccbb95..e725127 100644
--- a/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java
+++ b/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java
@@ -64,7 +64,7 @@ public class HdfsTransactionLog extends TransactionLog {
Path tlogFile;
-
+ private long finalLogSize;
private FSDataOutputStream tlogOutStream;
private FileSystem fs;
@@ -144,13 +144,8 @@ public class HdfsTransactionLog extends TransactionLog {
@Override
public boolean endsWithCommit() throws IOException {
- long size;
- synchronized (this) {
- fos.flush();
- tlogOutStream.hflush();
- size = fos.size();
- }
-
+ ensureFlushed();
+ long size = getLogSize();
// the end of the file should have the end message (added during a commit) plus a 4 byte size
byte[] buf = new byte[ END_MESSAGE.length() ];
@@ -159,11 +154,10 @@ public class HdfsTransactionLog extends TransactionLog {
FSDataFastInputStream dis = new FSDataFastInputStream(fs.open(tlogFile), pos);
try {
- //ChannelFastInputStream is = new ChannelFastInputStream(channel, pos);
- dis.read(buf);
- for (int i=0; i<buf.length; i++) {
- if (buf[i] != END_MESSAGE.charAt(i)) return false;
- }
+ dis.read(buf);
+ for (int i=0; i<buf.length; i++) {
+ if (buf[i] != END_MESSAGE.charAt(i)) return false;
+ }
} finally {
dis.close();
}
@@ -176,10 +170,8 @@ public class HdfsTransactionLog extends TransactionLog {
public void rollback(long pos) throws IOException {
synchronized (this) {
assert snapshot_size == pos;
- fos.flush();
- tlogOutStream.hflush();
+ ensureFlushed();
// TODO: how do we rollback with hdfs?? We need HDFS-3107
- //raf.setLength(pos);
fos.setWritten(pos);
assert fos.size() == pos;
numRecords = snapshot_numRecords;
@@ -233,8 +225,10 @@ public class HdfsTransactionLog extends TransactionLog {
endRecord(pos);
- fos.flush(); // flush since this will be the last record in a log fill
- tlogOutStream.hflush();
+ ensureFlushed(); // flush since this will be the last record in a log fill
+
+ // now the commit command is written we will never write to this log again
+ closeOutput();
//assert fos.size() == channel.size();
@@ -255,19 +249,7 @@ public class HdfsTransactionLog extends TransactionLog {
try {
// make sure any unflushed buffer has been flushed
- synchronized (this) {
- // TODO: optimize this by keeping track of what we have flushed up to
- fos.flushBuffer();
-
- // flush to hdfs
- tlogOutStream.hflush();
- /***
- System.out.println("###flushBuffer to " + fos.size() + " raf.length()=" + raf.length() + " pos="+pos);
- if (fos.size() != raf.length() || pos >= fos.size() ) {
- throw new RuntimeException("ERROR" + "###flushBuffer to " + fos.size() + " raf.length()=" + raf.length() + " pos="+pos);
- }
- ***/
- }
+ ensureFlushed();
FSDataFastInputStream dis = new FSDataFastInputStream(fs.open(tlogFile),
pos);
@@ -284,6 +266,52 @@ public class HdfsTransactionLog extends TransactionLog {
}
@Override
+ public void closeOutput() {
+ try {
+ doCloseOutput();
+ } catch (IOException e) {
+ log.error("Could not close tlog output", e);
+ // This situation is not fatal to the caller
+ }
+ }
+
+ private void doCloseOutput() throws IOException {
+ synchronized (this) {
+ if (fos == null) return;
+ if (debug) {
+ log.debug("Closing output for " + tlogFile);
+ }
+ fos.flushBuffer();
+ finalLogSize = fos.size();
+ fos = null;
+ }
+
+ tlogOutStream.hflush();
+ tlogOutStream.close();
+ tlogOutStream = null;
+ }
+
+ private void ensureFlushed() throws IOException {
+ synchronized (this) {
+ if (fos != null) {
+ fos.flush();
+ tlogOutStream.hflush();
+ }
+ }
+ }
+
+ @Override
+ public long getLogSize() {
+ synchronized (this) {
+ if (fos != null) {
+ return fos.size();
+ } else {
+ return finalLogSize;
+ }
+ }
+ }
+
+ @Override
public void finish(UpdateLog.SyncLevel syncLevel) {
if (syncLevel == UpdateLog.SyncLevel.NONE) return;
try {
@@ -309,12 +337,7 @@ public class HdfsTransactionLog extends TransactionLog {
log.debug("Closing tlog" + this);
}
- synchronized (this) {
- fos.flushBuffer();
- }
-
- tlogOutStream.hflush();
- tlogOutStream.close();
+ doCloseOutput();
} catch (IOException e) {
log.error("Exception closing tlog.", e);
@@ -359,17 +382,19 @@ public class HdfsTransactionLog extends TransactionLog {
public HDFSLogReader(long startingPos) {
super();
incref();
+ initStream(startingPos);
+ }
+
+ private void initStream(long pos) {
try {
synchronized (HdfsTransactionLog.this) {
- fos.flushBuffer();
- sz = fos.size();
+ ensureFlushed();
+ sz = getLogSize();
}
-
- tlogOutStream.hflush();
-
+
FSDataInputStream fdis = fs.open(tlogFile);
- fis = new FSDataFastInputStream(fdis, startingPos);
+ fis = new FSDataFastInputStream(fdis, pos);
} catch (IOException e) {
throw new RuntimeException(e);
}
@@ -385,10 +410,10 @@ public class HdfsTransactionLog extends TransactionLog {
synchronized (HdfsTransactionLog.this) {
if (trace) {
- log.trace("Reading log record. pos="+pos+" currentSize="+fos.size());
+ log.trace("Reading log record. pos="+pos+" currentSize="+getLogSize());
}
- if (pos >= fos.size()) {
+ if (pos >= getLogSize()) {
return null;
}
}
@@ -398,16 +423,8 @@ public class HdfsTransactionLog extends TransactionLog {
if (pos >= sz) {
log.info("Read available inputstream data, opening new inputstream pos={} sz={}", pos, sz);
- synchronized (HdfsTransactionLog.this) {
- fos.flushBuffer();
- sz = fos.size();
- }
-
- tlogOutStream.hflush();
fis.close();
-
- FSDataInputStream fdis = fs.open(tlogFile);
- fis = new FSDataFastInputStream(fdis, pos);
+ initStream(pos);
}
if (pos == 0) {
@@ -415,7 +432,7 @@ public class HdfsTransactionLog extends TransactionLog {
// shouldn't currently happen - header and first record are currently written at the same time
synchronized (HdfsTransactionLog.this) {
- if (fis.position() >= fos.size()) {
+ if (fis.position() >= getLogSize()) {
return null;
}
pos = fis.position();
@@ -443,7 +460,7 @@ public class HdfsTransactionLog extends TransactionLog {
@Override
public String toString() {
synchronized (HdfsTransactionLog.this) {
- return "LogReader{" + "file=" + tlogFile + ", position=" + fis.position() + ", end=" + fos.size() + "}";
+ return "LogReader{" + "file=" + tlogFile + ", position=" + fis.position() + ", end=" + getLogSize() + "}";
}
}
@@ -454,7 +471,7 @@ public class HdfsTransactionLog extends TransactionLog {
@Override
public long currentSize() {
- return fos.size();
+ return getLogSize();
}
}
@@ -478,12 +495,8 @@ public class HdfsTransactionLog extends TransactionLog {
long sz;
synchronized (HdfsTransactionLog.this) {
- fos.flushBuffer();
-
- // this must be an hflush
- tlogOutStream.hflush();
- sz = fos.size();
- //assert sz == channel.size();
+ ensureFlushed();
+ sz = getLogSize();
}
fis = new FSDataFastInputStream(fs.open(tlogFile), 0);
@@ -554,7 +567,7 @@ public class HdfsTransactionLog extends TransactionLog {
@Override
public String toString() {
synchronized (HdfsTransactionLog.this) {
- return "LogReader{" + "file=" + tlogFile + ", position=" + fis.position() + ", end=" + fos.size() + "}";
+ return "LogReader{" + "file=" + tlogFile + ", position=" + fis.position() + ", end=" + getLogSize() + "}";
}
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/83cd8c12/solr/core/src/java/org/apache/solr/update/HdfsUpdateLog.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/HdfsUpdateLog.java b/solr/core/src/java/org/apache/solr/update/HdfsUpdateLog.java
index 4cbcf4f..764b099 100644
--- a/solr/core/src/java/org/apache/solr/update/HdfsUpdateLog.java
+++ b/solr/core/src/java/org/apache/solr/update/HdfsUpdateLog.java
@@ -219,8 +219,13 @@ public class HdfsUpdateLog extends UpdateLog {
// It's possible that at abnormal close both "tlog" and "prevTlog" were
// uncapped.
for (TransactionLog ll : logs) {
- newestLogsOnStartup.addFirst(ll);
- if (newestLogsOnStartup.size() >= 2) break;
+ if (newestLogsOnStartup.size() < 2) {
+ newestLogsOnStartup.addFirst(ll);
+ } else {
+ // We're never going to modify old non-recovery logs - no need to hold their output open
+ log.info("Closing output for old non-recovery log " + ll);
+ ll.closeOutput();
+ }
}
try {
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/83cd8c12/solr/core/src/java/org/apache/solr/update/TransactionLog.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/TransactionLog.java b/solr/core/src/java/org/apache/solr/update/TransactionLog.java
index f7213ed..997485a 100644
--- a/solr/core/src/java/org/apache/solr/update/TransactionLog.java
+++ b/solr/core/src/java/org/apache/solr/update/TransactionLog.java
@@ -520,6 +520,11 @@ public class TransactionLog implements Closeable {
}
}
+ /** Move to a read-only state, closing and releasing resources while keeping the log available for reads */
+ public void closeOutput() {
+
+ }
+
public void finish(UpdateLog.SyncLevel syncLevel) {
if (syncLevel == UpdateLog.SyncLevel.NONE) return;
try {
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/83cd8c12/solr/core/src/java/org/apache/solr/update/UpdateLog.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/UpdateLog.java b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
index 2f55e40..7a53383 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateLog.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
@@ -821,11 +821,13 @@ public class UpdateLog implements PluginInfoInitialized {
try {
if (ll.endsWithCommit()) {
+ ll.closeOutput();
ll.decref();
continue;
}
} catch (IOException e) {
log.error("Error inspecting tlog " + ll, e);
+ ll.closeOutput();
ll.decref();
continue;
}
[04/15] lucene-solr:branch_6_2: SOLR-9439: Shard split clean up logic
for older failed splits is faulty (cherry picked from commit 7d2f42e)
Posted by sh...@apache.org.
SOLR-9439: Shard split clean up logic for older failed splits is faulty
(cherry picked from commit 7d2f42e)
(cherry picked from commit 97b6216)
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/348b3e8f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/348b3e8f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/348b3e8f
Branch: refs/heads/branch_6_2
Commit: 348b3e8fe43afbde8a5fe3b5658aee60935276c4
Parents: 9920ac9
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Sat Aug 27 09:08:53 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Mon Sep 12 10:02:16 2016 +0530
----------------------------------------------------------------------
solr/CHANGES.txt | 2 +
.../org/apache/solr/cloud/SplitShardCmd.java | 62 ++++++++++++++++----
.../org/apache/solr/core/CoreContainer.java | 7 ++-
.../org/apache/solr/util/TestInjection.java | 20 +++++++
.../org/apache/solr/cloud/ShardSplitTest.java | 54 +++++++++++++++++
5 files changed, 130 insertions(+), 15 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/348b3e8f/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index f9c0373..d5024b8 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -30,6 +30,8 @@ Bug Fixes
* SOLR-9445: Admin requests are retried by CloudSolrClient and LBHttpSolrClient on failure. (shalin)
+* SOLR-9439: Shard split clean up logic for older failed splits is faulty. The delete shard API
+ has also been made more resilient against failures resulting from non-existent cores. (shalin)
================== 6.2.0 ==================
Versions of Major Components
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/348b3e8f/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java
index d7bbf66..4463285 100644
--- a/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java
@@ -46,6 +46,7 @@ import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.Utils;
import org.apache.solr.handler.component.ShardHandler;
+import org.apache.solr.util.TestInjection;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -79,6 +80,7 @@ public class SplitShardCmd implements Cmd {
log.info("Split shard invoked");
ZkStateReader zkStateReader = ocmh.zkStateReader;
+ zkStateReader.forceUpdateCollection(collectionName);
String splitKey = message.getStr("split.key");
ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
@@ -197,7 +199,10 @@ public class SplitShardCmd implements Cmd {
subSlices.add(subSlice);
String subShardName = collectionName + "_" + subSlice + "_replica1";
subShardNames.add(subShardName);
+ }
+ boolean oldShardsDeleted = false;
+ for (String subSlice : subSlices) {
Slice oSlice = collection.getSlice(subSlice);
if (oSlice != null) {
final Slice.State state = oSlice.getState();
@@ -206,24 +211,33 @@ public class SplitShardCmd implements Cmd {
"Sub-shard: " + subSlice + " exists in active state. Aborting split shard.");
} else if (state == Slice.State.CONSTRUCTION || state == Slice.State.RECOVERY) {
// delete the shards
- for (String sub : subSlices) {
- log.info("Sub-shard: {} already exists therefore requesting its deletion", sub);
- Map<String, Object> propMap = new HashMap<>();
- propMap.put(Overseer.QUEUE_OPERATION, "deleteshard");
- propMap.put(COLLECTION_PROP, collectionName);
- propMap.put(SHARD_ID_PROP, sub);
- ZkNodeProps m = new ZkNodeProps(propMap);
- try {
- ocmh.commandMap.get(DELETESHARD).call(clusterState, m, new NamedList());
- } catch (Exception e) {
- throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to delete already existing sub shard: " + sub,
- e);
- }
+ log.info("Sub-shard: {} already exists therefore requesting its deletion", subSlice);
+ Map<String, Object> propMap = new HashMap<>();
+ propMap.put(Overseer.QUEUE_OPERATION, "deleteshard");
+ propMap.put(COLLECTION_PROP, collectionName);
+ propMap.put(SHARD_ID_PROP, subSlice);
+ ZkNodeProps m = new ZkNodeProps(propMap);
+ try {
+ ocmh.commandMap.get(DELETESHARD).call(clusterState, m, new NamedList());
+ } catch (SolrException e) {
+ throwIfNotNonExistentCoreException(subSlice, e);
+ } catch (Exception e) {
+ throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to delete already existing sub shard: " + subSlice,
+ e);
}
+
+ oldShardsDeleted = true;
}
}
}
+ if (oldShardsDeleted) {
+ // refresh the locally cached cluster state
+ zkStateReader.forceUpdateCollection(collectionName);
+ clusterState = zkStateReader.getClusterState();
+ collection = clusterState.getCollection(collectionName);
+ }
+
final String asyncId = message.getStr(ASYNC);
Map<String, String> requestMap = new HashMap<>();
@@ -406,6 +420,8 @@ public class SplitShardCmd implements Cmd {
replicas.add(propMap);
}
+ assert TestInjection.injectSplitFailureBeforeReplicaCreation();
+
// we must set the slice state into recovery before actually creating the replica cores
// this ensures that the logic inside Overseer to update sub-shard state to 'active'
// always gets a chance to execute. See SOLR-7673
@@ -455,4 +471,24 @@ public class SplitShardCmd implements Cmd {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, null, e);
}
}
+
+ private void throwIfNotNonExistentCoreException(String subSlice, SolrException e) {
+ Throwable t = e;
+ String cause = null;
+ while (t != null) {
+ if (t instanceof SolrException) {
+ SolrException solrException = (SolrException) t;
+ cause = solrException.getMetadata("cause");
+ if (cause != null && !"NonExistentCore".equals(cause)) {
+ throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to delete already existing sub shard: " + subSlice,
+ e);
+ }
+ }
+ t = t.getCause();
+ }
+ if (!"NonExistentCore".equals(cause)) {
+ throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to delete already existing sub shard: " + subSlice,
+ e);
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/348b3e8f/solr/core/src/java/org/apache/solr/core/CoreContainer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/CoreContainer.java b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
index b4442df..7f80b13 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
@@ -996,8 +996,11 @@ public class CoreContainer {
}
CoreDescriptor cd = solrCores.getCoreDescriptor(name);
- if (cd == null)
- throw new SolrException(ErrorCode.BAD_REQUEST, "Cannot unload non-existent core [" + name + "]");
+ if (cd == null) {
+ SolrException solrException = new SolrException(ErrorCode.BAD_REQUEST, "Cannot unload non-existent core [" + name + "]");
+ solrException.setMetadata("cause", "NonExistentCore");
+ throw solrException;
+ }
boolean close = solrCores.isLoadedNotPendingClose(name);
SolrCore core = solrCores.remove(name);
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/348b3e8f/solr/core/src/java/org/apache/solr/util/TestInjection.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/util/TestInjection.java b/solr/core/src/java/org/apache/solr/util/TestInjection.java
index 03de74d..efd80bf 100644
--- a/solr/core/src/java/org/apache/solr/util/TestInjection.java
+++ b/solr/core/src/java/org/apache/solr/util/TestInjection.java
@@ -113,6 +113,8 @@ public class TestInjection {
public static String randomDelayInCoreCreation = null;
public static int randomDelayMaxInCoreCreationInSec = 10;
+
+ public static String splitFailureBeforeReplicaCreation = null;
private static Set<Timer> timers = Collections.synchronizedSet(new HashSet<Timer>());
@@ -124,6 +126,7 @@ public class TestInjection {
updateLogReplayRandomPause = null;
updateRandomPause = null;
randomDelayInCoreCreation = null;
+ splitFailureBeforeReplicaCreation = null;
for (Timer timer : timers) {
timer.cancel();
@@ -285,6 +288,23 @@ public class TestInjection {
return true;
}
+
+ public static boolean injectSplitFailureBeforeReplicaCreation() {
+ if (splitFailureBeforeReplicaCreation != null) {
+ Random rand = random();
+ if (null == rand) return true;
+
+ Pair<Boolean,Integer> pair = parseValue(splitFailureBeforeReplicaCreation);
+ boolean enabled = pair.first();
+ int chanceIn100 = pair.second();
+ if (enabled && rand.nextInt(100) >= (100 - chanceIn100)) {
+ log.info("Injecting failure in creating replica for sub-shard");
+ throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to create replica");
+ }
+ }
+
+ return true;
+ }
private static Pair<Boolean,Integer> parseValue(String raw) {
Matcher m = ENABLED_PERCENT.matcher(raw);
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/348b3e8f/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
index 21dc257..389660f 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
@@ -41,6 +41,7 @@ import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.CompositeIdRouter;
+import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.DocRouter;
import org.apache.solr.common.cloud.HashBasedRouter;
import org.apache.solr.common.cloud.Replica;
@@ -50,6 +51,7 @@ import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.Utils;
+import org.apache.solr.util.TestInjection;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -90,6 +92,58 @@ public class ShardSplitTest extends BasicDistributedZkTest {
//waitForThingsToLevelOut(15);
}
+ /**
+ * Used to test that we can split a shard when a previous split event
+ * left sub-shards in construction or recovery state.
+ *
+ * See SOLR-9439
+ */
+ @Test
+ public void testSplitAfterFailedSplit() throws Exception {
+ waitForThingsToLevelOut(15);
+
+ TestInjection.splitFailureBeforeReplicaCreation = "true:100"; // we definitely want split to fail
+ try {
+ try {
+ CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+ splitShard.setShardName(SHARD1);
+ splitShard.process(cloudClient);
+ fail("Shard split was not supposed to succeed after failure injection!");
+ } catch (Exception e) {
+ // expected
+ }
+
+ // assert that sub-shards cores exist and sub-shard is in construction state
+ ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+ zkStateReader.forceUpdateCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+ ClusterState state = zkStateReader.getClusterState();
+ DocCollection collection = state.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+
+ Slice shard10 = collection.getSlice(SHARD1_0);
+ assertEquals(Slice.State.CONSTRUCTION, shard10.getState());
+ assertEquals(1, shard10.getReplicas().size());
+
+ Slice shard11 = collection.getSlice(SHARD1_1);
+ assertEquals(Slice.State.CONSTRUCTION, shard11.getState());
+ assertEquals(1, shard11.getReplicas().size());
+
+ // let's retry the split
+ TestInjection.reset(); // let the split succeed
+ try {
+ CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+ splitShard.setShardName(SHARD1);
+ splitShard.process(cloudClient);
+ // Yay!
+ } catch (Exception e) {
+ log.error("Shard split failed", e);
+ fail("Shard split did not succeed after a previous failed split attempt left sub-shards in construction state");
+ }
+
+ } finally {
+ TestInjection.reset();
+ }
+ }
+
@Test
public void testSplitShardWithRule() throws Exception {
waitForThingsToLevelOut(15);