You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by ma...@apache.org on 2013/07/01 23:31:21 UTC
svn commit: r1498702 [2/2] - in /lucene/dev/branches/branch_4x: ./ lucene/
lucene/tools/ lucene/tools/junit4/ solr/ solr/core/
solr/core/src/java/org/apache/solr/
solr/core/src/java/org/apache/solr/client/solrj/embedded/
solr/core/src/java/org/apache/s...
Modified: lucene/dev/branches/branch_4x/solr/core/src/java/org/apache/solr/update/TransactionLog.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/core/src/java/org/apache/solr/update/TransactionLog.java?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/core/src/java/org/apache/solr/update/TransactionLog.java (original)
+++ lucene/dev/branches/branch_4x/solr/core/src/java/org/apache/solr/update/TransactionLog.java Mon Jul 1 21:31:19 2013
@@ -74,7 +74,7 @@ public class TransactionLog {
FastOutputStream fos; // all accesses to this stream should be synchronized on "this" (The TransactionLog)
int numRecords;
- volatile boolean deleteOnClose = true; // we can delete old tlogs since they are currently only used for real-time-get (and in the future, recovery)
+ protected volatile boolean deleteOnClose = true; // we can delete old tlogs since they are currently only used for real-time-get (and in the future, recovery)
AtomicInteger refcount = new AtomicInteger(1);
Map<String,Integer> globalStringMap = new HashMap<String, Integer>();
@@ -97,7 +97,7 @@ public class TransactionLog {
};
public class LogCodec extends JavaBinCodec {
- public LogCodec() {
+ public LogCodec(JavaBinCodec.ObjectResolver resolver) {
super(resolver);
}
@@ -190,6 +190,9 @@ public class TransactionLog {
}
}
+ // for subclasses
+ protected TransactionLog() {}
+
/** Returns the number of records in the log (currently includes the header and an optional commit).
* Note: currently returns 0 for reopened existing log files.
*/
@@ -244,7 +247,7 @@ public class TransactionLog {
public long writeData(Object o) {
- LogCodec codec = new LogCodec();
+ LogCodec codec = new LogCodec(resolver);
try {
long pos = fos.size(); // if we had flushed, this should be equal to channel.position()
codec.init(fos);
@@ -259,7 +262,7 @@ public class TransactionLog {
private void readHeader(FastInputStream fis) throws IOException {
// read existing header
fis = fis != null ? fis : new ChannelFastInputStream(channel, 0);
- LogCodec codec = new LogCodec();
+ LogCodec codec = new LogCodec(resolver);
Map header = (Map)codec.unmarshal(fis);
fis.readInt(); // skip size
@@ -275,7 +278,7 @@ public class TransactionLog {
}
}
- private void addGlobalStrings(Collection<String> strings) {
+ protected void addGlobalStrings(Collection<String> strings) {
if (strings == null) return;
int origSize = globalStringMap.size();
for (String s : strings) {
@@ -296,7 +299,7 @@ public class TransactionLog {
}
}
- private void writeLogHeader(LogCodec codec) throws IOException {
+ protected void writeLogHeader(LogCodec codec) throws IOException {
long pos = fos.size();
assert pos == 0;
@@ -308,7 +311,7 @@ public class TransactionLog {
endRecord(pos);
}
- private void endRecord(long startRecordPosition) throws IOException {
+ protected void endRecord(long startRecordPosition) throws IOException {
fos.writeInt((int)(fos.size() - startRecordPosition));
numRecords++;
}
@@ -332,7 +335,7 @@ public class TransactionLog {
int lastAddSize;
public long write(AddUpdateCommand cmd, int flags) {
- LogCodec codec = new LogCodec();
+ LogCodec codec = new LogCodec(resolver);
SolrInputDocument sdoc = cmd.getSolrInputDocument();
try {
@@ -374,7 +377,7 @@ public class TransactionLog {
}
public long writeDelete(DeleteUpdateCommand cmd, int flags) {
- LogCodec codec = new LogCodec();
+ LogCodec codec = new LogCodec(resolver);
try {
checkWriteHeader(codec, null);
@@ -404,7 +407,7 @@ public class TransactionLog {
}
public long writeDeleteByQuery(DeleteUpdateCommand cmd, int flags) {
- LogCodec codec = new LogCodec();
+ LogCodec codec = new LogCodec(resolver);
try {
checkWriteHeader(codec, null);
@@ -430,7 +433,7 @@ public class TransactionLog {
public long writeCommit(CommitUpdateCommand cmd, int flags) {
- LogCodec codec = new LogCodec();
+ LogCodec codec = new LogCodec(resolver);
synchronized (this) {
try {
long pos = fos.size(); // if we had flushed, this should be equal to channel.position()
@@ -478,7 +481,7 @@ public class TransactionLog {
}
ChannelFastInputStream fis = new ChannelFastInputStream(channel, pos);
- LogCodec codec = new LogCodec();
+ LogCodec codec = new LogCodec(resolver);
return codec.readVal(fis);
} catch (IOException e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
@@ -528,7 +531,7 @@ public class TransactionLog {
}
}
- private void close() {
+ protected void close() {
try {
if (debug) {
log.debug("Closing tlog" + this);
@@ -569,19 +572,22 @@ public class TransactionLog {
/** Returns a single threaded reverse reader */
public ReverseReader getReverseReader() throws IOException {
- return new ReverseReader();
+ return new FSReverseReader();
}
public class LogReader {
- ChannelFastInputStream fis;
- private LogCodec codec = new LogCodec();
+ private ChannelFastInputStream fis;
+ private LogCodec codec = new LogCodec(resolver);
public LogReader(long startingPos) {
incref();
fis = new ChannelFastInputStream(channel, startingPos);
}
+ // for classes that extend
+ protected LogReader() {}
+
/** Returns the next object from the log, or null if none available.
*
* @return The log record, or null if EOF
@@ -637,9 +643,30 @@ public class TransactionLog {
}
- public class ReverseReader {
+ public abstract class ReverseReader {
+
+
+
+ /** Returns the next object from the log, or null if none available.
+ *
+ * @return The log record, or null if EOF
+ * @throws IOException If there is a low-level I/O error.
+ */
+ public abstract Object next() throws IOException;
+
+ /* returns the position in the log file of the last record returned by next() */
+ public abstract long position();
+ public abstract void close();
+
+ @Override
+ public abstract String toString() ;
+
+
+ }
+
+ public class FSReverseReader extends ReverseReader {
ChannelFastInputStream fis;
- private LogCodec codec = new LogCodec() {
+ private LogCodec codec = new LogCodec(resolver) {
@Override
public SolrInputDocument readSolrInputDocument(DataInputInputStream dis) {
// Given that the SolrInputDocument is last in an add record, it's OK to just skip
@@ -651,7 +678,7 @@ public class TransactionLog {
int nextLength; // length of the next record (the next one closer to the start of the log file)
long prevPos; // where we started reading from last time (so prevPos - nextLength == start of next record)
- public ReverseReader() throws IOException {
+ public FSReverseReader() throws IOException {
incref();
long sz;
Modified: lucene/dev/branches/branch_4x/solr/core/src/java/org/apache/solr/update/UpdateHandler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/core/src/java/org/apache/solr/update/UpdateHandler.java?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/core/src/java/org/apache/solr/update/UpdateHandler.java (original)
+++ lucene/dev/branches/branch_4x/solr/core/src/java/org/apache/solr/update/UpdateHandler.java Mon Jul 1 21:31:19 2013
@@ -18,10 +18,11 @@
package org.apache.solr.update;
-import java.io.File;
import java.io.IOException;
import java.util.Vector;
+import org.apache.solr.core.DirectoryFactory;
+import org.apache.solr.core.HdfsDirectoryFactory;
import org.apache.solr.core.PluginInfo;
import org.apache.solr.core.SolrCore;
import org.apache.solr.core.SolrEventListener;
@@ -71,24 +72,6 @@ public abstract class UpdateHandler impl
}
}
- // not thread safe - for startup
- private void clearLog(PluginInfo ulogPluginInfo) {
- if (ulogPluginInfo == null) return;
- File tlogDir = UpdateLog.getTlogDir(core, ulogPluginInfo);
- log.info("Clearing tlog files, tlogDir=" + tlogDir);
- if (tlogDir.exists()) {
- String[] files = UpdateLog.getLogList(tlogDir);
- for (String file : files) {
- File f = new File(tlogDir, file);
- boolean s = f.delete();
- if (!s) {
- log.error("Could not remove tlog file:" + f.getAbsolutePath());
- //throw new SolrException(ErrorCode.SERVER_ERROR, "Could not remove tlog file:" + f.getAbsolutePath());
- }
- }
- }
- }
-
protected void callPostCommitCallbacks() {
for (SolrEventListener listener : commitCallbacks) {
listener.postCommit();
@@ -117,19 +100,43 @@ public abstract class UpdateHandler impl
idFieldType = idField!=null ? idField.getType() : null;
parseEventListeners();
PluginInfo ulogPluginInfo = core.getSolrConfig().getPluginInfo(UpdateLog.class.getName());
- if (!core.isReloaded() && !core.getDirectoryFactory().isPersistent()) {
- clearLog(ulogPluginInfo);
- }
+
if (updateLog == null && ulogPluginInfo != null && ulogPluginInfo.isEnabled()) {
- ulog = new UpdateLog();
+ String dataDir = (String)ulogPluginInfo.initArgs.get("dir");
+
+ String ulogDir = core.getCoreDescriptor().getUlogDir();
+ if (ulogDir != null) {
+ dataDir = ulogDir;
+ }
+ if (dataDir == null || dataDir.length()==0) {
+ dataDir = core.getDataDir();
+ }
+
+ if (dataDir != null && dataDir.startsWith("hdfs:/")) {
+ DirectoryFactory dirFactory = core.getDirectoryFactory();
+ if (dirFactory instanceof HdfsDirectoryFactory) {
+ ulog = new HdfsUpdateLog(((HdfsDirectoryFactory)dirFactory).getConfDir());
+ } else {
+ ulog = new HdfsUpdateLog();
+ }
+
+ } else {
+ ulog = new UpdateLog();
+ }
+
+ if (!core.isReloaded() && !core.getDirectoryFactory().isPersistent()) {
+ ulog.clearLog(core, ulogPluginInfo);
+ }
+
ulog.init(ulogPluginInfo);
- // ulog = core.createInitInstance(ulogPluginInfo, UpdateLog.class, "update log", "solr.NullUpdateLog");
+
ulog.init(this, core);
} else {
ulog = updateLog;
}
// ulog.init() when reusing an existing log is deferred (currently at the end of the DUH2 constructor
+
}
/**
Modified: lucene/dev/branches/branch_4x/solr/core/src/java/org/apache/solr/update/UpdateLog.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/core/src/java/org/apache/solr/update/UpdateLog.java?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/core/src/java/org/apache/solr/update/UpdateLog.java (original)
+++ lucene/dev/branches/branch_4x/solr/core/src/java/org/apache/solr/update/UpdateLog.java Mon Jul 1 21:31:19 2013
@@ -17,15 +17,38 @@
package org.apache.solr.update;
+import static org.apache.solr.update.processor.DistributedUpdateProcessor.DistribPhase.FROMLEADER;
+import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Deque;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.Future;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.fs.FileSystem;
import org.apache.lucene.util.BytesRef;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.params.UpdateParams;
import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.PluginInfo;
import org.apache.solr.core.SolrCore;
import org.apache.solr.request.LocalSolrQueryRequest;
@@ -34,9 +57,6 @@ import org.apache.solr.request.SolrReque
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.update.processor.DistributedUpdateProcessor;
-import org.apache.solr.update.processor.DistributedUpdateProcessorFactory;
-import org.apache.solr.update.processor.DistributingUpdateProcessorFactory;
-import org.apache.solr.update.processor.RunUpdateProcessorFactory;
import org.apache.solr.update.processor.UpdateRequestProcessor;
import org.apache.solr.update.processor.UpdateRequestProcessorChain;
import org.apache.solr.util.DefaultSolrThreadFactory;
@@ -45,15 +65,6 @@ import org.apache.solr.util.plugin.Plugi
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.io.File;
-import java.io.FilenameFilter;
-import java.io.IOException;
-import java.util.*;
-import java.util.concurrent.*;
-
-import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
-import static org.apache.solr.update.processor.DistributedUpdateProcessor.DistribPhase.FROMLEADER;
-
/** @lucene.experimental */
public class UpdateLog implements PluginInfoInitialized {
@@ -64,6 +75,10 @@ public class UpdateLog implements Plugin
public boolean debug = log.isDebugEnabled();
public boolean trace = log.isTraceEnabled();
+ // TODO: hack
+ public FileSystem getFs() {
+ return null;
+ }
public enum SyncLevel { NONE, FLUSH, FSYNC;
public static SyncLevel getSyncLevel(String level){
@@ -108,27 +123,27 @@ public class UpdateLog implements Plugin
}
long id = -1;
- private State state = State.ACTIVE;
- private int operationFlags; // flags to write in the transaction log with operations (i.e. FLAG_GAP)
+ protected State state = State.ACTIVE;
+ protected int operationFlags; // flags to write in the transaction log with operations (i.e. FLAG_GAP)
- private TransactionLog tlog;
- private TransactionLog prevTlog;
- private Deque<TransactionLog> logs = new LinkedList<TransactionLog>(); // list of recent logs, newest first
- private LinkedList<TransactionLog> newestLogsOnStartup = new LinkedList<TransactionLog>();
- private int numOldRecords; // number of records in the recent logs
-
- private Map<BytesRef,LogPtr> map = new HashMap<BytesRef, LogPtr>();
- private Map<BytesRef,LogPtr> prevMap; // used while committing/reopening is happening
- private Map<BytesRef,LogPtr> prevMap2; // used while committing/reopening is happening
- private TransactionLog prevMapLog; // the transaction log used to look up entries found in prevMap
- private TransactionLog prevMapLog2; // the transaction log used to look up entries found in prevMap
+ protected TransactionLog tlog;
+ protected TransactionLog prevTlog;
+ protected Deque<TransactionLog> logs = new LinkedList<TransactionLog>(); // list of recent logs, newest first
+ protected LinkedList<TransactionLog> newestLogsOnStartup = new LinkedList<TransactionLog>();
+ protected int numOldRecords; // number of records in the recent logs
+
+ protected Map<BytesRef,LogPtr> map = new HashMap<BytesRef, LogPtr>();
+ protected Map<BytesRef,LogPtr> prevMap; // used while committing/reopening is happening
+ protected Map<BytesRef,LogPtr> prevMap2; // used while committing/reopening is happening
+ protected TransactionLog prevMapLog; // the transaction log used to look up entries found in prevMap
+ protected TransactionLog prevMapLog2; // the transaction log used to look up entries found in prevMap
- private final int numDeletesToKeep = 1000;
- private final int numDeletesByQueryToKeep = 100;
+ protected final int numDeletesToKeep = 1000;
+ protected final int numDeletesByQueryToKeep = 100;
public final int numRecordsToKeep = 100;
// keep track of deletes only... this is not updated on an add
- private LinkedHashMap<BytesRef, LogPtr> oldDeletes = new LinkedHashMap<BytesRef, LogPtr>(numDeletesToKeep) {
+ protected LinkedHashMap<BytesRef, LogPtr> oldDeletes = new LinkedHashMap<BytesRef, LogPtr>(numDeletesToKeep) {
@Override
protected boolean removeEldestEntry(Map.Entry eldest) {
return size() > numDeletesToKeep;
@@ -145,21 +160,21 @@ public class UpdateLog implements Plugin
}
}
- private LinkedList<DBQ> deleteByQueries = new LinkedList<DBQ>();
+ protected LinkedList<DBQ> deleteByQueries = new LinkedList<DBQ>();
- private String[] tlogFiles;
- private File tlogDir;
- private Collection<String> globalStrings;
+ protected String[] tlogFiles;
+ protected File tlogDir;
+ protected Collection<String> globalStrings;
- private String dataDir;
- private String lastDataDir;
+ protected String dataDir;
+ protected String lastDataDir;
- private VersionInfo versionInfo;
+ protected VersionInfo versionInfo;
- private SyncLevel defaultSyncLevel = SyncLevel.FLUSH;
+ protected SyncLevel defaultSyncLevel = SyncLevel.FLUSH;
volatile UpdateHandler uhandler; // a core reload can change this reference!
- private volatile boolean cancelApplyBufferUpdate;
+ protected volatile boolean cancelApplyBufferUpdate;
List<Long> startingVersions;
int startingOperation; // last operation in the logs on startup
@@ -199,7 +214,7 @@ public class UpdateLog implements Plugin
if (ulogDir != null) {
dataDir = ulogDir;
}
-
+
if (dataDir == null || dataDir.length()==0) {
dataDir = core.getDataDir();
}
@@ -280,8 +295,8 @@ public class UpdateLog implements Plugin
}
- public File getLogDir() {
- return tlogDir;
+ public String getLogDir() {
+ return tlogDir.getAbsolutePath();
}
public List<Long> getStartingVersions() {
@@ -295,7 +310,7 @@ public class UpdateLog implements Plugin
/* Takes over ownership of the log, keeping it until no longer needed
and then decrementing its reference and dropping it.
*/
- private void addOldLog(TransactionLog oldLog, boolean removeOld) {
+ protected void addOldLog(TransactionLog oldLog, boolean removeOld) {
if (oldLog == null) return;
numOldRecords += oldLog.numRecords();
@@ -326,7 +341,7 @@ public class UpdateLog implements Plugin
}
- public static String[] getLogList(File directory) {
+ public String[] getLogList(File directory) {
final String prefix = TLOG_NAME+'.';
String[] names = directory.list(new FilenameFilter() {
@Override
@@ -334,6 +349,9 @@ public class UpdateLog implements Plugin
return name.startsWith(prefix);
}
});
+ if (names == null) {
+ throw new RuntimeException(new FileNotFoundException(directory.getAbsolutePath()));
+ }
Arrays.sort(names);
return names;
}
@@ -544,7 +562,7 @@ public class UpdateLog implements Plugin
}
}
- private void newMap() {
+ protected void newMap() {
prevMap2 = prevMap;
prevMapLog2 = prevMapLog;
@@ -797,7 +815,7 @@ public class UpdateLog implements Plugin
}
- private void ensureLog() {
+ protected void ensureLog() {
if (tlog == null) {
String newLogName = String.format(Locale.ROOT, LOG_FILENAME_PATTERN, TLOG_NAME, id);
tlog = new TransactionLog(new File(tlogDir, newLogName), globalStrings);
@@ -1145,7 +1163,7 @@ public class UpdateLog implements Plugin
- private RecoveryInfo recoveryInfo;
+ protected RecoveryInfo recoveryInfo;
class LogReplayer implements Runnable {
private Logger loglog = log; // set to something different?
@@ -1422,7 +1440,7 @@ public class UpdateLog implements Plugin
}
}
- public static File getTlogDir(SolrCore core, PluginInfo info) {
+ protected String getTlogDir(SolrCore core, PluginInfo info) {
String dataDir = (String) info.initArgs.get("dir");
String ulogDir = core.getCoreDescriptor().getUlogDir();
@@ -1433,11 +1451,30 @@ public class UpdateLog implements Plugin
if (dataDir == null || dataDir.length() == 0) {
dataDir = core.getDataDir();
}
-
- return new File(dataDir, TLOG_NAME);
+
+ return dataDir + "/" + TLOG_NAME;
+ }
+
+ /**
+ * Clears the logs on the file system. Only call before init.
+ *
+ * @param core the SolrCore
+ * @param ulogPluginInfo the init info for the UpdateHandler
+ */
+ public void clearLog(SolrCore core, PluginInfo ulogPluginInfo) {
+ if (ulogPluginInfo == null) return;
+ File tlogDir = new File(getTlogDir(core, ulogPluginInfo));
+ if (tlogDir.exists()) {
+ String[] files = getLogList(tlogDir);
+ for (String file : files) {
+ File f = new File(tlogDir, file);
+ boolean s = f.delete();
+ if (!s) {
+ log.error("Could not remove tlog file:" + f);
+ }
+ }
+ }
}
}
-
-
Modified: lucene/dev/branches/branch_4x/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java (original)
+++ lucene/dev/branches/branch_4x/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java Mon Jul 1 21:31:19 2013
@@ -222,9 +222,9 @@ public class DistributedUpdateProcessor
// Replica leader = slice.getLeader();
Replica leaderReplica = zkController.getZkStateReader().getLeaderRetry(
collection, shardId);
- ZkCoreNodeProps leaderProps = new ZkCoreNodeProps(leaderReplica);
- String coreNodeName = zkController.getCoreNodeName(req.getCore().getCoreDescriptor());
- isLeader = coreNodeName.equals(leaderReplica.getName());
+ isLeader = leaderReplica.getName().equals(
+ req.getCore().getCoreDescriptor().getCloudDescriptor()
+ .getCoreNodeName());
DistribPhase phase =
DistribPhase.parseParam(req.getParams().get(DISTRIB_UPDATE_PARAM));
@@ -240,7 +240,7 @@ public class DistributedUpdateProcessor
// so get the replicas...
forwardToLeader = false;
List<ZkCoreNodeProps> replicaProps = zkController.getZkStateReader()
- .getReplicaProps(collection, shardId, coreNodeName,
+ .getReplicaProps(collection, shardId, leaderReplica.getName(),
coreName, null, ZkStateReader.DOWN);
if (replicaProps != null) {
@@ -272,7 +272,7 @@ public class DistributedUpdateProcessor
} else {
// I need to forward onto the leader...
nodes = new ArrayList<Node>(1);
- nodes.add(new RetryNode(leaderProps, zkController.getZkStateReader(), collection, shardId));
+ nodes.add(new RetryNode(new ZkCoreNodeProps(leaderReplica), zkController.getZkStateReader(), collection, shardId));
forwardToLeader = true;
}
@@ -343,7 +343,9 @@ public class DistributedUpdateProcessor
if (isLeader && !localIsLeader) {
log.error("ClusterState says we are the leader, but locally we don't think so");
- throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "ClusterState says we are the leader, but locally we don't think so");
+ throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
+ "ClusterState says we are the leader (" + zkController.getBaseUrl()
+ + "/" + req.getCore().getName() + "), but locally we don't think so. Request came from " + from);
}
}
@@ -356,16 +358,15 @@ public class DistributedUpdateProcessor
try {
Replica leaderReplica = zkController.getZkStateReader().getLeaderRetry(
collection, shardId);
- String leaderCoreNodeName = leaderReplica.getName();
-
- String coreNodeName = zkController.getCoreNodeName(req.getCore().getCoreDescriptor());
- isLeader = coreNodeName.equals(leaderCoreNodeName);
+ isLeader = leaderReplica.getName().equals(
+ req.getCore().getCoreDescriptor().getCloudDescriptor()
+ .getCoreNodeName());
// TODO: what if we are no longer the leader?
forwardToLeader = false;
List<ZkCoreNodeProps> replicaProps = zkController.getZkStateReader()
- .getReplicaProps(collection, shardId, coreNodeName,
+ .getReplicaProps(collection, shardId, leaderReplica.getName(),
req.getCore().getName());
if (replicaProps != null) {
nodes = new ArrayList<Node>(replicaProps.size());
@@ -899,7 +900,7 @@ public class DistributedUpdateProcessor
// Am I the leader for this slice?
ZkCoreNodeProps coreLeaderProps = new ZkCoreNodeProps(leader);
String leaderCoreNodeName = leader.getName();
- String coreNodeName = zkController.getCoreNodeName(req.getCore().getCoreDescriptor());
+ String coreNodeName = req.getCore().getCoreDescriptor().getCloudDescriptor().getCoreNodeName();
isLeader = coreNodeName.equals(leaderCoreNodeName);
if (isLeader) {
Modified: lucene/dev/branches/branch_4x/solr/core/src/test-files/log4j.properties
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/core/src/test-files/log4j.properties?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/core/src/test-files/log4j.properties (original)
+++ lucene/dev/branches/branch_4x/solr/core/src/test-files/log4j.properties Mon Jul 1 21:31:19 2013
@@ -7,3 +7,4 @@ log4j.appender.CONSOLE.layout=org.apache
log4j.appender.CONSOLE.layout.ConversionPattern=%-5p - %d{yyyy-MM-dd HH:mm:ss.SSS}; %C; %m\n
log4j.logger.org.apache.zookeeper=WARN
+log4j.logger.org.apache.hadoop=WARN
Modified: lucene/dev/branches/branch_4x/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml (original)
+++ lucene/dev/branches/branch_4x/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml Mon Jul 1 21:31:19 2013
@@ -24,7 +24,10 @@
<directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}">
<!-- used to keep RAM reqs down for HdfsDirectoryFactory -->
+ <bool name="solr.hdfs.blockcache.enabled">${solr.hdfs.blockcache.enabled:true}</bool>
<int name="solr.hdfs.blockcache.blocksperbank">${solr.hdfs.blockcache.blocksperbank:1024}</int>
+ <str name="solr.hdfs.home">${solr.hdfs.home:}</str>
+ <str name="solr.hdfs.confdir">${solr.hdfs.confdir:}</str>
</directoryFactory>
<dataDir>${solr.data.dir:}</dataDir>
Modified: lucene/dev/branches/branch_4x/solr/core/src/test-files/solr/solr-no-core.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/core/src/test-files/solr/solr-no-core.xml?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/core/src/test-files/solr/solr-no-core.xml (original)
+++ lucene/dev/branches/branch_4x/solr/core/src/test-files/solr/solr-no-core.xml Mon Jul 1 21:31:19 2013
@@ -25,6 +25,7 @@
<str name="hostContext">${hostContext:solr}</str>
<int name="hostPort">${hostPort:8983}</int>
<int name="zkClientTimeout">${solr.zkclienttimeout:30000}</int>
+ <bool name="genericCoreNodeNames">${genericCoreNodeNames:true}</bool>
<int name="distribUpdateConnTimeout">${distribUpdateConnTimeout:15000}</int>
<int name="distribUpdateSoTimeout">${distribUpdateSoTimeout:120000}</int>
</solrcloud>
@@ -35,4 +36,4 @@
<int name="connTimeout">${connTimeout:15000}</int>
</shardHandlerFactory>
-</solr>
\ No newline at end of file
+</solr>
Modified: lucene/dev/branches/branch_4x/solr/core/src/test-files/solr/solr.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/core/src/test-files/solr/solr.xml?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/core/src/test-files/solr/solr.xml (original)
+++ lucene/dev/branches/branch_4x/solr/core/src/test-files/solr/solr.xml Mon Jul 1 21:31:19 2013
@@ -30,6 +30,7 @@
-->
<cores adminPath="/admin/cores" defaultCoreName="collection1" host="127.0.0.1" hostPort="${hostPort:8983}"
hostContext="${hostContext:solr}" zkClientTimeout="${solr.zkclienttimeout:30000}" numShards="${numShards:3}" shareSchema="${shareSchema:false}"
+ genericCoreNodeNames="${genericCoreNodeNames:true}"
distribUpdateConnTimeout="${distribUpdateConnTimeout:15000}" distribUpdateSoTimeout="${distribUpdateSoTimeout:120000}">
<core name="collection1" instanceDir="collection1" shard="${shard:}" collection="${collection:collection1}" config="${solrconfig:solrconfig.xml}" schema="${schema:schema.xml}"
coreNodeName="${coreNodeName:}"/>
Modified: lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java (original)
+++ lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java Mon Jul 1 21:31:19 2013
@@ -46,9 +46,7 @@ import org.apache.solr.common.cloud.ZkSt
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.SolrCore;
import org.apache.solr.handler.ReplicationHandler;
-import org.apache.solr.servlet.SolrDispatchFilter;
import org.apache.solr.util.AbstractSolrTestCase;
import org.junit.BeforeClass;
@@ -176,8 +174,8 @@ public class BasicDistributedZk2Test ext
createCmd.setCoreName(ONE_NODE_COLLECTION + "core");
createCmd.setCollection(ONE_NODE_COLLECTION);
createCmd.setNumShards(1);
- createCmd.setDataDir(dataDir.getAbsolutePath() + File.separator
- + ONE_NODE_COLLECTION);
+ createCmd.setDataDir(getDataDir(dataDir.getAbsolutePath() + File.separator
+ + ONE_NODE_COLLECTION));
server.request(createCmd);
} catch (Exception e) {
e.printStackTrace();
@@ -331,7 +329,7 @@ public class BasicDistributedZk2Test ext
ureq.process(cloudClient);
} catch(SolrServerException e){
// try again
- Thread.sleep(500);
+ Thread.sleep(3500);
ureq.process(cloudClient);
}
@@ -415,14 +413,16 @@ public class BasicDistributedZk2Test ext
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("qt", "/replication");
params.set("command", "backup");
+ File location = new File(TEMP_DIR, BasicDistributedZk2Test.class.getName() + "-backupdir-" + System.currentTimeMillis());
+ params.set("location", location.getAbsolutePath());
QueryRequest request = new QueryRequest(params);
NamedList<Object> results = client.request(request );
- checkForBackupSuccess(client);
+ checkForBackupSuccess(client, location);
}
- private void checkForBackupSuccess(final HttpSolrServer client)
+ private void checkForBackupSuccess(final HttpSolrServer client, File location)
throws InterruptedException, IOException {
class CheckStatus extends Thread {
volatile String fail = null;
@@ -461,16 +461,6 @@ public class BasicDistributedZk2Test ext
};
}
- ;
- SolrCore core = ((SolrDispatchFilter) shardToJetty.get(SHARD2).get(0).jetty
- .getDispatchFilter().getFilter()).getCores().getCore("collection1");
- String ddir;
- try {
- ddir = core.getDataDir();
- } finally {
- core.close();
- }
- File dataDir = new File(ddir);
int waitCnt = 0;
CheckStatus checkStatus = new CheckStatus();
@@ -482,14 +472,14 @@ public class BasicDistributedZk2Test ext
if (checkStatus.success) {
break;
}
- Thread.sleep(200);
- if (waitCnt == 20) {
+ Thread.sleep(500);
+ if (waitCnt == 90) {
fail("Backup success not detected:" + checkStatus.response);
}
waitCnt++;
}
- File[] files = dataDir.listFiles(new FilenameFilter() {
+ File[] files = location.listFiles(new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
Modified: lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java (original)
+++ lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java Mon Jul 1 21:31:19 2013
@@ -538,11 +538,13 @@ public class BasicDistributedZkTest exte
Create createCmd = new Create();
createCmd.setCoreName(collection + freezeI);
createCmd.setCollection(collection);
- String core3dataDir = dataDir.getAbsolutePath() + File.separator
- + System.currentTimeMillis() + collection + "_3n" + freezeI;
- createCmd.setDataDir(core3dataDir);
+
createCmd.setNumShards(numShards);
try {
+ String core3dataDir = dataDir.getAbsolutePath() + File.separator
+ + System.currentTimeMillis() + collection + "_3n" + freezeI;
+ createCmd.setDataDir(getDataDir(core3dataDir));
+
server.request(createCmd);
} catch (SolrServerException e) {
throw new RuntimeException(e);
@@ -574,11 +576,13 @@ public class BasicDistributedZkTest exte
params.set(OverseerCollectionProcessor.MAX_SHARDS_PER_NODE, maxShardsPerNode);
if (createNodeSetStr != null) params.set(OverseerCollectionProcessor.CREATE_NODE_SET, createNodeSetStr);
- int clientIndex = random().nextInt(2);
+ int clientIndex = clients.size() > 1 ? random().nextInt(2) : 0;
List<Integer> list = new ArrayList<Integer>();
list.add(numShards);
list.add(numReplicas);
- collectionInfos.put(collectionName, list);
+ if (collectionInfos != null) {
+ collectionInfos.put(collectionName, list);
+ }
params.set("name", collectionName);
SolrRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
@@ -932,8 +936,8 @@ public class BasicDistributedZkTest exte
if (shardId == null) {
createCmd.setNumShards(2);
}
- createCmd.setDataDir(dataDir.getAbsolutePath() + File.separator
- + collection + num);
+ createCmd.setDataDir(getDataDir(dataDir.getAbsolutePath() + File.separator
+ + collection + num));
if (shardId != null) {
createCmd.setShardId(shardId);
}
@@ -1056,8 +1060,9 @@ public class BasicDistributedZkTest exte
server.setSoTimeout(30000);
Create createCmd = new Create();
createCmd.setCoreName(collection);
- createCmd.setDataDir(dataDir.getAbsolutePath() + File.separator
- + collection + frozeUnique);
+ createCmd.setDataDir(getDataDir(dataDir.getAbsolutePath() + File.separator
+ + collection + frozeUnique));
+
server.request(createCmd);
} catch (Exception e) {
Modified: lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java (original)
+++ lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java Mon Jul 1 21:31:19 2013
@@ -71,11 +71,13 @@ public class ClusterStateUpdateTest exte
@BeforeClass
public static void beforeClass() {
System.setProperty("solrcloud.skip.autorecovery", "true");
+ System.setProperty("genericCoreNodeNames", "false");
}
@AfterClass
public static void afterClass() throws InterruptedException {
System.clearProperty("solrcloud.skip.autorecovery");
+ System.clearProperty("genericCoreNodeNames");
}
@Override
Modified: lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java (original)
+++ lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java Mon Jul 1 21:31:19 2013
@@ -36,6 +36,7 @@ import org.apache.lucene.util.LuceneTest
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkNodeProps;
@@ -98,7 +99,7 @@ public class OverseerTest extends SolrTe
zkClient.close();
}
- public String publishState(String coreName, String stateName, int numShards)
+ public String publishState(String coreName, String coreNodeName, String stateName, int numShards)
throws KeeperException, InterruptedException, IOException {
if (stateName == null) {
ElectionContext ec = electionContext.remove(coreName);
@@ -108,6 +109,7 @@ public class OverseerTest extends SolrTe
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, "deletecore",
ZkStateReader.NODE_NAME_PROP, nodeName,
ZkStateReader.CORE_NAME_PROP, coreName,
+ ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName,
ZkStateReader.COLLECTION_PROP, collection);
DistributedQueue q = Overseer.getInQueue(zkClient);
q.offer(ZkStateReader.toJSON(m));
@@ -117,6 +119,7 @@ public class OverseerTest extends SolrTe
ZkStateReader.STATE_PROP, stateName,
ZkStateReader.NODE_NAME_PROP, nodeName,
ZkStateReader.CORE_NAME_PROP, coreName,
+ ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName,
ZkStateReader.COLLECTION_PROP, collection,
ZkStateReader.NUM_SHARDS_PROP, Integer.toString(numShards),
ZkStateReader.BASE_URL_PROP, "http://" + nodeName
@@ -126,7 +129,8 @@ public class OverseerTest extends SolrTe
}
for (int i = 0; i < 120; i++) {
- String shardId = getShardId(coreName);
+ String shardId = getShardId("http://" + nodeName
+ + "/solr/", coreName);
if (shardId != null) {
try {
zkClient.makePath("/collections/" + collection + "/leader_elect/"
@@ -136,7 +140,8 @@ public class OverseerTest extends SolrTe
"http://" + nodeName + "/solr/", ZkStateReader.NODE_NAME_PROP,
nodeName, ZkStateReader.CORE_NAME_PROP, coreName,
ZkStateReader.SHARD_ID_PROP, shardId,
- ZkStateReader.COLLECTION_PROP, collection);
+ ZkStateReader.COLLECTION_PROP, collection,
+ ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
ShardLeaderElectionContextBase ctx = new ShardLeaderElectionContextBase(
elector, shardId, collection, nodeName + "_" + coreName, props,
zkStateReader);
@@ -148,13 +153,18 @@ public class OverseerTest extends SolrTe
return null;
}
- private String getShardId(final String coreName) {
+ private String getShardId(final String baseUrl, final String coreName) {
Map<String,Slice> slices = zkStateReader.getClusterState().getSlicesMap(
collection);
if (slices != null) {
for (Slice slice : slices.values()) {
- if (slice.getReplicasMap().containsKey(nodeName + "_" + coreName)) {
- return slice.getName();
+ for (Replica replica : slice.getReplicas()) {
+ // TODO: for really large clusters, we could 'index' on this
+ String rbaseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
+ String rcore = replica.getStr(ZkStateReader.CORE_NAME_PROP);
+ if (baseUrl.equals(rbaseUrl) && coreName.equals(rcore)) {
+ return slice.getName();
+ }
}
}
}
@@ -202,7 +212,7 @@ public class OverseerTest extends SolrTe
final int numShards=6;
for (int i = 0; i < numShards; i++) {
- assertNotNull("shard got no id?", zkController.publishState("core" + (i+1), ZkStateReader.ACTIVE, 3));
+ assertNotNull("shard got no id?", zkController.publishState("core" + (i+1), "node" + (i+1), ZkStateReader.ACTIVE, 3));
}
assertEquals(2, reader.getClusterState().getSlice("collection1", "shard1").getReplicasMap().size());
@@ -277,7 +287,7 @@ public class OverseerTest extends SolrTe
final String coreName = "core" + slot;
try {
- ids[slot]=controllers[slot % nodeCount].publishState(coreName, ZkStateReader.ACTIVE, sliceCount);
+ ids[slot]=controllers[slot % nodeCount].publishState(coreName, "node" + slot, ZkStateReader.ACTIVE, sliceCount);
} catch (Throwable e) {
e.printStackTrace();
fail("register threw exception:" + e.getClass());
@@ -440,7 +450,7 @@ public class OverseerTest extends SolrTe
assertEquals(reader.getClusterState().toString(), ZkStateReader.RECOVERING,
reader.getClusterState().getSlice("collection1", "shard1").getReplicasMap()
- .get("node1_core1").getStr(ZkStateReader.STATE_PROP));
+ .get("core_node1").getStr(ZkStateReader.STATE_PROP));
//publish node state (active)
m = new ZkNodeProps(Overseer.QUEUE_OPERATION, "state",
@@ -471,7 +481,7 @@ public class OverseerTest extends SolrTe
while(maxIterations-->0) {
Slice slice = reader.getClusterState().getSlice("collection1", "shard1");
if(slice!=null) {
- coreState = slice.getReplicasMap().get("node1_core1").getStr(ZkStateReader.STATE_PROP);
+ coreState = slice.getReplicasMap().get("core_node1").getStr(ZkStateReader.STATE_PROP);
if(coreState.equals(expectedState)) {
return;
}
@@ -523,14 +533,14 @@ public class OverseerTest extends SolrTe
overseerClient = electNewOverseer(server.getZkAddress());
Thread.sleep(1000);
- mockController.publishState("core1", ZkStateReader.RECOVERING, 1);
+ mockController.publishState("core1", "core_node1", ZkStateReader.RECOVERING, 1);
waitForCollections(reader, "collection1");
verifyStatus(reader, ZkStateReader.RECOVERING);
int version = getClusterStateVersion(controllerClient);
- mockController.publishState("core1", ZkStateReader.ACTIVE, 1);
+ mockController.publishState("core1", "core_node1", ZkStateReader.ACTIVE, 1);
while(version == getClusterStateVersion(controllerClient));
@@ -539,7 +549,7 @@ public class OverseerTest extends SolrTe
overseerClient.close();
Thread.sleep(1000); //wait for overseer to get killed
- mockController.publishState("core1", ZkStateReader.RECOVERING, 1);
+ mockController.publishState("core1", "core_node1", ZkStateReader.RECOVERING, 1);
version = getClusterStateVersion(controllerClient);
overseerClient = electNewOverseer(server.getZkAddress());
@@ -553,7 +563,7 @@ public class OverseerTest extends SolrTe
assertEquals("Shard count does not match", 1, reader.getClusterState()
.getSlice("collection1", "shard1").getReplicasMap().size());
version = getClusterStateVersion(controllerClient);
- mockController.publishState("core1", null,1);
+ mockController.publishState("core1", "core_node1", null,1);
while(version == getClusterStateVersion(controllerClient));
Thread.sleep(500);
assertFalse("collection1 should be gone after publishing the null state", reader.getClusterState().getCollections().contains("collection1"));
@@ -641,16 +651,16 @@ public class OverseerTest extends SolrTe
for (int i = 0; i < atLeast(4); i++) {
killCounter.incrementAndGet(); //for each round allow 1 kill
mockController = new MockZKController(server.getZkAddress(), "node1", "collection1");
- mockController.publishState("core1", "state1",1);
+ mockController.publishState("core1", "node1", "state1",1);
if(mockController2!=null) {
mockController2.close();
mockController2 = null;
}
- mockController.publishState("core1", "state2",1);
+ mockController.publishState("core1", "node1","state2",1);
mockController2 = new MockZKController(server.getZkAddress(), "node2", "collection1");
- mockController.publishState("core1", "state1",1);
+ mockController.publishState("core1", "node1", "state1",1);
verifyShardLeader(reader, "collection1", "shard1", "core1");
- mockController2.publishState("core4", "state2" ,1);
+ mockController2.publishState("core4", "node2", "state2" ,1);
mockController.close();
mockController = null;
verifyShardLeader(reader, "collection1", "shard1", "core4");
@@ -697,7 +707,7 @@ public class OverseerTest extends SolrTe
overseerClient = electNewOverseer(server.getZkAddress());
- mockController.publishState("core1", ZkStateReader.RECOVERING, 1);
+ mockController.publishState("core1", "core_node1", ZkStateReader.RECOVERING, 1);
waitForCollections(reader, "collection1");
@@ -708,7 +718,7 @@ public class OverseerTest extends SolrTe
int version = getClusterStateVersion(controllerClient);
mockController = new MockZKController(server.getZkAddress(), "node1", "collection1");
- mockController.publishState("core1", ZkStateReader.RECOVERING, 1);
+ mockController.publishState("core1", "core_node1", ZkStateReader.RECOVERING, 1);
while (version == getClusterStateVersion(controllerClient));
@@ -718,7 +728,7 @@ public class OverseerTest extends SolrTe
int numFound = 0;
for (DocCollection collection : state.getCollectionStates().values()) {
for (Slice slice : collection.getSlices()) {
- if (slice.getReplicasMap().get("node1_core1") != null) {
+ if (slice.getReplicasMap().get("core_node1") != null) {
numFound++;
}
}
@@ -761,7 +771,7 @@ public class OverseerTest extends SolrTe
overseerClient = electNewOverseer(server.getZkAddress());
- mockController.publishState("core1", ZkStateReader.RECOVERING, 12);
+ mockController.publishState("core1", "node1", ZkStateReader.RECOVERING, 12);
waitForCollections(reader, "collection1");
Modified: lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/RecoveryZkTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/RecoveryZkTest.java?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/RecoveryZkTest.java (original)
+++ lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/RecoveryZkTest.java Mon Jul 1 21:31:19 2013
@@ -86,19 +86,25 @@ public class RecoveryZkTest extends Abst
indexThread.join();
indexThread2.join();
- Thread.sleep(500);
+ Thread.sleep(1000);
- waitForThingsToLevelOut(30);
+ waitForThingsToLevelOut(45);
Thread.sleep(2000);
waitForThingsToLevelOut(30);
+ Thread.sleep(5000);
+
waitForRecoveriesToFinish(DEFAULT_COLLECTION, zkStateReader, false, true);
// test that leader and replica have same doc count
- checkShardConsistency("shard1", false, false);
+ String fail = checkShardConsistency("shard1", false, false);
+ if (fail != null) {
+ fail(fail);
+ }
+
SolrQuery query = new SolrQuery("*:*");
query.setParam("distrib", "false");
long client1Docs = shardToJetty.get("shard1").get(0).client.solrClient.query(query).getResults().getNumFound();
Modified: lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java (original)
+++ lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java Mon Jul 1 21:31:19 2013
@@ -67,7 +67,6 @@ public class SyncSliceTest extends Abstr
super.setUp();
// we expect this time of exception as shards go up and down...
//ignoreException(".*");
- useFactory(null);
System.setProperty("numShards", Integer.toString(sliceCount));
}
@@ -94,7 +93,7 @@ public class SyncSliceTest extends Abstr
handle.put("QTime", SKIPVAL);
handle.put("timestamp", SKIPVAL);
- waitForThingsToLevelOut(15);
+ waitForThingsToLevelOut(30);
del("*:*");
List<CloudJettyRunner> skipServers = new ArrayList<CloudJettyRunner>();
Modified: lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java (original)
+++ lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java Mon Jul 1 21:31:19 2013
@@ -91,7 +91,7 @@ public class UnloadDistributedZkTest ext
createCmd.setCollection(collection);
String coreDataDir = dataDir.getAbsolutePath() + File.separator
+ System.currentTimeMillis() + collection + "1";
- createCmd.setDataDir(coreDataDir);
+ createCmd.setDataDir(getDataDir(coreDataDir));
createCmd.setNumShards(2);
SolrServer client = clients.get(0);
@@ -107,7 +107,7 @@ public class UnloadDistributedZkTest ext
createCmd.setCollection(collection);
coreDataDir = dataDir.getAbsolutePath() + File.separator
+ System.currentTimeMillis() + collection + "2";
- createCmd.setDataDir(coreDataDir);
+ createCmd.setDataDir(getDataDir(coreDataDir));
server.request(createCmd);
@@ -171,7 +171,7 @@ public class UnloadDistributedZkTest ext
createCmd.setCollection("unloadcollection");
createCmd.setNumShards(1);
String core1DataDir = dataDir.getAbsolutePath() + File.separator + System.currentTimeMillis() + "unloadcollection1" + "_1n";
- createCmd.setDataDir(core1DataDir);
+ createCmd.setDataDir(getDataDir(core1DataDir));
server.request(createCmd);
ZkStateReader zkStateReader = getCommonCloudSolrServer().getZkStateReader();
@@ -189,7 +189,7 @@ public class UnloadDistributedZkTest ext
createCmd.setCoreName("unloadcollection2");
createCmd.setCollection("unloadcollection");
String core2dataDir = dataDir.getAbsolutePath() + File.separator + System.currentTimeMillis() + "unloadcollection1" + "_2n";
- createCmd.setDataDir(core2dataDir);
+ createCmd.setDataDir(getDataDir(core2dataDir));
server.request(createCmd);
zkStateReader.updateClusterState(true);
@@ -227,7 +227,7 @@ public class UnloadDistributedZkTest ext
createCmd.setCoreName("unloadcollection3");
createCmd.setCollection("unloadcollection");
String core3dataDir = dataDir.getAbsolutePath() + File.separator + System.currentTimeMillis() + "unloadcollection" + "_3n";
- createCmd.setDataDir(core3dataDir);
+ createCmd.setDataDir(getDataDir(core3dataDir));
server.request(createCmd);
waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
@@ -296,7 +296,7 @@ public class UnloadDistributedZkTest ext
createCmd.setCoreName("unloadcollection4");
createCmd.setCollection("unloadcollection");
String core4dataDir = dataDir.getAbsolutePath() + File.separator + System.currentTimeMillis() + "unloadcollection" + "_4n";
- createCmd.setDataDir(core4dataDir);
+ createCmd.setDataDir(getDataDir(core4dataDir));
server.request(createCmd);
waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
@@ -334,7 +334,7 @@ public class UnloadDistributedZkTest ext
createCmd = new Create();
createCmd.setCoreName(leaderProps.getCoreName());
createCmd.setCollection("unloadcollection");
- createCmd.setDataDir(core1DataDir);
+ createCmd.setDataDir(getDataDir(core1DataDir));
server.request(createCmd);
waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
Modified: lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java (original)
+++ lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java Mon Jul 1 21:31:19 2013
@@ -163,7 +163,7 @@ public class ZkControllerTest extends So
cc = getCoreContainer();
ZkController zkController = new ZkController(cc, server.getZkAddress(), TIMEOUT, 10000,
- "127.0.0.1", "8983", "solr", "0", 10000, 10000, new CurrentCoreDescriptorProvider() {
+ "127.0.0.1", "8983", "solr", "0", true, 10000, 10000, new CurrentCoreDescriptorProvider() {
@Override
public List<CoreDescriptor> getCurrentDescriptors() {
@@ -203,7 +203,7 @@ public class ZkControllerTest extends So
cc = getCoreContainer();
zkController = new ZkController(cc, server.getZkAddress(),
- TIMEOUT, 10000, "127.0.0.1", "8983", "solr", "0", 10000, 10000, new CurrentCoreDescriptorProvider() {
+ TIMEOUT, 10000, "127.0.0.1", "8983", "solr", "0", true, 10000, 10000, new CurrentCoreDescriptorProvider() {
@Override
public List<CoreDescriptor> getCurrentDescriptors() {
Modified: lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java (original)
+++ lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java Mon Jul 1 21:31:19 2013
@@ -1522,7 +1522,7 @@ public class TestReplicationHandler exte
}
public String getDataDir() {
- return dataDir.toString();
+ return dataDir.getAbsolutePath();
}
public String getSolrConfigFile() {
Modified: lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/search/TestRecovery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/search/TestRecovery.java?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/search/TestRecovery.java (original)
+++ lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/search/TestRecovery.java Mon Jul 1 21:31:19 2013
@@ -17,6 +17,8 @@
package org.apache.solr.search;
+import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
+
import org.apache.solr.common.SolrException;
import org.noggit.ObjectBuilder;
import org.apache.solr.SolrTestCaseJ4;
@@ -42,8 +44,16 @@ import java.util.concurrent.Future;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
-import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
-import static org.apache.solr.update.processor.DistributedUpdateProcessor.DistribPhase;
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.update.DirectUpdateHandler2;
+import org.apache.solr.update.UpdateHandler;
+import org.apache.solr.update.UpdateLog;
+import org.apache.solr.update.processor.DistributedUpdateProcessor.DistribPhase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.noggit.ObjectBuilder;
public class TestRecovery extends SolrTestCaseJ4 {
@@ -744,16 +754,17 @@ public class TestRecovery extends SolrTe
clearIndex();
assertU(commit());
- File logDir = h.getCore().getUpdateHandler().getUpdateLog().getLogDir();
+ UpdateLog ulog = h.getCore().getUpdateHandler().getUpdateLog();
+ File logDir = new File(h.getCore().getUpdateHandler().getUpdateLog().getLogDir());
h.close();
- String[] files = UpdateLog.getLogList(logDir);
+ String[] files = ulog.getLogList(logDir);
for (String file : files) {
new File(logDir, file).delete();
}
- assertEquals(0, UpdateLog.getLogList(logDir).length);
+ assertEquals(0, ulog.getLogList(logDir).length);
createCore();
@@ -771,7 +782,7 @@ public class TestRecovery extends SolrTe
assertU(commit());
assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
- assertEquals(2, UpdateLog.getLogList(logDir).length);
+ assertEquals(2, ulog.getLogList(logDir).length);
addDocs(105, start, versions); start+=105;
assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
@@ -779,7 +790,7 @@ public class TestRecovery extends SolrTe
assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
// previous two logs should be gone now
- assertEquals(1, UpdateLog.getLogList(logDir).length);
+ assertEquals(1, ulog.getLogList(logDir).length);
addDocs(1, start, versions); start+=1;
h.close();
@@ -799,14 +810,14 @@ public class TestRecovery extends SolrTe
assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
// previous logs should be gone now
- assertEquals(1, UpdateLog.getLogList(logDir).length);
+ assertEquals(1, ulog.getLogList(logDir).length);
//
// test that a corrupt tlog file doesn't stop us from coming up, or seeing versions before that tlog file.
//
addDocs(1, start, new LinkedList<Long>()); // don't add this to the versions list because we are going to lose it...
h.close();
- files = UpdateLog.getLogList(logDir);
+ files = ulog.getLogList(logDir);
Arrays.sort(files);
RandomAccessFile raf = new RandomAccessFile(new File(logDir, files[files.length-1]), "rw");
raf.writeChars("This is a trashed log file that really shouldn't work at all, but we'll see...");
@@ -854,7 +865,8 @@ public class TestRecovery extends SolrTe
}
};
- File logDir = h.getCore().getUpdateHandler().getUpdateLog().getLogDir();
+ UpdateLog ulog = h.getCore().getUpdateHandler().getUpdateLog();
+ File logDir = new File(h.getCore().getUpdateHandler().getUpdateLog().getLogDir());
clearIndex();
assertU(commit());
@@ -864,7 +876,7 @@ public class TestRecovery extends SolrTe
assertU(adoc("id","F3"));
h.close();
- String[] files = UpdateLog.getLogList(logDir);
+ String[] files = ulog.getLogList(logDir);
Arrays.sort(files);
RandomAccessFile raf = new RandomAccessFile(new File(logDir, files[files.length-1]), "rw");
raf.seek(raf.length()); // seek to end
@@ -908,7 +920,8 @@ public class TestRecovery extends SolrTe
try {
DirectUpdateHandler2.commitOnClose = false;
- File logDir = h.getCore().getUpdateHandler().getUpdateLog().getLogDir();
+ UpdateLog ulog = h.getCore().getUpdateHandler().getUpdateLog();
+ File logDir = new File(h.getCore().getUpdateHandler().getUpdateLog().getLogDir());
clearIndex();
assertU(commit());
@@ -920,7 +933,7 @@ public class TestRecovery extends SolrTe
h.close();
- String[] files = UpdateLog.getLogList(logDir);
+ String[] files = ulog.getLogList(logDir);
Arrays.sort(files);
RandomAccessFile raf = new RandomAccessFile(new File(logDir, files[files.length-1]), "rw");
long len = raf.length();
@@ -991,7 +1004,8 @@ public class TestRecovery extends SolrTe
}
};
- File logDir = h.getCore().getUpdateHandler().getUpdateLog().getLogDir();
+ UpdateLog ulog = h.getCore().getUpdateHandler().getUpdateLog();
+ File logDir = new File(h.getCore().getUpdateHandler().getUpdateLog().getLogDir());
clearIndex();
assertU(commit());
@@ -1001,7 +1015,7 @@ public class TestRecovery extends SolrTe
assertU(adoc("id","CCCCCC"));
h.close();
- String[] files = UpdateLog.getLogList(logDir);
+ String[] files = ulog.getLogList(logDir);
Arrays.sort(files);
String fname = files[files.length-1];
RandomAccessFile raf = new RandomAccessFile(new File(logDir, fname), "rw");
@@ -1071,17 +1085,18 @@ public class TestRecovery extends SolrTe
// stops the core, removes the transaction logs, restarts the core.
void deleteLogs() throws Exception {
- File logDir = h.getCore().getUpdateHandler().getUpdateLog().getLogDir();
+ UpdateLog ulog = h.getCore().getUpdateHandler().getUpdateLog();
+ File logDir = new File(h.getCore().getUpdateHandler().getUpdateLog().getLogDir());
h.close();
try {
- String[] files = UpdateLog.getLogList(logDir);
+ String[] files = ulog.getLogList(logDir);
for (String file : files) {
new File(logDir, file).delete();
}
- assertEquals(0, UpdateLog.getLogList(logDir).length);
+ assertEquals(0, ulog.getLogList(logDir).length);
} finally {
// make sure we create the core again, even if the assert fails so it won't mess
// up the next test.
Modified: lucene/dev/branches/branch_4x/solr/example/resources/log4j.properties
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/example/resources/log4j.properties?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/example/resources/log4j.properties (original)
+++ lucene/dev/branches/branch_4x/solr/example/resources/log4j.properties Mon Jul 1 21:31:19 2013
@@ -1,4 +1,5 @@
# Logging level
+solr.log=logs/
log4j.rootLogger=INFO, file, CONSOLE
log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
@@ -12,8 +13,9 @@ log4j.appender.file.MaxFileSize=4MB
log4j.appender.file.MaxBackupIndex=9
#- File to log to and log format
-log4j.appender.file.File=logs/solr.log
+log4j.appender.file.File=${solr.log}/solr.log
log4j.appender.file.layout=org.apache.log4j.PatternLayout
log4j.appender.file.layout.ConversionPattern=%-5p - %d{yyyy-MM-dd HH:mm:ss.SSS}; %C; %m\n
-log4j.logger.org.apache.zookeeper=WARN
\ No newline at end of file
+log4j.logger.org.apache.zookeeper=WARN
+log4j.logger.org.apache.hadoop=WARN
Modified: lucene/dev/branches/branch_4x/solr/example/solr/solr.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/example/solr/solr.xml?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/example/solr/solr.xml (original)
+++ lucene/dev/branches/branch_4x/solr/example/solr/solr.xml Mon Jul 1 21:31:19 2013
@@ -33,6 +33,7 @@
<int name="hostPort">${jetty.port:8983}</int>
<str name="hostContext">${hostContext:solr}</str>
<int name="zkClientTimeout">${zkClientTimeout:15000}</int>
+ <bool name="genericCoreNodeNames">${genericCoreNodeNames:true}</bool>
</solrcloud>
<shardHandlerFactory name="shardHandlerFactory"
Modified: lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java (original)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java Mon Jul 1 21:31:19 2013
@@ -165,15 +165,18 @@ public class ClusterState implements JSO
return Collections.unmodifiableSet(liveNodes);
}
- /**
- * Get the slice/shardId for a core.
- * @param coreNodeName in the form of nodeName_coreName (the name of the replica)
- */
- public String getShardId(String coreNodeName) {
- // System.out.println("###### getShardId("+coreNodeName+") in " + collectionStates);
+ public String getShardId(String baseUrl, String coreName) {
+ // System.out.println("###### getShardId(" + baseUrl + "," + coreName + ") in " + collectionStates);
for (DocCollection coll : collectionStates.values()) {
for (Slice slice : coll.getSlices()) {
- if (slice.getReplicasMap().containsKey(coreNodeName)) return slice.getName();
+ for (Replica replica : slice.getReplicas()) {
+ // TODO: for really large clusters, we could 'index' on this
+ String rbaseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
+ String rcore = replica.getStr(ZkStateReader.CORE_NAME_PROP);
+ if (baseUrl.equals(rbaseUrl) && coreName.equals(rcore)) {
+ return slice.getName();
+ }
+ }
}
}
return null;
Modified: lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java (original)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java Mon Jul 1 21:31:19 2013
@@ -508,6 +508,7 @@ public class ZkStateReader {
public List<ZkCoreNodeProps> getReplicaProps(String collection,
String shardId, String thisCoreNodeName, String coreName, String mustMatchStateFilter, String mustNotMatchStateFilter) {
+ assert thisCoreNodeName != null;
ClusterState clusterState = this.clusterState;
if (clusterState == null) {
return null;
@@ -540,7 +541,7 @@ public class ZkStateReader {
}
}
if (nodes.size() == 0) {
- // no replicas - go local
+ // no replicas
return null;
}
Modified: lucene/dev/branches/branch_4x/solr/solrj/src/test-files/solrj/log4j.properties
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/test-files/solrj/log4j.properties?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/test-files/solrj/log4j.properties (original)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/test-files/solrj/log4j.properties Mon Jul 1 21:31:19 2013
@@ -7,3 +7,4 @@ log4j.appender.CONSOLE.layout=org.apache
log4j.appender.CONSOLE.layout.ConversionPattern=%-5p - %d{yyyy-MM-dd HH:mm:ss.SSS}; %C; %m\n
log4j.logger.org.apache.zookeeper=WARN
+log4j.logger.org.apache.hadoop=WARN
Modified: lucene/dev/branches/branch_4x/solr/solrj/src/test-files/solrj/solr/shared/solr.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/test-files/solrj/solr/shared/solr.xml?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/test-files/solrj/solr/shared/solr.xml (original)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/test-files/solrj/solr/shared/solr.xml Mon Jul 1 21:31:19 2013
@@ -30,7 +30,7 @@
adminPath: RequestHandler path to manage cores.
If 'null' (or absent), cores will not be manageable via REST
-->
- <cores adminPath="/admin/cores" defaultCoreName="core0" host="127.0.0.1" hostPort="${hostPort:8983}" hostContext="${hostContext:solr}" zkClientTimeout="8000">
+ <cores adminPath="/admin/cores" defaultCoreName="core0" host="127.0.0.1" hostPort="${hostPort:8983}" hostContext="${hostContext:solr}" zkClientTimeout="8000" genericCoreNodeNames="${genericCoreNodeNames:true}">
<core name="collection1" instanceDir="." />
<core name="core0" instanceDir="${theInstanceDir:./}" dataDir="${dataDir1}" collection="${collection:acollection}">
<property name="version" value="3.5"/>
Modified: lucene/dev/branches/branch_4x/solr/solrj/src/test-files/solrj/solr/solr.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/test-files/solrj/solr/solr.xml?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/test-files/solrj/solr/solr.xml (original)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/test-files/solrj/solr/solr.xml Mon Jul 1 21:31:19 2013
@@ -28,7 +28,7 @@
adminPath: RequestHandler path to manage cores.
If 'null' (or absent), cores will not be manageable via request handler
-->
- <cores adminPath="/admin/cores" defaultCoreName="collection1" host="127.0.0.1" hostPort="${hostPort:8983}" hostContext="${hostContext:solr}" zkClientTimeout="8000" numShards="${numShards:3}">
+ <cores adminPath="/admin/cores" defaultCoreName="collection1" host="127.0.0.1" hostPort="${hostPort:8983}" hostContext="${hostContext:solr}" zkClientTimeout="8000" genericCoreNodeNames="${genericCoreNodeNames:true}">
<core name="collection1" instanceDir="collection1" shard="${shard:}" collection="${collection:collection1}" config="${solrconfig:solrconfig.xml}" schema="${schema:schema.xml}"/>
</cores>
</solr>
Modified: lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/TestSolrProperties.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/TestSolrProperties.java?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/TestSolrProperties.java (original)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/TestSolrProperties.java Mon Jul 1 21:31:19 2013
@@ -196,7 +196,8 @@ public class TestSolrProperties extends
exists("/solr/cores[@zkClientTimeout='8000']", document));
assertTrue("\"/solr/cores[@hostContext='${hostContext:solr}']\" doesn't match in:\n" + solrPersistXml,
exists("/solr/cores[@hostContext='${hostContext:solr}']", document));
-
+ assertTrue("\"/solr/cores[@genericCoreNodeNames='${genericCoreNodeNames:true}']\" doesn't match in:\n" + solrPersistXml,
+ exists("/solr/cores[@genericCoreNodeNames='${genericCoreNodeNames:true}']", document));
} finally {
fis.close();
}
Modified: lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestCoreAdmin.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestCoreAdmin.java?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestCoreAdmin.java (original)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestCoreAdmin.java Mon Jul 1 21:31:19 2013
@@ -103,7 +103,7 @@ public class TestCoreAdmin extends Abstr
File logDir;
try {
- logDir = core.getUpdateHandler().getUpdateLog().getLogDir();
+ logDir = new File(core.getUpdateHandler().getUpdateLog().getLogDir());
} finally {
coreProveIt.close();
core.close();
Modified: lucene/dev/branches/branch_4x/solr/test-framework/ivy.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/test-framework/ivy.xml?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/test-framework/ivy.xml (original)
+++ lucene/dev/branches/branch_4x/solr/test-framework/ivy.xml Mon Jul 1 21:31:19 2013
@@ -16,7 +16,10 @@
specific language governing permissions and limitations
under the License.
-->
-<ivy-module version="2.0">
+<!DOCTYPE ivy-module [
+ <!ENTITY hadoop.version "2.0.5-alpha">
+]>
+<ivy-module version="2.0" xmlns:m="http://ant.apache.org/ivy/maven">
<info organisation="org.apache.solr" module="solr-test-framework"/>
<configurations>
@@ -31,10 +34,25 @@
<dependencies defaultconf="default">
<dependency org="org.apache.ant" name="ant" rev="1.8.2" transitive="false" />
- <dependency org="junit" name="junit" rev="4.10" transitive="false" conf="default->*;junit4-stdalone->*" />
+ <dependency org="junit" name="junit" rev="4.10" transitive="false" conf="default->*;junit4-stdalone->*">
+ <exclude org="org.hamcrest" module="hamcrest-core"/>
+ </dependency>
<dependency org="com.carrotsearch.randomizedtesting" name="junit4-ant" rev="2.0.10" transitive="false" conf="default->*;junit4-stdalone->*" />
<dependency org="com.carrotsearch.randomizedtesting" name="randomizedtesting-runner" rev="2.0.10" transitive="false" conf="default->*;junit4-stdalone->*" />
+ <!-- Hadoop DfsMiniCluster Dependencies-->
+ <dependency org="org.apache.hadoop" name="hadoop-common" transitive="false" rev="&hadoop.version;" conf="default->*;junit4-stdalone->*">
+ <artifact name="hadoop-common" type="tests" ext="jar" m:classifier="tests" />
+ </dependency>
+ <dependency org="org.apache.hadoop" name="hadoop-hdfs" transitive="false" rev="&hadoop.version;" conf="default->*;junit4-stdalone->*">
+ <artifact name="hadoop-hdfs" type="tests" ext="jar" m:classifier="tests" />
+ </dependency>
+ <dependency org="log4j" name="log4j" rev="1.2.17" transitive="false" />
+ <dependency org="org.mortbay.jetty" name="jetty" rev="6.1.26" transitive="false"/>
+ <dependency org="org.mortbay.jetty" name="jetty-util" rev="6.1.26" transitive="false"/>
+ <dependency org="com.sun.jersey" name="jersey-core" rev="1.16" transitive="false"/>
+ <dependency org="commons-collections" name="commons-collections" rev="3.2.1" transitive="false"/>
+
<exclude org="*" ext="*" matcher="regexp" type="${ivy.exclude.types}"/>
</dependencies>
</ivy-module>
Modified: lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java (original)
+++ lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java Mon Jul 1 21:31:19 2013
@@ -345,6 +345,9 @@ public abstract class SolrTestCaseJ4 ext
* The directory used to story the index managed by the TestHarness h
*/
protected static File dataDir;
+
+ // hack due to File dataDir
+ protected static String hdfsDataDir;
/**
* Initializes things your test might need
@@ -395,8 +398,7 @@ public abstract class SolrTestCaseJ4 ext
public static void createCore() {
assertNotNull(testSolrHome);
solrConfig = TestHarness.createConfig(testSolrHome, coreName, getSolrConfigFile());
- h = new TestHarness( coreName,
- dataDir.getAbsolutePath(),
+ h = new TestHarness( coreName, hdfsDataDir == null ? dataDir.getAbsolutePath() : hdfsDataDir,
solrConfig,
getSchemaFile());
lrf = h.getRequestFactory
Modified: lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java (original)
+++ lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java Mon Jul 1 21:31:19 2013
@@ -128,7 +128,7 @@ public abstract class AbstractDistribZkT
protected void waitForRecoveriesToFinish(String collection, ZkStateReader zkStateReader, boolean verbose, boolean failOnTimeout)
throws Exception {
- waitForRecoveriesToFinish(collection, zkStateReader, verbose, failOnTimeout, 230);
+ waitForRecoveriesToFinish(collection, zkStateReader, verbose, failOnTimeout, 330);
}
protected void waitForRecoveriesToFinish(String collection,
@@ -151,8 +151,7 @@ public abstract class AbstractDistribZkT
if (verbose) System.out.println("rstate:"
+ shard.getValue().getStr(ZkStateReader.STATE_PROP)
+ " live:"
- + clusterState.liveNodesContain(shard.getValue().getStr(
- ZkStateReader.NODE_NAME_PROP)));
+ + clusterState.liveNodesContain(shard.getValue().getNodeName()));
String state = shard.getValue().getStr(ZkStateReader.STATE_PROP);
if ((state.equals(ZkStateReader.RECOVERING) || state
.equals(ZkStateReader.SYNC) || state.equals(ZkStateReader.DOWN))
Modified: lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java (original)
+++ lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java Mon Jul 1 21:31:19 2013
@@ -197,6 +197,7 @@ public abstract class AbstractFullDistri
@AfterClass
public static void afterClass() {
System.clearProperty("solrcloud.update.delay");
+ System.clearProperty("genericCoreNodeNames");
}
public AbstractFullDistribZkTestBase() {
@@ -211,6 +212,10 @@ public abstract class AbstractFullDistri
useExplicitNodeNames = random().nextBoolean();
}
+ protected String getDataDir(String dataDir) throws IOException {
+ return dataDir;
+ }
+
protected void initCloud() throws Exception {
assert(cloudInit == false);
cloudInit = true;
@@ -328,8 +333,8 @@ public abstract class AbstractFullDistri
getClass().getName() + "-jetty" + cnt + "-" + System.currentTimeMillis());
jettyDir.mkdirs();
setupJettySolrHome(jettyDir);
- JettySolrRunner j = createJetty(jettyDir, testDir + "/jetty"
- + cnt, null, "solrconfig.xml", null);
+ JettySolrRunner j = createJetty(jettyDir, getDataDir(testDir + "/jetty"
+ + cnt), null, "solrconfig.xml", null);
jettys.add(j);
SolrServer client = createNewSolrServer(j.getLocalPort());
clients.add(client);
@@ -428,6 +433,28 @@ public abstract class AbstractFullDistri
return cnt;
}
+ public JettySolrRunner createJetty(String dataDir, String ulogDir, String shardList,
+ String solrConfigOverride) throws Exception {
+
+ JettySolrRunner jetty = new JettySolrRunner(getSolrHome(), context, 0,
+ solrConfigOverride, null, false, getExtraServlets());
+ jetty.setShards(shardList);
+ jetty.setDataDir(getDataDir(dataDir));
+ jetty.start();
+
+ return jetty;
+ }
+
+ public JettySolrRunner createJetty(File solrHome, String dataDir, String shardList, String solrConfigOverride, String schemaOverride) throws Exception {
+
+ JettySolrRunner jetty = new JettySolrRunner(solrHome.getAbsolutePath(), context, 0, solrConfigOverride, schemaOverride, false, getExtraServlets());
+ jetty.setShards(shardList);
+ jetty.setDataDir(getDataDir(dataDir));
+ jetty.start();
+
+ return jetty;
+ }
+
protected void updateMappingsFromZk(List<JettySolrRunner> jettys,
List<SolrServer> clients) throws Exception {
ZkStateReader zkStateReader = cloudClient.getZkStateReader();
@@ -483,7 +510,7 @@ public abstract class AbstractFullDistri
cjr.jetty = jetty;
cjr.info = replica;
cjr.nodeName = replica.getStr(ZkStateReader.NODE_NAME_PROP);
- cjr.coreNodeName = replica.getName();
+ cjr.coreNodeName = replica.getNodeName();
cjr.url = replica.getStr(ZkStateReader.BASE_URL_PROP) + "/" + replica.getStr(ZkStateReader.CORE_NAME_PROP);
cjr.client = findClientByPort(port, theClients);
list.add(cjr);
@@ -1559,7 +1586,7 @@ public abstract class AbstractFullDistri
for (String sliceName : slices.keySet()) {
for (Replica replica : slices.get(sliceName).getReplicas()) {
if (nodesAllowedToRunShards != null && !nodesAllowedToRunShards.contains(replica.getStr(ZkStateReader.NODE_NAME_PROP))) {
- return "Shard " + replica.getName() + " created on node " + replica.getStr(ZkStateReader.NODE_NAME_PROP) + " not allowed to run shards for the created collection " + collectionName;
+ return "Shard " + replica.getName() + " created on node " + replica.getNodeName() + " not allowed to run shards for the created collection " + collectionName;
}
}
totalShards += slices.get(sliceName).getReplicas().size();
Modified: lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java?rev=1498702&r1=1498701&r2=1498702&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java (original)
+++ lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java Mon Jul 1 21:31:19 2013
@@ -18,6 +18,7 @@
package org.apache.solr.util;
import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.Config;
@@ -187,7 +188,7 @@ public class TestHarness extends BaseTes
String hostContext = "solr";
defaultCoreName = CoreContainer.DEFAULT_DEFAULT_CORE_NAME;
initShardHandler();
- zkSys.initZooKeeper(this, solrHome, System.getProperty("zkHost"), 30000, hostPort, hostContext, null, "30000", 30000, 30000);
+ zkSys.initZooKeeper(this, solrHome, System.getProperty("zkHost"), 30000, hostPort, hostContext, null, "30000", true, 30000, 30000);
ByteArrayInputStream is = new ByteArrayInputStream(ConfigSolrXmlOld.DEF_SOLR_XML.getBytes("UTF-8"));
Config config = new Config(loader, null, new InputSource(is), null, false);
cfg = new ConfigSolrXmlOld(config, this);
@@ -205,6 +206,9 @@ public class TestHarness extends BaseTes
container.setLogging(logging);
CoreDescriptor dcore = new CoreDescriptor(container, coreName, solrConfig.getResourceLoader().getInstanceDir());
+ if (container.isZooKeeperAware()) {
+ container.getZkController().preRegister(dcore);
+ }
dcore.setConfigName(solrConfig.getResourceName());
dcore.setSchemaName(indexSchema.getResourceName());
@@ -213,6 +217,12 @@ public class TestHarness extends BaseTes
}
SolrCore core = new SolrCore(coreName, dataDirectory, solrConfig, indexSchema, dcore);
+
+ if (container.isZooKeeperAware() && Slice.CONSTRUCTION.equals(dcore.getCloudDescriptor().getShardState())) {
+ // set update log to buffer before publishing the core
+ core.getUpdateHandler().getUpdateLog().bufferUpdates();
+ }
+
container.register(coreName, core, false);
// TODO: we should be exercising the *same* core container initialization code, not equivalent code!