Posted to commits@lucene.apache.org by rm...@apache.org on 2011/01/30 18:29:57 UTC

svn commit: r1065327 [2/3] - in /lucene/dev/trunk: ./ lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/ lucene/src/java/org/apache/lucene/queryParser/ lucene/src/java/org/apache/lucene/search/spans/ lucene/src/test/org/apache/lucene...

Modified: lucene/dev/trunk/solr/src/java/org/apache/solr/util/SentinelIntSet.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/java/org/apache/solr/util/SentinelIntSet.java?rev=1065327&r1=1065326&r2=1065327&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/java/org/apache/solr/util/SentinelIntSet.java (original)
+++ lucene/dev/trunk/solr/src/java/org/apache/solr/util/SentinelIntSet.java Sun Jan 30 17:29:55 2011
@@ -1,134 +1,134 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.util;
-
-import java.util.Arrays;
-
-/** A native int set where one value is reserved to mean "EMPTY" */
-public class SentinelIntSet {
-  public int[] keys;
-  public int count;
-  public final int emptyVal;
-  public int rehashCount;   // the count at which a rehash should be done
-
-  public SentinelIntSet(int size, int emptyVal) {
-    this.emptyVal = emptyVal;
-    int tsize = Math.max(org.apache.lucene.util.BitUtil.nextHighestPowerOfTwo(size), 1);
-    rehashCount = tsize - (tsize>>2);
-    if (size >= rehashCount) {  // should be able to hold "size" w/o rehashing
-      tsize <<= 1;
-      rehashCount = tsize - (tsize>>2);
-    }
-    keys = new int[tsize];
-    if (emptyVal != 0)
-      clear();
-  }
-
-  public void clear() {
-    Arrays.fill(keys, emptyVal);
-    count = 0;
-  }
-
-  public int hash(int key) {
-    return key;
-  }
-
-  public int size() { return count; }
-
-  /** returns the slot for this key */
-  public int getSlot(int key) {
-    assert key != emptyVal;
-    int h = hash(key);
-    int s = h & (keys.length-1);
-    if (keys[s] == key || keys[s]== emptyVal) return s;
-
-    int increment = (h>>7)|1;
-    do {
-      s = (s + increment) & (keys.length-1);
-    } while (keys[s] != key && keys[s] != emptyVal);
-    return s;
-  }
-
-  /** returns the slot for this key, or -slot-1 if not found */
-  public int find(int key) {
-    assert key != emptyVal;
-    int h = hash(key);
-    int s = h & (keys.length-1);
-    if (keys[s] == key) return s;
-    if (keys[s] == emptyVal) return -s-1;
-
-    int increment = (h>>7)|1;
-    for(;;) {
-      s = (s + increment) & (keys.length-1);
-      if (keys[s] == key) return s;
-      if (keys[s] == emptyVal) return -s-1;
-    }
-  }
-
-
-  public boolean exists(int key) {
-    return find(key) >= 0;
-  }
-
-
-  public int put(int key) {
-    int s = find(key);
-    if (s < 0) {
-      if (count >= rehashCount) {
-        rehash();
-        s = getSlot(key);
-      } else {
-        s = -s-1;
-      }
-      count++;
-      keys[s] = key;
-      putKey(key, s);
-    } else {
-      overwriteKey(key, s);
-    }
-    return s;
-  }
-
-
-  protected void putKey(int key, int slot) {}
-  protected void overwriteKey(int key, int slot) {}
-
-  protected void startRehash(int newSize) {}
-  protected void moveKey(int key, int oldSlot, int newSlot) {}
-  protected void endRehash() {}
-
-  public void rehash() {
-    int newSize = keys.length << 1;
-    startRehash(newSize);
-    int[] oldKeys = keys;
-    keys = new int[newSize];
-    if (emptyVal != 0) Arrays.fill(keys, emptyVal);
-
-    for (int i=0; i<oldKeys.length; i++) {
-      int key = oldKeys[i];
-      if (key == emptyVal) continue;
-      int newSlot = getSlot(key);
-      keys[newSlot] = key;
-      moveKey(key, i, newSlot);
-    }
-    endRehash();
-    rehashCount = newSize - (newSize>>2);
-
-  }
-
-}
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.util;
+
+import java.util.Arrays;
+
+/** A native int set where one value is reserved to mean "EMPTY" */
+public class SentinelIntSet {
+  public int[] keys;
+  public int count;
+  public final int emptyVal;
+  public int rehashCount;   // the count at which a rehash should be done
+
+  public SentinelIntSet(int size, int emptyVal) {
+    this.emptyVal = emptyVal;
+    int tsize = Math.max(org.apache.lucene.util.BitUtil.nextHighestPowerOfTwo(size), 1);
+    rehashCount = tsize - (tsize>>2);
+    if (size >= rehashCount) {  // should be able to hold "size" w/o rehashing
+      tsize <<= 1;
+      rehashCount = tsize - (tsize>>2);
+    }
+    keys = new int[tsize];
+    if (emptyVal != 0)
+      clear();
+  }
+
+  public void clear() {
+    Arrays.fill(keys, emptyVal);
+    count = 0;
+  }
+
+  public int hash(int key) {
+    return key;
+  }
+
+  public int size() { return count; }
+
+  /** returns the slot for this key */
+  public int getSlot(int key) {
+    assert key != emptyVal;
+    int h = hash(key);
+    int s = h & (keys.length-1);
+    if (keys[s] == key || keys[s]== emptyVal) return s;
+
+    int increment = (h>>7)|1;
+    do {
+      s = (s + increment) & (keys.length-1);
+    } while (keys[s] != key && keys[s] != emptyVal);
+    return s;
+  }
+
+  /** returns the slot for this key, or -slot-1 if not found */
+  public int find(int key) {
+    assert key != emptyVal;
+    int h = hash(key);
+    int s = h & (keys.length-1);
+    if (keys[s] == key) return s;
+    if (keys[s] == emptyVal) return -s-1;
+
+    int increment = (h>>7)|1;
+    for(;;) {
+      s = (s + increment) & (keys.length-1);
+      if (keys[s] == key) return s;
+      if (keys[s] == emptyVal) return -s-1;
+    }
+  }
+
+
+  public boolean exists(int key) {
+    return find(key) >= 0;
+  }
+
+
+  public int put(int key) {
+    int s = find(key);
+    if (s < 0) {
+      if (count >= rehashCount) {
+        rehash();
+        s = getSlot(key);
+      } else {
+        s = -s-1;
+      }
+      count++;
+      keys[s] = key;
+      putKey(key, s);
+    } else {
+      overwriteKey(key, s);
+    }
+    return s;
+  }
+
+
+  protected void putKey(int key, int slot) {}
+  protected void overwriteKey(int key, int slot) {}
+
+  protected void startRehash(int newSize) {}
+  protected void moveKey(int key, int oldSlot, int newSlot) {}
+  protected void endRehash() {}
+
+  public void rehash() {
+    int newSize = keys.length << 1;
+    startRehash(newSize);
+    int[] oldKeys = keys;
+    keys = new int[newSize];
+    if (emptyVal != 0) Arrays.fill(keys, emptyVal);
+
+    for (int i=0; i<oldKeys.length; i++) {
+      int key = oldKeys[i];
+      if (key == emptyVal) continue;
+      int newSlot = getSlot(key);
+      keys[newSlot] = key;
+      moveKey(key, i, newSlot);
+    }
+    endRehash();
+    rehashCount = newSize - (newSize>>2);
+
+  }
+
+}
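
For readers skimming the hunk above: SentinelIntSet is an open-addressing int hash set in which one caller-chosen value acts as the EMPTY sentinel, probing uses a key-derived increment, and the table rehashes at roughly 3/4 load (rehashCount = tsize - (tsize>>2)). A minimal usage sketch against the public API shown in this hunk; the wrapper class name and the -1 sentinel are illustrative choices, not part of the commit:

  import org.apache.solr.util.SentinelIntSet;

  public class SentinelIntSetSketch {
    public static void main(String[] args) {
      // -1 is an arbitrary sentinel; keys added to the set must never equal it.
      SentinelIntSet seen = new SentinelIntSet(64, -1);
      for (int docId : new int[] {3, 17, 3, 42}) {
        if (!seen.exists(docId)) {       // exists() is true when find() returns a slot >= 0
          seen.put(docId);               // put() returns the slot the key landed in
        }
      }
      System.out.println(seen.size());   // 3 -- the duplicate 3 is counted once
      System.out.println(seen.find(17)); // slot index (>= 0); absent keys yield -slot-1
    }
  }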

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/cloud/AbstractDistributedZkTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/cloud/AbstractDistributedZkTestCase.java?rev=1065327&r1=1065326&r2=1065327&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/cloud/AbstractDistributedZkTestCase.java (original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/cloud/AbstractDistributedZkTestCase.java Sun Jan 30 17:29:55 2011
@@ -1,93 +1,93 @@
-package org.apache.solr.cloud;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.File;
-
-import org.apache.solr.BaseDistributedSearchTestCase;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.core.SolrConfig;
-import org.junit.Before;
-
-public abstract class AbstractDistributedZkTestCase extends BaseDistributedSearchTestCase {
-  private static final boolean DEBUG = false;
-  protected ZkTestServer zkServer;
-
-  @Before
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    log.info("####SETUP_START " + getName());
-    
-    ignoreException("java.nio.channels.ClosedChannelException");
-    
-    String zkDir = testDir.getAbsolutePath() + File.separator
-    + "zookeeper/server1/data";
-    zkServer = new ZkTestServer(zkDir);
-    zkServer.run();
-    
-    System.setProperty("zkHost", zkServer.getZkAddress());
-    
-    AbstractZkTestCase.buildZooKeeper(zkServer.getZkHost(), zkServer.getZkAddress(), "solrconfig.xml", "schema.xml");
-
-    // set some system properties for use by tests
-    System.setProperty("solr.test.sys.prop1", "propone");
-    System.setProperty("solr.test.sys.prop2", "proptwo");
-  }
-  
-  @Override
-  protected void createServers(int numShards) throws Exception {
-    System.setProperty("collection", "control_collection");
-    controlJetty = createJetty(testDir, testDir + "/control/data", "control_shard");
-    System.clearProperty("collection");
-    controlClient = createNewSolrServer(controlJetty.getLocalPort());
-
-    StringBuilder sb = new StringBuilder();
-    for (int i = 1; i <= numShards; i++) {
-      if (sb.length() > 0) sb.append(',');
-      JettySolrRunner j = createJetty(testDir, testDir + "/jetty" + i, "shard" + (i + 2));
-      jettys.add(j);
-      clients.add(createNewSolrServer(j.getLocalPort()));
-      sb.append("localhost:").append(j.getLocalPort()).append(context);
-    }
-
-    shards = sb.toString();
-  }
-
-  @Override
-  public void tearDown() throws Exception {
-    if (DEBUG) {
-      printLayout();
-    }
-    zkServer.shutdown();
-    System.clearProperty("zkHost");
-    System.clearProperty("collection");
-    System.clearProperty("solr.test.sys.prop1");
-    System.clearProperty("solr.test.sys.prop2");
-    super.tearDown();
-    resetExceptionIgnores();
-    SolrConfig.severeErrors.clear();
-  }
-  
-  protected void printLayout() throws Exception {
-    SolrZkClient zkClient = new SolrZkClient(zkServer.getZkHost(), AbstractZkTestCase.TIMEOUT);
-    zkClient.printLayoutToStdOut();
-    zkClient.close();
-  }
-}
+package org.apache.solr.cloud;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+
+import org.apache.solr.BaseDistributedSearchTestCase;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.core.SolrConfig;
+import org.junit.Before;
+
+public abstract class AbstractDistributedZkTestCase extends BaseDistributedSearchTestCase {
+  private static final boolean DEBUG = false;
+  protected ZkTestServer zkServer;
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    log.info("####SETUP_START " + getName());
+    
+    ignoreException("java.nio.channels.ClosedChannelException");
+    
+    String zkDir = testDir.getAbsolutePath() + File.separator
+    + "zookeeper/server1/data";
+    zkServer = new ZkTestServer(zkDir);
+    zkServer.run();
+    
+    System.setProperty("zkHost", zkServer.getZkAddress());
+    
+    AbstractZkTestCase.buildZooKeeper(zkServer.getZkHost(), zkServer.getZkAddress(), "solrconfig.xml", "schema.xml");
+
+    // set some system properties for use by tests
+    System.setProperty("solr.test.sys.prop1", "propone");
+    System.setProperty("solr.test.sys.prop2", "proptwo");
+  }
+  
+  @Override
+  protected void createServers(int numShards) throws Exception {
+    System.setProperty("collection", "control_collection");
+    controlJetty = createJetty(testDir, testDir + "/control/data", "control_shard");
+    System.clearProperty("collection");
+    controlClient = createNewSolrServer(controlJetty.getLocalPort());
+
+    StringBuilder sb = new StringBuilder();
+    for (int i = 1; i <= numShards; i++) {
+      if (sb.length() > 0) sb.append(',');
+      JettySolrRunner j = createJetty(testDir, testDir + "/jetty" + i, "shard" + (i + 2));
+      jettys.add(j);
+      clients.add(createNewSolrServer(j.getLocalPort()));
+      sb.append("localhost:").append(j.getLocalPort()).append(context);
+    }
+
+    shards = sb.toString();
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    if (DEBUG) {
+      printLayout();
+    }
+    zkServer.shutdown();
+    System.clearProperty("zkHost");
+    System.clearProperty("collection");
+    System.clearProperty("solr.test.sys.prop1");
+    System.clearProperty("solr.test.sys.prop2");
+    super.tearDown();
+    resetExceptionIgnores();
+    SolrConfig.severeErrors.clear();
+  }
+  
+  protected void printLayout() throws Exception {
+    SolrZkClient zkClient = new SolrZkClient(zkServer.getZkHost(), AbstractZkTestCase.TIMEOUT);
+    zkClient.printLayoutToStdOut();
+    zkClient.close();
+  }
+}

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/cloud/AbstractZkTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/cloud/AbstractZkTestCase.java?rev=1065327&r1=1065326&r2=1065327&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/cloud/AbstractZkTestCase.java (original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/cloud/AbstractZkTestCase.java Sun Jan 30 17:29:55 2011
@@ -1,147 +1,147 @@
-package org.apache.solr.cloud;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.File;
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.core.SolrConfig;
-import org.apache.zookeeper.CreateMode;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Base test class for ZooKeeper tests.
- */
-public abstract class AbstractZkTestCase extends SolrTestCaseJ4 {
-
-  static final int TIMEOUT = 10000;
-
-  private static final boolean DEBUG = false;
-
-  protected static Logger log = LoggerFactory
-      .getLogger(AbstractZkTestCase.class);
-
-  protected static ZkTestServer zkServer;
-
-  protected static String zkDir;
-
-
-  @BeforeClass
-  public static void azt_beforeClass() throws Exception {
-    createTempDir();
-    zkDir = dataDir.getAbsolutePath() + File.separator
-        + "zookeeper/server1/data";
-    zkServer = new ZkTestServer(zkDir);
-    zkServer.run();
-    
-    System.setProperty("zkHost", zkServer.getZkAddress());
-    System.setProperty("hostPort", "0000");
-    
-    buildZooKeeper(zkServer.getZkHost(), zkServer.getZkAddress(),
-        "solrconfig.xml", "schema.xml");
-    
-    initCore("solrconfig.xml", "schema.xml");
-  }
-
-  // static to share with distrib test
-  static void buildZooKeeper(String zkHost, String zkAddress, String config,
-      String schema) throws Exception {
-    SolrZkClient zkClient = new SolrZkClient(zkHost, AbstractZkTestCase.TIMEOUT);
-    zkClient.makePath("/solr");
-    zkClient.close();
-
-    zkClient = new SolrZkClient(zkAddress, AbstractZkTestCase.TIMEOUT);
-
-    ZkNodeProps props = new ZkNodeProps();
-    props.put("configName", "conf1");
-    zkClient.makePath("/collections/collection1", props.store(), CreateMode.PERSISTENT);
-    zkClient.makePath("/collections/collection1/shards", CreateMode.PERSISTENT);
-
-    zkClient.makePath("/collections/control_collection", props.store(), CreateMode.PERSISTENT);
-    zkClient.makePath("/collections/control_collection/shards", CreateMode.PERSISTENT);
-
-    putConfig(zkClient, config);
-    putConfig(zkClient, schema);
-    putConfig(zkClient, "stopwords.txt");
-    putConfig(zkClient, "protwords.txt");
-    putConfig(zkClient, "mapping-ISOLatin1Accent.txt");
-    putConfig(zkClient, "old_synonyms.txt");
-    putConfig(zkClient, "synonyms.txt");
-    
-    zkClient.close();
-  }
-
-  private static void putConfig(SolrZkClient zkConnection, String name)
-      throws Exception {
-    zkConnection.setData("/configs/conf1/" + name, getFile("solr"
-        + File.separator + "conf" + File.separator + name));
-  }
-
-  @Override
-  public void tearDown() throws Exception {
-    if (DEBUG) {
-      printLayout(zkServer.getZkHost());
-    }
-
-    SolrConfig.severeErrors.clear();
-    super.tearDown();
-  }
-  
-  @AfterClass
-  public static void azt_afterClass() throws IOException {
-    zkServer.shutdown();
-    System.clearProperty("zkHost");
-    System.clearProperty("solr.test.sys.prop1");
-    System.clearProperty("solr.test.sys.prop2");
-  }
-
-  protected void printLayout(String zkHost) throws Exception {
-    SolrZkClient zkClient = new SolrZkClient(zkHost, AbstractZkTestCase.TIMEOUT);
-    zkClient.printLayoutToStdOut();
-    zkClient.close();
-  }
-
-  static void makeSolrZkNode(String zkHost) throws Exception {
-    SolrZkClient zkClient = new SolrZkClient(zkHost, TIMEOUT);
-    zkClient.makePath("/solr");
-    zkClient.close();
-  }
-  
-  static void tryCleanSolrZkNode(String zkHost) throws Exception {
-    tryCleanPath(zkHost, "/solr");
-  }
-  
-  static void tryCleanPath(String zkHost, String path) throws Exception {
-    SolrZkClient zkClient = new SolrZkClient(zkHost, TIMEOUT);
-    if (zkClient.exists(path)) {
-      List<String> children = zkClient.getChildren(path, null);
-      for (String string : children) {
-        tryCleanPath(zkHost, path+"/"+string);
-      }
-      zkClient.delete(path, -1);
-    }
-    zkClient.close();
-  }
-}
+package org.apache.solr.cloud;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.core.SolrConfig;
+import org.apache.zookeeper.CreateMode;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Base test class for ZooKeeper tests.
+ */
+public abstract class AbstractZkTestCase extends SolrTestCaseJ4 {
+
+  static final int TIMEOUT = 10000;
+
+  private static final boolean DEBUG = false;
+
+  protected static Logger log = LoggerFactory
+      .getLogger(AbstractZkTestCase.class);
+
+  protected static ZkTestServer zkServer;
+
+  protected static String zkDir;
+
+
+  @BeforeClass
+  public static void azt_beforeClass() throws Exception {
+    createTempDir();
+    zkDir = dataDir.getAbsolutePath() + File.separator
+        + "zookeeper/server1/data";
+    zkServer = new ZkTestServer(zkDir);
+    zkServer.run();
+    
+    System.setProperty("zkHost", zkServer.getZkAddress());
+    System.setProperty("hostPort", "0000");
+    
+    buildZooKeeper(zkServer.getZkHost(), zkServer.getZkAddress(),
+        "solrconfig.xml", "schema.xml");
+    
+    initCore("solrconfig.xml", "schema.xml");
+  }
+
+  // static to share with distrib test
+  static void buildZooKeeper(String zkHost, String zkAddress, String config,
+      String schema) throws Exception {
+    SolrZkClient zkClient = new SolrZkClient(zkHost, AbstractZkTestCase.TIMEOUT);
+    zkClient.makePath("/solr");
+    zkClient.close();
+
+    zkClient = new SolrZkClient(zkAddress, AbstractZkTestCase.TIMEOUT);
+
+    ZkNodeProps props = new ZkNodeProps();
+    props.put("configName", "conf1");
+    zkClient.makePath("/collections/collection1", props.store(), CreateMode.PERSISTENT);
+    zkClient.makePath("/collections/collection1/shards", CreateMode.PERSISTENT);
+
+    zkClient.makePath("/collections/control_collection", props.store(), CreateMode.PERSISTENT);
+    zkClient.makePath("/collections/control_collection/shards", CreateMode.PERSISTENT);
+
+    putConfig(zkClient, config);
+    putConfig(zkClient, schema);
+    putConfig(zkClient, "stopwords.txt");
+    putConfig(zkClient, "protwords.txt");
+    putConfig(zkClient, "mapping-ISOLatin1Accent.txt");
+    putConfig(zkClient, "old_synonyms.txt");
+    putConfig(zkClient, "synonyms.txt");
+    
+    zkClient.close();
+  }
+
+  private static void putConfig(SolrZkClient zkConnection, String name)
+      throws Exception {
+    zkConnection.setData("/configs/conf1/" + name, getFile("solr"
+        + File.separator + "conf" + File.separator + name));
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    if (DEBUG) {
+      printLayout(zkServer.getZkHost());
+    }
+
+    SolrConfig.severeErrors.clear();
+    super.tearDown();
+  }
+  
+  @AfterClass
+  public static void azt_afterClass() throws IOException {
+    zkServer.shutdown();
+    System.clearProperty("zkHost");
+    System.clearProperty("solr.test.sys.prop1");
+    System.clearProperty("solr.test.sys.prop2");
+  }
+
+  protected void printLayout(String zkHost) throws Exception {
+    SolrZkClient zkClient = new SolrZkClient(zkHost, AbstractZkTestCase.TIMEOUT);
+    zkClient.printLayoutToStdOut();
+    zkClient.close();
+  }
+
+  static void makeSolrZkNode(String zkHost) throws Exception {
+    SolrZkClient zkClient = new SolrZkClient(zkHost, TIMEOUT);
+    zkClient.makePath("/solr");
+    zkClient.close();
+  }
+  
+  static void tryCleanSolrZkNode(String zkHost) throws Exception {
+    tryCleanPath(zkHost, "/solr");
+  }
+  
+  static void tryCleanPath(String zkHost, String path) throws Exception {
+    SolrZkClient zkClient = new SolrZkClient(zkHost, TIMEOUT);
+    if (zkClient.exists(path)) {
+      List<String> children = zkClient.getChildren(path, null);
+      for (String string : children) {
+        tryCleanPath(zkHost, path+"/"+string);
+      }
+      zkClient.delete(path, -1);
+    }
+    zkClient.close();
+  }
+}

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java?rev=1065327&r1=1065326&r2=1065327&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java (original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java Sun Jan 30 17:29:55 2011
@@ -1,284 +1,284 @@
-package org.apache.solr.cloud;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.net.MalformedURLException;
-
-import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
-import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.junit.BeforeClass;
-
-/**
- *
- */
-public class BasicDistributedZkTest extends AbstractDistributedZkTestCase {
-  
-  private static final String DEFAULT_COLLECTION = "collection1";
-  private static final boolean DEBUG = false;
-  String t1="a_t";
-  String i1="a_si";
-  String nint = "n_i";
-  String tint = "n_ti";
-  String nfloat = "n_f";
-  String tfloat = "n_tf";
-  String ndouble = "n_d";
-  String tdouble = "n_td";
-  String nlong = "n_l";
-  String tlong = "n_tl";
-  String ndate = "n_dt";
-  String tdate = "n_tdt";
-  
-  String oddField="oddField_s";
-  String missingField="ignore_exception__missing_but_valid_field_t";
-  String invalidField="ignore_exception__invalid_field_not_in_schema";
-  
-  public BasicDistributedZkTest() {
-    fixShardCount = true;
-    
-    System.setProperty("CLOUD_UPDATE_DELAY", "0");
-  }
-
-  
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    System.setProperty("solr.solr.home", SolrTestCaseJ4.TEST_HOME);
-  }
-  
-  @Override
-  protected void setDistributedParams(ModifiableSolrParams params) {
-
-    if (r.nextBoolean()) {
-      // don't set shards, let that be figured out from the cloud state
-      params.set("distrib", "true");
-    } else {
-      // use shard ids rather than physical locations
-      StringBuilder sb = new StringBuilder();
-      for (int i = 0; i < shardCount; i++) {
-        if (i > 0)
-          sb.append(',');
-        sb.append("shard" + (i + 3));
-      }
-      params.set("shards", sb.toString());
-      params.set("distrib", "true");
-    }
-  }
-  
-  @Override
-  public void doTest() throws Exception {
-    del("*:*");
-    indexr(id,1, i1, 100, tlong, 100,t1,"now is the time for all good men"
-            ,"foo_f", 1.414f, "foo_b", "true", "foo_d", 1.414d);
-    indexr(id,2, i1, 50 , tlong, 50,t1,"to come to the aid of their country."
-    );
-    indexr(id,3, i1, 2, tlong, 2,t1,"how now brown cow"
-    );
-    indexr(id,4, i1, -100 ,tlong, 101,t1,"the quick fox jumped over the lazy dog"
-    );
-    indexr(id,5, i1, 500, tlong, 500 ,t1,"the quick fox jumped way over the lazy dog"
-    );
-    indexr(id,6, i1, -600, tlong, 600 ,t1,"humpty dumpy sat on a wall");
-    indexr(id,7, i1, 123, tlong, 123 ,t1,"humpty dumpy had a great fall");
-    indexr(id,8, i1, 876, tlong, 876,t1,"all the kings horses and all the kings men");
-    indexr(id,9, i1, 7, tlong, 7,t1,"couldn't put humpty together again");
-    indexr(id,10, i1, 4321, tlong, 4321,t1,"this too shall pass");
-    indexr(id,11, i1, -987, tlong, 987,t1,"An eye for eye only ends up making the whole world blind.");
-    indexr(id,12, i1, 379, tlong, 379,t1,"Great works are performed, not by strength, but by perseverance.");
-    indexr(id,13, i1, 232, tlong, 232,t1,"no eggs on wall, lesson learned", oddField, "odd man out");
-
-    indexr(id, 14, "SubjectTerms_mfacet", new String[]  {"mathematical models", "mathematical analysis"});
-    indexr(id, 15, "SubjectTerms_mfacet", new String[]  {"test 1", "test 2", "test3"});
-    indexr(id, 16, "SubjectTerms_mfacet", new String[]  {"test 1", "test 2", "test3"});
-    String[] vals = new String[100];
-    for (int i=0; i<100; i++) {
-      vals[i] = "test " + i;
-    }
-    indexr(id, 17, "SubjectTerms_mfacet", vals);
-
-    for (int i=100; i<150; i++) {
-      indexr(id, i);      
-    }
-
-    commit();
-
-    handle.clear();
-    handle.put("QTime", SKIPVAL);
-    handle.put("timestamp", SKIPVAL);
-
-    // random value sort
-    for (String f : fieldNames) {
-      query("q","*:*", "sort",f+" desc");
-      query("q","*:*", "sort",f+" asc");
-    }
-
-    // these queries should be exactly ordered and scores should exactly match
-    query("q","*:*", "sort",i1+" desc");
-    query("q","*:*", "sort",i1+" asc");
-    query("q","*:*", "sort",i1+" desc", "fl","*,score");
-    query("q","*:*", "sort",tlong+" asc", "fl","score");  // test legacy behavior - "score"=="*,score"
-    query("q","*:*", "sort",tlong+" desc");
-    handle.put("maxScore", SKIPVAL);
-    query("q","{!func}"+i1);// does not expect maxScore. So if it comes ,ignore it. JavaBinCodec.writeSolrDocumentList()
-    //is agnostic of request params.
-    handle.remove("maxScore");
-    query("q","{!func}"+i1, "fl","*,score");  // even scores should match exactly here
-
-    handle.put("highlighting", UNORDERED);
-    handle.put("response", UNORDERED);
-
-    handle.put("maxScore", SKIPVAL);
-    query("q","quick");
-    query("q","all","fl","id","start","0");
-    query("q","all","fl","foofoofoo","start","0");  // no fields in returned docs
-    query("q","all","fl","id","start","100");
-
-    handle.put("score", SKIPVAL);
-    query("q","quick","fl","*,score");
-    query("q","all","fl","*,score","start","1");
-    query("q","all","fl","*,score","start","100");
-
-    query("q","now their fox sat had put","fl","*,score",
-            "hl","true","hl.fl",t1);
-
-    query("q","now their fox sat had put","fl","foofoofoo",
-            "hl","true","hl.fl",t1);
-
-    query("q","matchesnothing","fl","*,score");  
-
-    query("q","*:*", "rows",100, "facet","true", "facet.field",t1);
-    query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","count");
-    query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","count", "facet.mincount",2);
-    query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","index");
-    query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","index", "facet.mincount",2);
-    query("q","*:*", "rows",100, "facet","true", "facet.field",t1,"facet.limit",1);
-    query("q","*:*", "rows",100, "facet","true", "facet.query","quick", "facet.query","all", "facet.query","*:*");
-    query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.offset",1);
-    query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.mincount",2);
-
-    // test faceting multiple things at once
-    query("q","*:*", "rows",100, "facet","true", "facet.query","quick", "facet.query","all", "facet.query","*:*"
-    ,"facet.field",t1);
-
-    // test filter tagging, facet exclusion, and naming (multi-select facet support)
-    query("q","*:*", "rows",100, "facet","true", "facet.query","{!key=myquick}quick", "facet.query","{!key=myall ex=a}all", "facet.query","*:*"
-    ,"facet.field","{!key=mykey ex=a}"+t1
-    ,"facet.field","{!key=other ex=b}"+t1
-    ,"facet.field","{!key=again ex=a,b}"+t1
-    ,"facet.field",t1
-    ,"fq","{!tag=a}id:[1 TO 7]", "fq","{!tag=b}id:[3 TO 9]"
-    );
-    query("q", "*:*", "facet", "true", "facet.field", "{!ex=t1}SubjectTerms_mfacet", "fq", "{!tag=t1}SubjectTerms_mfacet:(test 1)", "facet.limit", "10", "facet.mincount", "1");
-
-    // test field that is valid in schema but missing in all shards
-    query("q","*:*", "rows",100, "facet","true", "facet.field",missingField, "facet.mincount",2);
-    // test field that is valid in schema and missing in some shards
-    query("q","*:*", "rows",100, "facet","true", "facet.field",oddField, "facet.mincount",2);
-
-    query("q","*:*", "sort",i1+" desc", "stats", "true", "stats.field", i1);
-
-    /*** TODO: the failure may come back in "exception"
-    try {
-      // test error produced for field that is invalid for schema
-      query("q","*:*", "rows",100, "facet","true", "facet.field",invalidField, "facet.mincount",2);
-      TestCase.fail("SolrServerException expected for invalid field that is not in schema");
-    } catch (SolrServerException ex) {
-      // expected
-    }
-    ***/
-
-    // Try to get better coverage for refinement queries by turning off over requesting.
-    // This makes it much more likely that we may not get the top facet values and hence
-    // we turn of that checking.
-    handle.put("facet_fields", SKIPVAL);    
-    query("q","*:*", "rows",0, "facet","true", "facet.field",t1,"facet.limit",5, "facet.shard.limit",5);
-    // check a complex key name
-    query("q","*:*", "rows",0, "facet","true", "facet.field","{!key='a b/c \\' \\} foo'}"+t1,"facet.limit",5, "facet.shard.limit",5);
-    handle.remove("facet_fields");
-
-
-    // index the same document to two servers and make sure things
-    // don't blow up.
-    if (clients.size()>=2) {
-      index(id,100, i1, 107 ,t1,"oh no, a duplicate!");
-      for (int i=0; i<clients.size(); i++) {
-        index_specific(i, id,100, i1, 107 ,t1,"oh no, a duplicate!");
-      }
-      commit();
-      query("q","duplicate", "hl","true", "hl.fl", t1);
-      query("q","fox duplicate horses", "hl","true", "hl.fl", t1);
-      query("q","*:*", "rows",100);
-    }
-
-    // test debugging
-    handle.put("explain", UNORDERED);
-    handle.put("debug", UNORDERED);
-    handle.put("time", SKIPVAL);
-    query("q","now their fox sat had put","fl","*,score",CommonParams.DEBUG_QUERY, "true");
-    query("q", "id:[1 TO 5]", CommonParams.DEBUG_QUERY, "true");
-    query("q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.TIMING);
-    query("q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.RESULTS);
-    query("q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.QUERY);
-
-    // TODO: This test currently fails because debug info is obtained only
-    // on shards with matches.
-    // query("q","matchesnothing","fl","*,score", "debugQuery", "true");
-
-    // Thread.sleep(10000000000L);
-    if (DEBUG) {
-      super.printLayout();
-    }
-  }
-
-  volatile CloudSolrServer solrj;
-
-  @Override
-  protected QueryResponse queryServer(ModifiableSolrParams params) throws SolrServerException {
-
-    if (r.nextBoolean())
-      return super.queryServer(params);
-
-    // use the distributed solrj client
-    if (solrj == null) {
-      synchronized(this) {
-        try {
-          CloudSolrServer server = new CloudSolrServer(zkServer.getZkAddress());
-          server.setDefaultCollection(DEFAULT_COLLECTION);
-          solrj = server;
-        } catch (MalformedURLException e) {
-          throw new RuntimeException(e);
-        }
-      }
-    }
-
-    if (r.nextBoolean())
-      params.set("collection",DEFAULT_COLLECTION);
-
-    QueryResponse rsp = solrj.query(params);
-    return rsp;
-  }
-  
-  @Override
-  public void tearDown() throws Exception {
-    super.tearDown();
-    System.clearProperty("CLOUD_UPDATE_DELAY");
-    System.clearProperty("zkHost");
-  }
-}
+package org.apache.solr.cloud;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.net.MalformedURLException;
+
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudSolrServer;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.common.params.CommonParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.junit.BeforeClass;
+
+/**
+ *
+ */
+public class BasicDistributedZkTest extends AbstractDistributedZkTestCase {
+  
+  private static final String DEFAULT_COLLECTION = "collection1";
+  private static final boolean DEBUG = false;
+  String t1="a_t";
+  String i1="a_si";
+  String nint = "n_i";
+  String tint = "n_ti";
+  String nfloat = "n_f";
+  String tfloat = "n_tf";
+  String ndouble = "n_d";
+  String tdouble = "n_td";
+  String nlong = "n_l";
+  String tlong = "n_tl";
+  String ndate = "n_dt";
+  String tdate = "n_tdt";
+  
+  String oddField="oddField_s";
+  String missingField="ignore_exception__missing_but_valid_field_t";
+  String invalidField="ignore_exception__invalid_field_not_in_schema";
+  
+  public BasicDistributedZkTest() {
+    fixShardCount = true;
+    
+    System.setProperty("CLOUD_UPDATE_DELAY", "0");
+  }
+
+  
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    System.setProperty("solr.solr.home", SolrTestCaseJ4.TEST_HOME);
+  }
+  
+  @Override
+  protected void setDistributedParams(ModifiableSolrParams params) {
+
+    if (r.nextBoolean()) {
+      // don't set shards, let that be figured out from the cloud state
+      params.set("distrib", "true");
+    } else {
+      // use shard ids rather than physical locations
+      StringBuilder sb = new StringBuilder();
+      for (int i = 0; i < shardCount; i++) {
+        if (i > 0)
+          sb.append(',');
+        sb.append("shard" + (i + 3));
+      }
+      params.set("shards", sb.toString());
+      params.set("distrib", "true");
+    }
+  }
+  
+  @Override
+  public void doTest() throws Exception {
+    del("*:*");
+    indexr(id,1, i1, 100, tlong, 100,t1,"now is the time for all good men"
+            ,"foo_f", 1.414f, "foo_b", "true", "foo_d", 1.414d);
+    indexr(id,2, i1, 50 , tlong, 50,t1,"to come to the aid of their country."
+    );
+    indexr(id,3, i1, 2, tlong, 2,t1,"how now brown cow"
+    );
+    indexr(id,4, i1, -100 ,tlong, 101,t1,"the quick fox jumped over the lazy dog"
+    );
+    indexr(id,5, i1, 500, tlong, 500 ,t1,"the quick fox jumped way over the lazy dog"
+    );
+    indexr(id,6, i1, -600, tlong, 600 ,t1,"humpty dumpy sat on a wall");
+    indexr(id,7, i1, 123, tlong, 123 ,t1,"humpty dumpy had a great fall");
+    indexr(id,8, i1, 876, tlong, 876,t1,"all the kings horses and all the kings men");
+    indexr(id,9, i1, 7, tlong, 7,t1,"couldn't put humpty together again");
+    indexr(id,10, i1, 4321, tlong, 4321,t1,"this too shall pass");
+    indexr(id,11, i1, -987, tlong, 987,t1,"An eye for eye only ends up making the whole world blind.");
+    indexr(id,12, i1, 379, tlong, 379,t1,"Great works are performed, not by strength, but by perseverance.");
+    indexr(id,13, i1, 232, tlong, 232,t1,"no eggs on wall, lesson learned", oddField, "odd man out");
+
+    indexr(id, 14, "SubjectTerms_mfacet", new String[]  {"mathematical models", "mathematical analysis"});
+    indexr(id, 15, "SubjectTerms_mfacet", new String[]  {"test 1", "test 2", "test3"});
+    indexr(id, 16, "SubjectTerms_mfacet", new String[]  {"test 1", "test 2", "test3"});
+    String[] vals = new String[100];
+    for (int i=0; i<100; i++) {
+      vals[i] = "test " + i;
+    }
+    indexr(id, 17, "SubjectTerms_mfacet", vals);
+
+    for (int i=100; i<150; i++) {
+      indexr(id, i);      
+    }
+
+    commit();
+
+    handle.clear();
+    handle.put("QTime", SKIPVAL);
+    handle.put("timestamp", SKIPVAL);
+
+    // random value sort
+    for (String f : fieldNames) {
+      query("q","*:*", "sort",f+" desc");
+      query("q","*:*", "sort",f+" asc");
+    }
+
+    // these queries should be exactly ordered and scores should exactly match
+    query("q","*:*", "sort",i1+" desc");
+    query("q","*:*", "sort",i1+" asc");
+    query("q","*:*", "sort",i1+" desc", "fl","*,score");
+    query("q","*:*", "sort",tlong+" asc", "fl","score");  // test legacy behavior - "score"=="*,score"
+    query("q","*:*", "sort",tlong+" desc");
+    handle.put("maxScore", SKIPVAL);
+    query("q","{!func}"+i1);// does not expect maxScore. So if it comes ,ignore it. JavaBinCodec.writeSolrDocumentList()
+    //is agnostic of request params.
+    handle.remove("maxScore");
+    query("q","{!func}"+i1, "fl","*,score");  // even scores should match exactly here
+
+    handle.put("highlighting", UNORDERED);
+    handle.put("response", UNORDERED);
+
+    handle.put("maxScore", SKIPVAL);
+    query("q","quick");
+    query("q","all","fl","id","start","0");
+    query("q","all","fl","foofoofoo","start","0");  // no fields in returned docs
+    query("q","all","fl","id","start","100");
+
+    handle.put("score", SKIPVAL);
+    query("q","quick","fl","*,score");
+    query("q","all","fl","*,score","start","1");
+    query("q","all","fl","*,score","start","100");
+
+    query("q","now their fox sat had put","fl","*,score",
+            "hl","true","hl.fl",t1);
+
+    query("q","now their fox sat had put","fl","foofoofoo",
+            "hl","true","hl.fl",t1);
+
+    query("q","matchesnothing","fl","*,score");  
+
+    query("q","*:*", "rows",100, "facet","true", "facet.field",t1);
+    query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","count");
+    query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","count", "facet.mincount",2);
+    query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","index");
+    query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","index", "facet.mincount",2);
+    query("q","*:*", "rows",100, "facet","true", "facet.field",t1,"facet.limit",1);
+    query("q","*:*", "rows",100, "facet","true", "facet.query","quick", "facet.query","all", "facet.query","*:*");
+    query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.offset",1);
+    query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.mincount",2);
+
+    // test faceting multiple things at once
+    query("q","*:*", "rows",100, "facet","true", "facet.query","quick", "facet.query","all", "facet.query","*:*"
+    ,"facet.field",t1);
+
+    // test filter tagging, facet exclusion, and naming (multi-select facet support)
+    query("q","*:*", "rows",100, "facet","true", "facet.query","{!key=myquick}quick", "facet.query","{!key=myall ex=a}all", "facet.query","*:*"
+    ,"facet.field","{!key=mykey ex=a}"+t1
+    ,"facet.field","{!key=other ex=b}"+t1
+    ,"facet.field","{!key=again ex=a,b}"+t1
+    ,"facet.field",t1
+    ,"fq","{!tag=a}id:[1 TO 7]", "fq","{!tag=b}id:[3 TO 9]"
+    );
+    query("q", "*:*", "facet", "true", "facet.field", "{!ex=t1}SubjectTerms_mfacet", "fq", "{!tag=t1}SubjectTerms_mfacet:(test 1)", "facet.limit", "10", "facet.mincount", "1");
+
+    // test field that is valid in schema but missing in all shards
+    query("q","*:*", "rows",100, "facet","true", "facet.field",missingField, "facet.mincount",2);
+    // test field that is valid in schema and missing in some shards
+    query("q","*:*", "rows",100, "facet","true", "facet.field",oddField, "facet.mincount",2);
+
+    query("q","*:*", "sort",i1+" desc", "stats", "true", "stats.field", i1);
+
+    /*** TODO: the failure may come back in "exception"
+    try {
+      // test error produced for field that is invalid for schema
+      query("q","*:*", "rows",100, "facet","true", "facet.field",invalidField, "facet.mincount",2);
+      TestCase.fail("SolrServerException expected for invalid field that is not in schema");
+    } catch (SolrServerException ex) {
+      // expected
+    }
+    ***/
+
+    // Try to get better coverage for refinement queries by turning off over requesting.
+    // This makes it much more likely that we may not get the top facet values and hence
+    // we turn of that checking.
+    handle.put("facet_fields", SKIPVAL);    
+    query("q","*:*", "rows",0, "facet","true", "facet.field",t1,"facet.limit",5, "facet.shard.limit",5);
+    // check a complex key name
+    query("q","*:*", "rows",0, "facet","true", "facet.field","{!key='a b/c \\' \\} foo'}"+t1,"facet.limit",5, "facet.shard.limit",5);
+    handle.remove("facet_fields");
+
+
+    // index the same document to two servers and make sure things
+    // don't blow up.
+    if (clients.size()>=2) {
+      index(id,100, i1, 107 ,t1,"oh no, a duplicate!");
+      for (int i=0; i<clients.size(); i++) {
+        index_specific(i, id,100, i1, 107 ,t1,"oh no, a duplicate!");
+      }
+      commit();
+      query("q","duplicate", "hl","true", "hl.fl", t1);
+      query("q","fox duplicate horses", "hl","true", "hl.fl", t1);
+      query("q","*:*", "rows",100);
+    }
+
+    // test debugging
+    handle.put("explain", UNORDERED);
+    handle.put("debug", UNORDERED);
+    handle.put("time", SKIPVAL);
+    query("q","now their fox sat had put","fl","*,score",CommonParams.DEBUG_QUERY, "true");
+    query("q", "id:[1 TO 5]", CommonParams.DEBUG_QUERY, "true");
+    query("q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.TIMING);
+    query("q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.RESULTS);
+    query("q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.QUERY);
+
+    // TODO: This test currently fails because debug info is obtained only
+    // on shards with matches.
+    // query("q","matchesnothing","fl","*,score", "debugQuery", "true");
+
+    // Thread.sleep(10000000000L);
+    if (DEBUG) {
+      super.printLayout();
+    }
+  }
+
+  volatile CloudSolrServer solrj;
+
+  @Override
+  protected QueryResponse queryServer(ModifiableSolrParams params) throws SolrServerException {
+
+    if (r.nextBoolean())
+      return super.queryServer(params);
+
+    // use the distributed solrj client
+    if (solrj == null) {
+      synchronized(this) {
+        try {
+          CloudSolrServer server = new CloudSolrServer(zkServer.getZkAddress());
+          server.setDefaultCollection(DEFAULT_COLLECTION);
+          solrj = server;
+        } catch (MalformedURLException e) {
+          throw new RuntimeException(e);
+        }
+      }
+    }
+
+    if (r.nextBoolean())
+      params.set("collection",DEFAULT_COLLECTION);
+
+    QueryResponse rsp = solrj.query(params);
+    return rsp;
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    super.tearDown();
+    System.clearProperty("CLOUD_UPDATE_DELAY");
+    System.clearProperty("zkHost");
+  }
+}

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/cloud/BasicZkTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/cloud/BasicZkTest.java?rev=1065327&r1=1065326&r2=1065327&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/cloud/BasicZkTest.java (original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/cloud/BasicZkTest.java Sun Jan 30 17:29:55 2011
@@ -1,138 +1,138 @@
-package org.apache.solr.cloud;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.index.LogMergePolicy;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.update.SolrIndexWriter;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- *
- */
-public class BasicZkTest extends AbstractZkTestCase {
-  
-  @BeforeClass
-  public static void beforeClass() {
-    System.setProperty("CLOUD_UPDATE_DELAY", "1");
-  }
-  
-  @Test
-  public void testBasic() throws Exception {
-    // test using ZooKeeper
-    assertTrue("Not using ZooKeeper", h.getCoreContainer().isZooKeeperAware());
-    
-    ZkController zkController = h.getCoreContainer().getZkController();
-    
-    // test merge factor picked up
-    SolrCore core = h.getCore();
-    SolrIndexWriter writer = new SolrIndexWriter("testWriter", core
-        .getNewIndexDir(), core.getDirectoryFactory(), false, core.getSchema(),
-        core.getSolrConfig().mainIndexConfig, core.getDeletionPolicy());
-    assertEquals("Mergefactor was not picked up", ((LogMergePolicy)writer.getConfig().getMergePolicy()).getMergeFactor(), 8);
-    writer.close();
-    
-    lrf.args.put("version", "2.0");
-    assertQ("test query on empty index", req("qlkciyopsbgzyvkylsjhchghjrdf"),
-        "//result[@numFound='0']");
-
-    // test escaping of ";"
-    assertU("deleting 42 for no reason at all", delI("42"));
-    assertU("adding doc#42", adoc("id", "42", "val_s", "aa;bb"));
-    assertU("does commit work?", commit());
-
-    assertQ("backslash escaping semicolon", req("id:42 AND val_s:aa\\;bb"),
-        "//*[@numFound='1']", "//int[@name='id'][.='42']");
-
-    assertQ("quote escaping semicolon", req("id:42 AND val_s:\"aa;bb\""),
-        "//*[@numFound='1']", "//int[@name='id'][.='42']");
-
-    assertQ("no escaping semicolon", req("id:42 AND val_s:aa"),
-        "//*[@numFound='0']");
-
-    assertU(delI("42"));
-    assertU(commit());
-    assertQ(req("id:42"), "//*[@numFound='0']");
-
-    // test overwrite default of true
-
-    assertU(adoc("id", "42", "val_s", "AAA"));
-    assertU(adoc("id", "42", "val_s", "BBB"));
-    assertU(commit());
-    assertQ(req("id:42"), "//*[@numFound='1']", "//str[.='BBB']");
-    assertU(adoc("id", "42", "val_s", "CCC"));
-    assertU(adoc("id", "42", "val_s", "DDD"));
-    assertU(commit());
-    assertQ(req("id:42"), "//*[@numFound='1']", "//str[.='DDD']");
-
-    // test deletes
-    String[] adds = new String[] { add(doc("id", "101"), "overwrite", "true"),
-        add(doc("id", "101"), "overwrite", "true"),
-        add(doc("id", "105"), "overwrite", "false"),
-        add(doc("id", "102"), "overwrite", "true"),
-        add(doc("id", "103"), "overwrite", "false"),
-        add(doc("id", "101"), "overwrite", "true"), };
-    for (String a : adds) {
-      assertU(a, a);
-    }
-    assertU(commit());
-    
-    zkServer.shutdown();
-    
-    Thread.sleep(300);
-    
-    // try a reconnect from disconnect
-    zkServer = new ZkTestServer(zkDir);
-    zkServer.run();
-    
-    Thread.sleep(300);
-    
-    // ensure zk still thinks node is up
-    assertTrue(
-        zkController.getCloudState().getLiveNodes().toString(),
-        zkController.getCloudState().liveNodesContain(
-            zkController.getNodeName()));
-
-    // test maxint
-    assertQ(req("q", "id:[100 TO 110]", "rows", "2147483647"),
-        "//*[@numFound='4']");
-
-    // test big limit
-    assertQ(req("q", "id:[100 TO 111]", "rows", "1147483647"),
-        "//*[@numFound='4']");
-
-    assertQ(req("id:[100 TO 110]"), "//*[@numFound='4']");
-    assertU(delI("102"));
-    assertU(commit());
-    assertQ(req("id:[100 TO 110]"), "//*[@numFound='3']");
-    assertU(delI("105"));
-    assertU(commit());
-    assertQ(req("id:[100 TO 110]"), "//*[@numFound='2']");
-    assertU(delQ("id:[100 TO 110]"));
-    assertU(commit());
-    assertQ(req("id:[100 TO 110]"), "//*[@numFound='0']");
-  }
-  
-  @AfterClass
-  public static void afterClass() {
-    System.clearProperty("CLOUD_UPDATE_DELAY");
-  }
-}
+package org.apache.solr.cloud;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.LogMergePolicy;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.update.SolrIndexWriter;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ *
+ */
+public class BasicZkTest extends AbstractZkTestCase {
+  
+  @BeforeClass
+  public static void beforeClass() {
+    System.setProperty("CLOUD_UPDATE_DELAY", "1");
+  }
+  
+  @Test
+  public void testBasic() throws Exception {
+    // test using ZooKeeper
+    assertTrue("Not using ZooKeeper", h.getCoreContainer().isZooKeeperAware());
+    
+    ZkController zkController = h.getCoreContainer().getZkController();
+    
+    // test merge factor picked up
+    SolrCore core = h.getCore();
+    SolrIndexWriter writer = new SolrIndexWriter("testWriter", core
+        .getNewIndexDir(), core.getDirectoryFactory(), false, core.getSchema(),
+        core.getSolrConfig().mainIndexConfig, core.getDeletionPolicy());
+    assertEquals("Mergefactor was not picked up", ((LogMergePolicy)writer.getConfig().getMergePolicy()).getMergeFactor(), 8);
+    writer.close();
+    
+    lrf.args.put("version", "2.0");
+    assertQ("test query on empty index", req("qlkciyopsbgzyvkylsjhchghjrdf"),
+        "//result[@numFound='0']");
+
+    // test escaping of ";"
+    assertU("deleting 42 for no reason at all", delI("42"));
+    assertU("adding doc#42", adoc("id", "42", "val_s", "aa;bb"));
+    assertU("does commit work?", commit());
+
+    assertQ("backslash escaping semicolon", req("id:42 AND val_s:aa\\;bb"),
+        "//*[@numFound='1']", "//int[@name='id'][.='42']");
+
+    assertQ("quote escaping semicolon", req("id:42 AND val_s:\"aa;bb\""),
+        "//*[@numFound='1']", "//int[@name='id'][.='42']");
+
+    assertQ("no escaping semicolon", req("id:42 AND val_s:aa"),
+        "//*[@numFound='0']");
+
+    assertU(delI("42"));
+    assertU(commit());
+    assertQ(req("id:42"), "//*[@numFound='0']");
+
+    // test overwrite default of true
+
+    assertU(adoc("id", "42", "val_s", "AAA"));
+    assertU(adoc("id", "42", "val_s", "BBB"));
+    assertU(commit());
+    assertQ(req("id:42"), "//*[@numFound='1']", "//str[.='BBB']");
+    assertU(adoc("id", "42", "val_s", "CCC"));
+    assertU(adoc("id", "42", "val_s", "DDD"));
+    assertU(commit());
+    assertQ(req("id:42"), "//*[@numFound='1']", "//str[.='DDD']");
+
+    // test adds with explicit overwrite flags, then deletes
+    String[] adds = new String[] { add(doc("id", "101"), "overwrite", "true"),
+        add(doc("id", "101"), "overwrite", "true"),
+        add(doc("id", "105"), "overwrite", "false"),
+        add(doc("id", "102"), "overwrite", "true"),
+        add(doc("id", "103"), "overwrite", "false"),
+        add(doc("id", "101"), "overwrite", "true"), };
+    for (String a : adds) {
+      assertU(a, a);
+    }
+    assertU(commit());
+    
+    zkServer.shutdown();
+    
+    Thread.sleep(300);
+    
+    // restart the server and make sure the client reconnects
+    zkServer = new ZkTestServer(zkDir);
+    zkServer.run();
+    
+    Thread.sleep(300);
+    
+    // ensure zk still thinks node is up
+    assertTrue(
+        zkController.getCloudState().getLiveNodes().toString(),
+        zkController.getCloudState().liveNodesContain(
+            zkController.getNodeName()));
+
+    // test maxint
+    assertQ(req("q", "id:[100 TO 110]", "rows", "2147483647"),
+        "//*[@numFound='4']");
+
+    // test big limit
+    assertQ(req("q", "id:[100 TO 111]", "rows", "1147483647"),
+        "//*[@numFound='4']");
+
+    assertQ(req("id:[100 TO 110]"), "//*[@numFound='4']");
+    assertU(delI("102"));
+    assertU(commit());
+    assertQ(req("id:[100 TO 110]"), "//*[@numFound='3']");
+    assertU(delI("105"));
+    assertU(commit());
+    assertQ(req("id:[100 TO 110]"), "//*[@numFound='2']");
+    assertU(delQ("id:[100 TO 110]"));
+    assertU(commit());
+    assertQ(req("id:[100 TO 110]"), "//*[@numFound='0']");
+  }
+  
+  @AfterClass
+  public static void afterClass() {
+    System.clearProperty("CLOUD_UPDATE_DELAY");
+  }
+}
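
Note on the overwrite block above: the six adds collapse to four documents because the three overwrite=true adds of id 101 replace one another, while 102, 103 and 105 each contribute a single document, so id:[100 TO 110] reports numFound='4'. A minimal sketch of the same behaviour, using only the harness helpers this test already calls (doc, add, assertU, commit, assertQ, req); the id used here is made up for illustration and is not part of this commit:

    // two overwrite=true adds of the same id leave a single document behind
    assertU(add(doc("id", "201"), "overwrite", "true"));
    assertU(add(doc("id", "201"), "overwrite", "true"));
    assertU(commit());
    assertQ(req("id:201"), "//*[@numFound='1']");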

Modified: lucene/dev/trunk/solr/src/test/org/apache/solr/cloud/CloudStateUpdateTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/src/test/org/apache/solr/cloud/CloudStateUpdateTest.java?rev=1065327&r1=1065326&r2=1065327&view=diff
==============================================================================
--- lucene/dev/trunk/solr/src/test/org/apache/solr/cloud/CloudStateUpdateTest.java (original)
+++ lucene/dev/trunk/solr/src/test/org/apache/solr/cloud/CloudStateUpdateTest.java Sun Jan 30 17:29:55 2011
@@ -1,255 +1,255 @@
-package org.apache.solr.cloud;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.File;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.common.cloud.CloudState;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.CoreContainer.Initializer;
-import org.apache.solr.core.CoreDescriptor;
-import org.apache.solr.core.SolrConfig;
-import org.apache.solr.core.SolrCore;
-import org.apache.zookeeper.CreateMode;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * TODO: look at hostPort used below
- */
-public class CloudStateUpdateTest extends SolrTestCaseJ4 {
-  protected static Logger log = LoggerFactory
-      .getLogger(AbstractZkTestCase.class);
-
-  private static final boolean VERBOSE = false;
-
-  protected ZkTestServer zkServer;
-
-  protected String zkDir;
-
-  private CoreContainer container1;
-
-  private CoreContainer container2;
-
-  private CoreContainer container3;
-
-  private File dataDir1;
-
-  private File dataDir2;
-
-  private File dataDir3;
-  
-  private File dataDir4;
-
-  private Initializer init2;
-  
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    initCore();
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    
-    System.setProperty("zkClientTimeout", "3000");
-    
-    zkDir = dataDir.getAbsolutePath() + File.separator
-        + "zookeeper/server1/data";
-    zkServer = new ZkTestServer(zkDir);
-    zkServer.run();
-    System.setProperty("zkHost", zkServer.getZkAddress());
-    AbstractZkTestCase.buildZooKeeper(zkServer.getZkHost(), zkServer
-        .getZkAddress(), "solrconfig.xml", "schema.xml");
-    
-    log.info("####SETUP_START " + getName());
-    dataDir1 = new File(dataDir + File.separator + "data1");
-    dataDir1.mkdirs();
-    
-    dataDir2 = new File(dataDir + File.separator + "data2");
-    dataDir2.mkdirs();
-    
-    dataDir3 = new File(dataDir + File.separator + "data3");
-    dataDir3.mkdirs();
-    
-    dataDir4 = new File(dataDir + File.separator + "data3");
-    dataDir4.mkdirs();
-    
-    // set some system properties for use by tests
-    System.setProperty("solr.test.sys.prop1", "propone");
-    System.setProperty("solr.test.sys.prop2", "proptwo");
-    
-    System.setProperty("hostPort", "1661");
-    CoreContainer.Initializer init1 = new CoreContainer.Initializer() {
-      {
-        this.dataDir = CloudStateUpdateTest.this.dataDir1.getAbsolutePath();
-      }
-    };
-    
-    container1 = init1.initialize();
-    System.clearProperty("hostPort");
-    
-    System.setProperty("hostPort", "1662");
-    init2 = new CoreContainer.Initializer() {
-      {
-        this.dataDir = CloudStateUpdateTest.this.dataDir2.getAbsolutePath();
-      }
-    };
-    
-    container2 = init2.initialize();
-    System.clearProperty("hostPort");
-    
-    System.setProperty("hostPort", "1663");
-    CoreContainer.Initializer init3 = new CoreContainer.Initializer() {
-      {
-        this.dataDir = CloudStateUpdateTest.this.dataDir3.getAbsolutePath();
-      }
-    };
-    container3 = init3.initialize();
-    System.clearProperty("hostPort");
-    
-    log.info("####SETUP_END " + getName());
-    
-  }
-
-  @Test
-  public void testCoreRegistration() throws Exception {
-    System.setProperty("CLOUD_UPDATE_DELAY", "1");
-    
-    ZkNodeProps props2 = new ZkNodeProps();
-    props2.put("configName", "conf1");
-    
-    SolrZkClient zkClient = new SolrZkClient(zkServer.getZkAddress(), AbstractZkTestCase.TIMEOUT);
-    zkClient.makePath("/collections/testcore", props2.store(), CreateMode.PERSISTENT);
-    zkClient.makePath("/collections/testcore/shards", CreateMode.PERSISTENT);
-    zkClient.close();
-    
-    CoreDescriptor dcore = new CoreDescriptor(container1, "testcore",
-        "testcore");
-    
-    dcore.setDataDir(dataDir4.getAbsolutePath());
-
-    SolrCore core = container1.create(dcore);
-    container1.register(core, false);
-    
-    ZkController zkController2 = container2.getZkController();
-
-    String host = zkController2.getHostName();
-    
-    // slight pause - TODO: takes an oddly long amount of time to schedule tasks
-    // with almost no delay ...
-    CloudState cloudState2 = null;
-    Map<String,Slice> slices = null;
-    for (int i = 75; i > 0; i--) {
-      cloudState2 = zkController2.getCloudState();
-      slices = cloudState2.getSlices("testcore");
-      
-      if (slices != null && slices.containsKey(host + ":1661_solr_testcore")) {
-        break;
-      }
-      Thread.sleep(500);
-    }
-
-    assertNotNull(slices);
-    assertTrue(slices.containsKey(host + ":1661_solr_testcore"));
-
-    Slice slice = slices.get(host + ":1661_solr_testcore");
-    assertEquals(host + ":1661_solr_testcore", slice.getName());
-
-    Map<String,ZkNodeProps> shards = slice.getShards();
-
-    assertEquals(1, shards.size());
-
-    ZkNodeProps zkProps = shards.get(host + ":1661_solr_testcore");
-
-    assertNotNull(zkProps);
-
-    assertEquals(host + ":1661_solr", zkProps.get("node_name"));
-
-    assertEquals("http://" + host + ":1661/solr/testcore", zkProps.get("url"));
-
-    Set<String> liveNodes = cloudState2.getLiveNodes();
-    assertNotNull(liveNodes);
-    assertEquals(3, liveNodes.size());
-
-    container3.shutdown();
-
-    // slight pause (15s timeout) for watch to trigger
-    for(int i = 0; i < (5 * 15); i++) {
-      if(zkController2.getCloudState().getLiveNodes().size() == 2) {
-        break;
-      }
-      Thread.sleep(200);
-    }
-
-    assertEquals(2, zkController2.getCloudState().getLiveNodes().size());
-
-    // quickly kill / start client
-
-    container2.getZkController().getZkClient().getSolrZooKeeper().getConnection()
-        .disconnect();
-    container2.shutdown();
-
-    container2 = init2.initialize();
-    
-    // pause for watch to trigger
-    for(int i = 0; i < 200; i++) {
-      if (container1.getZkController().getCloudState().liveNodesContain(
-          container2.getZkController().getNodeName())) {
-        break;
-      }
-      Thread.sleep(100);
-    }
-
-    assertTrue(container1.getZkController().getCloudState().liveNodesContain(
-        container2.getZkController().getNodeName()));
-    
-  }
-
-  @Override
-  public void tearDown() throws Exception {
-    if (VERBOSE) {
-      printLayout(zkServer.getZkHost());
-    }
-    container1.shutdown();
-    container2.shutdown();
-    container3.shutdown();
-    zkServer.shutdown();
-    super.tearDown();
-    System.clearProperty("zkClientTimeout");
-    System.clearProperty("zkHost");
-    System.clearProperty("hostPort");
-    System.clearProperty("CLOUD_UPDATE_DELAY");
-    SolrConfig.severeErrors.clear();
-  }
-
-  private void printLayout(String zkHost) throws Exception {
-    SolrZkClient zkClient = new SolrZkClient(
-        zkHost, AbstractZkTestCase.TIMEOUT);
-    zkClient.printLayoutToStdOut();
-    zkClient.close();
-  }
-}
+package org.apache.solr.cloud;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.common.cloud.CloudState;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.core.CoreContainer;
+import org.apache.solr.core.CoreContainer.Initializer;
+import org.apache.solr.core.CoreDescriptor;
+import org.apache.solr.core.SolrConfig;
+import org.apache.solr.core.SolrCore;
+import org.apache.zookeeper.CreateMode;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Verifies that core registration, shard properties and node liveness are
+ * reflected in the shared cloud state, including after a container is shut
+ * down and brought back up.
+ *
+ * TODO: look at hostPort used below
+ */
+public class CloudStateUpdateTest extends SolrTestCaseJ4 {
+  protected static Logger log = LoggerFactory
+      .getLogger(CloudStateUpdateTest.class);
+
+  private static final boolean VERBOSE = false;
+
+  protected ZkTestServer zkServer;
+
+  protected String zkDir;
+
+  private CoreContainer container1;
+
+  private CoreContainer container2;
+
+  private CoreContainer container3;
+
+  private File dataDir1;
+
+  private File dataDir2;
+
+  private File dataDir3;
+  
+  private File dataDir4;
+
+  private Initializer init2;
+  
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    initCore();
+  }
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    
+    System.setProperty("zkClientTimeout", "3000");
+    
+    zkDir = dataDir.getAbsolutePath() + File.separator
+        + "zookeeper/server1/data";
+    zkServer = new ZkTestServer(zkDir);
+    zkServer.run();
+    System.setProperty("zkHost", zkServer.getZkAddress());
+    AbstractZkTestCase.buildZooKeeper(zkServer.getZkHost(), zkServer
+        .getZkAddress(), "solrconfig.xml", "schema.xml");
+    
+    log.info("####SETUP_START " + getName());
+    dataDir1 = new File(dataDir + File.separator + "data1");
+    dataDir1.mkdirs();
+    
+    dataDir2 = new File(dataDir + File.separator + "data2");
+    dataDir2.mkdirs();
+    
+    dataDir3 = new File(dataDir + File.separator + "data3");
+    dataDir3.mkdirs();
+    
+    dataDir4 = new File(dataDir + File.separator + "data4");
+    dataDir4.mkdirs();
+    
+    // set some system properties for use by tests
+    System.setProperty("solr.test.sys.prop1", "propone");
+    System.setProperty("solr.test.sys.prop2", "proptwo");
+    
+    System.setProperty("hostPort", "1661");
+    CoreContainer.Initializer init1 = new CoreContainer.Initializer() {
+      {
+        this.dataDir = CloudStateUpdateTest.this.dataDir1.getAbsolutePath();
+      }
+    };
+    
+    container1 = init1.initialize();
+    System.clearProperty("hostPort");
+    
+    System.setProperty("hostPort", "1662");
+    init2 = new CoreContainer.Initializer() {
+      {
+        this.dataDir = CloudStateUpdateTest.this.dataDir2.getAbsolutePath();
+      }
+    };
+    
+    container2 = init2.initialize();
+    System.clearProperty("hostPort");
+    
+    System.setProperty("hostPort", "1663");
+    CoreContainer.Initializer init3 = new CoreContainer.Initializer() {
+      {
+        this.dataDir = CloudStateUpdateTest.this.dataDir3.getAbsolutePath();
+      }
+    };
+    container3 = init3.initialize();
+    System.clearProperty("hostPort");
+    
+    log.info("####SETUP_END " + getName());
+    
+  }
+
+  @Test
+  public void testCoreRegistration() throws Exception {
+    System.setProperty("CLOUD_UPDATE_DELAY", "1");
+    
+    ZkNodeProps props2 = new ZkNodeProps();
+    props2.put("configName", "conf1");
+    
+    SolrZkClient zkClient = new SolrZkClient(zkServer.getZkAddress(), AbstractZkTestCase.TIMEOUT);
+    zkClient.makePath("/collections/testcore", props2.store(), CreateMode.PERSISTENT);
+    zkClient.makePath("/collections/testcore/shards", CreateMode.PERSISTENT);
+    zkClient.close();
+    
+    CoreDescriptor dcore = new CoreDescriptor(container1, "testcore",
+        "testcore");
+    
+    dcore.setDataDir(dataDir4.getAbsolutePath());
+
+    SolrCore core = container1.create(dcore);
+    container1.register(core, false);
+    
+    ZkController zkController2 = container2.getZkController();
+
+    String host = zkController2.getHostName();
+    
+    // slight pause - TODO: takes an oddly long amount of time to schedule tasks
+    // with almost no delay ...
+    CloudState cloudState2 = null;
+    Map<String,Slice> slices = null;
+    for (int i = 75; i > 0; i--) {
+      cloudState2 = zkController2.getCloudState();
+      slices = cloudState2.getSlices("testcore");
+      
+      if (slices != null && slices.containsKey(host + ":1661_solr_testcore")) {
+        break;
+      }
+      Thread.sleep(500);
+    }
+
+    assertNotNull(slices);
+    assertTrue(slices.containsKey(host + ":1661_solr_testcore"));
+
+    Slice slice = slices.get(host + ":1661_solr_testcore");
+    assertEquals(host + ":1661_solr_testcore", slice.getName());
+
+    Map<String,ZkNodeProps> shards = slice.getShards();
+
+    assertEquals(1, shards.size());
+
+    ZkNodeProps zkProps = shards.get(host + ":1661_solr_testcore");
+
+    assertNotNull(zkProps);
+
+    assertEquals(host + ":1661_solr", zkProps.get("node_name"));
+
+    assertEquals("http://" + host + ":1661/solr/testcore", zkProps.get("url"));
+
+    Set<String> liveNodes = cloudState2.getLiveNodes();
+    assertNotNull(liveNodes);
+    assertEquals(3, liveNodes.size());
+
+    container3.shutdown();
+
+    // slight pause (15s timeout) for watch to trigger
+    for(int i = 0; i < (5 * 15); i++) {
+      if(zkController2.getCloudState().getLiveNodes().size() == 2) {
+        break;
+      }
+      Thread.sleep(200);
+    }
+
+    assertEquals(2, zkController2.getCloudState().getLiveNodes().size());
+
+    // quickly kill / start client
+
+    container2.getZkController().getZkClient().getSolrZooKeeper().getConnection()
+        .disconnect();
+    container2.shutdown();
+
+    container2 = init2.initialize();
+    
+    // pause for watch to trigger
+    for(int i = 0; i < 200; i++) {
+      if (container1.getZkController().getCloudState().liveNodesContain(
+          container2.getZkController().getNodeName())) {
+        break;
+      }
+      Thread.sleep(100);
+    }
+
+    assertTrue(container1.getZkController().getCloudState().liveNodesContain(
+        container2.getZkController().getNodeName()));
+    
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    if (VERBOSE) {
+      printLayout(zkServer.getZkHost());
+    }
+    container1.shutdown();
+    container2.shutdown();
+    container3.shutdown();
+    zkServer.shutdown();
+    super.tearDown();
+    System.clearProperty("zkClientTimeout");
+    System.clearProperty("zkHost");
+    System.clearProperty("hostPort");
+    System.clearProperty("CLOUD_UPDATE_DELAY");
+    SolrConfig.severeErrors.clear();
+  }
+
+  private void printLayout(String zkHost) throws Exception {
+    SolrZkClient zkClient = new SolrZkClient(
+        zkHost, AbstractZkTestCase.TIMEOUT);
+    zkClient.printLayoutToStdOut();
+    zkClient.close();
+  }
+}
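
The polling loops in testCoreRegistration above (waiting for the testcore slice to appear, for the live-node count to drop to 2, and for the restarted container to show up as live again) are hand-rolled variants of the same wait-until-true-or-timeout idiom. The hypothetical helper below is not part of this commit and is only one way such loops are commonly factored out; waitFor and its parameters are invented names, and the lambda in the usage comment assumes a newer Java version than this code targets:

    // hypothetical sketch: poll a condition until it holds or the timeout elapses
    private static boolean waitFor(java.util.concurrent.Callable<Boolean> condition,
        long timeoutMs, long intervalMs) throws Exception {
      long deadline = System.currentTimeMillis() + timeoutMs;
      while (System.currentTimeMillis() < deadline) {
        if (condition.call()) {
          return true;
        }
        Thread.sleep(intervalMs);
      }
      return condition.call(); // one last check after the deadline
    }

    // usage sketch against the fields in this test (Java 8+ syntax):
    // assertTrue(waitFor(() -> zkController2.getCloudState().getLiveNodes().size() == 2,
    //     15000, 200));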