Posted to commits@lucene.apache.org by rm...@apache.org on 2011/08/30 01:13:23 UTC

svn commit: r1163047 [6/15] - in /lucene/dev/branches/flexscoring: ./ dev-tools/idea/lucene/contrib/ lucene/ lucene/contrib/ lucene/contrib/demo/src/java/org/apache/lucene/demo/ lucene/contrib/demo/src/java/org/apache/lucene/demo/xmlparser/ lucene/cont...

Modified: lucene/dev/branches/flexscoring/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/flexscoring/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java?rev=1163047&r1=1163046&r2=1163047&view=diff
==============================================================================
--- lucene/dev/branches/flexscoring/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (original)
+++ lucene/dev/branches/flexscoring/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java Mon Aug 29 23:13:10 2011
@@ -35,9 +35,7 @@ import java.util.regex.Pattern;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.*;
 import org.apache.lucene.index.codecs.Codec;
 import org.apache.lucene.index.codecs.CodecProvider;
@@ -45,6 +43,7 @@ import org.apache.lucene.index.codecs.mo
 import org.apache.lucene.index.codecs.mockintblock.MockVariableIntBlockCodec;
 import org.apache.lucene.index.codecs.mocksep.MockSepCodec;
 import org.apache.lucene.index.codecs.mockrandom.MockRandomCodec;
+import org.apache.lucene.index.codecs.mockrandom.MockRandomDocValuesCodec;
 import org.apache.lucene.index.codecs.preflex.PreFlexCodec;
 import org.apache.lucene.index.codecs.preflexrw.PreFlexRWCodec;
 import org.apache.lucene.index.codecs.pulsing.PulsingCodec;
@@ -164,7 +163,7 @@ public abstract class LuceneTestCase ext
    */
   public static final int RANDOM_MULTIPLIER = Integer.parseInt(System.getProperty("tests.multiplier", "1"));
 
-  private int savedBoolMaxClauseCount;
+  private int savedBoolMaxClauseCount = BooleanQuery.getMaxClauseCount();
 
   private volatile Thread.UncaughtExceptionHandler savedUncaughtExceptionHandler = null;
 
@@ -229,7 +228,7 @@ public abstract class LuceneTestCase ext
     if (prior != null) {
       cp.unregister(prior);
     }
-    cp.register(_TestUtil.randomizeCodec(random, c));
+    cp.register(c);
   }
 
   // returns current default codec
@@ -276,6 +275,8 @@ public abstract class LuceneTestCase ext
     // baseBlockSize cannot be over 127:
     swapCodec(new MockVariableIntBlockCodec(codecHasParam && "MockVariableIntBlock".equals(codec) ? codecParam : _TestUtil.nextInt(random, 1, 127)), cp);
     swapCodec(new MockRandomCodec(random), cp);
+    // give docvalues non-CFS test coverage
+    swapCodec(new MockRandomDocValuesCodec(random), cp);
 
     return cp.lookup(codec);
   }
@@ -397,11 +398,19 @@ public abstract class LuceneTestCase ext
 
   @AfterClass
   public static void afterClassLuceneTestCaseJ4() {
-    if (!testsFailed) {
-      assertTrue("ensure your setUp() calls super.setUp() and your tearDown() calls super.tearDown()!!!", 
-          state == State.INITIAL || state == State.TEARDOWN);
+    State oldState = state; // capture test execution state
+    state = State.INITIAL; // set the state for subsequent tests
+    
+    Throwable problem = null;
+    try {
+      if (!testsFailed) {
+        assertTrue("ensure your setUp() calls super.setUp() and your tearDown() calls super.tearDown()!!!", 
+          oldState == State.INITIAL || oldState == State.TEARDOWN);
+      }
+    } catch (Throwable t) {
+      if (problem == null) problem = t;
     }
-    state = State.INITIAL;
+    
     if (! "false".equals(TEST_CLEAN_THREADS)) {
       int rogueThreads = threadCleanup("test class");
       if (rogueThreads > 0) {
@@ -409,6 +418,73 @@ public abstract class LuceneTestCase ext
         System.err.println("RESOURCE LEAK: test class left " + rogueThreads + " thread(s) running");
       }
     }
+    
+    String codecDescription = uninstallCodecsAfterClass();
+    Locale.setDefault(savedLocale);
+    TimeZone.setDefault(savedTimeZone);
+    System.clearProperty("solr.solr.home");
+    System.clearProperty("solr.data.dir");
+    
+    try {
+      // now look for unclosed resources
+      if (!testsFailed) {
+        checkResourcesAfterClass();
+      }
+    } catch (Throwable t) {
+      if (problem == null) problem = t;
+    }
+    
+    stores = null;
+
+    try {
+      // clear out any temp directories if we can
+      if (!testsFailed) {
+        clearTempDirectoriesAfterClass();
+      }
+    } catch (Throwable t) {
+      if (problem == null) problem = t;
+    }
+
+    // if we had afterClass failures, get some debugging information
+    if (problem != null) {
+      reportPartialFailureInfo();      
+    }
+    
+    // if verbose or tests failed, report some information back
+    if (VERBOSE || testsFailed || problem != null) {
+      printDebuggingInformation(codecDescription);
+    }
+    
+    // reset seed
+    random.setSeed(0L);
+    random.initialized = false;
+    
+    if (problem != null) {
+      throw new RuntimeException(problem);
+    }
+  }
+  
+  /** print some useful debugging information about the environment */
+  private static void printDebuggingInformation(String codecDescription) {
+    System.err.println("NOTE: test params are: codec=" + codecDescription +
+        ", locale=" + locale +
+        ", timezone=" + (timeZone == null ? "(null)" : timeZone.getID()));
+    System.err.println("NOTE: all tests run in this JVM:");
+    System.err.println(Arrays.toString(testClassesRun.toArray()));
+    System.err.println("NOTE: " + System.getProperty("os.name") + " "
+        + System.getProperty("os.version") + " "
+        + System.getProperty("os.arch") + "/"
+        + System.getProperty("java.vendor") + " "
+        + System.getProperty("java.version") + " "
+        + (Constants.JRE_IS_64BIT ? "(64-bit)" : "(32-bit)") + "/"
+        + "cpus=" + Runtime.getRuntime().availableProcessors() + ","
+        + "threads=" + Thread.activeCount() + ","
+        + "free=" + Runtime.getRuntime().freeMemory() + ","
+        + "total=" + Runtime.getRuntime().totalMemory());
+  }
+  
+  /** uninstalls test codecs and returns a description of the codec used, for debugging */
+  private static String uninstallCodecsAfterClass() {
     String codecDescription;
     CodecProvider cp = CodecProvider.getDefault();
 
@@ -421,71 +497,50 @@ public abstract class LuceneTestCase ext
     if ("random".equals(TEST_CODECPROVIDER) && CodecProvider.getDefault() == savedCodecProvider)
       removeTestCodecs(codec, CodecProvider.getDefault());
     CodecProvider.setDefault(savedCodecProvider);
-    Locale.setDefault(savedLocale);
-    TimeZone.setDefault(savedTimeZone);
-    System.clearProperty("solr.solr.home");
-    System.clearProperty("solr.data.dir");
-    // now look for unclosed resources
-    if (!testsFailed)
-      for (MockDirectoryWrapper d : stores.keySet()) {
-        if (d.isOpen()) {
-          StackTraceElement elements[] = stores.get(d);
-          // Look for the first class that is not LuceneTestCase that requested
-          // a Directory. The first two items are of Thread's, so skipping over
-          // them.
-          StackTraceElement element = null;
-          for (int i = 2; i < elements.length; i++) {
-            StackTraceElement ste = elements[i];
-            if (ste.getClassName().indexOf("LuceneTestCase") == -1) {
-              element = ste;
-              break;
-            }
+
+    return codecDescription;
+  }
+  
+  /** check that directories and their resources were closed */
+  private static void checkResourcesAfterClass() {
+    for (MockDirectoryWrapper d : stores.keySet()) {
+      if (d.isOpen()) {
+        StackTraceElement elements[] = stores.get(d);
+        // Look for the first class that is not LuceneTestCase that requested
+        // a Directory. The first two stack frames belong to Thread, so skip
+        // over them.
+        StackTraceElement element = null;
+        for (int i = 2; i < elements.length; i++) {
+          StackTraceElement ste = elements[i];
+          if (ste.getClassName().indexOf("LuceneTestCase") == -1) {
+            element = ste;
+            break;
           }
-          fail("directory of test was not closed, opened from: " + element);
         }
+        fail("directory of test was not closed, opened from: " + element);
       }
-    stores = null;
-    // if verbose or tests failed, report some information back
-    if (VERBOSE || testsFailed)
-      System.err.println("NOTE: test params are: codec=" + codecDescription +
-        ", locale=" + locale +
-        ", timezone=" + (timeZone == null ? "(null)" : timeZone.getID()));
-    if (VERBOSE || testsFailed) {
-      System.err.println("NOTE: all tests run in this JVM:");
-      System.err.println(Arrays.toString(testClassesRun.toArray()));
-      System.err.println("NOTE: " + System.getProperty("os.name") + " "
-          + System.getProperty("os.version") + " "
-          + System.getProperty("os.arch") + "/"
-          + System.getProperty("java.vendor") + " "
-          + System.getProperty("java.version") + " "
-          + (Constants.JRE_IS_64BIT ? "(64-bit)" : "(32-bit)") + "/"
-          + "cpus=" + Runtime.getRuntime().availableProcessors() + ","
-          + "threads=" + Thread.activeCount() + ","
-          + "free=" + Runtime.getRuntime().freeMemory() + ","
-          + "total=" + Runtime.getRuntime().totalMemory());
     }
-    // clear out any temp directories if we can
-    if (!testsFailed) {
-      for (Entry<File, StackTraceElement[]> entry : tempDirs.entrySet()) {
-        try {
-          _TestUtil.rmDir(entry.getKey());
-        } catch (IOException e) {
-          e.printStackTrace();
-          System.err.println("path " + entry.getKey() + " allocated from");
-          // first two STE's are Java's
-          StackTraceElement[] elements = entry.getValue();
-          for (int i = 2; i < elements.length; i++) {
-            StackTraceElement ste = elements[i];            
-            // print only our code's stack information
-            if (ste.getClassName().indexOf("org.apache.lucene") == -1) break; 
-            System.err.println("\t" + ste);
-          }
-          fail("could not remove temp dir: " + entry.getKey());
+  }
+  
+  /** clear temp directories: this will fail the test if it is not successful */
+  private static void clearTempDirectoriesAfterClass() {
+    for (Entry<File, StackTraceElement[]> entry : tempDirs.entrySet()) {
+      try {
+        _TestUtil.rmDir(entry.getKey());
+      } catch (IOException e) {
+        e.printStackTrace();
+        System.err.println("path " + entry.getKey() + " allocated from");
+        // first two STE's are Java's
+        StackTraceElement[] elements = entry.getValue();
+        for (int i = 2; i < elements.length; i++) {
+          StackTraceElement ste = elements[i];            
+          // print only our code's stack information
+          if (ste.getClassName().indexOf("org.apache.lucene") == -1) break; 
+          System.err.println("\t" + ste);
         }
+        fail("could not remove temp dir: " + entry.getKey());
       }
     }
-    random.setSeed(0L);
-    random.initialized = false;
   }
 
   protected static boolean testsFailed; /* true if any tests failed */
@@ -522,10 +577,11 @@ public abstract class LuceneTestCase ext
     public void starting(FrameworkMethod method) {
       // set current method name for logging
       LuceneTestCase.this.name = method.getName();
+      State s = state; // capture test execution state
+      state = State.RANTEST; // set the state for subsequent tests
       if (!testsFailed) {
-        assertTrue("ensure your setUp() calls super.setUp()!!!", state == State.SETUP);
+        assertTrue("ensure your setUp() calls super.setUp()!!!", s == State.SETUP);
       }
-      state = State.RANTEST;
       super.starting(method);
     }
   };
@@ -534,10 +590,9 @@ public abstract class LuceneTestCase ext
   public void setUp() throws Exception {
     seed = "random".equals(TEST_SEED) ? seedRand.nextLong() : ThreeLongs.fromString(TEST_SEED).l2;
     random.setSeed(seed);
-    if (!testsFailed) {
-      assertTrue("ensure your tearDown() calls super.tearDown()!!!", (state == State.INITIAL || state == State.TEARDOWN));
-    }
-    state = State.SETUP;
+    State s = state; // capture test execution state
+    state = State.SETUP; // set the state for subsequent tests
+   
     savedUncaughtExceptionHandler = Thread.getDefaultUncaughtExceptionHandler();
     Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
       public void uncaughtException(Thread t, Throwable e) {
@@ -545,12 +600,15 @@ public abstract class LuceneTestCase ext
         uncaughtExceptions.add(new UncaughtExceptionEntry(t, e));
         if (savedUncaughtExceptionHandler != null)
           savedUncaughtExceptionHandler.uncaughtException(t, e);
-      }
+        }
     });
 
     savedBoolMaxClauseCount = BooleanQuery.getMaxClauseCount();
-  }
 
+    if (!testsFailed) {
+      assertTrue("ensure your tearDown() calls super.tearDown()!!!", (s == State.INITIAL || s == State.TEARDOWN));
+    }
+  }
 
   /**
    * Forcibly purges all cache entries from the FieldCache.
@@ -573,38 +631,39 @@ public abstract class LuceneTestCase ext
 
   @After
   public void tearDown() throws Exception {
-    if (!testsFailed) {
-      // Note: we allow a test to go straight from SETUP -> TEARDOWN (without ever entering the RANTEST state)
-      // because if you assume() inside setUp(), it skips the test and the TestWatchman has no way to know...
-      assertTrue("ensure your setUp() calls super.setUp()!!!", state == State.RANTEST || state == State.SETUP);
-    }
-    state = State.TEARDOWN;
-    BooleanQuery.setMaxClauseCount(savedBoolMaxClauseCount);
-    if ("perMethod".equals(TEST_CLEAN_THREADS)) {
-      int rogueThreads = threadCleanup("test method: '" + getName() + "'");
-      if (rogueThreads > 0) {
-        System.err.println("RESOURCE LEAK: test method: '" + getName()
-            + "' left " + rogueThreads + " thread(s) running");
-        // TODO: fail, but print seed for now.
-        if (!testsFailed && uncaughtExceptions.isEmpty()) {
-          reportAdditionalFailureInfo();
-        }
+    State oldState = state; // capture test execution state
+    state = State.TEARDOWN; // set the state for subsequent tests
+    
+    // NOTE: with JUnit 4.7 we don't get a "reproduce with" report, because our Watchman
+    // does not know if something fails in tearDown, so we ensure this happens ourselves for now.
+    // We can remove this if we upgrade to 4.8.
+    Throwable problem = null;
+    
+    try {
+      if (!testsFailed) {
+        // Note: we allow a test to go straight from SETUP -> TEARDOWN (without ever entering the RANTEST state)
+        // because if you assume() inside setUp(), it skips the test and the TestWatchman has no way to know...
+        assertTrue("ensure your setUp() calls super.setUp()!!!", oldState == State.RANTEST || oldState == State.SETUP);
       }
+    } catch (Throwable t) {
+      if (problem == null) problem = t;
     }
-    Thread.setDefaultUncaughtExceptionHandler(savedUncaughtExceptionHandler);
-    try {
 
-      if (!uncaughtExceptions.isEmpty()) {
-        testsFailed = true;
-        reportAdditionalFailureInfo();
-        System.err.println("The following exceptions were thrown by threads:");
-        for (UncaughtExceptionEntry entry : uncaughtExceptions) {
-          System.err.println("*** Thread: " + entry.thread.getName() + " ***");
-          entry.exception.printStackTrace(System.err);
-        }
-        fail("Some threads threw uncaught exceptions!");
-      }
+    BooleanQuery.setMaxClauseCount(savedBoolMaxClauseCount);
 
+    // checkRogueThreadsAfter() won't throw any exceptions or fail the test;
+    // if that ever changes, this calling logic must change too
+    checkRogueThreadsAfter();
+    // restore the default uncaught exception handler
+    Thread.setDefaultUncaughtExceptionHandler(savedUncaughtExceptionHandler);
+    
+    try {
+      checkUncaughtExceptionsAfter();
+    } catch (Throwable t) {
+      if (problem == null) problem = t;
+    }
+    
+    try {
       // calling assertSaneFieldCaches here isn't as useful as having test
       // classes call it directly from the scope where the index readers
       // are used, because they could be gc'ed just before this tearDown
@@ -618,9 +677,44 @@ public abstract class LuceneTestCase ext
       // your Test class so that the inconsistent FieldCache usages are
       // isolated in distinct test methods
       assertSaneFieldCaches(getTestLabel());
-
-    } finally {
-      purgeFieldCache(FieldCache.DEFAULT);
+    } catch (Throwable t) {
+      if (problem == null) problem = t;
+    }
+    
+    purgeFieldCache(FieldCache.DEFAULT);
+    
+    if (problem != null) {
+      testsFailed = true;
+      reportAdditionalFailureInfo();
+      throw new RuntimeException(problem);
+    }
+  }
+  
+  /** check if the test still has threads running; we don't want them to
+   *  fail in a subsequent test and pass the blame to the wrong test */
+  private void checkRogueThreadsAfter() {
+    if ("perMethod".equals(TEST_CLEAN_THREADS)) {
+      int rogueThreads = threadCleanup("test method: '" + getName() + "'");
+      if (!testsFailed && rogueThreads > 0) {
+        System.err.println("RESOURCE LEAK: test method: '" + getName()
+            + "' left " + rogueThreads + " thread(s) running");
+        // TODO: fail, but print seed for now
+        if (uncaughtExceptions.isEmpty()) {
+          reportAdditionalFailureInfo();
+        }
+      }
+    }
+  }
+  
+  /** see if any other threads threw uncaught exceptions, and fail the test if so */
+  private void checkUncaughtExceptionsAfter() {
+    if (!uncaughtExceptions.isEmpty()) {
+      System.err.println("The following exceptions were thrown by threads:");
+      for (UncaughtExceptionEntry entry : uncaughtExceptions) {
+        System.err.println("*** Thread: " + entry.thread.getName() + " ***");
+        entry.exception.printStackTrace(System.err);
+      }
+      fail("Some threads threw uncaught exceptions!");
     }
   }
 
@@ -718,10 +812,6 @@ public abstract class LuceneTestCase ext
         throw e;
       }
 
-      if (insanity.length != 0) {
-        reportAdditionalFailureInfo();
-      }
-
       assertEquals(msg + ": Insane FieldCache usage(s) found",
                    0, insanity.length);
       insanity = null;
@@ -1042,85 +1132,43 @@ public abstract class LuceneTestCase ext
     return dir;
   }
   
-  /** Returns a new field instance. 
-   * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
-  public static Field newField(String name, String value, Index index) {
-    return newField(random, name, value, index);
-  }
-  
-  /** Returns a new field instance. 
-   * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
-  public static Field newField(String name, String value, Store store, Index index) {
-    return newField(random, name, value, store, index);
-  }
-  
-  /**
-   * Returns a new Field instance. Use this when the test does not
-   * care about some specific field settings (most tests)
-   * <ul>
-   *  <li>If the store value is set to Store.NO, sometimes the field will be randomly stored.
-   *  <li>More term vector data than you ask for might be indexed, for example if you choose YES
-   *      it might index term vectors with offsets too.
-   * </ul>
-   */
-  public static Field newField(String name, String value, Store store, Index index, TermVector tv) {
-    return newField(random, name, value, store, index, tv);
-  }
-  
-  /** Returns a new field instance, using the specified random. 
-   * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
-  public static Field newField(Random random, String name, String value, Index index) {
-    return newField(random, name, value, Store.NO, index);
-  }
-  
-  /** Returns a new field instance, using the specified random. 
-   * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
-  public static Field newField(Random random, String name, String value, Store store, Index index) {
-    return newField(random, name, value, store, index, TermVector.NO);
+  public static Field newField(String name, String value, FieldType type) {
+    return newField(random, name, value, type);
   }
   
-  /** Returns a new field instance, using the specified random. 
-   * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
-  public static Field newField(Random random, String name, String value, Store store, Index index, TermVector tv) {
-    
-    if (usually(random)) {
+  public static Field newField(Random random, String name, String value, FieldType type) {
+    if (usually(random) || !type.indexed()) {
       // most of the time, don't modify the params
-      return new Field(name, value, store, index, tv);
+      return new Field(name, type, value);
     }
 
-    if (random.nextBoolean()) {
-      // tickle any code still relying on field names being interned:
-      name = new String(name);
+    FieldType newType = new FieldType(type);
+    if (!newType.stored() && random.nextBoolean()) {
+      newType.setStored(true); // randomly store it
     }
 
-    if (!index.isIndexed())
-      return new Field(name, value, store, index, tv);
-
-    if (!store.isStored() && random.nextBoolean())
-      store = Store.YES; // randomly store it
-
-    tv = randomTVSetting(random, tv);
-
-    return new Field(name, value, store, index, tv);
-  }
-
-  static final TermVector tvSettings[] = {
-    TermVector.NO, TermVector.YES, TermVector.WITH_OFFSETS,
-    TermVector.WITH_POSITIONS, TermVector.WITH_POSITIONS_OFFSETS
-  };
+    if (!newType.storeTermVectors() && random.nextBoolean()) {
+      newType.setStoreTermVectors(true);
+      if (!newType.storeTermVectorOffsets()) {
+        newType.setStoreTermVectorOffsets(random.nextBoolean());
+      }
+      if (!newType.storeTermVectorPositions()) {
+        newType.setStoreTermVectorPositions(random.nextBoolean());
+      }
+    }
 
-  private static TermVector randomTVSetting(Random random, TermVector minimum) {
-    switch(minimum) {
-      case NO: return tvSettings[_TestUtil.nextInt(random, 0, tvSettings.length-1)];
-      case YES: return tvSettings[_TestUtil.nextInt(random, 1, tvSettings.length-1)];
-      case WITH_OFFSETS: return random.nextBoolean() ? TermVector.WITH_OFFSETS
-          : TermVector.WITH_POSITIONS_OFFSETS;
-      case WITH_POSITIONS: return random.nextBoolean() ? TermVector.WITH_POSITIONS
-          : TermVector.WITH_POSITIONS_OFFSETS;
-      default: return TermVector.WITH_POSITIONS_OFFSETS;
+    // TODO: we need to do this, but smarter, i.e., most of
+    // the time we set the same value for a given field but
+    // sometimes (rarely) we change it up:
+    /*
+    if (newType.omitNorms()) {
+      newType.setOmitNorms(random.nextBoolean());
     }
+    */
+    
+    return new Field(name, newType, value);
   }
-
+  
   /** return a random Locale from the available locales on the system */
   public static Locale randomLocale(Random random) {
     Locale locales[] = Locale.getAvailableLocales();
@@ -1277,6 +1325,13 @@ public abstract class LuceneTestCase ext
   }
 
   // We get here from InterceptTestCaseEvents on the 'failed' event....
+  public static void reportPartialFailureInfo() {
+    System.err.println("NOTE: reproduce with (hopefully): ant test -Dtestcase=" + testClassesRun.get(testClassesRun.size()-1)
+        + " -Dtests.seed=" + new ThreeLongs(staticSeed, 0L, LuceneTestCaseRunner.runnerSeed)
+        + reproduceWithExtraParams());
+  }
+  
+  // We get here from InterceptTestCaseEvents on the 'failed' event....
   public void reportAdditionalFailureInfo() {
     System.err.println("NOTE: reproduce with: ant test -Dtestcase=" + getClass().getSimpleName()
         + " -Dtestmethod=" + getName() + " -Dtests.seed=" + new ThreeLongs(staticSeed, seed, LuceneTestCaseRunner.runnerSeed)
@@ -1284,7 +1339,7 @@ public abstract class LuceneTestCase ext
   }
 
   // extra params that were overridden needed to reproduce the command
-  private String reproduceWithExtraParams() {
+  private static String reproduceWithExtraParams() {
     StringBuilder sb = new StringBuilder();
     if (!TEST_CODEC.equals("randomPerField")) sb.append(" -Dtests.codec=").append(TEST_CODEC);
     if (!TEST_LOCALE.equals("random")) sb.append(" -Dtests.locale=").append(TEST_LOCALE);

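The tearDown()/afterClassLuceneTestCaseJ4() rework above applies one pattern throughout: each check runs in its own try/catch, only the first Throwable is kept, cleanup always proceeds, and the saved problem is rethrown at the end, so a failed assertion can no longer skip the remaining cleanup. A minimal sketch of that shape (checkStateTransition, checkLeaks and restoreDefaults are placeholder names, not methods from this patch):

public class FirstFailureWinsSketch {
  public void tearDownLike() {
    Throwable problem = null;
    try {
      checkStateTransition();        // e.g. the setUp()/tearDown() state assertions
    } catch (Throwable t) {
      if (problem == null) problem = t;
    }
    try {
      checkLeaks();                  // e.g. unclosed directories, uncaught thread exceptions
    } catch (Throwable t) {
      if (problem == null) problem = t;
    }
    restoreDefaults();               // always runs, even if a check above failed
    if (problem != null) {
      // surface the first failure only after all cleanup has happened
      throw new RuntimeException(problem);
    }
  }

  private void checkStateTransition() { /* placeholder */ }
  private void checkLeaks() { /* placeholder */ }
  private void restoreDefaults() { /* placeholder */ }
}
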
Modified: lucene/dev/branches/flexscoring/lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/flexscoring/lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java?rev=1163047&r1=1163046&r2=1163047&view=diff
==============================================================================
--- lucene/dev/branches/flexscoring/lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java (original)
+++ lucene/dev/branches/flexscoring/lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java Mon Aug 29 23:13:10 2011
@@ -28,7 +28,6 @@ import java.io.PrintStream;
 import java.lang.reflect.Method;
 import java.util.Enumeration;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.zip.ZipEntry;
@@ -36,11 +35,11 @@ import java.util.zip.ZipFile;
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Fieldable;
 import org.apache.lucene.index.CheckIndex;
 import org.apache.lucene.index.ConcurrentMergeScheduler;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.LogMergePolicy;
 import org.apache.lucene.index.MergePolicy;
 import org.apache.lucene.index.MergeScheduler;
@@ -424,10 +423,9 @@ public class _TestUtil {
   
   /** Adds field info for a Document. */
   public static void add(Document doc, FieldInfos fieldInfos) {
-    List<Fieldable> fields = doc.getFields();
-    for (Fieldable field : fields) {
-      fieldInfos.addOrUpdate(field.name(), field.isIndexed(), field.isTermVectorStored(), field.isStorePositionWithTermVector(),
-              field.isStoreOffsetWithTermVector(), field.getOmitNorms(), false, field.getIndexOptions(), field.docValuesType());
+    for (IndexableField field : doc) {
+      fieldInfos.addOrUpdate(field.name(), field.indexed(), field.storeTermVectors(), field.storeTermVectorPositions(),
+              field.storeTermVectorOffsets(), field.omitNorms(), false, field.indexOptions(), field.docValuesType());
     }
   }
   
@@ -504,23 +502,16 @@ public class _TestUtil {
   // TODO: is there a pre-existing way to do this!!!
   public static Document cloneDocument(Document doc1) {
     final Document doc2 = new Document();
-    for(Fieldable f : doc1.getFields()) {
+    for(IndexableField f : doc1) {
       Field field1 = (Field) f;
       
       Field field2 = new Field(field1.name(),
-                               field1.stringValue(),
-                               field1.isStored() ? Field.Store.YES : Field.Store.NO,
-                               field1.isIndexed() ? (field1.isTokenized() ? Field.Index.ANALYZED : Field.Index.NOT_ANALYZED) : Field.Index.NO);
-      field2.setOmitNorms(field1.getOmitNorms());
-      field2.setIndexOptions(field1.getIndexOptions());
+                               field1.getFieldType(),
+                               field1.stringValue()
+                               );
       doc2.add(field2);
     }
 
     return doc2;
   }
-  
-  public static Codec randomizeCodec(Random random, Codec codec) {
-    codec.setDocValuesUseCFS(random.nextBoolean());
-    return codec;
-  }
 }

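Both _TestUtil changes above lean on the new IndexableField abstraction: a Document is iterated directly, and the index-time flags are read from the field itself instead of from Fieldable. A small self-contained sketch using only the accessors that appear in this hunk (the class name is just for illustration):

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexableField;

public class DumpFieldFlags {
  public static void main(String[] args) {
    Document doc = new Document();
    doc.add(new Field("title", TextField.TYPE_STORED, "hello world"));
    // a Document is now an Iterable of IndexableField, not a List<Fieldable>
    for (IndexableField field : doc) {
      System.out.println(field.name()
          + " indexed=" + field.indexed()
          + " termVectors=" + field.storeTermVectors()
          + " omitNorms=" + field.omitNorms());
    }
  }
}
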
Modified: lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/TestDemo.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/TestDemo.java?rev=1163047&r1=1163046&r2=1163047&view=diff
==============================================================================
--- lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/TestDemo.java (original)
+++ lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/TestDemo.java Mon Aug 29 23:13:10 2011
@@ -22,7 +22,7 @@ import java.io.IOException;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.*;
@@ -49,8 +49,7 @@ public class TestDemo extends LuceneTest
     Document doc = new Document();
     String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
-    doc.add(newField("fieldname", text, Field.Store.YES,
-        Field.Index.ANALYZED));
+    doc.add(newField("fieldname", text, TextField.TYPE_STORED));
     iwriter.addDocument(doc);
     iwriter.close();
     

Modified: lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/TestExternalCodecs.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/TestExternalCodecs.java?rev=1163047&r1=1163046&r2=1163047&view=diff
==============================================================================
--- lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/TestExternalCodecs.java (original)
+++ lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/TestExternalCodecs.java Mon Aug 29 23:13:10 2011
@@ -529,13 +529,13 @@ public class TestExternalCodecs extends 
     w.setInfoStream(VERBOSE ? System.out : null);
     Document doc = new Document();
     // uses default codec:
-    doc.add(newField("field1", "this field uses the standard codec as the test", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("field1", "this field uses the standard codec as the test", TextField.TYPE_UNSTORED));
     // uses pulsing codec:
-    Field field2 = newField("field2", "this field uses the pulsing codec as the test", Field.Store.NO, Field.Index.ANALYZED);
+    Field field2 = newField("field2", "this field uses the pulsing codec as the test", TextField.TYPE_UNSTORED);
     provider.setFieldCodec(field2.name(), "Pulsing");
     doc.add(field2);
     
-    Field idField = newField("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
+    Field idField = newField("id", "", StringField.TYPE_UNSTORED);
     provider.setFieldCodec(idField.name(), "Pulsing");
 
     doc.add(idField);

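The edits in this and the following test files are the same mechanical translation of Store/Index/TermVector arguments into FieldType-based calls. A rough before/after sketch, assuming the newField(String, String, FieldType) overload and the TYPE_STORED/TYPE_UNSTORED constants used throughout this patch:

import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.util.LuceneTestCase;

public class FieldTypeMigrationSketch extends LuceneTestCase {
  public void testTranslation() throws Exception {
    Document doc = new Document();
    // was: newField("contents", "aaa bbb", Field.Store.YES, Field.Index.ANALYZED)
    doc.add(newField("contents", "aaa bbb", TextField.TYPE_STORED));
    // was: newField("id", "1", Field.Store.NO, Field.Index.NOT_ANALYZED)
    doc.add(newField("id", "1", StringField.TYPE_UNSTORED));
    // anything beyond the stock types becomes a custom FieldType, e.g. term
    // vectors with positions and offsets (see the TestAddIndexes hunk below)
    FieldType custom = new FieldType(TextField.TYPE_STORED);
    custom.setStoreTermVectors(true);
    custom.setStoreTermVectorPositions(true);
    custom.setStoreTermVectorOffsets(true);
    doc.add(newField("vectored", "aaa bbb", custom));
  }
}
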
Modified: lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java?rev=1163047&r1=1163046&r2=1163047&view=diff
==============================================================================
--- lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java (original)
+++ lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java Mon Aug 29 23:13:10 2011
@@ -33,6 +33,7 @@ import org.apache.lucene.index.MergePoli
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
 
 /**
  * Holds test cases to verify external APIs are accessible
@@ -90,7 +91,7 @@ public class TestMergeSchedulerExternal 
     dir.failOn(new FailOnlyOnMerge());
 
     Document doc = new Document();
-    Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
+    Field idField = newField("id", "", StringField.TYPE_STORED);
     doc.add(idField);
     
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(

Modified: lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/TestSearch.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/TestSearch.java?rev=1163047&r1=1163046&r2=1163047&view=diff
==============================================================================
--- lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/TestSearch.java (original)
+++ lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/TestSearch.java Mon Aug 29 23:13:10 2011
@@ -93,8 +93,8 @@ public class TestSearch extends LuceneTe
       };
       for (int j = 0; j < docs.length; j++) {
         Document d = new Document();
-        d.add(newField("contents", docs[j], Field.Store.YES, Field.Index.ANALYZED));
-        d.add(newField("id", ""+j, Field.Index.NOT_ANALYZED_NO_NORMS));
+        d.add(newField("contents", docs[j], TextField.TYPE_STORED));
+        d.add(newField("id", ""+j, StringField.TYPE_UNSTORED));
         writer.addDocument(d);
       }
       writer.close();

Modified: lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java?rev=1163047&r1=1163046&r2=1163047&view=diff
==============================================================================
--- lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java (original)
+++ lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java Mon Aug 29 23:13:10 2011
@@ -92,8 +92,8 @@ public class TestSearchForDuplicates ext
 
       for (int j = 0; j < MAX_DOCS; j++) {
         Document d = new Document();
-        d.add(newField(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES, Field.Index.ANALYZED));
-        d.add(newField(ID_FIELD, Integer.toString(j), Field.Store.YES, Field.Index.ANALYZED));
+        d.add(newField(PRIORITY_FIELD, HIGH_PRIORITY, TextField.TYPE_STORED));
+        d.add(newField(ID_FIELD, Integer.toString(j), TextField.TYPE_STORED));
         writer.addDocument(d);
       }
       writer.close();

Modified: lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java?rev=1163047&r1=1163046&r2=1163047&view=diff
==============================================================================
--- lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java (original)
+++ lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java Mon Aug 29 23:13:10 2011
@@ -23,8 +23,7 @@ import java.io.IOException;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.DocsAndPositionsEnum;
@@ -60,7 +59,7 @@ public class TestCachingTokenFilter exte
     
     stream = new CachingTokenFilter(stream);
     
-    doc.add(new Field("preanalyzed", stream, TermVector.NO));
+    doc.add(new TextField("preanalyzed", stream));
     
     // 1) we consume all tokens twice before we add the doc to the index
     checkTokens(stream);

Modified: lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java?rev=1163047&r1=1163046&r2=1163047&view=diff
==============================================================================
--- lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java (original)
+++ lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java Mon Aug 29 23:13:10 2011
@@ -1,10 +1,11 @@
 package org.apache.lucene.document;
 
-import org.apache.lucene.util.LuceneTestCase;
-
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -34,8 +35,10 @@ public class TestBinaryDocument extends 
   public void testBinaryFieldInIndex()
     throws Exception
   {
-    Fieldable binaryFldStored = new Field("binaryStored", binaryValStored.getBytes());
-    Fieldable stringFldStored = new Field("stringStored", binaryValStored, Field.Store.YES, Field.Index.NO, Field.TermVector.NO);
+    FieldType ft = new FieldType();
+    ft.setStored(true);
+    IndexableField binaryFldStored = new BinaryField("binaryStored", binaryValStored.getBytes());
+    IndexableField stringFldStored = new Field("stringStored", ft, binaryValStored);
 
     Document doc = new Document();
     
@@ -44,7 +47,7 @@ public class TestBinaryDocument extends 
     doc.add(stringFldStored);
 
     /** test for field count */
-    assertEquals(2, doc.fields.size());
+    assertEquals(2, doc.getFields().size());
     
     /** add the doc to a ram index */
     Directory dir = newDirectory();
@@ -57,7 +60,9 @@ public class TestBinaryDocument extends 
     assertTrue(docFromReader != null);
     
     /** fetch the binary stored field and compare its content with the original one */
-    String binaryFldStoredTest = new String(docFromReader.getBinaryValue("binaryStored"));
+    BytesRef bytes = docFromReader.getBinaryValue("binaryStored");
+    assertNotNull(bytes);
+    String binaryFldStoredTest = new String(bytes.bytes, bytes.offset, bytes.length);
     assertTrue(binaryFldStoredTest.equals(binaryValStored));
     
     /** fetch the string field and compare its content with the original one */
@@ -77,8 +82,8 @@ public class TestBinaryDocument extends 
   }
   
   public void testCompressionTools() throws Exception {
-    Fieldable binaryFldCompressed = new Field("binaryCompressed", CompressionTools.compress(binaryValCompressed.getBytes()));
-    Fieldable stringFldCompressed = new Field("stringCompressed", CompressionTools.compressString(binaryValCompressed));
+    IndexableField binaryFldCompressed = new BinaryField("binaryCompressed", CompressionTools.compress(binaryValCompressed.getBytes()));
+    IndexableField stringFldCompressed = new BinaryField("stringCompressed", CompressionTools.compressString(binaryValCompressed));
     
     Document doc = new Document();
     

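TestBinaryDocument now uses BinaryField for byte[] values, a stored-only FieldType for an unindexed stored string, and reads binary values back as BytesRef. A minimal sketch of that round trip, assuming BinaryField lives in org.apache.lucene.document as the test above implies:

import org.apache.lucene.document.BinaryField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.util.BytesRef;

public class BinaryFieldSketch {
  public static void main(String[] args) {
    FieldType storedOnly = new FieldType();
    storedOnly.setStored(true);              // stored, but neither indexed nor tokenized

    Document doc = new Document();
    doc.add(new BinaryField("binaryStored", "payload".getBytes()));
    doc.add(new Field("stringStored", storedOnly, "payload"));

    // binary values now come back as BytesRef rather than byte[]
    BytesRef bytes = doc.getBinaryValue("binaryStored");
    String roundTripped = new String(bytes.bytes, bytes.offset, bytes.length);
    System.out.println(roundTripped.equals("payload"));
  }
}
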
Modified: lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/document/TestDateTools.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/document/TestDateTools.java?rev=1163047&r1=1163046&r2=1163047&view=diff
==============================================================================
--- lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/document/TestDateTools.java (original)
+++ lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/document/TestDateTools.java Mon Aug 29 23:13:10 2011
@@ -196,4 +196,4 @@ public class TestDateTools extends Lucen
     }
   }
 
-}
\ No newline at end of file
+}

Modified: lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/document/TestDocument.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/document/TestDocument.java?rev=1163047&r1=1163046&r2=1163047&view=diff
==============================================================================
--- lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/document/TestDocument.java (original)
+++ lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/document/TestDocument.java Mon Aug 29 23:13:10 2011
@@ -1,6 +1,7 @@
 package org.apache.lucene.document;
 
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
@@ -8,6 +9,7 @@ import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 
 /**
@@ -37,22 +39,24 @@ public class TestDocument extends Lucene
   
   public void testBinaryField() throws Exception {
     Document doc = new Document();
-    Fieldable stringFld = new Field("string", binaryVal, Field.Store.YES,
-        Field.Index.NO);
-    Fieldable binaryFld = new Field("binary", binaryVal.getBytes());
-    Fieldable binaryFld2 = new Field("binary", binaryVal2.getBytes());
+    
+    FieldType ft = new FieldType();
+    ft.setStored(true);
+    IndexableField stringFld = new Field("string", ft, binaryVal);
+    IndexableField binaryFld = new BinaryField("binary", binaryVal.getBytes());
+    IndexableField binaryFld2 = new BinaryField("binary", binaryVal2.getBytes());
     
     doc.add(stringFld);
     doc.add(binaryFld);
     
-    assertEquals(2, doc.fields.size());
+    assertEquals(2, doc.getFields().size());
     
-    assertTrue(binaryFld.isBinary());
-    assertTrue(binaryFld.isStored());
-    assertFalse(binaryFld.isIndexed());
-    assertFalse(binaryFld.isTokenized());
+    assertTrue(binaryFld.binaryValue() != null);
+    assertTrue(binaryFld.stored());
+    assertFalse(binaryFld.indexed());
+    assertFalse(binaryFld.tokenized());
     
-    String binaryTest = new String(doc.getBinaryValue("binary"));
+    String binaryTest = doc.getBinaryValue("binary").utf8ToString();
     assertTrue(binaryTest.equals(binaryVal));
     
     String stringTest = doc.get("string");
@@ -60,14 +64,14 @@ public class TestDocument extends Lucene
     
     doc.add(binaryFld2);
     
-    assertEquals(3, doc.fields.size());
+    assertEquals(3, doc.getFields().size());
     
-    byte[][] binaryTests = doc.getBinaryValues("binary");
+    BytesRef[] binaryTests = doc.getBinaryValues("binary");
     
     assertEquals(2, binaryTests.length);
     
-    binaryTest = new String(binaryTests[0]);
-    String binaryTest2 = new String(binaryTests[1]);
+    binaryTest = binaryTests[0].utf8ToString();
+    String binaryTest2 = binaryTests[1].utf8ToString();
     
     assertFalse(binaryTest.equals(binaryTest2));
     
@@ -75,10 +79,10 @@ public class TestDocument extends Lucene
     assertTrue(binaryTest2.equals(binaryVal2));
     
     doc.removeField("string");
-    assertEquals(2, doc.fields.size());
+    assertEquals(2, doc.getFields().size());
     
     doc.removeFields("binary");
-    assertEquals(0, doc.fields.size());
+    assertEquals(0, doc.getFields().size());
   }
   
   /**
@@ -89,45 +93,48 @@ public class TestDocument extends Lucene
    */
   public void testRemoveForNewDocument() throws Exception {
     Document doc = makeDocumentWithFields();
-    assertEquals(8, doc.fields.size());
+    assertEquals(8, doc.getFields().size());
     doc.removeFields("keyword");
-    assertEquals(6, doc.fields.size());
+    assertEquals(6, doc.getFields().size());
     doc.removeFields("doesnotexists"); // removing non-existing fields is
                                        // silently ignored
     doc.removeFields("keyword"); // removing a field more than once
-    assertEquals(6, doc.fields.size());
+    assertEquals(6, doc.getFields().size());
     doc.removeField("text");
-    assertEquals(5, doc.fields.size());
+    assertEquals(5, doc.getFields().size());
     doc.removeField("text");
-    assertEquals(4, doc.fields.size());
+    assertEquals(4, doc.getFields().size());
     doc.removeField("text");
-    assertEquals(4, doc.fields.size());
+    assertEquals(4, doc.getFields().size());
     doc.removeField("doesnotexists"); // removing non-existing fields is
                                       // silently ignored
-    assertEquals(4, doc.fields.size());
+    assertEquals(4, doc.getFields().size());
     doc.removeFields("unindexed");
-    assertEquals(2, doc.fields.size());
+    assertEquals(2, doc.getFields().size());
     doc.removeFields("unstored");
-    assertEquals(0, doc.fields.size());
+    assertEquals(0, doc.getFields().size());
     doc.removeFields("doesnotexists"); // removing non-existing fields is
                                        // silently ignored
-    assertEquals(0, doc.fields.size());
+    assertEquals(0, doc.getFields().size());
   }
   
   public void testConstructorExceptions() {
-    new Field("name", "value", Field.Store.YES, Field.Index.NO); // okay
-    new Field("name", "value", Field.Store.NO, Field.Index.NOT_ANALYZED); // okay
+    FieldType ft = new FieldType();
+    ft.setStored(true);
+    new Field("name", ft, "value"); // okay
+    new StringField("name", "value"); // okay
     try {
-      new Field("name", "value", Field.Store.NO, Field.Index.NO);
+      new Field("name", new FieldType(), "value");
       fail();
     } catch (IllegalArgumentException e) {
       // expected exception
     }
-    new Field("name", "value", Field.Store.YES, Field.Index.NO,
-        Field.TermVector.NO); // okay
+    new Field("name", ft, "value"); // okay
     try {
-      new Field("name", "value", Field.Store.YES, Field.Index.NO,
-          Field.TermVector.YES);
+      FieldType ft2 = new FieldType();
+      ft2.setStored(true);
+      ft2.setStoreTermVectors(true);
+      new Field("name", ft2, "value");
       fail();
     } catch (IllegalArgumentException e) {
       // expected exception
@@ -174,28 +181,26 @@ public class TestDocument extends Lucene
   
   private Document makeDocumentWithFields() {
     Document doc = new Document();
-    doc.add(new Field("keyword", "test1", Field.Store.YES,
-        Field.Index.NOT_ANALYZED));
-    doc.add(new Field("keyword", "test2", Field.Store.YES,
-        Field.Index.NOT_ANALYZED));
-    doc.add(new Field("text", "test1", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("text", "test2", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("unindexed", "test1", Field.Store.YES, Field.Index.NO));
-    doc.add(new Field("unindexed", "test2", Field.Store.YES, Field.Index.NO));
+    FieldType stored = new FieldType();
+    stored.setStored(true);
+    doc.add(new Field("keyword", StringField.TYPE_STORED, "test1"));
+    doc.add(new Field("keyword", StringField.TYPE_STORED, "test2"));
+    doc.add(new Field("text", TextField.TYPE_STORED, "test1"));
+    doc.add(new Field("text", TextField.TYPE_STORED, "test2"));
+    doc.add(new Field("unindexed", stored, "test1"));
+    doc.add(new Field("unindexed", stored, "test2"));
     doc
-        .add(new Field("unstored", "test1", Field.Store.NO,
-            Field.Index.ANALYZED));
+        .add(new TextField("unstored", "test1"));
     doc
-        .add(new Field("unstored", "test2", Field.Store.NO,
-            Field.Index.ANALYZED));
+        .add(new TextField("unstored", "test2"));
     return doc;
   }
   
   private void doAssert(Document doc, boolean fromIndex) {
-    String[] keywordFieldValues = doc.getValues("keyword");
-    String[] textFieldValues = doc.getValues("text");
-    String[] unindexedFieldValues = doc.getValues("unindexed");
-    String[] unstoredFieldValues = doc.getValues("unstored");
+    IndexableField[] keywordFieldValues = doc.getFields("keyword");
+    IndexableField[] textFieldValues = doc.getFields("text");
+    IndexableField[] unindexedFieldValues = doc.getFields("unindexed");
+    IndexableField[] unstoredFieldValues = doc.getFields("unstored");
     
     assertTrue(keywordFieldValues.length == 2);
     assertTrue(textFieldValues.length == 2);
@@ -206,28 +211,26 @@ public class TestDocument extends Lucene
       assertTrue(unstoredFieldValues.length == 2);
     }
     
-    assertTrue(keywordFieldValues[0].equals("test1"));
-    assertTrue(keywordFieldValues[1].equals("test2"));
-    assertTrue(textFieldValues[0].equals("test1"));
-    assertTrue(textFieldValues[1].equals("test2"));
-    assertTrue(unindexedFieldValues[0].equals("test1"));
-    assertTrue(unindexedFieldValues[1].equals("test2"));
+    assertTrue(keywordFieldValues[0].stringValue().equals("test1"));
+    assertTrue(keywordFieldValues[1].stringValue().equals("test2"));
+    assertTrue(textFieldValues[0].stringValue().equals("test1"));
+    assertTrue(textFieldValues[1].stringValue().equals("test2"));
+    assertTrue(unindexedFieldValues[0].stringValue().equals("test1"));
+    assertTrue(unindexedFieldValues[1].stringValue().equals("test2"));
     // this test cannot work for documents retrieved from the index
     // since unstored fields will obviously not be returned
     if (!fromIndex) {
-      assertTrue(unstoredFieldValues[0].equals("test1"));
-      assertTrue(unstoredFieldValues[1].equals("test2"));
+      assertTrue(unstoredFieldValues[0].stringValue().equals("test1"));
+      assertTrue(unstoredFieldValues[1].stringValue().equals("test2"));
     }
   }
   
   public void testFieldSetValue() throws Exception {
     
-    Field field = new Field("id", "id1", Field.Store.YES,
-        Field.Index.NOT_ANALYZED);
+    Field field = new Field("id", StringField.TYPE_STORED, "id1");
     Document doc = new Document();
     doc.add(field);
-    doc.add(new Field("keyword", "test", Field.Store.YES,
-        Field.Index.NOT_ANALYZED));
+    doc.add(new Field("keyword", StringField.TYPE_STORED, "test"));
     
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random, dir);
@@ -248,7 +251,7 @@ public class TestDocument extends Lucene
     int result = 0;
     for (int i = 0; i < 3; i++) {
       Document doc2 = searcher.doc(hits[i].doc);
-      Field f = doc2.getField("id");
+      Field f = (Field) doc2.getField("id");
       if (f.stringValue().equals("id1")) result |= 1;
       else if (f.stringValue().equals("id2")) result |= 2;
       else if (f.stringValue().equals("id3")) result |= 4;
@@ -262,9 +265,8 @@ public class TestDocument extends Lucene
   }
   
   public void testFieldSetValueChangeBinary() {
-    Field field1 = new Field("field1", new byte[0]);
-    Field field2 = new Field("field2", "", Field.Store.YES,
-        Field.Index.ANALYZED);
+    Field field1 = new BinaryField("field1", new byte[0]);
+    Field field2 = new Field("field2", TextField.TYPE_STORED, "");
     try {
       field1.setValue("abc");
       fail("did not hit expected exception");

Modified: lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/Test2BPostings.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/Test2BPostings.java?rev=1163047&r1=1163046&r2=1163047&view=diff
==============================================================================
--- lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/Test2BPostings.java (original)
+++ lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/Test2BPostings.java Mon Aug 29 23:13:10 2011
@@ -24,12 +24,13 @@ import org.apache.lucene.analysis.TokenS
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
-import org.junit.Ignore;
 
 /**
  * Test indexes ~82M docs with 26 terms each, so you get > Integer.MAX_VALUE terms/docs pairs
@@ -62,9 +63,10 @@ public class Test2BPostings extends Luce
     }
 
     Document doc = new Document();
-    Field field = new Field("field", new MyTokenStream());
-    field.setIndexOptions(IndexOptions.DOCS_ONLY);
-    field.setOmitNorms(true);
+    FieldType ft = new FieldType(TextField.TYPE_UNSTORED);
+    ft.setOmitNorms(true);
+    ft.setIndexOptions(IndexOptions.DOCS_ONLY);
+    Field field = new Field("field", ft, new MyTokenStream());
     doc.add(field);
     
     final int numDocs = (Integer.MAX_VALUE / 26) + 1;

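Test2BPostings (and Test2BTerms below) shows the other half of the migration: index-time options such as DOCS_ONLY postings and omitted norms move from Field setters onto a FieldType, and the pre-analyzed TokenStream is handed straight to the Field constructor. A short sketch of that shape, using only calls visible in these hunks (newDocsOnlyField is an illustrative helper, not part of the patch):

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.FieldInfo.IndexOptions;

public class DocsOnlyFieldSketch {
  static Field newDocsOnlyField(String name, TokenStream stream) {
    FieldType ft = new FieldType(TextField.TYPE_UNSTORED);
    ft.setIndexOptions(IndexOptions.DOCS_ONLY);  // postings keep doc ids only
    ft.setOmitNorms(true);                       // no norms for this field
    return new Field(name, ft, stream);          // token stream goes to the constructor
  }

  static Document docWith(TokenStream stream) {
    Document doc = new Document();
    doc.add(newDocsOnlyField("field", stream));
    return doc;
  }
}
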
Modified: lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/Test2BTerms.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/Test2BTerms.java?rev=1163047&r1=1163046&r2=1163047&view=diff
==============================================================================
--- lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/Test2BTerms.java (original)
+++ lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/Test2BTerms.java Mon Aug 29 23:13:10 2011
@@ -25,9 +25,7 @@ import org.apache.lucene.analysis.tokena
 import org.apache.lucene.document.*;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.codecs.CodecProvider;
-import java.io.File;
 import java.io.IOException;
-import java.io.UnsupportedEncodingException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -178,9 +176,11 @@ public class Test2BTerms extends LuceneT
 
       Document doc = new Document();
       final MyTokenStream ts = new MyTokenStream(random, TERMS_PER_DOC);
-      Field field = new Field("field", ts);
-      field.setIndexOptions(IndexOptions.DOCS_ONLY);
-      field.setOmitNorms(true);
+
+      FieldType customType = new FieldType(TextField.TYPE_STORED);
+      customType.setIndexOptions(IndexOptions.DOCS_ONLY);
+      customType.setOmitNorms(true);
+      Field field = new Field("field", customType, ts);
       doc.add(field);
       //w.setInfoStream(System.out);
       final int numDocs = (int) (TERM_COUNT/TERMS_PER_DOC);

Modified: lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java?rev=1163047&r1=1163046&r2=1163047&view=diff
==============================================================================
--- lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java (original)
+++ lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java Mon Aug 29 23:13:10 2011
@@ -25,9 +25,9 @@ import java.util.List;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.index.codecs.mocksep.MockSepCodec;
@@ -38,7 +38,6 @@ import org.apache.lucene.search.DocIdSet
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -167,9 +166,8 @@ public class TestAddIndexes extends Luce
     // docs, so 10 pending deletes:
     for (int i = 0; i < 20; i++) {
       Document doc = new Document();
-      doc.add(newField("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
-      doc.add(newField("content", "bbb " + i, Field.Store.NO,
-                        Field.Index.ANALYZED));
+      doc.add(newField("id", "" + (i % 10), StringField.TYPE_UNSTORED));
+      doc.add(newField("content", "bbb " + i, TextField.TYPE_UNSTORED));
       writer.updateDocument(new Term("id", "" + (i%10)), doc);
     }
     // Deletes one of the 10 added docs, leaving 9:
@@ -203,8 +201,8 @@ public class TestAddIndexes extends Luce
     // docs, so 10 pending deletes:
     for (int i = 0; i < 20; i++) {
       Document doc = new Document();
-      doc.add(newField("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
-      doc.add(newField("content", "bbb " + i, Field.Store.NO, Field.Index.ANALYZED));
+      doc.add(newField("id", "" + (i % 10), StringField.TYPE_UNSTORED));
+      doc.add(newField("content", "bbb " + i, TextField.TYPE_UNSTORED));
       writer.updateDocument(new Term("id", "" + (i%10)), doc);
     }
     
@@ -241,9 +239,8 @@ public class TestAddIndexes extends Luce
     // docs, so 10 pending deletes:
     for (int i = 0; i < 20; i++) {
       Document doc = new Document();
-      doc.add(newField("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
-      doc.add(newField("content", "bbb " + i, Field.Store.NO,
-                        Field.Index.ANALYZED));
+      doc.add(newField("id", "" + (i % 10), StringField.TYPE_UNSTORED));
+      doc.add(newField("content", "bbb " + i, TextField.TYPE_UNSTORED));
       writer.updateDocument(new Term("id", "" + (i%10)), doc);
     }
 
@@ -503,8 +500,7 @@ public class TestAddIndexes extends Luce
   private void addDocs(IndexWriter writer, int numDocs) throws IOException {
     for (int i = 0; i < numDocs; i++) {
       Document doc = new Document();
-      doc.add(newField("content", "aaa", Field.Store.NO,
-                        Field.Index.ANALYZED));
+      doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED));
       writer.addDocument(doc);
     }
   }
@@ -512,8 +508,7 @@ public class TestAddIndexes extends Luce
   private void addDocs2(IndexWriter writer, int numDocs) throws IOException {
     for (int i = 0; i < numDocs; i++) {
       Document doc = new Document();
-      doc.add(newField("content", "bbb", Field.Store.NO,
-                        Field.Index.ANALYZED));
+      doc.add(newField("content", "bbb", TextField.TYPE_UNSTORED));
       writer.addDocument(doc);
     }
   }
@@ -582,20 +577,21 @@ public class TestAddIndexes extends Luce
         .setMaxBufferedDocs(5).setMergePolicy(lmp));
 
     Document doc = new Document();
-    doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
-                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    FieldType customType = new FieldType(TextField.TYPE_STORED);
+    customType.setStoreTermVectors(true);
+    customType.setStoreTermVectorPositions(true);
+    customType.setStoreTermVectorOffsets(true);
+    doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType));
     for(int i=0;i<60;i++)
       writer.addDocument(doc);
 
     Document doc2 = new Document();
-    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
-                      Field.Index.NO));
-    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
-                      Field.Index.NO));
-    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
-                      Field.Index.NO));
-    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
-                      Field.Index.NO));
+    FieldType customType2 = new FieldType();
+    customType2.setStored(true);
+    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2));
+    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2));
+    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2));
+    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2));
     for(int i=0;i<10;i++)
       writer.addDocument(doc2);
     writer.close();
@@ -619,7 +615,7 @@ public class TestAddIndexes extends Luce
   private void addDoc(IndexWriter writer) throws IOException
   {
       Document doc = new Document();
-      doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+      doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED));
       writer.addDocument(doc);
   }
   
@@ -944,7 +940,7 @@ public class TestAddIndexes extends Luce
       IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
       IndexWriter writer = new IndexWriter(dirs[i], conf);
       Document doc = new Document();
-      doc.add(new Field("id", "myid", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
+      doc.add(new StringField("id", "myid"));
       writer.addDocument(doc);
       writer.close();
     }
@@ -973,8 +969,8 @@ public class TestAddIndexes extends Luce
   private void addDocs3(IndexWriter writer, int numDocs) throws IOException {
     for (int i = 0; i < numDocs; i++) {
       Document doc = new Document();
-      doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
-      doc.add(newField("id", "" + i, Field.Store.YES, Field.Index.ANALYZED));
+      doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED));
+      doc.add(newField("id", "" + i, TextField.TYPE_STORED));
       writer.addDocument(doc);
     }
   }
@@ -1061,7 +1057,9 @@ public class TestAddIndexes extends Luce
       dirs[i] = new RAMDirectory();
       IndexWriter w = new IndexWriter(dirs[i], new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
       Document d = new Document();
-      d.add(new Field("c", "v", Store.YES, Index.ANALYZED, TermVector.YES));
+      FieldType customType = new FieldType(TextField.TYPE_STORED);
+      customType.setStoreTermVectors(true);
+      d.add(new Field("c", customType, "v"));
       w.addDocument(d);
       w.close();
     }
@@ -1099,10 +1097,10 @@ public class TestAddIndexes extends Luce
         new MockAnalyzer(random)).setMergePolicy(lmp2);
     IndexWriter w2 = new IndexWriter(src, conf2);
     Document doc = new Document();
-    doc.add(new Field("c", "some text", Store.YES, Index.ANALYZED));
+    doc.add(new Field("c", TextField.TYPE_STORED, "some text"));
     w2.addDocument(doc);
     doc = new Document();
-    doc.add(new Field("d", "delete", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
+    doc.add(new StringField("d", "delete"));
     w2.addDocument(doc);
     w2.commit();
     w2.deleteDocuments(new Term("d", "delete"));
@@ -1152,7 +1150,9 @@ public class TestAddIndexes extends Luce
       conf.setCodecProvider(provider);
       IndexWriter w = new IndexWriter(toAdd, conf);
       Document doc = new Document();
-      doc.add(newField("foo", "bar", Index.NOT_ANALYZED));
+      FieldType customType = new FieldType();
+      customType.setIndexed(true); 
+      doc.add(newField("foo", "bar", customType));
       w.addDocument(doc);
       w.close();
     }
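
The TestAddIndexes hunks above all apply the same migration: the removed Field.Store / Field.Index / Field.TermVector enums are replaced by a FieldType (or one of the TextField/StringField convenience classes) that is configured before the Field is constructed. A minimal sketch of that pattern, for reference only and not part of the commit, assuming the in-flux trunk API visible in these hunks (FieldType, TextField, StringField, Field(String, FieldType, String)); the class and method names here are hypothetical:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.document.StringField;
    import org.apache.lucene.document.TextField;

    public class FieldTypeMigrationSketch {
      public static Document exampleDoc(String id, String body) {
        Document doc = new Document();
        // was: new Field("id", id, Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS)
        doc.add(new StringField("id", id));          // unanalyzed, unstored
        // was: new Field("content", body, Field.Store.NO, Field.Index.ANALYZED)
        doc.add(new TextField("content", body));     // analyzed, unstored
        // was: Field.Store.YES + Field.TermVector.WITH_POSITIONS_OFFSETS,
        // now expressed as explicit flags on a FieldType
        FieldType tv = new FieldType(TextField.TYPE_STORED);
        tv.setStoreTermVectors(true);
        tv.setStoreTermVectorPositions(true);
        tv.setStoreTermVectorOffsets(true);
        doc.add(new Field("content2", tv, body));
        return doc;
      }
    }

As in the tests above, a single FieldType instance can be configured once and reused for every Field that needs the same settings.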

Modified: lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java?rev=1163047&r1=1163046&r2=1163047&view=diff
==============================================================================
--- lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java (original)
+++ lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java Mon Aug 29 23:13:10 2011
@@ -95,8 +95,8 @@ public class TestAtomicUpdate extends Lu
       // Update all 100 docs...
       for(int i=0; i<100; i++) {
         Document d = new Document();
-        d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
-        d.add(new Field("contents", English.intToEnglish(i+10*count), Field.Store.NO, Field.Index.ANALYZED));
+        d.add(new Field("id", StringField.TYPE_STORED, Integer.toString(i)));
+        d.add(new TextField("contents", English.intToEnglish(i+10*count)));
         writer.updateDocument(new Term("id", Integer.toString(i)), d);
       }
     }
@@ -136,8 +136,8 @@ public class TestAtomicUpdate extends Lu
     // Establish a base index of 100 docs:
     for(int i=0;i<100;i++) {
       Document d = new Document();
-      d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
-      d.add(newField("contents", English.intToEnglish(i), Field.Store.NO, Field.Index.ANALYZED));
+      d.add(newField("id", Integer.toString(i), StringField.TYPE_STORED));
+      d.add(newField("contents", English.intToEnglish(i), TextField.TYPE_UNSTORED));
       if ((i-1)%7 == 0) {
         writer.commit();
       }

Modified: lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java?rev=1163047&r1=1163046&r2=1163047&view=diff
==============================================================================
--- lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (original)
+++ lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java Mon Aug 29 23:13:10 2011
@@ -21,16 +21,18 @@ import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
-import java.util.Arrays;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Random;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.NumericField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DocIdSetIterator;
@@ -45,9 +47,9 @@ import org.apache.lucene.store.Directory
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.Constants;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
-import org.apache.lucene.util.Constants;
 
 /*
   Verify we can read the pre-4.0 file format, do searches
@@ -288,11 +290,11 @@ public class TestBackwardsCompatibility 
     for(int i=0;i<35;i++) {
       if (liveDocs.get(i)) {
         Document d = reader.document(i);
-        List<Fieldable> fields = d.getFields();
+        List<IndexableField> fields = d.getFields();
         if (d.getField("content3") == null) {
           final int numFields = 5;
           assertEquals(numFields, fields.size());
-          Field f =  d.getField("id");
+          IndexableField f =  d.getField("id");
           assertEquals(""+i, f.stringValue());
 
           f = d.getField("utf8");
@@ -594,12 +596,16 @@ public class TestBackwardsCompatibility 
   private void addDoc(IndexWriter writer, int id) throws IOException
   {
     Document doc = new Document();
-    doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
-    doc.add(new Field("id", Integer.toString(id), Field.Store.YES, Field.Index.NOT_ANALYZED));
-    doc.add(new Field("autf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-    doc.add(new Field("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-    doc.add(new Field("content2", "here is more content with aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-    doc.add(new Field("fie\u2C77ld", "field with non-ascii name", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(new TextField("content", "aaa"));
+    doc.add(new Field("id", StringField.TYPE_STORED, Integer.toString(id)));
+    FieldType customType2 = new FieldType(TextField.TYPE_STORED);
+    customType2.setStoreTermVectors(true);
+    customType2.setStoreTermVectorPositions(true);
+    customType2.setStoreTermVectorOffsets(true);
+    doc.add(new Field("autf8", customType2, "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd"));
+    doc.add(new Field("utf8", customType2, "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd"));
+    doc.add(new Field("content2", customType2, "here is more content with aaa aaa aaa"));
+    doc.add(new Field("fie\u2C77ld", customType2, "field with non-ascii name"));
     // add numeric fields, to test if flex preserves encoding
     doc.add(new NumericField("trieInt", 4).setIntValue(id));
     doc.add(new NumericField("trieLong", 4).setLongValue(id));
@@ -608,11 +614,14 @@ public class TestBackwardsCompatibility 
 
   private void addNoProxDoc(IndexWriter writer) throws IOException {
     Document doc = new Document();
-    Field f = new Field("content3", "aaa", Field.Store.YES, Field.Index.ANALYZED);
-    f.setIndexOptions(IndexOptions.DOCS_ONLY);
+    FieldType customType = new FieldType(TextField.TYPE_STORED);
+    customType.setIndexOptions(IndexOptions.DOCS_ONLY);
+    Field f = new Field("content3", customType, "aaa");
     doc.add(f);
-    f = new Field("content4", "aaa", Field.Store.YES, Field.Index.NO);
-    f.setIndexOptions(IndexOptions.DOCS_ONLY);
+    FieldType customType2 = new FieldType();
+    customType2.setStored(true);
+    customType2.setIndexOptions(IndexOptions.DOCS_ONLY);
+    f = new Field("content4", customType2, "aaa");
     doc.add(f);
     writer.addDocument(doc);
   }
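
The addNoProxDoc change above shows the same mechanism used for per-field postings settings that previously lived on the Field instance (setIndexOptions, setOmitNorms): they are now set on the FieldType before the Field is built. A rough sketch under the same assumptions, again illustrative rather than part of the commit (the helper name addDocsOnlyField is hypothetical):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.document.TextField;
    import org.apache.lucene.index.FieldInfo.IndexOptions;

    public class DocsOnlyFieldSketch {
      // Postings carry doc IDs only (no freqs/positions) and norms are omitted,
      // configured up front on the FieldType rather than on the Field afterwards.
      public static void addDocsOnlyField(Document doc, String name, String value) {
        FieldType docsOnly = new FieldType(TextField.TYPE_UNSTORED);
        docsOnly.setIndexOptions(IndexOptions.DOCS_ONLY);
        docsOnly.setOmitNorms(true);
        doc.add(new Field(name, docsOnly, value));
      }
    }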

Modified: lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestBinaryTerms.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestBinaryTerms.java?rev=1163047&r1=1163046&r2=1163047&view=diff
==============================================================================
--- lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestBinaryTerms.java (original)
+++ lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestBinaryTerms.java Mon Aug 29 23:13:10 2011
@@ -21,6 +21,8 @@ import java.io.IOException;
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TermQuery;
@@ -47,8 +49,10 @@ public class TestBinaryTerms extends Luc
       bytes.bytes[1] = (byte) (255 - i);
       bytes.length = 2;
       Document doc = new Document();
-      doc.add(new Field("id", "" + i, Field.Store.YES, Field.Index.NO));
-      doc.add(new Field("bytes", tokenStream));
+      FieldType customType = new FieldType();
+      customType.setStored(true);
+      doc.add(new Field("id", customType, "" + i));
+      doc.add(new TextField("bytes", tokenStream));
       iw.addDocument(doc);
     }
     

Modified: lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestCheckIndex.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestCheckIndex.java?rev=1163047&r1=1163046&r2=1163047&view=diff
==============================================================================
--- lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestCheckIndex.java (original)
+++ lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestCheckIndex.java Mon Aug 29 23:13:10 2011
@@ -27,7 +27,8 @@ import org.apache.lucene.util.LuceneTest
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.util.Constants;
 
 public class TestCheckIndex extends LuceneTestCase {
@@ -36,7 +37,11 @@ public class TestCheckIndex extends Luce
     Directory dir = newDirectory();
     IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
     Document doc = new Document();
-    doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    FieldType customType = new FieldType(TextField.TYPE_STORED);
+    customType.setStoreTermVectors(true);
+    customType.setStoreTermVectorPositions(true);
+    customType.setStoreTermVectorOffsets(true);
+    doc.add(newField("field", "aaa", customType));
     for(int i=0;i<19;i++) {
       writer.addDocument(doc);
     }

Modified: lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestCodecs.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestCodecs.java?rev=1163047&r1=1163046&r2=1163047&view=diff
==============================================================================
--- lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestCodecs.java (original)
+++ lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestCodecs.java Mon Aug 29 23:13:10 2011
@@ -23,9 +23,9 @@ import java.util.HashSet;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.index.codecs.FieldsConsumer;
 import org.apache.lucene.index.codecs.FieldsProducer;
@@ -340,7 +340,9 @@ public class TestCodecs extends LuceneTe
       pq.add(new Term("content", "ccc"));
 
       final Document doc = new Document();
-      doc.add(newField("content", "aaa bbb ccc ddd", Store.NO, Field.Index.ANALYZED_NO_NORMS));
+      FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+      customType.setOmitNorms(true);
+      doc.add(newField("content", "aaa bbb ccc ddd", customType));
 
       // add document and force commit for creating a first segment
       writer.addDocument(doc);

Modified: lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java?rev=1163047&r1=1163046&r2=1163047&view=diff
==============================================================================
--- lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (original)
+++ lucene/dev/branches/flexscoring/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java Mon Aug 29 23:13:10 2011
@@ -17,14 +17,16 @@ package org.apache.lucene.index;
  * limitations under the License.
  */
 
-import org.apache.lucene.store.MockDirectoryWrapper;
+import java.io.IOException;
+
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
-
+import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.LuceneTestCase;
-import java.io.IOException;
 
 public class TestConcurrentMergeScheduler extends LuceneTestCase {
   
@@ -75,7 +77,7 @@ public class TestConcurrentMergeSchedule
     IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
     writer.setInfoStream(VERBOSE ? System.out : null);
     Document doc = new Document();
-    Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
+    Field idField = newField("id", "", StringField.TYPE_STORED);
     doc.add(idField);
     int extraCount = 0;
 
@@ -135,7 +137,7 @@ public class TestConcurrentMergeSchedule
     writer.setInfoStream(VERBOSE ? System.out : null);
 
     Document doc = new Document();
-    Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
+    Field idField = newField("id", "", StringField.TYPE_STORED);
     doc.add(idField);
     for(int i=0;i<10;i++) {
       if (VERBOSE) {
@@ -180,7 +182,7 @@ public class TestConcurrentMergeSchedule
 
       for(int j=0;j<21;j++) {
         Document doc = new Document();
-        doc.add(newField("content", "a b c", Field.Store.NO, Field.Index.ANALYZED));
+        doc.add(newField("content", "a b c", TextField.TYPE_UNSTORED));
         writer.addDocument(doc);
       }
         
@@ -202,7 +204,7 @@ public class TestConcurrentMergeSchedule
   public void testNoWaitClose() throws IOException {
     MockDirectoryWrapper directory = newDirectory();
     Document doc = new Document();
-    Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
+    Field idField = newField("id", "", StringField.TYPE_STORED);
     doc.add(idField);
 
     IndexWriter writer = new IndexWriter(