Posted to commits@lucy.apache.org by ma...@apache.org on 2013/07/17 16:12:32 UTC

[lucy-commits] [12/34] Migrate Lucy's index classes to IVARS.

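The whole patch applies one mechanical transformation: instead of reaching instance
variables directly through self->field, each method first fetches the object's IVARS
struct via an accessor (IxManager_IVARS, Indexer_IVARS, Inverter_IVARS, and so on) and
reads or writes fields through that pointer. Below is a minimal, self-contained sketch
of the pattern; the Thing type, its ThingIVARS layout, and the hand-rolled Thing_IVARS()
accessor are illustrative stand-ins for the structs and accessors used in the real code
that follows.

    /* Toy illustration of the self->field -> ivars->field migration.
     * Thing, ThingIVARS, and Thing_IVARS() are made up for this sketch;
     * in Lucy the *IVARS structs and *_IVARS() accessors are supplied
     * elsewhere by the class system. */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct ThingIVARS {
        int timeout;
        int interval;
    } ThingIVARS;

    typedef struct Thing {
        /* Instance variables live in a separate IVARS block rather than
         * being dereferenced straight off the object pointer. */
        ThingIVARS ivars;
    } Thing;

    /* Stand-in for the Foo_IVARS(self) accessor used in the patch. */
    static ThingIVARS*
    Thing_IVARS(Thing *self) {
        return &self->ivars;
    }

    /* Before: self->timeout = 1000;
     * After:  fetch the IVARS block once, then go through it. */
    static void
    Thing_init(Thing *self) {
        ThingIVARS *const ivars = Thing_IVARS(self);
        ivars->timeout  = 1000;
        ivars->interval = 100;
    }

    int
    main(void) {
        Thing *self = (Thing*)calloc(1, sizeof(Thing));
        Thing_init(self);
        printf("timeout=%d interval=%d\n",
               Thing_IVARS(self)->timeout, Thing_IVARS(self)->interval);
        free(self);
        return 0;
    }
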
http://git-wip-us.apache.org/repos/asf/lucy/blob/965fdb2a/core/Lucy/Index/IndexManager.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/IndexManager.c b/core/Lucy/Index/IndexManager.c
index 61bb761..684e476 100644
--- a/core/Lucy/Index/IndexManager.c
+++ b/core/Lucy/Index/IndexManager.c
@@ -40,26 +40,28 @@ IxManager_new(const CharBuf *host, LockFactory *lock_factory) {
 IndexManager*
 IxManager_init(IndexManager *self, const CharBuf *host,
                LockFactory *lock_factory) {
-    self->host                = host
+    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
+    ivars->host                = host
                                 ? CB_Clone(host)
                                 : CB_new_from_trusted_utf8("", 0);
-    self->lock_factory        = (LockFactory*)INCREF(lock_factory);
-    self->folder              = NULL;
-    self->write_lock_timeout  = 1000;
-    self->write_lock_interval = 100;
-    self->merge_lock_timeout  = 0;
-    self->merge_lock_interval = 1000;
-    self->deletion_lock_timeout  = 1000;
-    self->deletion_lock_interval = 100;
+    ivars->lock_factory        = (LockFactory*)INCREF(lock_factory);
+    ivars->folder              = NULL;
+    ivars->write_lock_timeout  = 1000;
+    ivars->write_lock_interval = 100;
+    ivars->merge_lock_timeout  = 0;
+    ivars->merge_lock_interval = 1000;
+    ivars->deletion_lock_timeout  = 1000;
+    ivars->deletion_lock_interval = 100;
 
     return self;
 }
 
 void
 IxManager_destroy(IndexManager *self) {
-    DECREF(self->host);
-    DECREF(self->folder);
-    DECREF(self->lock_factory);
+    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
+    DECREF(ivars->host);
+    DECREF(ivars->folder);
+    DECREF(ivars->lock_factory);
     SUPER_DESTROY(self, INDEXMANAGER);
 }
 
@@ -81,7 +83,8 @@ IxManager_highest_seg_num(IndexManager *self, Snapshot *snapshot) {
 
 CharBuf*
 IxManager_make_snapshot_filename(IndexManager *self) {
-    Folder *folder = (Folder*)CERTIFY(self->folder, FOLDER);
+    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
+    Folder *folder = (Folder*)CERTIFY(ivars->folder, FOLDER);
     DirHandle *dh = Folder_Open_Dir(folder, NULL);
     uint64_t max_gen = 0;
 
@@ -216,49 +219,54 @@ IxManager_choose_sparse(IndexManager *self, I32Array *doc_counts) {
 
 static LockFactory*
 S_obtain_lock_factory(IndexManager *self) {
-    if (!self->lock_factory) {
-        if (!self->folder) {
+    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
+    if (!ivars->lock_factory) {
+        if (!ivars->folder) {
             THROW(ERR, "Can't create a LockFactory without a Folder");
         }
-        self->lock_factory = LockFact_new(self->folder, self->host);
+        ivars->lock_factory = LockFact_new(ivars->folder, ivars->host);
     }
-    return self->lock_factory;
+    return ivars->lock_factory;
 }
 
 Lock*
 IxManager_make_write_lock(IndexManager *self) {
+    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
     ZombieCharBuf *write_lock_name = ZCB_WRAP_STR("write", 5);
     LockFactory *lock_factory = S_obtain_lock_factory(self);
     return LockFact_Make_Lock(lock_factory, (CharBuf*)write_lock_name,
-                              self->write_lock_timeout,
-                              self->write_lock_interval);
+                              ivars->write_lock_timeout,
+                              ivars->write_lock_interval);
 }
 
 Lock*
 IxManager_make_deletion_lock(IndexManager *self) {
+    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
     ZombieCharBuf *lock_name = ZCB_WRAP_STR("deletion", 8);
     LockFactory *lock_factory = S_obtain_lock_factory(self);
     return LockFact_Make_Lock(lock_factory, (CharBuf*)lock_name,
-                              self->deletion_lock_timeout,
-                              self->deletion_lock_interval);
+                              ivars->deletion_lock_timeout,
+                              ivars->deletion_lock_interval);
 }
 
 Lock*
 IxManager_make_merge_lock(IndexManager *self) {
+    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
     ZombieCharBuf *merge_lock_name = ZCB_WRAP_STR("merge", 5);
     LockFactory *lock_factory = S_obtain_lock_factory(self);
     return LockFact_Make_Lock(lock_factory, (CharBuf*)merge_lock_name,
-                              self->merge_lock_timeout,
-                              self->merge_lock_interval);
+                              ivars->merge_lock_timeout,
+                              ivars->merge_lock_interval);
 }
 
 void
 IxManager_write_merge_data(IndexManager *self, int64_t cutoff) {
+    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
     ZombieCharBuf *merge_json = ZCB_WRAP_STR("merge.json", 10);
     Hash *data = Hash_new(1);
     bool success;
     Hash_Store_Str(data, "cutoff", 6, (Obj*)CB_newf("%i64", cutoff));
-    success = Json_spew_json((Obj*)data, self->folder, (CharBuf*)merge_json);
+    success = Json_spew_json((Obj*)data, ivars->folder, (CharBuf*)merge_json);
     DECREF(data);
     if (!success) {
         THROW(ERR, "Failed to write to %o", merge_json);
@@ -267,10 +275,11 @@ IxManager_write_merge_data(IndexManager *self, int64_t cutoff) {
 
 Hash*
 IxManager_read_merge_data(IndexManager *self) {
+    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
     ZombieCharBuf *merge_json = ZCB_WRAP_STR("merge.json", 10);
-    if (Folder_Exists(self->folder, (CharBuf*)merge_json)) {
+    if (Folder_Exists(ivars->folder, (CharBuf*)merge_json)) {
         Hash *stuff
-            = (Hash*)Json_slurp_json(self->folder, (CharBuf*)merge_json);
+            = (Hash*)Json_slurp_json(ivars->folder, (CharBuf*)merge_json);
         if (stuff) {
             CERTIFY(stuff, HASH);
             return stuff;
@@ -286,8 +295,9 @@ IxManager_read_merge_data(IndexManager *self) {
 
 bool
 IxManager_remove_merge_data(IndexManager *self) {
+    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
     ZombieCharBuf *merge_json = ZCB_WRAP_STR("merge.json", 10);
-    return Folder_Delete(self->folder, (CharBuf*)merge_json) != 0;
+    return Folder_Delete(ivars->folder, (CharBuf*)merge_json) != 0;
 }
 
 Lock*
@@ -310,78 +320,79 @@ IxManager_make_snapshot_read_lock(IndexManager *self,
 
 void
 IxManager_set_folder(IndexManager *self, Folder *folder) {
-    DECREF(self->folder);
-    self->folder = (Folder*)INCREF(folder);
+    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
+    DECREF(ivars->folder);
+    ivars->folder = (Folder*)INCREF(folder);
 }
 
 Folder*
 IxManager_get_folder(IndexManager *self) {
-    return self->folder;
+    return IxManager_IVARS(self)->folder;
 }
 
 CharBuf*
 IxManager_get_host(IndexManager *self) {
-    return self->host;
+    return IxManager_IVARS(self)->host;
 }
 
 uint32_t
 IxManager_get_write_lock_timeout(IndexManager *self) {
-    return self->write_lock_timeout;
+    return IxManager_IVARS(self)->write_lock_timeout;
 }
 
 uint32_t
 IxManager_get_write_lock_interval(IndexManager *self) {
-    return self->write_lock_interval;
+    return IxManager_IVARS(self)->write_lock_interval;
 }
 
 uint32_t
 IxManager_get_merge_lock_timeout(IndexManager *self) {
-    return self->merge_lock_timeout;
+    return IxManager_IVARS(self)->merge_lock_timeout;
 }
 
 uint32_t
 IxManager_get_merge_lock_interval(IndexManager *self) {
-    return self->merge_lock_interval;
+    return IxManager_IVARS(self)->merge_lock_interval;
 }
 
 uint32_t
 IxManager_get_deletion_lock_timeout(IndexManager *self) {
-    return self->deletion_lock_timeout;
+    return IxManager_IVARS(self)->deletion_lock_timeout;
 }
 
 uint32_t
 IxManager_get_deletion_lock_interval(IndexManager *self) {
-    return self->deletion_lock_interval;
+    return IxManager_IVARS(self)->deletion_lock_interval;
 }
 
 void
 IxManager_set_write_lock_timeout(IndexManager *self, uint32_t timeout) {
-    self->write_lock_timeout = timeout;
+    IxManager_IVARS(self)->write_lock_timeout = timeout;
 }
 
 void
 IxManager_set_write_lock_interval(IndexManager *self, uint32_t interval) {
-    self->write_lock_interval = interval;
+    IxManager_IVARS(self)->write_lock_interval = interval;
 }
 
 void
 IxManager_set_merge_lock_timeout(IndexManager *self, uint32_t timeout) {
-    self->merge_lock_timeout = timeout;
+    IxManager_IVARS(self)->merge_lock_timeout = timeout;
 }
 
 void
 IxManager_set_merge_lock_interval(IndexManager *self, uint32_t interval) {
-    self->merge_lock_interval = interval;
+    IxManager_IVARS(self)->merge_lock_interval = interval;
 }
 
 void
 IxManager_set_deletion_lock_timeout(IndexManager *self, uint32_t timeout) {
-    self->deletion_lock_timeout = timeout;
+    IxManager_IVARS(self)->deletion_lock_timeout = timeout;
 }
 
 void
 IxManager_set_deletion_lock_interval(IndexManager *self, uint32_t interval) {
-    self->deletion_lock_interval = interval;
+    IxManager_IVARS(self)->deletion_lock_interval = interval;
 }
 
 

http://git-wip-us.apache.org/repos/asf/lucy/blob/965fdb2a/core/Lucy/Index/IndexReader.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/IndexReader.c b/core/Lucy/Index/IndexReader.c
index 99250c2..9f81db2 100644
--- a/core/Lucy/Index/IndexReader.c
+++ b/core/Lucy/Index/IndexReader.c
@@ -52,62 +52,66 @@ IxReader_init(IndexReader *self, Schema *schema, Folder *folder,
     DataReader_init((DataReader*)self, schema, folder, snapshot, segments,
                     seg_tick);
     DECREF(snapshot);
-    self->components     = Hash_new(0);
-    self->read_lock      = NULL;
-    self->deletion_lock  = NULL;
+    IndexReaderIVARS *const ivars = IxReader_IVARS(self);
+    ivars->components     = Hash_new(0);
+    ivars->read_lock      = NULL;
+    ivars->deletion_lock  = NULL;
     if (manager) {
-        self->manager = (IndexManager*)INCREF(manager);
-        IxManager_Set_Folder(self->manager, self->folder);
+        ivars->manager = (IndexManager*)INCREF(manager);
+        IxManager_Set_Folder(ivars->manager, ivars->folder);
     }
     else {
-        self->manager = NULL;
+        ivars->manager = NULL;
     }
     return self;
 }
 
 void
 IxReader_close(IndexReader *self) {
-    if (self->components) {
+    IndexReaderIVARS *const ivars = IxReader_IVARS(self);
+    if (ivars->components) {
         CharBuf *key;
         DataReader *component;
-        Hash_Iterate(self->components);
-        while (Hash_Next(self->components, (Obj**)&key,
+        Hash_Iterate(ivars->components);
+        while (Hash_Next(ivars->components, (Obj**)&key,
                          (Obj**)&component)
               ) {
             if (Obj_Is_A((Obj*)component, DATAREADER)) {
                 DataReader_Close(component);
             }
         }
-        Hash_Clear(self->components);
+        Hash_Clear(ivars->components);
     }
-    if (self->read_lock) {
-        Lock_Release(self->read_lock);
-        DECREF(self->read_lock);
-        self->read_lock = NULL;
+    if (ivars->read_lock) {
+        Lock_Release(ivars->read_lock);
+        DECREF(ivars->read_lock);
+        ivars->read_lock = NULL;
     }
 }
 
 void
 IxReader_destroy(IndexReader *self) {
-    DECREF(self->components);
-    if (self->read_lock) {
-        Lock_Release(self->read_lock);
-        DECREF(self->read_lock);
+    IndexReaderIVARS *const ivars = IxReader_IVARS(self);
+    DECREF(ivars->components);
+    if (ivars->read_lock) {
+        Lock_Release(ivars->read_lock);
+        DECREF(ivars->read_lock);
     }
-    DECREF(self->manager);
-    DECREF(self->deletion_lock);
+    DECREF(ivars->manager);
+    DECREF(ivars->deletion_lock);
     SUPER_DESTROY(self, INDEXREADER);
 }
 
 Hash*
 IxReader_get_components(IndexReader *self) {
-    return self->components;
+    return IxReader_IVARS(self)->components;
 }
 
 DataReader*
 IxReader_obtain(IndexReader *self, const CharBuf *api) {
+    IndexReaderIVARS *const ivars = IxReader_IVARS(self);
     DataReader *component
-        = (DataReader*)Hash_Fetch(self->components, (Obj*)api);
+        = (DataReader*)Hash_Fetch(ivars->components, (Obj*)api);
     if (!component) {
         THROW(ERR, "No component registered for '%o'", api);
     }
@@ -116,7 +120,8 @@ IxReader_obtain(IndexReader *self, const CharBuf *api) {
 
 DataReader*
 IxReader_fetch(IndexReader *self, const CharBuf *api) {
-    return (DataReader*)Hash_Fetch(self->components, (Obj*)api);
+    IndexReaderIVARS *const ivars = IxReader_IVARS(self);
+    return (DataReader*)Hash_Fetch(ivars->components, (Obj*)api);
 }
 
 

http://git-wip-us.apache.org/repos/asf/lucy/blob/965fdb2a/core/Lucy/Index/Indexer.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/Indexer.c b/core/Lucy/Index/Indexer.c
index 1a9076d..803b7db 100644
--- a/core/Lucy/Index/Indexer.c
+++ b/core/Lucy/Index/Indexer.c
@@ -70,33 +70,34 @@ Indexer_new(Schema *schema, Obj *index, IndexManager *manager, int32_t flags) {
 Indexer*
 Indexer_init(Indexer *self, Schema *schema, Obj *index,
              IndexManager *manager, int32_t flags) {
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
     bool      create   = (flags & Indexer_CREATE)   ? true : false;
     bool      truncate = (flags & Indexer_TRUNCATE) ? true : false;
     Folder   *folder   = S_init_folder(index, create);
     Snapshot *latest_snapshot = Snapshot_new();
 
     // Init.
-    self->stock_doc     = Doc_new(NULL, 0);
-    self->truncate      = false;
-    self->optimize      = false;
-    self->prepared      = false;
-    self->needs_commit  = false;
-    self->snapfile      = NULL;
-    self->merge_lock    = NULL;
+    ivars->stock_doc     = Doc_new(NULL, 0);
+    ivars->truncate      = false;
+    ivars->optimize      = false;
+    ivars->prepared      = false;
+    ivars->needs_commit  = false;
+    ivars->snapfile      = NULL;
+    ivars->merge_lock    = NULL;
 
     // Assign.
-    self->folder       = folder;
-    self->manager      = manager
+    ivars->folder       = folder;
+    ivars->manager      = manager
                          ? (IndexManager*)INCREF(manager)
                          : IxManager_new(NULL, NULL);
-    IxManager_Set_Folder(self->manager, folder);
+    IxManager_Set_Folder(ivars->manager, folder);
 
     // Get a write lock for this folder.
-    Lock *write_lock = IxManager_Make_Write_Lock(self->manager);
+    Lock *write_lock = IxManager_Make_Write_Lock(ivars->manager);
     Lock_Clear_Stale(write_lock);
     if (Lock_Obtain(write_lock)) {
         // Only assign if successful, otherwise DESTROY unlocks -- bad!
-        self->write_lock = write_lock;
+        ivars->write_lock = write_lock;
     }
     else {
         DECREF(write_lock);
@@ -112,7 +113,7 @@ Indexer_init(Indexer *self, Schema *schema, Obj *index,
 
     // Look for an existing Schema if one wasn't supplied.
     if (schema) {
-        self->schema = (Schema*)INCREF(schema);
+        ivars->schema = (Schema*)INCREF(schema);
     }
     else {
         if (!latest_snapfile) {
@@ -123,10 +124,10 @@ Indexer_init(Indexer *self, Schema *schema, Obj *index,
             CharBuf *schema_file = S_find_schema_file(latest_snapshot);
             Hash *dump = (Hash*)Json_slurp_json(folder, schema_file);
             if (dump) { // read file successfully
-                self->schema = (Schema*)CERTIFY(
+                ivars->schema = (Schema*)CERTIFY(
                                    VTable_Load_Obj(SCHEMA, (Obj*)dump),
                                    SCHEMA);
-                schema = self->schema;
+                schema = ivars->schema;
                 DECREF(dump);
                 schema_file = NULL;
             }
@@ -140,21 +141,21 @@ Indexer_init(Indexer *self, Schema *schema, Obj *index,
     // PolyReader.  Otherwise, start with the most recent Snapshot and an
     // up-to-date PolyReader.
     if (truncate) {
-        self->snapshot = Snapshot_new();
-        self->polyreader = PolyReader_new(schema, folder, NULL, NULL, NULL);
-        self->truncate = true;
+        ivars->snapshot = Snapshot_new();
+        ivars->polyreader = PolyReader_new(schema, folder, NULL, NULL, NULL);
+        ivars->truncate = true;
     }
     else {
         // TODO: clone most recent snapshot rather than read it twice.
-        self->snapshot = (Snapshot*)INCREF(latest_snapshot);
-        self->polyreader = latest_snapfile
+        ivars->snapshot = (Snapshot*)INCREF(latest_snapshot);
+        ivars->polyreader = latest_snapfile
                            ? PolyReader_open((Obj*)folder, NULL, NULL)
                            : PolyReader_new(schema, folder, NULL, NULL, NULL);
 
         if (latest_snapfile) {
             // Make sure than any existing fields which may have been
             // dynamically added during past indexing sessions get added.
-            Schema *old_schema = PolyReader_Get_Schema(self->polyreader);
+            Schema *old_schema = PolyReader_Get_Schema(ivars->polyreader);
             Schema_Eat(schema, old_schema);
         }
     }
@@ -163,18 +164,18 @@ Indexer_init(Indexer *self, Schema *schema, Obj *index,
     // Note: we have to feed FilePurger with the most recent snapshot file
     // now, but with the Indexer's snapshot later.
     FilePurger *file_purger
-        = FilePurger_new(folder, latest_snapshot, self->manager);
+        = FilePurger_new(folder, latest_snapshot, ivars->manager);
     FilePurger_Purge(file_purger);
     DECREF(file_purger);
 
     // Create a new segment.
     int64_t new_seg_num
-        = IxManager_Highest_Seg_Num(self->manager, latest_snapshot) + 1;
-    Lock *merge_lock = IxManager_Make_Merge_Lock(self->manager);
+        = IxManager_Highest_Seg_Num(ivars->manager, latest_snapshot) + 1;
+    Lock *merge_lock = IxManager_Make_Merge_Lock(ivars->manager);
     if (Lock_Is_Locked(merge_lock)) {
         // If there's a background merge process going on, stay out of its
         // way.
-        Hash *merge_data = IxManager_Read_Merge_Data(self->manager);
+        Hash *merge_data = IxManager_Read_Merge_Data(ivars->manager);
         Obj *cutoff_obj = merge_data
                           ? Hash_Fetch_Str(merge_data, "cutoff", 6)
                           : NULL;
@@ -191,27 +192,27 @@ Indexer_init(Indexer *self, Schema *schema, Obj *index,
         }
         DECREF(merge_data);
     }
-    self->segment = Seg_new(new_seg_num);
+    ivars->segment = Seg_new(new_seg_num);
 
     // Add all known fields to Segment.
     VArray *fields = Schema_All_Fields(schema);
     for (uint32_t i = 0, max = VA_Get_Size(fields); i < max; i++) {
-        Seg_Add_Field(self->segment, (CharBuf*)VA_Fetch(fields, i));
+        Seg_Add_Field(ivars->segment, (CharBuf*)VA_Fetch(fields, i));
     }
     DECREF(fields);
 
     DECREF(merge_lock);
 
     // Create new SegWriter and FilePurger.
-    self->file_purger
-        = FilePurger_new(folder, self->snapshot, self->manager);
-    self->seg_writer = SegWriter_new(self->schema, self->snapshot,
-                                     self->segment, self->polyreader);
-    SegWriter_Prep_Seg_Dir(self->seg_writer);
+    ivars->file_purger
+        = FilePurger_new(folder, ivars->snapshot, ivars->manager);
+    ivars->seg_writer = SegWriter_new(ivars->schema, ivars->snapshot,
+                                     ivars->segment, ivars->polyreader);
+    SegWriter_Prep_Seg_Dir(ivars->seg_writer);
 
     // Grab a local ref to the DeletionsWriter.
-    self->del_writer = (DeletionsWriter*)INCREF(
-                           SegWriter_Get_Del_Writer(self->seg_writer));
+    ivars->del_writer = (DeletionsWriter*)INCREF(
+                           SegWriter_Get_Del_Writer(ivars->seg_writer));
 
     DECREF(latest_snapfile);
     DECREF(latest_snapshot);
@@ -221,20 +222,21 @@ Indexer_init(Indexer *self, Schema *schema, Obj *index,
 
 void
 Indexer_destroy(Indexer *self) {
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
     S_release_merge_lock(self);
     S_release_write_lock(self);
-    DECREF(self->schema);
-    DECREF(self->folder);
-    DECREF(self->segment);
-    DECREF(self->manager);
-    DECREF(self->stock_doc);
-    DECREF(self->polyreader);
-    DECREF(self->del_writer);
-    DECREF(self->snapshot);
-    DECREF(self->seg_writer);
-    DECREF(self->file_purger);
-    DECREF(self->write_lock);
-    DECREF(self->snapfile);
+    DECREF(ivars->schema);
+    DECREF(ivars->folder);
+    DECREF(ivars->segment);
+    DECREF(ivars->manager);
+    DECREF(ivars->stock_doc);
+    DECREF(ivars->polyreader);
+    DECREF(ivars->del_writer);
+    DECREF(ivars->snapshot);
+    DECREF(ivars->seg_writer);
+    DECREF(ivars->file_purger);
+    DECREF(ivars->write_lock);
+    DECREF(ivars->snapfile);
     SUPER_DESTROY(self, INDEXER);
 }
 
@@ -268,12 +270,14 @@ S_init_folder(Obj *index, bool create) {
 
 void
 Indexer_add_doc(Indexer *self, Doc *doc, float boost) {
-    SegWriter_Add_Doc(self->seg_writer, doc, boost);
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
+    SegWriter_Add_Doc(ivars->seg_writer, doc, boost);
 }
 
 void
 Indexer_delete_by_term(Indexer *self, CharBuf *field, Obj *term) {
-    Schema    *schema = self->schema;
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
+    Schema    *schema = ivars->schema;
     FieldType *type   = Schema_Fetch_Type(schema, field);
 
     // Raise exception if the field isn't indexed.
@@ -288,28 +292,31 @@ Indexer_delete_by_term(Indexer *self, CharBuf *field, Obj *term) {
         VArray *terms = Analyzer_Split(analyzer, (CharBuf*)term);
         Obj *analyzed_term = VA_Fetch(terms, 0);
         if (analyzed_term) {
-            DelWriter_Delete_By_Term(self->del_writer, field,
+            DelWriter_Delete_By_Term(ivars->del_writer, field,
                                      analyzed_term);
         }
         DECREF(terms);
     }
     else {
-        DelWriter_Delete_By_Term(self->del_writer, field, term);
+        DelWriter_Delete_By_Term(ivars->del_writer, field, term);
     }
 }
 
 void
 Indexer_delete_by_query(Indexer *self, Query *query) {
-    DelWriter_Delete_By_Query(self->del_writer, query);
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
+    DelWriter_Delete_By_Query(ivars->del_writer, query);
 }
 
 void
 Indexer_delete_by_doc_id(Indexer *self, int32_t doc_id) {
-    DelWriter_Delete_By_Doc_ID(self->del_writer, doc_id);
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
+    DelWriter_Delete_By_Doc_ID(ivars->del_writer, doc_id);
 }
 
 void
 Indexer_add_index(Indexer *self, Obj *index) {
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
     Folder *other_folder = NULL;
     IndexReader *reader  = NULL;
 
@@ -328,7 +335,7 @@ Indexer_add_index(Indexer *self, Obj *index) {
         THROW(ERR, "Index doesn't seem to contain any data");
     }
     else {
-        Schema *schema       = self->schema;
+        Schema *schema       = ivars->schema;
         Schema *other_schema = IxReader_Get_Schema(reader);
         VArray *other_fields = Schema_All_Fields(other_schema);
         VArray *seg_readers  = IxReader_Seg_Readers(reader);
@@ -339,7 +346,7 @@ Indexer_add_index(Indexer *self, Obj *index) {
         // Add fields to Segment.
         for (uint32_t i = 0, max = VA_Get_Size(other_fields); i < max; i++) {
             CharBuf *other_field = (CharBuf*)VA_Fetch(other_fields, i);
-            Seg_Add_Field(self->segment, other_field);
+            Seg_Add_Field(ivars->segment, other_field);
         }
         DECREF(other_fields);
 
@@ -353,10 +360,10 @@ Indexer_add_index(Indexer *self, Obj *index) {
                                  ? DelReader_Iterator(del_reader)
                                  : NULL;
             I32Array *doc_map = DelWriter_Generate_Doc_Map(
-                                    self->del_writer, deletions,
+                                    ivars->del_writer, deletions,
                                     SegReader_Doc_Max(seg_reader),
-                                    (int32_t)Seg_Get_Count(self->segment));
-            SegWriter_Add_Segment(self->seg_writer, seg_reader, doc_map);
+                                    (int32_t)Seg_Get_Count(ivars->segment));
+            SegWriter_Add_Segment(ivars->seg_writer, seg_reader, doc_map);
             DECREF(deletions);
             DECREF(doc_map);
         }
@@ -369,7 +376,7 @@ Indexer_add_index(Indexer *self, Obj *index) {
 
 void
 Indexer_optimize(Indexer *self) {
-    self->optimize = true;
+    Indexer_IVARS(self)->optimize = true;
 }
 
 static CharBuf*
@@ -391,19 +398,20 @@ S_find_schema_file(Snapshot *snapshot) {
 
 static bool
 S_maybe_merge(Indexer *self, VArray *seg_readers) {
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
     bool      merge_happened  = false;
     uint32_t  num_seg_readers = VA_Get_Size(seg_readers);
-    Lock     *merge_lock      = IxManager_Make_Merge_Lock(self->manager);
+    Lock     *merge_lock      = IxManager_Make_Merge_Lock(ivars->manager);
     bool      got_merge_lock  = Lock_Obtain(merge_lock);
     int64_t   cutoff;
 
     if (got_merge_lock) {
-        self->merge_lock = merge_lock;
+        ivars->merge_lock = merge_lock;
         cutoff = 0;
     }
     else {
         // If something else holds the merge lock, don't interfere.
-        Hash *merge_data = IxManager_Read_Merge_Data(self->manager);
+        Hash *merge_data = IxManager_Read_Merge_Data(ivars->manager);
         if (merge_data) {
             Obj *cutoff_obj = Hash_Fetch_Str(merge_data, "cutoff", 6);
             if (cutoff_obj) {
@@ -422,8 +430,8 @@ S_maybe_merge(Indexer *self, VArray *seg_readers) {
 
     // Get a list of segments to recycle.  Validate and confirm that there are
     // no dupes in the list.
-    VArray *to_merge = IxManager_Recycle(self->manager, self->polyreader,
-                                         self->del_writer, cutoff, self->optimize);
+    VArray *to_merge = IxManager_Recycle(ivars->manager, ivars->polyreader,
+                                         ivars->del_writer, cutoff, ivars->optimize);
 
     Hash *seen = Hash_new(VA_Get_Size(to_merge));
     for (uint32_t i = 0, max = VA_Get_Size(to_merge); i < max; i++) {
@@ -445,26 +453,26 @@ S_maybe_merge(Indexer *self, VArray *seg_readers) {
         SegReader *seg_reader = (SegReader*)VA_Fetch(to_merge, i);
         int64_t seg_num = SegReader_Get_Seg_Num(seg_reader);
         Matcher *deletions
-            = DelWriter_Seg_Deletions(self->del_writer, seg_reader);
+            = DelWriter_Seg_Deletions(ivars->del_writer, seg_reader);
         I32Array *doc_map = DelWriter_Generate_Doc_Map(
-                                self->del_writer, deletions,
+                                ivars->del_writer, deletions,
                                 SegReader_Doc_Max(seg_reader),
-                                (int32_t)Seg_Get_Count(self->segment));
+                                (int32_t)Seg_Get_Count(ivars->segment));
         if (seg_num <= cutoff) {
             THROW(ERR, "Segment %o violates cutoff (%i64 <= %i64)",
                   SegReader_Get_Seg_Name(seg_reader), seg_num, cutoff);
         }
-        SegWriter_Merge_Segment(self->seg_writer, seg_reader, doc_map);
+        SegWriter_Merge_Segment(ivars->seg_writer, seg_reader, doc_map);
         merge_happened = true;
         DECREF(deletions);
         DECREF(doc_map);
     }
 
     // Write out new deletions.
-    if (DelWriter_Updated(self->del_writer)) {
+    if (DelWriter_Updated(ivars->del_writer)) {
         // Only write out if they haven't all been applied.
         if (VA_Get_Size(to_merge) != num_seg_readers) {
-            DelWriter_Finish(self->del_writer);
+            DelWriter_Finish(ivars->del_writer);
         }
     }
 
@@ -474,11 +482,12 @@ S_maybe_merge(Indexer *self, VArray *seg_readers) {
 
 void
 Indexer_prepare_commit(Indexer *self) {
-    VArray   *seg_readers     = PolyReader_Get_Seg_Readers(self->polyreader);
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
+    VArray   *seg_readers     = PolyReader_Get_Seg_Readers(ivars->polyreader);
     uint32_t  num_seg_readers = VA_Get_Size(seg_readers);
     bool      merge_happened  = false;
 
-    if (!self->write_lock || self->prepared) {
+    if (!ivars->write_lock || ivars->prepared) {
         THROW(ERR, "Can't call Prepare_Commit() more than once");
     }
 
@@ -488,26 +497,26 @@ Indexer_prepare_commit(Indexer *self) {
     }
 
     // Add a new segment and write a new snapshot file if...
-    if (Seg_Get_Count(self->segment)             // Docs/segs added.
+    if (Seg_Get_Count(ivars->segment)             // Docs/segs added.
         || merge_happened                        // Some segs merged.
-        || !Snapshot_Num_Entries(self->snapshot) // Initializing index.
-        || DelWriter_Updated(self->del_writer)
+        || !Snapshot_Num_Entries(ivars->snapshot) // Initializing index.
+        || DelWriter_Updated(ivars->del_writer)
        ) {
-        Folder   *folder   = self->folder;
-        Schema   *schema   = self->schema;
-        Snapshot *snapshot = self->snapshot;
+        Folder   *folder   = ivars->folder;
+        Schema   *schema   = ivars->schema;
+        Snapshot *snapshot = ivars->snapshot;
 
         // Derive snapshot and schema file names.
-        DECREF(self->snapfile);
-        self->snapfile = IxManager_Make_Snapshot_Filename(self->manager);
-        CB_Cat_Trusted_Str(self->snapfile, ".temp", 5);
-        uint64_t schema_gen = IxFileNames_extract_gen(self->snapfile);
+        DECREF(ivars->snapfile);
+        ivars->snapfile = IxManager_Make_Snapshot_Filename(ivars->manager);
+        CB_Cat_Trusted_Str(ivars->snapfile, ".temp", 5);
+        uint64_t schema_gen = IxFileNames_extract_gen(ivars->snapfile);
         char base36[StrHelp_MAX_BASE36_BYTES];
         StrHelp_to_base36(schema_gen, &base36);
         CharBuf *new_schema_name = CB_newf("schema_%s.json", base36);
 
         // Finish the segment, write schema file.
-        SegWriter_Finish(self->seg_writer);
+        SegWriter_Finish(ivars->seg_writer);
         Schema_Write(schema, folder, new_schema_name);
         CharBuf *old_schema_name = S_find_schema_file(snapshot);
         if (old_schema_name) {
@@ -517,42 +526,44 @@ Indexer_prepare_commit(Indexer *self) {
         DECREF(new_schema_name);
 
         // Write temporary snapshot file.
-        Folder_Delete(folder, self->snapfile);
-        Snapshot_Write_File(snapshot, folder, self->snapfile);
+        Folder_Delete(folder, ivars->snapfile);
+        Snapshot_Write_File(snapshot, folder, ivars->snapfile);
 
-        self->needs_commit = true;
+        ivars->needs_commit = true;
     }
 
     // Close reader, so that we can delete its files if appropriate.
-    PolyReader_Close(self->polyreader);
+    PolyReader_Close(ivars->polyreader);
 
-    self->prepared = true;
+    ivars->prepared = true;
 }
 
 void
 Indexer_commit(Indexer *self) {
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
+
     // Safety check.
-    if (!self->write_lock) {
+    if (!ivars->write_lock) {
         THROW(ERR, "Can't call commit() more than once");
     }
 
-    if (!self->prepared) {
+    if (!ivars->prepared) {
         Indexer_Prepare_Commit(self);
     }
 
-    if (self->needs_commit) {
+    if (ivars->needs_commit) {
         bool success;
 
         // Rename temp snapshot file.
-        CharBuf *temp_snapfile = CB_Clone(self->snapfile);
-        CB_Chop(self->snapfile, sizeof(".temp") - 1);
-        Snapshot_Set_Path(self->snapshot, self->snapfile);
-        success = Folder_Rename(self->folder, temp_snapfile, self->snapfile);
+        CharBuf *temp_snapfile = CB_Clone(ivars->snapfile);
+        CB_Chop(ivars->snapfile, sizeof(".temp") - 1);
+        Snapshot_Set_Path(ivars->snapshot, ivars->snapfile);
+        success = Folder_Rename(ivars->folder, temp_snapfile, ivars->snapfile);
         DECREF(temp_snapfile);
         if (!success) { RETHROW(INCREF(Err_get_error())); }
 
         // Purge obsolete files.
-        FilePurger_Purge(self->file_purger);
+        FilePurger_Purge(ivars->file_purger);
     }
 
     // Release locks, invalidating the Indexer.
@@ -562,34 +573,36 @@ Indexer_commit(Indexer *self) {
 
 Schema*
 Indexer_get_schema(Indexer *self) {
-    return self->schema;
+    return Indexer_IVARS(self)->schema;
 }
 
 SegWriter*
 Indexer_get_seg_writer(Indexer *self) {
-    return self->seg_writer;
+    return Indexer_IVARS(self)->seg_writer;
 }
 
 Doc*
 Indexer_get_stock_doc(Indexer *self) {
-    return self->stock_doc;
+    return Indexer_IVARS(self)->stock_doc;
 }
 
 static void
 S_release_write_lock(Indexer *self) {
-    if (self->write_lock) {
-        Lock_Release(self->write_lock);
-        DECREF(self->write_lock);
-        self->write_lock = NULL;
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
+    if (ivars->write_lock) {
+        Lock_Release(ivars->write_lock);
+        DECREF(ivars->write_lock);
+        ivars->write_lock = NULL;
     }
 }
 
 static void
 S_release_merge_lock(Indexer *self) {
-    if (self->merge_lock) {
-        Lock_Release(self->merge_lock);
-        DECREF(self->merge_lock);
-        self->merge_lock = NULL;
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
+    if (ivars->merge_lock) {
+        Lock_Release(ivars->merge_lock);
+        DECREF(ivars->merge_lock);
+        ivars->merge_lock = NULL;
     }
 }
 

http://git-wip-us.apache.org/repos/asf/lucy/blob/965fdb2a/core/Lucy/Index/Inverter.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/Inverter.c b/core/Lucy/Index/Inverter.c
index 10077ed..77c0bcc 100644
--- a/core/Lucy/Index/Inverter.c
+++ b/core/Lucy/Index/Inverter.c
@@ -40,138 +40,155 @@ Inverter_new(Schema *schema, Segment *segment) {
 
 Inverter*
 Inverter_init(Inverter *self, Schema *schema, Segment *segment) {
+    InverterIVARS *const ivars = Inverter_IVARS(self);
+
     // Init.
-    self->tick       = -1;
-    self->doc        = NULL;
-    self->sorted     = false;
-    self->blank      = InvEntry_new(NULL, NULL, 0);
-    self->current    = self->blank;
+    ivars->tick       = -1;
+    ivars->doc        = NULL;
+    ivars->sorted     = false;
+    ivars->blank      = InvEntry_new(NULL, NULL, 0);
+    ivars->current    = ivars->blank;
 
     // Derive.
-    self->entry_pool = VA_new(Schema_Num_Fields(schema));
-    self->entries    = VA_new(Schema_Num_Fields(schema));
+    ivars->entry_pool = VA_new(Schema_Num_Fields(schema));
+    ivars->entries    = VA_new(Schema_Num_Fields(schema));
 
     // Assign.
-    self->schema  = (Schema*)INCREF(schema);
-    self->segment = (Segment*)INCREF(segment);
+    ivars->schema  = (Schema*)INCREF(schema);
+    ivars->segment = (Segment*)INCREF(segment);
 
     return self;
 }
 
 void
 Inverter_destroy(Inverter *self) {
+    InverterIVARS *const ivars = Inverter_IVARS(self);
     Inverter_Clear(self);
-    DECREF(self->blank);
-    DECREF(self->entries);
-    DECREF(self->entry_pool);
-    DECREF(self->schema);
-    DECREF(self->segment);
+    DECREF(ivars->blank);
+    DECREF(ivars->entries);
+    DECREF(ivars->entry_pool);
+    DECREF(ivars->schema);
+    DECREF(ivars->segment);
     SUPER_DESTROY(self, INVERTER);
 }
 
 uint32_t
 Inverter_iterate(Inverter *self) {
-    self->tick = -1;
-    if (!self->sorted) {
-        VA_Sort(self->entries, NULL, NULL);
-        self->sorted = true;
+    InverterIVARS *const ivars = Inverter_IVARS(self);
+    ivars->tick = -1;
+    if (!ivars->sorted) {
+        VA_Sort(ivars->entries, NULL, NULL);
+        ivars->sorted = true;
     }
-    return VA_Get_Size(self->entries);
+    return VA_Get_Size(ivars->entries);
 }
 
 int32_t
 Inverter_next(Inverter *self) {
-    self->current = (InverterEntry*)VA_Fetch(self->entries, ++self->tick);
-    if (!self->current) { self->current = self->blank; } // Exhausted.
-    return self->current->field_num;
+    InverterIVARS *const ivars = Inverter_IVARS(self);
+    ivars->current = (InverterEntry*)VA_Fetch(ivars->entries, ++ivars->tick);
+    if (!ivars->current) { ivars->current = ivars->blank; } // Exhausted.
+    return InvEntry_IVARS(ivars->current)->field_num;
 }
 
 void
 Inverter_set_doc(Inverter *self, Doc *doc) {
+    InverterIVARS *const ivars = Inverter_IVARS(self);
     Inverter_Clear(self); // Zap all cached field values and Inversions.
-    self->doc = (Doc*)INCREF(doc);
+    ivars->doc = (Doc*)INCREF(doc);
 }
 
 void
 Inverter_set_boost(Inverter *self, float boost) {
-    self->boost = boost;
+    Inverter_IVARS(self)->boost = boost;
 }
 
 float
 Inverter_get_boost(Inverter *self) {
-    return self->boost;
+    return Inverter_IVARS(self)->boost;
 }
 
 Doc*
 Inverter_get_doc(Inverter *self) {
-    return self->doc;
+    return Inverter_IVARS(self)->doc;
 }
 
 CharBuf*
 Inverter_get_field_name(Inverter *self) {
-    return self->current->field;
+    InverterEntry *current = Inverter_IVARS(self)->current;
+    return InvEntry_IVARS(current)->field;
 }
 
 Obj*
 Inverter_get_value(Inverter *self) {
-    return self->current->value;
+    InverterEntry *current = Inverter_IVARS(self)->current;
+    return InvEntry_IVARS(current)->value;
 }
 
 FieldType*
 Inverter_get_type(Inverter *self) {
-    return self->current->type;
+    InverterEntry *current = Inverter_IVARS(self)->current;
+    return InvEntry_IVARS(current)->type;
 }
 
 Analyzer*
 Inverter_get_analyzer(Inverter *self) {
-    return self->current->analyzer;
+    InverterEntry *current = Inverter_IVARS(self)->current;
+    return InvEntry_IVARS(current)->analyzer;
 }
 
 Similarity*
 Inverter_get_similarity(Inverter *self) {
-    return self->current->sim;
+    InverterEntry *current = Inverter_IVARS(self)->current;
+    return InvEntry_IVARS(current)->sim;
 }
 
 Inversion*
 Inverter_get_inversion(Inverter *self) {
-    return self->current->inversion;
+    InverterEntry *current = Inverter_IVARS(self)->current;
+    return InvEntry_IVARS(current)->inversion;
 }
 
 
 void
 Inverter_add_field(Inverter *self, InverterEntry *entry) {
+    InverterIVARS *const ivars = Inverter_IVARS(self);
+    InverterEntryIVARS *const entry_ivars = InvEntry_IVARS(entry);
+
     // Get an Inversion, going through analyzer if appropriate.
-    if (entry->analyzer) {
-        DECREF(entry->inversion);
-        entry->inversion = Analyzer_Transform_Text(entry->analyzer,
-                                                   (CharBuf*)entry->value);
-        Inversion_Invert(entry->inversion);
+    if (entry_ivars->analyzer) {
+        DECREF(entry_ivars->inversion);
+        entry_ivars->inversion
+            = Analyzer_Transform_Text(entry_ivars->analyzer,
+                                      (CharBuf*)entry_ivars->value);
+        Inversion_Invert(entry_ivars->inversion);
     }
-    else if (entry->indexed || entry->highlightable) {
-        ViewCharBuf *value = (ViewCharBuf*)entry->value;
+    else if (entry_ivars->indexed || entry_ivars->highlightable) {
+        ViewCharBuf *value = (ViewCharBuf*)entry_ivars->value;
         size_t token_len = ViewCB_Get_Size(value);
         Token *seed = Token_new((char*)ViewCB_Get_Ptr8(value),
                                 token_len, 0, token_len, 1.0f, 1);
-        DECREF(entry->inversion);
-        entry->inversion = Inversion_new(seed);
+        DECREF(entry_ivars->inversion);
+        entry_ivars->inversion = Inversion_new(seed);
         DECREF(seed);
-        Inversion_Invert(entry->inversion); // Nearly a no-op.
+        Inversion_Invert(entry_ivars->inversion); // Nearly a no-op.
     }
 
     // Prime the iterator.
-    VA_Push(self->entries, INCREF(entry));
-    self->sorted = false;
+    VA_Push(ivars->entries, INCREF(entry));
+    ivars->sorted = false;
 }
 
 void
 Inverter_clear(Inverter *self) {
-    for (uint32_t i = 0, max = VA_Get_Size(self->entries); i < max; i++) {
-        InvEntry_Clear(VA_Fetch(self->entries, i));
+    InverterIVARS *const ivars = Inverter_IVARS(self);
+    for (uint32_t i = 0, max = VA_Get_Size(ivars->entries); i < max; i++) {
+        InvEntry_Clear(VA_Fetch(ivars->entries, i));
     }
-    VA_Clear(self->entries);
-    self->tick = -1;
-    DECREF(self->doc);
-    self->doc = NULL;
+    VA_Clear(ivars->entries);
+    ivars->tick = -1;
+    DECREF(ivars->doc);
+    ivars->doc = NULL;
 }
 
 InverterEntry*
@@ -183,49 +200,50 @@ InvEntry_new(Schema *schema, const CharBuf *field, int32_t field_num) {
 InverterEntry*
 InvEntry_init(InverterEntry *self, Schema *schema, const CharBuf *field,
               int32_t field_num) {
-    self->field_num  = field_num;
-    self->field      = field ? CB_Clone(field) : NULL;
-    self->inversion  = NULL;
+    InverterEntryIVARS *const ivars = InvEntry_IVARS(self);
+    ivars->field_num  = field_num;
+    ivars->field      = field ? CB_Clone(field) : NULL;
+    ivars->inversion  = NULL;
 
     if (schema) {
-        self->analyzer
+        ivars->analyzer
             = (Analyzer*)INCREF(Schema_Fetch_Analyzer(schema, field));
-        self->sim  = (Similarity*)INCREF(Schema_Fetch_Sim(schema, field));
-        self->type = (FieldType*)INCREF(Schema_Fetch_Type(schema, field));
-        if (!self->type) { THROW(ERR, "Unknown field: '%o'", field); }
+        ivars->sim  = (Similarity*)INCREF(Schema_Fetch_Sim(schema, field));
+        ivars->type = (FieldType*)INCREF(Schema_Fetch_Type(schema, field));
+        if (!ivars->type) { THROW(ERR, "Unknown field: '%o'", field); }
 
-        uint8_t prim_id = FType_Primitive_ID(self->type);
+        uint8_t prim_id = FType_Primitive_ID(ivars->type);
         switch (prim_id & FType_PRIMITIVE_ID_MASK) {
             case FType_TEXT:
-                self->value = (Obj*)ViewCB_new_from_trusted_utf8(NULL, 0);
+                ivars->value = (Obj*)ViewCB_new_from_trusted_utf8(NULL, 0);
                 break;
             case FType_BLOB:
-                self->value = (Obj*)ViewBB_new(NULL, 0);
+                ivars->value = (Obj*)ViewBB_new(NULL, 0);
                 break;
             case FType_INT32:
-                self->value = (Obj*)Int32_new(0);
+                ivars->value = (Obj*)Int32_new(0);
                 break;
             case FType_INT64:
-                self->value = (Obj*)Int64_new(0);
+                ivars->value = (Obj*)Int64_new(0);
                 break;
             case FType_FLOAT32:
-                self->value = (Obj*)Float32_new(0);
+                ivars->value = (Obj*)Float32_new(0);
                 break;
             case FType_FLOAT64:
-                self->value = (Obj*)Float64_new(0);
+                ivars->value = (Obj*)Float64_new(0);
                 break;
             default:
                 THROW(ERR, "Unrecognized primitive id: %i8", prim_id);
         }
 
-        self->indexed = FType_Indexed(self->type);
-        if (self->indexed && FType_Is_A(self->type, NUMERICTYPE)) {
+        ivars->indexed = FType_Indexed(ivars->type);
+        if (ivars->indexed && FType_Is_A(ivars->type, NUMERICTYPE)) {
             THROW(ERR, "Field '%o' spec'd as indexed, but numerical types cannot "
                   "be indexed yet", field);
         }
-        if (FType_Is_A(self->type, FULLTEXTTYPE)) {
-            self->highlightable
-                = FullTextType_Highlightable((FullTextType*)self->type);
+        if (FType_Is_A(ivars->type, FULLTEXTTYPE)) {
+            ivars->highlightable
+                = FullTextType_Highlightable((FullTextType*)ivars->type);
         }
     }
     return self;
@@ -233,26 +251,29 @@ InvEntry_init(InverterEntry *self, Schema *schema, const CharBuf *field,
 
 void
 InvEntry_destroy(InverterEntry *self) {
-    DECREF(self->field);
-    DECREF(self->value);
-    DECREF(self->analyzer);
-    DECREF(self->type);
-    DECREF(self->sim);
-    DECREF(self->inversion);
+    InverterEntryIVARS *const ivars = InvEntry_IVARS(self);
+    DECREF(ivars->field);
+    DECREF(ivars->value);
+    DECREF(ivars->analyzer);
+    DECREF(ivars->type);
+    DECREF(ivars->sim);
+    DECREF(ivars->inversion);
     SUPER_DESTROY(self, INVERTERENTRY);
 }
 
 void
 InvEntry_clear(InverterEntry *self) {
-    DECREF(self->inversion);
-    self->inversion = NULL;
+    InverterEntryIVARS *const ivars = InvEntry_IVARS(self);
+    DECREF(ivars->inversion);
+    ivars->inversion = NULL;
 }
 
 int32_t
 InvEntry_compare_to(InverterEntry *self, Obj *other) {
-    InverterEntry *competitor
-        = (InverterEntry*)CERTIFY(other, INVERTERENTRY);
-    return self->field_num - competitor->field_num;
+    CERTIFY(other, INVERTERENTRY);
+    InverterEntryIVARS *const ivars = InvEntry_IVARS(self);
+    InverterEntryIVARS *const ovars = InvEntry_IVARS((InverterEntry*)other);
+    return ivars->field_num - ovars->field_num;
 }
 
 

http://git-wip-us.apache.org/repos/asf/lucy/blob/965fdb2a/core/Lucy/Index/LexIndex.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/LexIndex.c b/core/Lucy/Index/LexIndex.c
index dca9adf..3d3200e 100644
--- a/core/Lucy/Index/LexIndex.c
+++ b/core/Lucy/Index/LexIndex.c
@@ -50,41 +50,42 @@ LexIndex_init(LexIndex *self, Schema *schema, Folder *folder,
 
     // Init.
     Lex_init((Lexicon*)self, field);
-    self->tinfo        = TInfo_new(0);
-    self->tick         = 0;
+    LexIndexIVARS *const ivars = LexIndex_IVARS(self);
+    ivars->tinfo        = TInfo_new(0);
+    ivars->tick         = 0;
 
     // Derive
-    self->field_type = Schema_Fetch_Type(schema, field);
-    if (!self->field_type) {
+    ivars->field_type = Schema_Fetch_Type(schema, field);
+    if (!ivars->field_type) {
         CharBuf *mess = MAKE_MESS("Unknown field: '%o'", field);
         DECREF(ix_file);
         DECREF(ixix_file);
         DECREF(self);
         Err_throw_mess(ERR, mess);
     }
-    INCREF(self->field_type);
-    self->term_stepper = FType_Make_Term_Stepper(self->field_type);
-    self->ixix_in = Folder_Open_In(folder, ixix_file);
-    if (!self->ixix_in) {
+    INCREF(ivars->field_type);
+    ivars->term_stepper = FType_Make_Term_Stepper(ivars->field_type);
+    ivars->ixix_in = Folder_Open_In(folder, ixix_file);
+    if (!ivars->ixix_in) {
         Err *error = (Err*)INCREF(Err_get_error());
         DECREF(ix_file);
         DECREF(ixix_file);
         DECREF(self);
         RETHROW(error);
     }
-    self->ix_in = Folder_Open_In(folder, ix_file);
-    if (!self->ix_in) {
+    ivars->ix_in = Folder_Open_In(folder, ix_file);
+    if (!ivars->ix_in) {
         Err *error = (Err*)INCREF(Err_get_error());
         DECREF(ix_file);
         DECREF(ixix_file);
         DECREF(self);
         RETHROW(error);
     }
-    self->index_interval = Arch_Index_Interval(arch);
-    self->skip_interval  = Arch_Skip_Interval(arch);
-    self->size    = (int32_t)(InStream_Length(self->ixix_in) / sizeof(int64_t));
-    self->offsets = (int64_t*)InStream_Buf(self->ixix_in,
-                                           (size_t)InStream_Length(self->ixix_in));
+    ivars->index_interval = Arch_Index_Interval(arch);
+    ivars->skip_interval  = Arch_Skip_Interval(arch);
+    ivars->size    = (int32_t)(InStream_Length(ivars->ixix_in) / sizeof(int64_t));
+    ivars->offsets = (int64_t*)InStream_Buf(ivars->ixix_in,
+                                           (size_t)InStream_Length(ivars->ixix_in));
 
     DECREF(ixix_file);
     DECREF(ix_file);
@@ -94,55 +95,60 @@ LexIndex_init(LexIndex *self, Schema *schema, Folder *folder,
 
 void
 LexIndex_destroy(LexIndex *self) {
-    DECREF(self->field_type);
-    DECREF(self->ixix_in);
-    DECREF(self->ix_in);
-    DECREF(self->term_stepper);
-    DECREF(self->tinfo);
+    LexIndexIVARS *const ivars = LexIndex_IVARS(self);
+    DECREF(ivars->field_type);
+    DECREF(ivars->ixix_in);
+    DECREF(ivars->ix_in);
+    DECREF(ivars->term_stepper);
+    DECREF(ivars->tinfo);
     SUPER_DESTROY(self, LEXINDEX);
 }
 
 int32_t
 LexIndex_get_term_num(LexIndex *self) {
-    return (self->index_interval * self->tick) - 1;
+    LexIndexIVARS *const ivars = LexIndex_IVARS(self);
+    return (ivars->index_interval * ivars->tick) - 1;
 }
 
 Obj*
 LexIndex_get_term(LexIndex *self) {
-    return TermStepper_Get_Value(self->term_stepper);
+    LexIndexIVARS *const ivars = LexIndex_IVARS(self);
+    return TermStepper_Get_Value(ivars->term_stepper);
 }
 
 TermInfo*
 LexIndex_get_term_info(LexIndex *self) {
-    return self->tinfo;
+    return LexIndex_IVARS(self)->tinfo;
 }
 
 static void
 S_read_entry(LexIndex *self) {
-    InStream *ix_in  = self->ix_in;
-    TermInfo *tinfo  = self->tinfo;
-    int64_t offset = (int64_t)NumUtil_decode_bigend_u64(self->offsets + self->tick);
+    LexIndexIVARS *const ivars = LexIndex_IVARS(self);
+    InStream *ix_in  = ivars->ix_in;
+    TermInfoIVARS *const tinfo_ivars = TInfo_IVARS(ivars->tinfo);
+    int64_t offset = (int64_t)NumUtil_decode_bigend_u64(ivars->offsets + ivars->tick);
     InStream_Seek(ix_in, offset);
-    TermStepper_Read_Key_Frame(self->term_stepper, ix_in);
-    tinfo->doc_freq     = InStream_Read_C32(ix_in);
-    tinfo->post_filepos = InStream_Read_C64(ix_in);
-    tinfo->skip_filepos = tinfo->doc_freq >= self->skip_interval
+    TermStepper_Read_Key_Frame(ivars->term_stepper, ix_in);
+    tinfo_ivars->doc_freq     = InStream_Read_C32(ix_in);
+    tinfo_ivars->post_filepos = InStream_Read_C64(ix_in);
+    tinfo_ivars->skip_filepos = tinfo_ivars->doc_freq >= ivars->skip_interval
                           ? InStream_Read_C64(ix_in)
                           : 0;
-    tinfo->lex_filepos  = InStream_Read_C64(ix_in);
+    tinfo_ivars->lex_filepos  = InStream_Read_C64(ix_in);
 }
 
 void
 LexIndex_seek(LexIndex *self, Obj *target) {
-    TermStepper *term_stepper = self->term_stepper;
-    InStream    *ix_in        = self->ix_in;
-    FieldType   *type         = self->field_type;
+    LexIndexIVARS *const ivars = LexIndex_IVARS(self);
+    TermStepper *term_stepper = ivars->term_stepper;
+    InStream    *ix_in        = ivars->ix_in;
+    FieldType   *type         = ivars->field_type;
     int32_t      lo           = 0;
-    int32_t      hi           = self->size - 1;
+    int32_t      hi           = ivars->size - 1;
     int32_t      result       = -100;
 
-    if (target == NULL || self->size == 0) {
-        self->tick = 0;
+    if (target == NULL || ivars->size == 0) {
+        ivars->tick = 0;
         return;
     }
     else {
@@ -163,12 +169,12 @@ LexIndex_seek(LexIndex *self, Obj *target) {
     while (hi >= lo) {
         const int32_t mid = lo + ((hi - lo) / 2);
         const int64_t offset
-            = (int64_t)NumUtil_decode_bigend_u64(self->offsets + mid);
+            = (int64_t)NumUtil_decode_bigend_u64(ivars->offsets + mid);
         InStream_Seek(ix_in, offset);
         TermStepper_Read_Key_Frame(term_stepper, ix_in);
 
         // Compare values.  There is no need for a NULL-check because the term
-        // number is alway between 0 and self->size - 1.
+        // number is alway between 0 and ivars->size - 1.
         Obj *value = TermStepper_Get_Value(term_stepper);
         int32_t comparison = FType_Compare_Values(type, target, value);
 
@@ -185,7 +191,7 @@ LexIndex_seek(LexIndex *self, Obj *target) {
     }
 
     // Record the index of the entry we've seeked to, then read entry.
-    self->tick = hi == -1 // indicating that target lt first entry
+    ivars->tick = hi == -1 // indicating that target lt first entry
                  ? 0
                  : result == -100 // if result is still -100, it wasn't set
                  ? hi

http://git-wip-us.apache.org/repos/asf/lucy/blob/965fdb2a/core/Lucy/Index/Lexicon.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/Lexicon.c b/core/Lucy/Index/Lexicon.c
index c26b1e0..d1fd76a 100644
--- a/core/Lucy/Index/Lexicon.c
+++ b/core/Lucy/Index/Lexicon.c
@@ -21,19 +21,21 @@
 
 Lexicon*
 Lex_init(Lexicon *self, const CharBuf *field) {
-    self->field = CB_Clone(field);
+    LexiconIVARS *const ivars = Lex_IVARS(self);
+    ivars->field = CB_Clone(field);
     ABSTRACT_CLASS_CHECK(self, LEXICON);
     return self;
 }
 
 CharBuf*
 Lex_get_field(Lexicon *self) {
-    return self->field;
+    return Lex_IVARS(self)->field;
 }
 
 void
 Lex_destroy(Lexicon *self) {
-    DECREF(self->field);
+    LexiconIVARS *const ivars = Lex_IVARS(self);
+    DECREF(ivars->field);
     SUPER_DESTROY(self, LEXICON);
 }
 

http://git-wip-us.apache.org/repos/asf/lucy/blob/965fdb2a/core/Lucy/Index/LexiconReader.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/LexiconReader.c b/core/Lucy/Index/LexiconReader.c
index 9a82ef6..e08bea2 100644
--- a/core/Lucy/Index/LexiconReader.c
+++ b/core/Lucy/Index/LexiconReader.c
@@ -61,27 +61,30 @@ PolyLexReader_init(PolyLexiconReader *self, VArray *readers,
         if (!schema) { schema = LexReader_Get_Schema(reader); }
     }
     LexReader_init((LexiconReader*)self, schema, NULL, NULL, NULL, -1);
-    self->readers = (VArray*)INCREF(readers);
-    self->offsets = (I32Array*)INCREF(offsets);
+    PolyLexiconReaderIVARS *const ivars = PolyLexReader_IVARS(self);
+    ivars->readers = (VArray*)INCREF(readers);
+    ivars->offsets = (I32Array*)INCREF(offsets);
     return self;
 }
 
 void
 PolyLexReader_close(PolyLexiconReader *self) {
-    if (self->readers) {
-        for (uint32_t i = 0, max = VA_Get_Size(self->readers); i < max; i++) {
+    PolyLexiconReaderIVARS *const ivars = PolyLexReader_IVARS(self);
+    if (ivars->readers) {
+        for (uint32_t i = 0, max = VA_Get_Size(ivars->readers); i < max; i++) {
             LexiconReader *reader
-                = (LexiconReader*)VA_Fetch(self->readers, i);
+                = (LexiconReader*)VA_Fetch(ivars->readers, i);
             if (reader) { LexReader_Close(reader); }
         }
-        VA_Clear(self->readers);
+        VA_Clear(ivars->readers);
     }
 }
 
 void
 PolyLexReader_destroy(PolyLexiconReader *self) {
-    DECREF(self->readers);
-    DECREF(self->offsets);
+    PolyLexiconReaderIVARS *const ivars = PolyLexReader_IVARS(self);
+    DECREF(ivars->readers);
+    DECREF(ivars->offsets);
     SUPER_DESTROY(self, POLYLEXICONREADER);
 }
 
@@ -94,7 +97,8 @@ PolyLexReader_lexicon(PolyLexiconReader *self, const CharBuf *field,
         Schema *schema = PolyLexReader_Get_Schema(self);
         FieldType *type = Schema_Fetch_Type(schema, field);
         if (type != NULL) {
-            lexicon = PolyLex_new(field, self->readers);
+            PolyLexiconReaderIVARS *const ivars = PolyLexReader_IVARS(self);
+            lexicon = PolyLex_new(field, ivars->readers);
             if (!PolyLex_Get_Num_Seg_Lexicons(lexicon)) {
                 DECREF(lexicon);
                 return NULL;
@@ -109,9 +113,10 @@ PolyLexReader_lexicon(PolyLexiconReader *self, const CharBuf *field,
 uint32_t
 PolyLexReader_doc_freq(PolyLexiconReader *self, const CharBuf *field,
                        Obj *term) {
+    PolyLexiconReaderIVARS *const ivars = PolyLexReader_IVARS(self);
     uint32_t doc_freq = 0;
-    for (uint32_t i = 0, max = VA_Get_Size(self->readers); i < max; i++) {
-        LexiconReader *reader = (LexiconReader*)VA_Fetch(self->readers, i);
+    for (uint32_t i = 0, max = VA_Get_Size(ivars->readers); i < max; i++) {
+        LexiconReader *reader = (LexiconReader*)VA_Fetch(ivars->readers, i);
         if (reader) {
             doc_freq += LexReader_Doc_Freq(reader, field, term);
         }
@@ -157,15 +162,16 @@ DefLexReader_init(DefaultLexiconReader *self, Schema *schema, Folder *folder,
     // Init.
     LexReader_init((LexiconReader*)self, schema, folder, snapshot, segments,
                    seg_tick);
+    DefaultLexiconReaderIVARS *const ivars = DefLexReader_IVARS(self);
     Segment *segment = DefLexReader_Get_Segment(self);
 
     // Build an array of SegLexicon objects.
-    self->lexicons = VA_new(Schema_Num_Fields(schema));
+    ivars->lexicons = VA_new(Schema_Num_Fields(schema));
     for (uint32_t i = 1, max = Schema_Num_Fields(schema) + 1; i < max; i++) {
         CharBuf *field = Seg_Field_Name(segment, i);
         if (field && S_has_data(schema, folder, segment, field)) {
             SegLexicon *lexicon = SegLex_new(schema, folder, segment, field);
-            VA_Store(self->lexicons, i, (Obj*)lexicon);
+            VA_Store(ivars->lexicons, i, (Obj*)lexicon);
         }
     }
 
@@ -174,26 +180,29 @@ DefLexReader_init(DefaultLexiconReader *self, Schema *schema, Folder *folder,
 
 void
 DefLexReader_close(DefaultLexiconReader *self) {
-    DECREF(self->lexicons);
-    self->lexicons = NULL;
+    DefaultLexiconReaderIVARS *const ivars = DefLexReader_IVARS(self);
+    DECREF(ivars->lexicons);
+    ivars->lexicons = NULL;
 }
 
 void
 DefLexReader_destroy(DefaultLexiconReader *self) {
-    DECREF(self->lexicons);
+    DefaultLexiconReaderIVARS *const ivars = DefLexReader_IVARS(self);
+    DECREF(ivars->lexicons);
     SUPER_DESTROY(self, DEFAULTLEXICONREADER);
 }
 
 Lexicon*
 DefLexReader_lexicon(DefaultLexiconReader *self, const CharBuf *field,
                      Obj *term) {
-    int32_t     field_num = Seg_Field_Num(self->segment, field);
-    SegLexicon *orig      = (SegLexicon*)VA_Fetch(self->lexicons, field_num);
+    DefaultLexiconReaderIVARS *const ivars = DefLexReader_IVARS(self);
+    int32_t     field_num = Seg_Field_Num(ivars->segment, field);
+    SegLexicon *orig      = (SegLexicon*)VA_Fetch(ivars->lexicons, field_num);
     SegLexicon *lexicon   = NULL;
 
     if (orig) { // i.e. has data
         lexicon
-            = SegLex_new(self->schema, self->folder, self->segment, field);
+            = SegLex_new(ivars->schema, ivars->folder, ivars->segment, field);
         SegLex_Seek(lexicon, term);
     }
 
@@ -202,10 +211,11 @@ DefLexReader_lexicon(DefaultLexiconReader *self, const CharBuf *field,
 
 static TermInfo*
 S_find_tinfo(DefaultLexiconReader *self, const CharBuf *field, Obj *target) {
+    DefaultLexiconReaderIVARS *const ivars = DefLexReader_IVARS(self);
     if (field != NULL && target != NULL) {
-        int32_t field_num = Seg_Field_Num(self->segment, field);
+        int32_t field_num = Seg_Field_Num(ivars->segment, field);
         SegLexicon *lexicon
-            = (SegLexicon*)VA_Fetch(self->lexicons, field_num);
+            = (SegLexicon*)VA_Fetch(ivars->lexicons, field_num);
 
         if (lexicon) {
             // Iterate until the result is ge the term.

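For context, every hunk above applies the same transformation: instead of touching self->field directly, a method fetches its instance-variable block once through the generated accessor (PolyLexReader_IVARS, DefLexReader_IVARS, and so on) and then reads and writes through that local ivars pointer. Below is a minimal, self-contained sketch of the idiom; the Widget type, its fields, and the accessor body are hypothetical stand-ins for illustration, not Lucy or Clownfish source.

    /* ivars_sketch.c -- illustrative only; not code from this commit. */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct WidgetIVARS {
        int count;
        int limit;
    } WidgetIVARS;

    typedef struct Widget {
        WidgetIVARS ivars;              /* embedded here purely for the sketch */
    } Widget;

    /* Stand-in for a generated accessor such as LexWriter_IVARS(). */
    static WidgetIVARS*
    Widget_IVARS(Widget *self) {
        return &self->ivars;
    }

    static void
    Widget_Bump(Widget *self) {
        WidgetIVARS *const ivars = Widget_IVARS(self);  /* fetch once per method */
        if (ivars->count < ivars->limit) { ivars->count++; }
    }

    int
    main(void) {
        Widget *w = (Widget*)calloc(1, sizeof(Widget));
        if (!w) { return 1; }
        Widget_IVARS(w)->limit = 3;
        for (int i = 0; i < 5; i++) { Widget_Bump(w); }
        printf("count = %d\n", Widget_IVARS(w)->count);  /* prints "count = 3" */
        free(w);
        return 0;
    }

The payoff mirrors what the patch does in each method: one accessor call at the top of the function rather than one dereference per field access, and a single struct definition that owns the instance-variable layout.
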
http://git-wip-us.apache.org/repos/asf/lucy/blob/965fdb2a/core/Lucy/Index/LexiconWriter.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/LexiconWriter.c b/core/Lucy/Index/LexiconWriter.c
index 9a65fa3..115771e 100644
--- a/core/Lucy/Index/LexiconWriter.c
+++ b/core/Lucy/Index/LexiconWriter.c
@@ -46,79 +46,85 @@ LexWriter_init(LexiconWriter *self, Schema *schema, Snapshot *snapshot,
     Architecture *arch = Schema_Get_Architecture(schema);
 
     DataWriter_init((DataWriter*)self, schema, snapshot, segment, polyreader);
+    LexiconWriterIVARS *const ivars = LexWriter_IVARS(self);
 
     // Assign.
-    self->index_interval = Arch_Index_Interval(arch);
-    self->skip_interval  = Arch_Skip_Interval(arch);
+    ivars->index_interval = Arch_Index_Interval(arch);
+    ivars->skip_interval  = Arch_Skip_Interval(arch);
 
     // Init.
-    self->ix_out             = NULL;
-    self->ixix_out           = NULL;
-    self->dat_out            = NULL;
-    self->count              = 0;
-    self->ix_count           = 0;
-    self->dat_file           = CB_new(30);
-    self->ix_file            = CB_new(30);
-    self->ixix_file          = CB_new(30);
-    self->counts             = Hash_new(0);
-    self->ix_counts          = Hash_new(0);
-    self->temp_mode          = false;
-    self->term_stepper       = NULL;
-    self->tinfo_stepper      = (TermStepper*)MatchTInfoStepper_new(schema);
+    ivars->ix_out             = NULL;
+    ivars->ixix_out           = NULL;
+    ivars->dat_out            = NULL;
+    ivars->count              = 0;
+    ivars->ix_count           = 0;
+    ivars->dat_file           = CB_new(30);
+    ivars->ix_file            = CB_new(30);
+    ivars->ixix_file          = CB_new(30);
+    ivars->counts             = Hash_new(0);
+    ivars->ix_counts          = Hash_new(0);
+    ivars->temp_mode          = false;
+    ivars->term_stepper       = NULL;
+    ivars->tinfo_stepper      = (TermStepper*)MatchTInfoStepper_new(schema);
 
     return self;
 }
 
 void
 LexWriter_destroy(LexiconWriter *self) {
-    DECREF(self->term_stepper);
-    DECREF(self->tinfo_stepper);
-    DECREF(self->dat_file);
-    DECREF(self->ix_file);
-    DECREF(self->ixix_file);
-    DECREF(self->dat_out);
-    DECREF(self->ix_out);
-    DECREF(self->ixix_out);
-    DECREF(self->counts);
-    DECREF(self->ix_counts);
+    LexiconWriterIVARS *const ivars = LexWriter_IVARS(self);
+    DECREF(ivars->term_stepper);
+    DECREF(ivars->tinfo_stepper);
+    DECREF(ivars->dat_file);
+    DECREF(ivars->ix_file);
+    DECREF(ivars->ixix_file);
+    DECREF(ivars->dat_out);
+    DECREF(ivars->ix_out);
+    DECREF(ivars->ixix_out);
+    DECREF(ivars->counts);
+    DECREF(ivars->ix_counts);
     SUPER_DESTROY(self, LEXICONWRITER);
 }
 
 static void
 S_add_last_term_to_ix(LexiconWriter *self) {
+    LexiconWriterIVARS *const ivars = LexWriter_IVARS(self);
+
     // Write file pointer to index record.
-    OutStream_Write_I64(self->ixix_out, OutStream_Tell(self->ix_out));
+    OutStream_Write_I64(ivars->ixix_out, OutStream_Tell(ivars->ix_out));
 
     // Write term and file pointer to main record.  Track count of terms added
     // to ix.
-    TermStepper_Write_Key_Frame(self->term_stepper,
-                                self->ix_out, TermStepper_Get_Value(self->term_stepper));
-    TermStepper_Write_Key_Frame(self->tinfo_stepper,
-                                self->ix_out, TermStepper_Get_Value(self->tinfo_stepper));
-    OutStream_Write_C64(self->ix_out, OutStream_Tell(self->dat_out));
-    self->ix_count++;
+    TermStepper_Write_Key_Frame(ivars->term_stepper,
+                                ivars->ix_out, TermStepper_Get_Value(ivars->term_stepper));
+    TermStepper_Write_Key_Frame(ivars->tinfo_stepper,
+                                ivars->ix_out, TermStepper_Get_Value(ivars->tinfo_stepper));
+    OutStream_Write_C64(ivars->ix_out, OutStream_Tell(ivars->dat_out));
+    ivars->ix_count++;
 }
 
 void
 LexWriter_add_term(LexiconWriter* self, CharBuf* term_text, TermInfo* tinfo) {
-    OutStream *dat_out = self->dat_out;
+    LexiconWriterIVARS *const ivars = LexWriter_IVARS(self);
+    OutStream *dat_out = ivars->dat_out;
 
-    if ((self->count % self->index_interval == 0)
-        && !self->temp_mode
+    if ((ivars->count % ivars->index_interval == 0)
+        && !ivars->temp_mode
        ) {
         // Write a subset of entries to lexicon.ix.
         S_add_last_term_to_ix(self);
     }
 
-    TermStepper_Write_Delta(self->term_stepper, dat_out, (Obj*)term_text);
-    TermStepper_Write_Delta(self->tinfo_stepper, dat_out, (Obj*)tinfo);
+    TermStepper_Write_Delta(ivars->term_stepper, dat_out, (Obj*)term_text);
+    TermStepper_Write_Delta(ivars->tinfo_stepper, dat_out, (Obj*)tinfo);
 
     // Track number of terms.
-    self->count++;
+    ivars->count++;
 }
 
 void
 LexWriter_start_field(LexiconWriter *self, int32_t field_num) {
+    LexiconWriterIVARS *const ivars = LexWriter_IVARS(self);
     Segment   *const segment  = LexWriter_Get_Segment(self);
     Folder    *const folder   = LexWriter_Get_Folder(self);
     Schema    *const schema   = LexWriter_Get_Schema(self);
@@ -127,104 +133,110 @@ LexWriter_start_field(LexiconWriter *self, int32_t field_num) {
     FieldType *const type     = Schema_Fetch_Type(schema, field);
 
     // Open outstreams.
-    CB_setf(self->dat_file,  "%o/lexicon-%i32.dat",  seg_name, field_num);
-    CB_setf(self->ix_file,   "%o/lexicon-%i32.ix",   seg_name, field_num);
-    CB_setf(self->ixix_file, "%o/lexicon-%i32.ixix", seg_name, field_num);
-    self->dat_out = Folder_Open_Out(folder, self->dat_file);
-    if (!self->dat_out) { RETHROW(INCREF(Err_get_error())); }
-    self->ix_out = Folder_Open_Out(folder, self->ix_file);
-    if (!self->ix_out) { RETHROW(INCREF(Err_get_error())); }
-    self->ixix_out = Folder_Open_Out(folder, self->ixix_file);
-    if (!self->ixix_out) { RETHROW(INCREF(Err_get_error())); }
+    CB_setf(ivars->dat_file,  "%o/lexicon-%i32.dat",  seg_name, field_num);
+    CB_setf(ivars->ix_file,   "%o/lexicon-%i32.ix",   seg_name, field_num);
+    CB_setf(ivars->ixix_file, "%o/lexicon-%i32.ixix", seg_name, field_num);
+    ivars->dat_out = Folder_Open_Out(folder, ivars->dat_file);
+    if (!ivars->dat_out) { RETHROW(INCREF(Err_get_error())); }
+    ivars->ix_out = Folder_Open_Out(folder, ivars->ix_file);
+    if (!ivars->ix_out) { RETHROW(INCREF(Err_get_error())); }
+    ivars->ixix_out = Folder_Open_Out(folder, ivars->ixix_file);
+    if (!ivars->ixix_out) { RETHROW(INCREF(Err_get_error())); }
 
     // Initialize count and ix_count, term stepper and term info stepper.
-    self->count    = 0;
-    self->ix_count = 0;
-    self->term_stepper = FType_Make_Term_Stepper(type);
-    TermStepper_Reset(self->tinfo_stepper);
+    ivars->count    = 0;
+    ivars->ix_count = 0;
+    ivars->term_stepper = FType_Make_Term_Stepper(type);
+    TermStepper_Reset(ivars->tinfo_stepper);
 }
 
 void
 LexWriter_finish_field(LexiconWriter *self, int32_t field_num) {
-    CharBuf *field = Seg_Field_Name(self->segment, field_num);
+    LexiconWriterIVARS *const ivars = LexWriter_IVARS(self);
+    CharBuf *field = Seg_Field_Name(ivars->segment, field_num);
 
     // Store count of terms for this field as metadata.
-    Hash_Store(self->counts, (Obj*)field,
-               (Obj*)CB_newf("%i32", self->count));
-    Hash_Store(self->ix_counts, (Obj*)field,
-               (Obj*)CB_newf("%i32", self->ix_count));
+    Hash_Store(ivars->counts, (Obj*)field,
+               (Obj*)CB_newf("%i32", ivars->count));
+    Hash_Store(ivars->ix_counts, (Obj*)field,
+               (Obj*)CB_newf("%i32", ivars->ix_count));
 
     // Close streams.
-    OutStream_Close(self->dat_out);
-    OutStream_Close(self->ix_out);
-    OutStream_Close(self->ixix_out);
-    DECREF(self->dat_out);
-    DECREF(self->ix_out);
-    DECREF(self->ixix_out);
-    self->dat_out  = NULL;
-    self->ix_out   = NULL;
-    self->ixix_out = NULL;
+    OutStream_Close(ivars->dat_out);
+    OutStream_Close(ivars->ix_out);
+    OutStream_Close(ivars->ixix_out);
+    DECREF(ivars->dat_out);
+    DECREF(ivars->ix_out);
+    DECREF(ivars->ixix_out);
+    ivars->dat_out  = NULL;
+    ivars->ix_out   = NULL;
+    ivars->ixix_out = NULL;
 
     // Close term stepper.
-    DECREF(self->term_stepper);
-    self->term_stepper = NULL;
+    DECREF(ivars->term_stepper);
+    ivars->term_stepper = NULL;
 }
 
 void
 LexWriter_enter_temp_mode(LexiconWriter *self, const CharBuf *field,
                           OutStream *temp_outstream) {
+    LexiconWriterIVARS *const ivars = LexWriter_IVARS(self);
     Schema    *schema = LexWriter_Get_Schema(self);
     FieldType *type   = Schema_Fetch_Type(schema, field);
 
     // Assign outstream.
-    if (self->dat_out != NULL) {
-        THROW(ERR, "Can't enter temp mode (filename: %o) ", self->dat_file);
+    if (ivars->dat_out != NULL) {
+        THROW(ERR, "Can't enter temp mode (filename: %o) ", ivars->dat_file);
     }
-    self->dat_out = (OutStream*)INCREF(temp_outstream);
+    ivars->dat_out = (OutStream*)INCREF(temp_outstream);
 
     // Initialize count and ix_count, term stepper and term info stepper.
-    self->count    = 0;
-    self->ix_count = 0;
-    self->term_stepper = FType_Make_Term_Stepper(type);
-    TermStepper_Reset(self->tinfo_stepper);
+    ivars->count    = 0;
+    ivars->ix_count = 0;
+    ivars->term_stepper = FType_Make_Term_Stepper(type);
+    TermStepper_Reset(ivars->tinfo_stepper);
 
     // Remember that we're in temp mode.
-    self->temp_mode = true;
+    ivars->temp_mode = true;
 }
 
 void
 LexWriter_leave_temp_mode(LexiconWriter *self) {
-    DECREF(self->term_stepper);
-    self->term_stepper = NULL;
-    DECREF(self->dat_out);
-    self->dat_out   = NULL;
-    self->temp_mode = false;
+    LexiconWriterIVARS *const ivars = LexWriter_IVARS(self);
+    DECREF(ivars->term_stepper);
+    ivars->term_stepper = NULL;
+    DECREF(ivars->dat_out);
+    ivars->dat_out   = NULL;
+    ivars->temp_mode = false;
 }
 
 void
 LexWriter_finish(LexiconWriter *self) {
+    LexiconWriterIVARS *const ivars = LexWriter_IVARS(self);
+
     // Ensure that streams were closed (by calling Finish_Field or
     // Leave_Temp_Mode).
-    if (self->dat_out != NULL) {
-        THROW(ERR, "File '%o' never closed", self->dat_file);
+    if (ivars->dat_out != NULL) {
+        THROW(ERR, "File '%o' never closed", ivars->dat_file);
     }
-    else if (self->ix_out != NULL) {
-        THROW(ERR, "File '%o' never closed", self->ix_file);
+    else if (ivars->ix_out != NULL) {
+        THROW(ERR, "File '%o' never closed", ivars->ix_file);
     }
-    else if (self->ix_out != NULL) {
-        THROW(ERR, "File '%o' never closed", self->ix_file);
+    else if (ivars->ixix_out != NULL) {
+        THROW(ERR, "File '%o' never closed", ivars->ixix_file);
     }
 
     // Store metadata.
-    Seg_Store_Metadata_Str(self->segment, "lexicon", 7,
+    Seg_Store_Metadata_Str(ivars->segment, "lexicon", 7,
                            (Obj*)LexWriter_Metadata(self));
 }
 
 Hash*
 LexWriter_metadata(LexiconWriter *self) {
+    LexiconWriterIVARS *const ivars = LexWriter_IVARS(self);
     Hash *const metadata  = DataWriter_metadata((DataWriter*)self);
-    Hash *const counts    = (Hash*)INCREF(self->counts);
-    Hash *const ix_counts = (Hash*)INCREF(self->ix_counts);
+    Hash *const counts    = (Hash*)INCREF(ivars->counts);
+    Hash *const ix_counts = (Hash*)INCREF(ivars->ix_counts);
 
     // Placeholders.
     if (Hash_Get_Size(counts) == 0) {

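The unchanged logic in LexWriter_Add_Term is worth keeping in mind when reading the hunks above: every index_interval-th term (and only outside temp mode) also gets an entry in lexicon.ix, so lookups can seek within that sparse sample instead of scanning the full .dat stream. The sketch below shows only that sampling rhythm, with made-up terms and a made-up interval; it simplifies by recording the current term, whereas the real writer records a key frame for the previously written term via S_add_last_term_to_ix.

    /* ix_sampling_sketch.c -- illustrative only; data and interval are made up. */
    #include <stdio.h>

    int
    main(void) {
        const char *terms[] = {"apple", "bear", "cedar", "delta",
                               "ember", "frost", "grape", "honey"};
        const int num_terms      = 8;
        const int index_interval = 3;            /* stand-in for Arch_Index_Interval */

        for (int count = 0; count < num_terms; count++) {
            if (count % index_interval == 0) {
                /* In the real writer, a key frame plus a file pointer
                 * goes into lexicon.ix here. */
                printf("ix  <- %s\n", terms[count]);
            }
            printf("dat <- %s\n", terms[count]);  /* every term goes to .dat */
        }
        return 0;
    }
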
http://git-wip-us.apache.org/repos/asf/lucy/blob/965fdb2a/core/Lucy/Index/PolyLexicon.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/PolyLexicon.c b/core/Lucy/Index/PolyLexicon.c
index 52553a2..713fda2 100644
--- a/core/Lucy/Index/PolyLexicon.c
+++ b/core/Lucy/Index/PolyLexicon.c
@@ -41,8 +41,9 @@ PolyLex_init(PolyLexicon *self, const CharBuf *field, VArray *sub_readers) {
 
     // Init.
     Lex_init((Lexicon*)self, field);
-    self->term            = NULL;
-    self->lex_q           = SegLexQ_new(num_sub_readers);
+    PolyLexiconIVARS *const ivars = PolyLex_IVARS(self);
+    ivars->term            = NULL;
+    ivars->lex_q           = SegLexQ_new(num_sub_readers);
 
     // Derive.
     for (uint32_t i = 0; i < num_sub_readers; i++) {
@@ -54,7 +55,7 @@ PolyLex_init(PolyLexicon *self, const CharBuf *field, VArray *sub_readers) {
             }
         }
     }
-    self->seg_lexicons  = seg_lexicons;
+    ivars->seg_lexicons  = seg_lexicons;
 
     PolyLex_Reset(self);
 
@@ -63,9 +64,10 @@ PolyLex_init(PolyLexicon *self, const CharBuf *field, VArray *sub_readers) {
 
 void
 PolyLex_destroy(PolyLexicon *self) {
-    DECREF(self->seg_lexicons);
-    DECREF(self->lex_q);
-    DECREF(self->term);
+    PolyLexiconIVARS *const ivars = PolyLex_IVARS(self);
+    DECREF(ivars->seg_lexicons);
+    DECREF(ivars->lex_q);
+    DECREF(ivars->term);
     SUPER_DESTROY(self, POLYLEXICON);
 }
 
@@ -91,9 +93,10 @@ S_refresh_lex_q(SegLexQueue *lex_q, VArray *seg_lexicons, Obj *target) {
 
 void
 PolyLex_reset(PolyLexicon *self) {
-    VArray *seg_lexicons = self->seg_lexicons;
+    PolyLexiconIVARS *const ivars = PolyLex_IVARS(self);
+    VArray *seg_lexicons = ivars->seg_lexicons;
     uint32_t num_segs = VA_Get_Size(seg_lexicons);
-    SegLexQueue *lex_q = self->lex_q;
+    SegLexQueue *lex_q = ivars->lex_q;
 
     // Empty out the queue.
     while (1) {
@@ -108,30 +111,31 @@ PolyLex_reset(PolyLexicon *self) {
             = (SegLexicon*)VA_Fetch(seg_lexicons, i);
         SegLex_Reset(seg_lexicon);
         if (SegLex_Next(seg_lexicon)) {
-            SegLexQ_Insert(self->lex_q, INCREF(seg_lexicon));
+            SegLexQ_Insert(ivars->lex_q, INCREF(seg_lexicon));
         }
     }
 
-    if (self->term != NULL) {
-        DECREF(self->term);
-        self->term = NULL;
+    if (ivars->term != NULL) {
+        DECREF(ivars->term);
+        ivars->term = NULL;
     }
 }
 
 bool
 PolyLex_next(PolyLexicon *self) {
-    SegLexQueue *lex_q = self->lex_q;
+    PolyLexiconIVARS *const ivars = PolyLex_IVARS(self);
+    SegLexQueue *lex_q = ivars->lex_q;
     SegLexicon *top_seg_lexicon = (SegLexicon*)SegLexQ_Peek(lex_q);
 
     // Churn through queue items with equal terms.
     while (top_seg_lexicon != NULL) {
         Obj *const candidate = SegLex_Get_Term(top_seg_lexicon);
-        if ((candidate && !self->term)
-            || Obj_Compare_To(self->term, candidate) != 0
+        if ((candidate && !ivars->term)
+            || Obj_Compare_To(ivars->term, candidate) != 0
            ) {
             // Succeed if the next item in the queue has a different term.
-            DECREF(self->term);
-            self->term = Obj_Clone(candidate);
+            DECREF(ivars->term);
+            ivars->term = Obj_Clone(candidate);
             return true;
         }
         else {
@@ -145,15 +149,16 @@ PolyLex_next(PolyLexicon *self) {
     }
 
     // If queue is empty, iterator is finished.
-    DECREF(self->term);
-    self->term = NULL;
+    DECREF(ivars->term);
+    ivars->term = NULL;
     return false;
 }
 
 void
 PolyLex_seek(PolyLexicon *self, Obj *target) {
-    VArray *seg_lexicons = self->seg_lexicons;
-    SegLexQueue *lex_q = self->lex_q;
+    PolyLexiconIVARS *const ivars = PolyLex_IVARS(self);
+    VArray *seg_lexicons = ivars->seg_lexicons;
+    SegLexQueue *lex_q = ivars->lex_q;
 
     if (target == NULL) {
         PolyLex_Reset(self);
@@ -163,17 +168,17 @@ PolyLex_seek(PolyLexicon *self, Obj *target) {
     // Refresh the queue, set vars.
     S_refresh_lex_q(lex_q, seg_lexicons, target);
     SegLexicon *least = (SegLexicon*)SegLexQ_Peek(lex_q);
-    DECREF(self->term);
-    self->term = NULL;
+    DECREF(ivars->term);
+    ivars->term = NULL;
     if (least) {
         Obj *least_term = SegLex_Get_Term(least);
-        self->term = least_term ? Obj_Clone(least_term) : NULL;
+        ivars->term = least_term ? Obj_Clone(least_term) : NULL;
     }
 
     // Scan up to the real target.
     do {
-        if (self->term) {
-            const int32_t comparison = Obj_Compare_To(self->term, target);
+        if (ivars->term) {
+            const int32_t comparison = Obj_Compare_To(ivars->term, target);
             if (comparison >= 0) { break; }
         }
     } while (PolyLex_Next(self));
@@ -181,12 +186,13 @@ PolyLex_seek(PolyLexicon *self, Obj *target) {
 
 Obj*
 PolyLex_get_term(PolyLexicon *self) {
-    return self->term;
+    return PolyLex_IVARS(self)->term;
 }
 
 uint32_t
 PolyLex_get_num_seg_lexicons(PolyLexicon *self) {
-    return VA_Get_Size(self->seg_lexicons);
+    PolyLexiconIVARS *const ivars = PolyLex_IVARS(self);
+    return VA_Get_Size(ivars->seg_lexicons);
 }
 
 SegLexQueue*

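The PolyLexicon changes above keep the existing control flow: Reset() seeds a queue with one SegLexicon per segment, and Next() pops queue entries whose term equals the last term returned, so a term that appears in several segments surfaces only once. As a rough analogy only (integers standing in for terms, sorted arrays for segment lexicons, and a linear scan for the SegLexQueue), the duplicate-skipping merge looks like the sketch below; it is an illustration, not code from this commit.

    /* merge_sketch.c -- analogy for the duplicate-skipping merge; not Lucy code. */
    #include <stdio.h>

    #define NUM_SEGS 3

    static const int  seg0[] = {1, 3, 5};
    static const int  seg1[] = {3, 4};
    static const int  seg2[] = {1, 5, 6};
    static const int *segs[NUM_SEGS]  = {seg0, seg1, seg2};
    static const int  sizes[NUM_SEGS] = {3, 2, 3};
    static int        pos[NUM_SEGS]   = {0, 0, 0};

    /* Stand-in for SegLexQ_Peek(): index of the segment whose current head
     * is smallest, or -1 when every segment is exhausted. */
    static int
    peek(void) {
        int best = -1;
        for (int i = 0; i < NUM_SEGS; i++) {
            if (pos[i] < sizes[i]
                && (best < 0 || segs[i][pos[i]] < segs[best][pos[best]])) {
                best = i;
            }
        }
        return best;
    }

    int
    main(void) {
        int have_last = 0, last = 0;
        for (int i = peek(); i >= 0; i = peek()) {
            int candidate = segs[i][pos[i]];
            pos[i]++;                         /* pop and advance, as Next() does */
            if (!have_last || candidate != last) {
                have_last = 1;
                last = candidate;
                printf("%d ", candidate);     /* prints "1 3 4 5 6" */
            }                                 /* equal heads are skipped */
        }
        printf("\n");
        return 0;
    }
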
http://git-wip-us.apache.org/repos/asf/lucy/blob/965fdb2a/core/Lucy/Index/PolyReader.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/PolyReader.c b/core/Lucy/Index/PolyReader.c
index ba64e16..92f198b 100644
--- a/core/Lucy/Index/PolyReader.c
+++ b/core/Lucy/Index/PolyReader.c
@@ -100,23 +100,24 @@ S_first_non_null(VArray *array) {
 
 static void
 S_init_sub_readers(PolyReader *self, VArray *sub_readers) {
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
     uint32_t  num_sub_readers = VA_Get_Size(sub_readers);
     int32_t *starts = (int32_t*)MALLOCATE(num_sub_readers * sizeof(int32_t));
     Hash  *data_readers = Hash_new(0);
 
-    DECREF(self->sub_readers);
-    DECREF(self->offsets);
-    self->sub_readers       = (VArray*)INCREF(sub_readers);
+    DECREF(ivars->sub_readers);
+    DECREF(ivars->offsets);
+    ivars->sub_readers       = (VArray*)INCREF(sub_readers);
 
     // Accumulate doc_max, subreader start offsets, and DataReaders.
-    self->doc_max = 0;
+    ivars->doc_max = 0;
     for (uint32_t i = 0; i < num_sub_readers; i++) {
         SegReader *seg_reader = (SegReader*)VA_Fetch(sub_readers, i);
         Hash *components = SegReader_Get_Components(seg_reader);
         CharBuf *api;
         DataReader *component;
-        starts[i] = self->doc_max;
-        self->doc_max += SegReader_Doc_Max(seg_reader);
+        starts[i] = ivars->doc_max;
+        ivars->doc_max += SegReader_Doc_Max(seg_reader);
         Hash_Iterate(components);
         while (Hash_Next(components, (Obj**)&api, (Obj**)&component)) {
             VArray *readers = (VArray*)Hash_Fetch(data_readers, (Obj*)api);
@@ -127,7 +128,7 @@ S_init_sub_readers(PolyReader *self, VArray *sub_readers) {
             VA_Store(readers, i, INCREF(component));
         }
     }
-    self->offsets = I32Arr_new_steal(starts, num_sub_readers);
+    ivars->offsets = I32Arr_new_steal(starts, num_sub_readers);
 
     CharBuf *api;
     VArray  *readers;
@@ -136,26 +137,27 @@ S_init_sub_readers(PolyReader *self, VArray *sub_readers) {
         DataReader *datareader
             = (DataReader*)CERTIFY(S_first_non_null(readers), DATAREADER);
         DataReader *aggregator
-            = DataReader_Aggregator(datareader, readers, self->offsets);
+            = DataReader_Aggregator(datareader, readers, ivars->offsets);
         if (aggregator) {
             CERTIFY(aggregator, DATAREADER);
-            Hash_Store(self->components, (Obj*)api, (Obj*)aggregator);
+            Hash_Store(ivars->components, (Obj*)api, (Obj*)aggregator);
         }
     }
     DECREF(data_readers);
 
     DeletionsReader *del_reader
         = (DeletionsReader*)Hash_Fetch(
-              self->components, (Obj*)VTable_Get_Name(DELETIONSREADER));
-    self->del_count = del_reader ? DelReader_Del_Count(del_reader) : 0;
+              ivars->components, (Obj*)VTable_Get_Name(DELETIONSREADER));
+    ivars->del_count = del_reader ? DelReader_Del_Count(del_reader) : 0;
 }
 
 PolyReader*
 PolyReader_init(PolyReader *self, Schema *schema, Folder *folder,
                 Snapshot *snapshot, IndexManager *manager,
                 VArray *sub_readers) {
-    self->doc_max    = 0;
-    self->del_count  = 0;
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
+    ivars->doc_max    = 0;
+    ivars->del_count  = 0;
 
     if (sub_readers) {
         uint32_t num_segs = VA_Get_Size(sub_readers);
@@ -173,8 +175,8 @@ PolyReader_init(PolyReader *self, Schema *schema, Folder *folder,
     else {
         IxReader_init((IndexReader*)self, schema, folder, snapshot,
                       NULL, -1, manager);
-        self->sub_readers = VA_new(0);
-        self->offsets = I32Arr_new_steal(NULL, 0);
+        ivars->sub_readers = VA_new(0);
+        ivars->offsets = I32Arr_new_steal(NULL, 0);
     }
 
     return self;
@@ -182,10 +184,11 @@ PolyReader_init(PolyReader *self, Schema *schema, Folder *folder,
 
 void
 PolyReader_close(PolyReader *self) {
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
     PolyReader_Close_t super_close
         = SUPER_METHOD_PTR(POLYREADER, Lucy_PolyReader_Close);
-    for (uint32_t i = 0, max = VA_Get_Size(self->sub_readers); i < max; i++) {
-        SegReader *seg_reader = (SegReader*)VA_Fetch(self->sub_readers, i);
+    for (uint32_t i = 0, max = VA_Get_Size(ivars->sub_readers); i < max; i++) {
+        SegReader *seg_reader = (SegReader*)VA_Fetch(ivars->sub_readers, i);
         SegReader_Close(seg_reader);
     }
     super_close(self);
@@ -193,8 +196,9 @@ PolyReader_close(PolyReader *self) {
 
 void
 PolyReader_destroy(PolyReader *self) {
-    DECREF(self->sub_readers);
-    DECREF(self->offsets);
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
+    DECREF(ivars->sub_readers);
+    DECREF(ivars->offsets);
     SUPER_DESTROY(self, POLYREADER);
 }
 
@@ -218,7 +222,8 @@ S_try_open_elements(void *context) {
     struct try_open_elements_context *args
         = (struct try_open_elements_context*)context;
     PolyReader *self              = args->self;
-    VArray     *files             = Snapshot_List(self->snapshot);
+    PolyReaderIVARS *const ivars  = PolyReader_IVARS(self);
+    VArray     *files             = Snapshot_List(ivars->snapshot);
     Folder     *folder            = PolyReader_Get_Folder(self);
     uint32_t    num_segs          = 0;
     uint64_t    latest_schema_gen = 0;
@@ -251,8 +256,8 @@ S_try_open_elements(void *context) {
     else {
         Hash *dump = (Hash*)Json_slurp_json(folder, schema_file);
         if (dump) { // read file successfully
-            DECREF(self->schema);
-            self->schema = (Schema*)CERTIFY(
+            DECREF(ivars->schema);
+            ivars->schema = (Schema*)CERTIFY(
                                VTable_Load_Obj(SCHEMA, (Obj*)dump), SCHEMA);
             DECREF(dump);
             DECREF(schema_file);
@@ -329,6 +334,7 @@ int32_t  PolyReader_debug1_num_passes     = 0;
 PolyReader*
 PolyReader_do_open(PolyReader *self, Obj *index, Snapshot *snapshot,
                    IndexManager *manager) {
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
     Folder   *folder   = S_derive_folder(index);
     uint64_t  last_gen = 0;
 
@@ -391,7 +397,7 @@ PolyReader_do_open(PolyReader *self, Obj *index, Snapshot *snapshot,
         // If that's not the case, we must read the file we just picked.
         if (!snapshot) {
             struct try_read_snapshot_context context;
-            context.snapshot = self->snapshot;
+            context.snapshot = ivars->snapshot;
             context.folder   = folder;
             context.path     = target_snap_file;
             Err *error = Err_trap(S_try_read_snapshot, &context);
@@ -463,11 +469,12 @@ S_derive_folder(Obj *index) {
 
 static bool 
 S_obtain_deletion_lock(PolyReader *self) {
-    self->deletion_lock = IxManager_Make_Deletion_Lock(self->manager);
-    Lock_Clear_Stale(self->deletion_lock);
-    if (!Lock_Obtain(self->deletion_lock)) {
-        DECREF(self->deletion_lock);
-        self->deletion_lock = NULL;
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
+    ivars->deletion_lock = IxManager_Make_Deletion_Lock(ivars->manager);
+    Lock_Clear_Stale(ivars->deletion_lock);
+    if (!Lock_Obtain(ivars->deletion_lock)) {
+        DECREF(ivars->deletion_lock);
+        ivars->deletion_lock = NULL;
         return false;
     }
     return true;
@@ -475,13 +482,14 @@ S_obtain_deletion_lock(PolyReader *self) {
 
 static bool
 S_obtain_read_lock(PolyReader *self, const CharBuf *snapshot_file_name) {
-    self->read_lock = IxManager_Make_Snapshot_Read_Lock(self->manager,
-                                                        snapshot_file_name);
-
-    Lock_Clear_Stale(self->read_lock);
-    if (!Lock_Obtain(self->read_lock)) {
-        DECREF(self->read_lock);
-        self->read_lock = NULL;
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
+    ivars->read_lock = IxManager_Make_Snapshot_Read_Lock(ivars->manager,
+                                                         snapshot_file_name);
+
+    Lock_Clear_Stale(ivars->read_lock);
+    if (!Lock_Obtain(ivars->read_lock)) {
+        DECREF(ivars->read_lock);
+        ivars->read_lock = NULL;
         return false;
     }
     return true;
@@ -489,50 +497,55 @@ S_obtain_read_lock(PolyReader *self, const CharBuf *snapshot_file_name) {
 
 static void
 S_release_read_lock(PolyReader *self) {
-    if (self->read_lock) {
-        Lock_Release(self->read_lock);
-        DECREF(self->read_lock);
-        self->read_lock = NULL;
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
+    if (ivars->read_lock) {
+        Lock_Release(ivars->read_lock);
+        DECREF(ivars->read_lock);
+        ivars->read_lock = NULL;
     }
 }
 
 static void
 S_release_deletion_lock(PolyReader *self) {
-    if (self->deletion_lock) {
-        Lock_Release(self->deletion_lock);
-        DECREF(self->deletion_lock);
-        self->deletion_lock = NULL;
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
+    if (ivars->deletion_lock) {
+        Lock_Release(ivars->deletion_lock);
+        DECREF(ivars->deletion_lock);
+        ivars->deletion_lock = NULL;
     }
 }
 
 int32_t
 PolyReader_doc_max(PolyReader *self) {
-    return self->doc_max;
+    return PolyReader_IVARS(self)->doc_max;
 }
 
 int32_t
 PolyReader_doc_count(PolyReader *self) {
-    return self->doc_max - self->del_count;
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
+    return ivars->doc_max - ivars->del_count;
 }
 
 int32_t
 PolyReader_del_count(PolyReader *self) {
-    return self->del_count;
+    return PolyReader_IVARS(self)->del_count;
 }
 
 I32Array*
 PolyReader_offsets(PolyReader *self) {
-    return (I32Array*)INCREF(self->offsets);
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
+    return (I32Array*)INCREF(ivars->offsets);
 }
 
 VArray*
 PolyReader_seg_readers(PolyReader *self) {
-    return (VArray*)VA_Shallow_Copy(self->sub_readers);
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
+    return (VArray*)VA_Shallow_Copy(ivars->sub_readers);
 }
 
 VArray*
 PolyReader_get_seg_readers(PolyReader *self) {
-    return self->sub_readers;
+    return PolyReader_IVARS(self)->sub_readers;
 }
 
 uint32_t

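The PolyReader hunks preserve the same doc-numbering bookkeeping as before: S_init_sub_readers records, for each sub-reader, the running document total seen so far (starts[i]) and then bumps doc_max by that segment's own count, so a segment-local doc id maps into the PolyReader's space by adding its segment's offset. A small sketch of that arithmetic, with made-up segment sizes:

    /* offsets_sketch.c -- illustrative only; segment sizes are invented. */
    #include <stdio.h>

    int
    main(void) {
        const int num_segs      = 3;
        const int seg_doc_max[] = {10, 4, 7};   /* docs per segment */
        int starts[3];
        int doc_max = 0;

        for (int i = 0; i < num_segs; i++) {
            starts[i] = doc_max;                /* offset of this segment */
            doc_max  += seg_doc_max[i];
        }
        /* starts == {0, 10, 14}, doc_max == 21 */

        /* Mapping a segment-local doc id into the aggregate space: */
        int seg_tick = 2, local_doc_id = 3;
        printf("aggregate doc id = %d\n", starts[seg_tick] + local_doc_id);  /* 17 */
        return 0;
    }
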
http://git-wip-us.apache.org/repos/asf/lucy/blob/965fdb2a/core/Lucy/Index/Posting.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/Posting.c b/core/Lucy/Index/Posting.c
index 36cb1e9..98ca0f8 100644
--- a/core/Lucy/Index/Posting.c
+++ b/core/Lucy/Index/Posting.c
@@ -30,18 +30,19 @@
 
 Posting*
 Post_init(Posting *self) {
-    self->doc_id = 0;
+    PostingIVARS *const ivars = Post_IVARS(self);
+    ivars->doc_id = 0;
     return self;
 }
 
 void
 Post_set_doc_id(Posting *self, int32_t doc_id) {
-    self->doc_id = doc_id;
+    Post_IVARS(self)->doc_id = doc_id;
 }
 
 int32_t
 Post_get_doc_id(Posting *self) {
-    return self->doc_id;
+    return Post_IVARS(self)->doc_id;
 }
 
 PostingWriter*
@@ -49,7 +50,8 @@ PostWriter_init(PostingWriter *self, Schema *schema, Snapshot *snapshot,
                 Segment *segment, PolyReader *polyreader, int32_t field_num) {
     DataWriter_init((DataWriter*)self, schema, snapshot, segment,
                     polyreader);
-    self->field_num = field_num;
+    PostingWriterIVARS *const ivars = PostWriter_IVARS(self);
+    ivars->field_num = field_num;
     return self;
 }
 

http://git-wip-us.apache.org/repos/asf/lucy/blob/965fdb2a/core/Lucy/Index/PostingListReader.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/PostingListReader.c b/core/Lucy/Index/PostingListReader.c
index c2d3a49..5cb018e 100644
--- a/core/Lucy/Index/PostingListReader.c
+++ b/core/Lucy/Index/PostingListReader.c
@@ -64,10 +64,11 @@ DefPListReader_init(DefaultPostingListReader *self, Schema *schema,
                     int32_t seg_tick, LexiconReader *lex_reader) {
     PListReader_init((PostingListReader*)self, schema, folder, snapshot,
                      segments, seg_tick);
+    DefaultPostingListReaderIVARS *const ivars = DefPListReader_IVARS(self);
     Segment *segment = DefPListReader_Get_Segment(self);
 
     // Derive.
-    self->lex_reader = (LexiconReader*)INCREF(lex_reader);
+    ivars->lex_reader = (LexiconReader*)INCREF(lex_reader);
 
     // Check format.
     Hash *my_meta = (Hash*)Seg_Fetch_Metadata_Str(segment, "postings", 8);
@@ -91,23 +92,26 @@ DefPListReader_init(DefaultPostingListReader *self, Schema *schema,
 
 void
 DefPListReader_close(DefaultPostingListReader *self) {
-    if (self->lex_reader) {
-        LexReader_Close(self->lex_reader);
-        DECREF(self->lex_reader);
-        self->lex_reader = NULL;
+    DefaultPostingListReaderIVARS *const ivars = DefPListReader_IVARS(self);
+    if (ivars->lex_reader) {
+        LexReader_Close(ivars->lex_reader);
+        DECREF(ivars->lex_reader);
+        ivars->lex_reader = NULL;
     }
 }
 
 void
 DefPListReader_destroy(DefaultPostingListReader *self) {
-    DECREF(self->lex_reader);
+    DefaultPostingListReaderIVARS *const ivars = DefPListReader_IVARS(self);
+    DECREF(ivars->lex_reader);
     SUPER_DESTROY(self, DEFAULTPOSTINGLISTREADER);
 }
 
 SegPostingList*
 DefPListReader_posting_list(DefaultPostingListReader *self,
                             const CharBuf *field, Obj *target) {
-    FieldType *type = Schema_Fetch_Type(self->schema, field);
+    DefaultPostingListReaderIVARS *const ivars = DefPListReader_IVARS(self);
+    FieldType *type = Schema_Fetch_Type(ivars->schema, field);
 
     // Only return an object if we've got an indexed field.
     if (type != NULL && FType_Indexed(type)) {
@@ -122,6 +126,6 @@ DefPListReader_posting_list(DefaultPostingListReader *self,
 
 LexiconReader*
 DefPListReader_get_lex_reader(DefaultPostingListReader *self) {
-    return self->lex_reader;
+    return DefPListReader_IVARS(self)->lex_reader;
 }