Posted to commits@lucy.apache.org by ma...@apache.org on 2013/07/01 18:47:22 UTC

[lucy-commits] [1/9] Migrate Lucy's index classes to IVARS.

Updated Branches:
  refs/heads/ivars-wip1 445409a7b -> 6164cddec

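The patch below applies one mechanical change throughout Lucy's index classes: direct struct
member access such as self->ord_in becomes a lookup through a per-class accessor, e.g.
NumSortCache_IVARS(self)->ord_in, so that instance variables can eventually be hidden behind the
generated IVARS struct. The following self-contained C sketch only models the shape of that change;
the names (Counter, CounterIVARS, Counter_IVARS) are hypothetical, and the accessor that Clownfish
actually generates does not have to be a stored pointer as it is here.

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical class, not Lucy/Clownfish code.  Instance variables live
     * in a separate IVARS struct and are reached through an accessor
     * function instead of being read off the object directly. */
    typedef struct CounterIVARS {
        int count;
    } CounterIVARS;

    typedef struct Counter {
        CounterIVARS *ivars;   /* the generated accessor may resolve this differently */
    } Counter;

    /* Accessor in the style of NumSortCache_IVARS(self). */
    static CounterIVARS*
    Counter_IVARS(Counter *self) {
        return self->ivars;
    }

    static Counter*
    Counter_new(void) {
        Counter *self = malloc(sizeof(Counter));
        self->ivars = calloc(1, sizeof(CounterIVARS));
        return self;
    }

    static void
    Counter_increment(Counter *self) {
        /* Before the migration this body would read self->count++. */
        CounterIVARS *const ivars = Counter_IVARS(self);
        ivars->count++;
    }

    int main(void) {
        Counter *c = Counter_new();
        Counter_increment(c);
        printf("count = %d\n", Counter_IVARS(c)->count);
        free(c->ivars);
        free(c);
        return 0;
    }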

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/SortCache/NumericSortCache.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/SortCache/NumericSortCache.c b/core/Lucy/Index/SortCache/NumericSortCache.c
index 0898062..d4af636 100644
--- a/core/Lucy/Index/SortCache/NumericSortCache.c
+++ b/core/Lucy/Index/SortCache/NumericSortCache.c
@@ -45,19 +45,20 @@ NumSortCache_init(NumericSortCache *self, const CharBuf *field,
     void    *ords    = InStream_Buf(ord_in, (size_t)ord_len);
     SortCache_init((SortCache*)self, field, type, ords, cardinality, doc_max,
                    null_ord, ord_width);
+    NumericSortCacheIVARS *const ivars = NumSortCache_IVARS(self);
 
     // Assign.
-    self->ord_in = (InStream*)INCREF(ord_in);
-    self->dat_in = (InStream*)INCREF(dat_in);
+    ivars->ord_in = (InStream*)INCREF(ord_in);
+    ivars->dat_in = (InStream*)INCREF(dat_in);
 
     // Validate ord file length.
     double BITS_PER_BYTE = 8.0;
-    double docs_per_byte = BITS_PER_BYTE / self->ord_width;
+    double docs_per_byte = BITS_PER_BYTE / ivars->ord_width;
     double max_ords      = ord_len * docs_per_byte;
-    if (max_ords < self->doc_max + 1) {
+    if (max_ords < ivars->doc_max + 1) {
         DECREF(self);
         THROW(ERR, "Conflict between ord count max %f64 and doc_max %i32 for "
-              "field %o", max_ords, self->doc_max, field);
+              "field %o", max_ords, ivars->doc_max, field);
     }
 
     ABSTRACT_CLASS_CHECK(self, NUMERICSORTCACHE);
@@ -66,13 +67,14 @@ NumSortCache_init(NumericSortCache *self, const CharBuf *field,
 
 void
 NumSortCache_destroy(NumericSortCache *self) {
-    if (self->ord_in) {
-        InStream_Close(self->ord_in);
-        InStream_Dec_RefCount(self->ord_in);
+    NumericSortCacheIVARS *const ivars = NumSortCache_IVARS(self);
+    if (ivars->ord_in) {
+        InStream_Close(ivars->ord_in);
+        InStream_Dec_RefCount(ivars->ord_in);
     }
-    if (self->dat_in) {
-        InStream_Close(self->dat_in);
-        InStream_Dec_RefCount(self->dat_in);
+    if (ivars->dat_in) {
+        InStream_Close(ivars->dat_in);
+        InStream_Dec_RefCount(ivars->dat_in);
     }
     SUPER_DESTROY(self, NUMERICSORTCACHE);
 }
@@ -101,16 +103,17 @@ F64SortCache_init(Float64SortCache *self, const CharBuf *field,
 
 Obj*
 F64SortCache_value(Float64SortCache *self, int32_t ord, Obj *blank) {
-    if (ord == self->null_ord) {
+    Float64SortCacheIVARS *const ivars = F64SortCache_IVARS(self);
+    if (ord == ivars->null_ord) {
         return NULL;
     }
     else if (ord < 0) {
-        THROW(ERR, "Ordinal less than 0 for %o: %i32", self->field, ord);
+        THROW(ERR, "Ordinal less than 0 for %o: %i32", ivars->field, ord);
     }
     else {
         Float64 *num_blank = (Float64*)CERTIFY(blank, FLOAT64);
-        InStream_Seek(self->dat_in, ord * sizeof(double));
-        Float64_Set_Value(num_blank, InStream_Read_F64(self->dat_in));
+        InStream_Seek(ivars->dat_in, ord * sizeof(double));
+        Float64_Set_Value(num_blank, InStream_Read_F64(ivars->dat_in));
     }
     return blank;
 }
@@ -145,16 +148,17 @@ F32SortCache_init(Float32SortCache *self, const CharBuf *field,
 
 Obj*
 F32SortCache_value(Float32SortCache *self, int32_t ord, Obj *blank) {
-    if (ord == self->null_ord) {
+    Float32SortCacheIVARS *const ivars = F32SortCache_IVARS(self);
+    if (ord == ivars->null_ord) {
         return NULL;
     }
     else if (ord < 0) {
-        THROW(ERR, "Ordinal less than 0 for %o: %i32", self->field, ord);
+        THROW(ERR, "Ordinal less than 0 for %o: %i32", ivars->field, ord);
     }
     else {
         Float32 *num_blank = (Float32*)CERTIFY(blank, FLOAT32);
-        InStream_Seek(self->dat_in, ord * sizeof(float));
-        Float32_Set_Value(num_blank, InStream_Read_F32(self->dat_in));
+        InStream_Seek(ivars->dat_in, ord * sizeof(float));
+        Float32_Set_Value(num_blank, InStream_Read_F32(ivars->dat_in));
     }
     return blank;
 }
@@ -189,16 +193,17 @@ I32SortCache_init(Int32SortCache *self, const CharBuf *field,
 
 Obj*
 I32SortCache_value(Int32SortCache *self, int32_t ord, Obj *blank) {
-    if (ord == self->null_ord) {
+    Int32SortCacheIVARS *const ivars = I32SortCache_IVARS(self);
+    if (ord == ivars->null_ord) {
         return NULL;
     }
     else if (ord < 0) {
-        THROW(ERR, "Ordinal less than 0 for %o: %i32", self->field, ord);
+        THROW(ERR, "Ordinal less than 0 for %o: %i32", ivars->field, ord);
     }
     else {
         Integer32 *int_blank = (Integer32*)CERTIFY(blank, INTEGER32);
-        InStream_Seek(self->dat_in, ord * sizeof(int32_t));
-        Int32_Set_Value(int_blank, InStream_Read_I32(self->dat_in));
+        InStream_Seek(ivars->dat_in, ord * sizeof(int32_t));
+        Int32_Set_Value(int_blank, InStream_Read_I32(ivars->dat_in));
     }
     return blank;
 }
@@ -233,16 +238,17 @@ I64SortCache_init(Int64SortCache *self, const CharBuf *field,
 
 Obj*
 I64SortCache_value(Int64SortCache *self, int32_t ord, Obj *blank) {
-    if (ord == self->null_ord) {
+    Int64SortCacheIVARS *const ivars = I64SortCache_IVARS(self);
+    if (ord == ivars->null_ord) {
         return NULL;
     }
     else if (ord < 0) {
-        THROW(ERR, "Ordinal less than 0 for %o: %i32", self->field, ord);
+        THROW(ERR, "Ordinal less than 0 for %o: %i32", ivars->field, ord);
     }
     else {
         Integer64 *int_blank = (Integer64*)CERTIFY(blank, INTEGER64);
-        InStream_Seek(self->dat_in, ord * sizeof(int64_t));
-        Int64_Set_Value(int_blank, InStream_Read_I64(self->dat_in));
+        InStream_Seek(ivars->dat_in, ord * sizeof(int64_t));
+        Int64_Set_Value(int_blank, InStream_Read_I64(ivars->dat_in));
     }
     return blank;
 }

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/SortCache/TextSortCache.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/SortCache/TextSortCache.c b/core/Lucy/Index/SortCache/TextSortCache.c
index 1905215..9c05c63 100644
--- a/core/Lucy/Index/SortCache/TextSortCache.c
+++ b/core/Lucy/Index/SortCache/TextSortCache.c
@@ -49,37 +49,39 @@ TextSortCache_init(TextSortCache *self, const CharBuf *field,
     void *ords = InStream_Buf(ord_in, (size_t)ord_len);
     SortCache_init((SortCache*)self, field, type, ords, cardinality, doc_max,
                    null_ord, ord_width);
+    TextSortCacheIVARS *const ivars = TextSortCache_IVARS(self);
 
     // Validate ords file length.
-    double  bytes_per_doc = self->ord_width / 8.0;
+    double  bytes_per_doc = ivars->ord_width / 8.0;
     double  max_ords      = ord_len / bytes_per_doc;
-    if (max_ords < self->doc_max + 1) {
-        WARN("ORD WIDTH: %i32 %i32", ord_width, self->ord_width);
+    if (max_ords < ivars->doc_max + 1) {
+        WARN("ORD WIDTH: %i32 %i32", ord_width, ivars->ord_width);
         THROW(ERR, "Conflict between ord count max %f64 and doc_max %i32 for "
               "field %o", max_ords, doc_max, field);
     }
 
     // Assign.
-    self->ord_in = (InStream*)INCREF(ord_in);
-    self->ix_in  = (InStream*)INCREF(ix_in);
-    self->dat_in = (InStream*)INCREF(dat_in);
+    ivars->ord_in = (InStream*)INCREF(ord_in);
+    ivars->ix_in  = (InStream*)INCREF(ix_in);
+    ivars->dat_in = (InStream*)INCREF(dat_in);
 
     return self;
 }
 
 void
 TextSortCache_destroy(TextSortCache *self) {
-    if (self->ord_in) {
-        InStream_Close(self->ord_in);
-        InStream_Dec_RefCount(self->ord_in);
+    TextSortCacheIVARS *const ivars = TextSortCache_IVARS(self);
+    if (ivars->ord_in) {
+        InStream_Close(ivars->ord_in);
+        InStream_Dec_RefCount(ivars->ord_in);
     }
-    if (self->ix_in) {
-        InStream_Close(self->ix_in);
-        InStream_Dec_RefCount(self->ix_in);
+    if (ivars->ix_in) {
+        InStream_Close(ivars->ix_in);
+        InStream_Dec_RefCount(ivars->ix_in);
     }
-    if (self->dat_in) {
-        InStream_Close(self->dat_in);
-        InStream_Dec_RefCount(self->dat_in);
+    if (ivars->dat_in) {
+        InStream_Close(ivars->dat_in);
+        InStream_Dec_RefCount(ivars->dat_in);
     }
     SUPER_DESTROY(self, TEXTSORTCACHE);
 }
@@ -88,11 +90,12 @@ TextSortCache_destroy(TextSortCache *self) {
 
 Obj*
 TextSortCache_value(TextSortCache *self, int32_t ord, Obj *blank) {
-    if (ord == self->null_ord) {
+    TextSortCacheIVARS *const ivars = TextSortCache_IVARS(self);
+    if (ord == ivars->null_ord) {
         return NULL;
     }
-    InStream_Seek(self->ix_in, ord * sizeof(int64_t));
-    int64_t offset = InStream_Read_I64(self->ix_in);
+    InStream_Seek(ivars->ix_in, ord * sizeof(int64_t));
+    int64_t offset = InStream_Read_I64(ivars->ix_in);
     if (offset == NULL_SENTINEL) {
         return NULL;
     }
@@ -100,8 +103,8 @@ TextSortCache_value(TextSortCache *self, int32_t ord, Obj *blank) {
         uint32_t next_ord = ord + 1;
         int64_t next_offset;
         while (1) {
-            InStream_Seek(self->ix_in, next_ord * sizeof(int64_t));
-            next_offset = InStream_Read_I64(self->ix_in);
+            InStream_Seek(ivars->ix_in, next_ord * sizeof(int64_t));
+            next_offset = InStream_Read_I64(ivars->ix_in);
             if (next_offset != NULL_SENTINEL) { break; }
             next_ord++;
         }
@@ -110,13 +113,13 @@ TextSortCache_value(TextSortCache *self, int32_t ord, Obj *blank) {
         CERTIFY(blank, CHARBUF);
         int64_t len = next_offset - offset;
         char *ptr = CB_Grow((CharBuf*)blank, (size_t)len);
-        InStream_Seek(self->dat_in, offset);
-        InStream_Read_Bytes(self->dat_in, ptr, (size_t)len);
+        InStream_Seek(ivars->dat_in, offset);
+        InStream_Read_Bytes(ivars->dat_in, ptr, (size_t)len);
         ptr[len] = '\0';
         if (!StrHelp_utf8_valid(ptr, (size_t)len)) {
             CB_Set_Size((CharBuf*)blank, 0);
             THROW(ERR, "Invalid UTF-8 at %i64 in %o", offset,
-                  InStream_Get_Filename(self->dat_in));
+                  InStream_Get_Filename(ivars->dat_in));
         }
         CB_Set_Size((CharBuf*)blank, (size_t)len);
     }

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/SortFieldWriter.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/SortFieldWriter.c b/core/Lucy/Index/SortFieldWriter.c
index 6d3bc16..59a90ec 100644
--- a/core/Lucy/Index/SortFieldWriter.c
+++ b/core/Lucy/Index/SortFieldWriter.c
@@ -77,56 +77,58 @@ SortFieldWriter_init(SortFieldWriter *self, Schema *schema,
                      OutStream *temp_dat_out) {
     // Init.
     SortEx_init((SortExternal*)self, sizeof(SFWriterElem));
-    self->null_ord        = -1;
-    self->count           = 0;
-    self->ord_start       = 0;
-    self->ord_end         = 0;
-    self->ix_start        = 0;
-    self->ix_end          = 0;
-    self->dat_start       = 0;
-    self->dat_end         = 0;
-    self->run_cardinality = -1;
-    self->run_max         = -1;
-    self->sort_cache      = NULL;
-    self->doc_map         = NULL;
-    self->sorted_ids      = NULL;
-    self->run_ord         = 0;
-    self->run_tick        = 0;
-    self->ord_width       = 0;
+    SortFieldWriterIVARS *const ivars = SortFieldWriter_IVARS(self);
+    ivars->null_ord        = -1;
+    ivars->count           = 0;
+    ivars->ord_start       = 0;
+    ivars->ord_end         = 0;
+    ivars->ix_start        = 0;
+    ivars->ix_end          = 0;
+    ivars->dat_start       = 0;
+    ivars->dat_end         = 0;
+    ivars->run_cardinality = -1;
+    ivars->run_max         = -1;
+    ivars->sort_cache      = NULL;
+    ivars->doc_map         = NULL;
+    ivars->sorted_ids      = NULL;
+    ivars->run_ord         = 0;
+    ivars->run_tick        = 0;
+    ivars->ord_width       = 0;
 
     // Assign.
-    self->field        = CB_Clone(field);
-    self->schema       = (Schema*)INCREF(schema);
-    self->snapshot     = (Snapshot*)INCREF(snapshot);
-    self->segment      = (Segment*)INCREF(segment);
-    self->polyreader   = (PolyReader*)INCREF(polyreader);
-    self->mem_pool     = (MemoryPool*)INCREF(memory_pool);
-    self->temp_ord_out = (OutStream*)INCREF(temp_ord_out);
-    self->temp_ix_out  = (OutStream*)INCREF(temp_ix_out);
-    self->temp_dat_out = (OutStream*)INCREF(temp_dat_out);
-    self->mem_thresh   = mem_thresh;
+    ivars->field        = CB_Clone(field);
+    ivars->schema       = (Schema*)INCREF(schema);
+    ivars->snapshot     = (Snapshot*)INCREF(snapshot);
+    ivars->segment      = (Segment*)INCREF(segment);
+    ivars->polyreader   = (PolyReader*)INCREF(polyreader);
+    ivars->mem_pool     = (MemoryPool*)INCREF(memory_pool);
+    ivars->temp_ord_out = (OutStream*)INCREF(temp_ord_out);
+    ivars->temp_ix_out  = (OutStream*)INCREF(temp_ix_out);
+    ivars->temp_dat_out = (OutStream*)INCREF(temp_dat_out);
+    ivars->mem_thresh   = mem_thresh;
 
     // Derive.
-    self->field_num = Seg_Field_Num(segment, field);
+    ivars->field_num = Seg_Field_Num(segment, field);
     FieldType *type = (FieldType*)CERTIFY(
-                          Schema_Fetch_Type(self->schema, field), FIELDTYPE);
-    self->type    = (FieldType*)INCREF(type);
-    self->prim_id = FType_Primitive_ID(type);
-    if (self->prim_id == FType_TEXT || self->prim_id == FType_BLOB) {
-        self->var_width = true;
+                          Schema_Fetch_Type(ivars->schema, field), FIELDTYPE);
+    ivars->type    = (FieldType*)INCREF(type);
+    ivars->prim_id = FType_Primitive_ID(type);
+    if (ivars->prim_id == FType_TEXT || ivars->prim_id == FType_BLOB) {
+        ivars->var_width = true;
     }
     else {
-        self->var_width = false;
+        ivars->var_width = false;
     }
-    self->uniq_vals = (Hash*)ZKHash_new(memory_pool, self->prim_id);
+    ivars->uniq_vals = (Hash*)ZKHash_new(memory_pool, ivars->prim_id);
 
     return self;
 }
 
 void
 SortFieldWriter_clear_cache(SortFieldWriter *self) {
-    if (self->uniq_vals) {
-        Hash_Clear(self->uniq_vals);
+    SortFieldWriterIVARS *const ivars = SortFieldWriter_IVARS(self);
+    if (ivars->uniq_vals) {
+        Hash_Clear(ivars->uniq_vals);
     }
     SortFieldWriter_Clear_Cache_t super_clear_cache
         = SUPER_METHOD_PTR(SORTFIELDWRITER, Lucy_SortFieldWriter_Clear_Cache);
@@ -135,35 +137,36 @@ SortFieldWriter_clear_cache(SortFieldWriter *self) {
 
 void
 SortFieldWriter_destroy(SortFieldWriter *self) {
-    DECREF(self->uniq_vals);
-    self->uniq_vals = NULL;
-    DECREF(self->field);
-    DECREF(self->schema);
-    DECREF(self->snapshot);
-    DECREF(self->segment);
-    DECREF(self->polyreader);
-    DECREF(self->type);
-    DECREF(self->mem_pool);
-    DECREF(self->temp_ord_out);
-    DECREF(self->temp_ix_out);
-    DECREF(self->temp_dat_out);
-    DECREF(self->ord_in);
-    DECREF(self->ix_in);
-    DECREF(self->dat_in);
-    DECREF(self->sort_cache);
-    DECREF(self->doc_map);
-    FREEMEM(self->sorted_ids);
+    SortFieldWriterIVARS *const ivars = SortFieldWriter_IVARS(self);
+    DECREF(ivars->uniq_vals);
+    ivars->uniq_vals = NULL;
+    DECREF(ivars->field);
+    DECREF(ivars->schema);
+    DECREF(ivars->snapshot);
+    DECREF(ivars->segment);
+    DECREF(ivars->polyreader);
+    DECREF(ivars->type);
+    DECREF(ivars->mem_pool);
+    DECREF(ivars->temp_ord_out);
+    DECREF(ivars->temp_ix_out);
+    DECREF(ivars->temp_dat_out);
+    DECREF(ivars->ord_in);
+    DECREF(ivars->ix_in);
+    DECREF(ivars->dat_in);
+    DECREF(ivars->sort_cache);
+    DECREF(ivars->doc_map);
+    FREEMEM(ivars->sorted_ids);
     SUPER_DESTROY(self, SORTFIELDWRITER);
 }
 
 int32_t
 SortFieldWriter_get_null_ord(SortFieldWriter *self) {
-    return self->null_ord;
+    return SortFieldWriter_IVARS(self)->null_ord;
 }
 
 int32_t
 SortFieldWriter_get_ord_width(SortFieldWriter *self) {
-    return self->ord_width;
+    return SortFieldWriter_IVARS(self)->ord_width;
 }
 
 static Obj*
@@ -179,28 +182,32 @@ S_find_unique_value(Hash *uniq_vals, Obj *val) {
 
 void
 SortFieldWriter_add(SortFieldWriter *self, int32_t doc_id, Obj *value) {
+    SortFieldWriterIVARS *const ivars = SortFieldWriter_IVARS(self);
+
     // Uniq-ify the value, and record it for this document.
     SFWriterElem elem;
-    elem.value = S_find_unique_value(self->uniq_vals, value);
+    elem.value = S_find_unique_value(ivars->uniq_vals, value);
     elem.doc_id = doc_id;
     SortFieldWriter_Feed(self, &elem);
-    self->count++;
+    ivars->count++;
 }
 
 void
 SortFieldWriter_add_segment(SortFieldWriter *self, SegReader *reader,
                             I32Array *doc_map, SortCache *sort_cache) {
     if (!sort_cache) { return; }
+    SortFieldWriterIVARS *const ivars = SortFieldWriter_IVARS(self);
     SortFieldWriter *run
-        = SortFieldWriter_new(self->schema, self->snapshot, self->segment,
-                              self->polyreader, self->field, self->mem_pool,
-                              self->mem_thresh, NULL, NULL, NULL);
-    run->sort_cache = (SortCache*)INCREF(sort_cache);
-    run->doc_map    = (I32Array*)INCREF(doc_map);
-    run->run_max    = SegReader_Doc_Max(reader);
-    run->run_cardinality = SortCache_Get_Cardinality(sort_cache);
-    run->null_ord   = SortCache_Get_Null_Ord(sort_cache);
-    run->run_tick   = 1;
+        = SortFieldWriter_new(ivars->schema, ivars->snapshot, ivars->segment,
+                              ivars->polyreader, ivars->field, ivars->mem_pool,
+                              ivars->mem_thresh, NULL, NULL, NULL);
+    SortFieldWriterIVARS *const run_ivars = SortFieldWriter_IVARS(run);
+    run_ivars->sort_cache = (SortCache*)INCREF(sort_cache);
+    run_ivars->doc_map    = (I32Array*)INCREF(doc_map);
+    run_ivars->run_max    = SegReader_Doc_Max(reader);
+    run_ivars->run_cardinality = SortCache_Get_Cardinality(sort_cache);
+    run_ivars->null_ord   = SortCache_Get_Null_Ord(sort_cache);
+    run_ivars->run_tick   = 1;
     SortFieldWriter_Add_Run(self, (SortExternal*)run);
 }
 
@@ -322,10 +329,11 @@ S_write_val(Obj *val, int8_t prim_id, OutStream *ix_out, OutStream *dat_out,
 
 int
 SortFieldWriter_compare(SortFieldWriter *self, void *va, void *vb) {
+    SortFieldWriterIVARS *const ivars = SortFieldWriter_IVARS(self);
     SFWriterElem *a = (SFWriterElem*)va;
     SFWriterElem *b = (SFWriterElem*)vb;
     int32_t comparison
-        = FType_null_back_compare_values(self->type, a->value, b->value);
+        = FType_null_back_compare_values(ivars->type, a->value, b->value);
     if (comparison == 0) { comparison = b->doc_id - a->doc_id; }
     return comparison;
 }
@@ -342,64 +350,68 @@ S_compare_doc_ids_by_ord_rev(void *context, const void *va, const void *vb) {
 
 static void
 S_lazy_init_sorted_ids(SortFieldWriter *self) {
-    if (!self->sorted_ids) {
-        self->sorted_ids
-            = (int32_t*)MALLOCATE((self->run_max + 1) * sizeof(int32_t));
-        for (int32_t i = 0, max = self->run_max; i <= max; i++) {
-            self->sorted_ids[i] = i;
+    SortFieldWriterIVARS *const ivars = SortFieldWriter_IVARS(self);
+    if (!ivars->sorted_ids) {
+        ivars->sorted_ids
+            = (int32_t*)MALLOCATE((ivars->run_max + 1) * sizeof(int32_t));
+        for (int32_t i = 0, max = ivars->run_max; i <= max; i++) {
+            ivars->sorted_ids[i] = i;
         }
-        Sort_quicksort(self->sorted_ids + 1, self->run_max, sizeof(int32_t),
-                       S_compare_doc_ids_by_ord_rev, self->sort_cache);
+        Sort_quicksort(ivars->sorted_ids + 1, ivars->run_max, sizeof(int32_t),
+                       S_compare_doc_ids_by_ord_rev, ivars->sort_cache);
     }
 }
 
 void
 SortFieldWriter_flush(SortFieldWriter *self) {
+    SortFieldWriterIVARS *const ivars = SortFieldWriter_IVARS(self);
+
     // Don't add a run unless we have data to put in it.
     if (SortFieldWriter_Cache_Count(self) == 0) { return; }
 
-    OutStream *const temp_ord_out = self->temp_ord_out;
-    OutStream *const temp_ix_out  = self->temp_ix_out;
-    OutStream *const temp_dat_out = self->temp_dat_out;
+    OutStream *const temp_ord_out = ivars->temp_ord_out;
+    OutStream *const temp_ix_out  = ivars->temp_ix_out;
+    OutStream *const temp_dat_out = ivars->temp_dat_out;
 
     SortFieldWriter_Sort_Cache(self);
     SortFieldWriter *run
-        = SortFieldWriter_new(self->schema, self->snapshot, self->segment,
-                              self->polyreader, self->field, self->mem_pool,
-                              self->mem_thresh, NULL, NULL, NULL);
+        = SortFieldWriter_new(ivars->schema, ivars->snapshot, ivars->segment,
+                              ivars->polyreader, ivars->field, ivars->mem_pool,
+                              ivars->mem_thresh, NULL, NULL, NULL);
+    SortFieldWriterIVARS *const run_ivars = SortFieldWriter_IVARS(run);
 
     // Record stream starts and align.
-    run->ord_start = OutStream_Align(temp_ord_out, sizeof(int64_t));
-    if (self->var_width) {
-        run->ix_start  = OutStream_Align(temp_ix_out, sizeof(int64_t));
+    run_ivars->ord_start = OutStream_Align(temp_ord_out, sizeof(int64_t));
+    if (ivars->var_width) {
+        run_ivars->ix_start  = OutStream_Align(temp_ix_out, sizeof(int64_t));
     }
-    run->dat_start = OutStream_Align(temp_dat_out, sizeof(int64_t));
+    run_ivars->dat_start = OutStream_Align(temp_dat_out, sizeof(int64_t));
 
     // Have the run borrow the array of elems.
-    run->cache      = self->cache;
-    run->cache_max  = self->cache_max;
-    run->cache_tick = self->cache_tick;
-    run->cache_cap  = self->cache_cap;
+    run_ivars->cache      = ivars->cache;
+    run_ivars->cache_max  = ivars->cache_max;
+    run_ivars->cache_tick = ivars->cache_tick;
+    run_ivars->cache_cap  = ivars->cache_cap;
 
     // Write files, record stats.
-    run->run_max = (int32_t)Seg_Get_Count(self->segment);
-    run->run_cardinality = S_write_files(run, temp_ord_out, temp_ix_out,
-                                         temp_dat_out);
+    run_ivars->run_max = (int32_t)Seg_Get_Count(ivars->segment);
+    run_ivars->run_cardinality = S_write_files(run, temp_ord_out, temp_ix_out,
+                                               temp_dat_out);
 
     // Reclaim the buffer from the run and empty it.
-    run->cache       = NULL;
-    run->cache_max   = 0;
-    run->cache_tick  = 0;
-    run->cache_cap   = 0;
-    self->cache_tick = self->cache_max;
+    run_ivars->cache       = NULL;
+    run_ivars->cache_max   = 0;
+    run_ivars->cache_tick  = 0;
+    run_ivars->cache_cap   = 0;
+    ivars->cache_tick = ivars->cache_max;
     SortFieldWriter_Clear_Cache(self);
 
     // Record stream ends.
-    run->ord_end = OutStream_Tell(temp_ord_out);
-    if (self->var_width) {
-        run->ix_end  = OutStream_Tell(temp_ix_out);
+    run_ivars->ord_end = OutStream_Tell(temp_ord_out);
+    if (ivars->var_width) {
+        run_ivars->ix_end  = OutStream_Tell(temp_ix_out);
     }
-    run->dat_end = OutStream_Tell(temp_dat_out);
+    run_ivars->dat_end = OutStream_Tell(temp_dat_out);
 
     // Add the run to the array.
     SortFieldWriter_Add_Run(self, (SortExternal*)run);
@@ -407,7 +419,8 @@ SortFieldWriter_flush(SortFieldWriter *self) {
 
 uint32_t
 SortFieldWriter_refill(SortFieldWriter *self) {
-    if (!self->sort_cache) { return 0; }
+    SortFieldWriterIVARS *const ivars = SortFieldWriter_IVARS(self);
+    if (!ivars->sort_cache) { return 0; }
 
     // Sanity check, then reset the cache and prepare to start loading items.
     uint32_t cache_count = SortFieldWriter_Cache_Count(self);
@@ -416,28 +429,28 @@ SortFieldWriter_refill(SortFieldWriter *self) {
               cache_count);
     }
     SortFieldWriter_Clear_Cache(self);
-    MemPool_Release_All(self->mem_pool);
+    MemPool_Release_All(ivars->mem_pool);
     S_lazy_init_sorted_ids(self);
 
-    const int32_t    null_ord   = self->null_ord;
-    Hash *const      uniq_vals  = self->uniq_vals;
-    I32Array *const  doc_map    = self->doc_map;
-    SortCache *const sort_cache = self->sort_cache;
+    const int32_t    null_ord   = ivars->null_ord;
+    Hash *const      uniq_vals  = ivars->uniq_vals;
+    I32Array *const  doc_map    = ivars->doc_map;
+    SortCache *const sort_cache = ivars->sort_cache;
     Obj *const       blank      = SortCache_Make_Blank(sort_cache);
 
-    while (self->run_ord < self->run_cardinality
-           && MemPool_Get_Consumed(self->mem_pool) < self->mem_thresh
+    while (ivars->run_ord < ivars->run_cardinality
+           && MemPool_Get_Consumed(ivars->mem_pool) < ivars->mem_thresh
           ) {
-        Obj *val = SortCache_Value(sort_cache, self->run_ord, blank);
+        Obj *val = SortCache_Value(sort_cache, ivars->run_ord, blank);
         if (val) {
             Hash_Store(uniq_vals, val, (Obj*)CFISH_TRUE);
             break;
         }
-        self->run_ord++;
+        ivars->run_ord++;
     }
     uint32_t count = 0;
-    while (self->run_tick <= self->run_max) {
-        int32_t raw_doc_id = self->sorted_ids[self->run_tick];
+    while (ivars->run_tick <= ivars->run_max) {
+        int32_t raw_doc_id = ivars->sorted_ids[ivars->run_tick];
         int32_t ord = SortCache_Ordinal(sort_cache, raw_doc_id);
         if (ord != null_ord) {
             int32_t remapped = doc_map
@@ -449,17 +462,17 @@ SortFieldWriter_refill(SortFieldWriter *self) {
                 count++;
             }
         }
-        else if (ord > self->run_ord) {
+        else if (ord > ivars->run_ord) {
             break;
         }
-        self->run_tick++;
+        ivars->run_tick++;
     }
-    self->run_ord++;
+    ivars->run_ord++;
     SortFieldWriter_Sort_Cache(self);
 
-    if (self->run_ord >= self->run_cardinality) {
-        DECREF(self->sort_cache);
-        self->sort_cache = NULL;
+    if (ivars->run_ord >= ivars->run_cardinality) {
+        DECREF(ivars->sort_cache);
+        ivars->sort_cache = NULL;
     }
 
     DECREF(blank);
@@ -468,11 +481,12 @@ SortFieldWriter_refill(SortFieldWriter *self) {
 
 void
 SortFieldWriter_flip(SortFieldWriter *self) {
+    SortFieldWriterIVARS *const ivars = SortFieldWriter_IVARS(self);
     uint32_t num_items = SortFieldWriter_Cache_Count(self);
-    uint32_t num_runs = VA_Get_Size(self->runs);
+    uint32_t num_runs = VA_Get_Size(ivars->runs);
 
-    if (self->flipped) { THROW(ERR, "Can't call Flip() twice"); }
-    self->flipped = true;
+    if (ivars->flipped) { THROW(ERR, "Can't call Flip() twice"); }
+    ivars->flipped = true;
 
     // Sanity check.
     if (num_runs && num_items) {
@@ -484,40 +498,41 @@ SortFieldWriter_flip(SortFieldWriter *self) {
         SortFieldWriter_Sort_Cache(self);
     }
     else if (num_runs) {
-        Folder  *folder = PolyReader_Get_Folder(self->polyreader);
-        CharBuf *seg_name = Seg_Get_Name(self->segment);
+        Folder  *folder = PolyReader_Get_Folder(ivars->polyreader);
+        CharBuf *seg_name = Seg_Get_Name(ivars->segment);
         CharBuf *filepath = CB_newf("%o/sort_ord_temp", seg_name);
-        self->ord_in = Folder_Open_In(folder, filepath);
-        if (!self->ord_in) { RETHROW(INCREF(Err_get_error())); }
-        if (self->var_width) {
+        ivars->ord_in = Folder_Open_In(folder, filepath);
+        if (!ivars->ord_in) { RETHROW(INCREF(Err_get_error())); }
+        if (ivars->var_width) {
             CB_setf(filepath, "%o/sort_ix_temp", seg_name);
-            self->ix_in = Folder_Open_In(folder, filepath);
-            if (!self->ix_in) { RETHROW(INCREF(Err_get_error())); }
+            ivars->ix_in = Folder_Open_In(folder, filepath);
+            if (!ivars->ix_in) { RETHROW(INCREF(Err_get_error())); }
         }
         CB_setf(filepath, "%o/sort_dat_temp", seg_name);
-        self->dat_in = Folder_Open_In(folder, filepath);
-        if (!self->dat_in) { RETHROW(INCREF(Err_get_error())); }
+        ivars->dat_in = Folder_Open_In(folder, filepath);
+        if (!ivars->dat_in) { RETHROW(INCREF(Err_get_error())); }
         DECREF(filepath);
 
         // Assign streams and a slice of mem_thresh.
-        size_t sub_thresh = self->mem_thresh / num_runs;
+        size_t sub_thresh = ivars->mem_thresh / num_runs;
         if (sub_thresh < 65536) { sub_thresh = 65536; }
         for (uint32_t i = 0; i < num_runs; i++) {
-            SortFieldWriter *run = (SortFieldWriter*)VA_Fetch(self->runs, i);
-            S_flip_run(run, sub_thresh, self->ord_in, self->ix_in,
-                       self->dat_in);
+            SortFieldWriter *run = (SortFieldWriter*)VA_Fetch(ivars->runs, i);
+            S_flip_run(run, sub_thresh, ivars->ord_in, ivars->ix_in,
+                       ivars->dat_in);
         }
     }
 
-    self->flipped = true;
+    ivars->flipped = true;
 }
 
 static int32_t
 S_write_files(SortFieldWriter *self, OutStream *ord_out, OutStream *ix_out,
               OutStream *dat_out) {
-    int8_t    prim_id   = self->prim_id;
-    int32_t   doc_max   = (int32_t)Seg_Get_Count(self->segment);
-    bool      has_nulls = self->count == doc_max ? false : true;
+    SortFieldWriterIVARS *const ivars = SortFieldWriter_IVARS(self);
+    int8_t    prim_id   = ivars->prim_id;
+    int32_t   doc_max   = (int32_t)Seg_Get_Count(ivars->segment);
+    bool      has_nulls = ivars->count == doc_max ? false : true;
     size_t    size      = (doc_max + 1) * sizeof(int32_t);
     int32_t  *ords      = (int32_t*)MALLOCATE(size);
     int32_t   ord       = 0;
@@ -541,7 +556,7 @@ S_write_files(SortFieldWriter *self, OutStream *ord_out, OutStream *ix_out,
     while (NULL != (elem = (SFWriterElem*)SortFieldWriter_Fetch(self))) {
         if (elem->value != last_val_address) {
             int32_t comparison
-                = FType_Compare_Values(self->type, elem->value, val);
+                = FType_Compare_Values(ivars->type, elem->value, val);
             if (comparison != 0) {
                 ord++;
                 S_write_val(elem->value, prim_id, ix_out, dat_out, dat_start);
@@ -557,19 +572,19 @@ S_write_files(SortFieldWriter *self, OutStream *ord_out, OutStream *ix_out,
     if (has_nulls) {
         S_write_val(NULL, prim_id, ix_out, dat_out, dat_start);
         ord++;
-        self->null_ord = ord;
+        ivars->null_ord = ord;
     }
-    int32_t null_ord = self->null_ord;
+    int32_t null_ord = ivars->null_ord;
 
     // Write one extra file pointer so that we can always derive length.
-    if (self->var_width) {
+    if (ivars->var_width) {
         OutStream_Write_I64(ix_out, OutStream_Tell(dat_out) - dat_start);
     }
 
     // Calculate cardinality and ord width.
     int32_t cardinality = ord + 1;
-    self->ord_width     = S_calc_width(cardinality);
-    int32_t ord_width   = self->ord_width;
+    ivars->ord_width     = S_calc_width(cardinality);
+    int32_t ord_width   = ivars->ord_width;
 
     // Write ords.
     const double BITS_PER_BYTE = 8.0;
@@ -590,19 +605,21 @@ S_write_files(SortFieldWriter *self, OutStream *ord_out, OutStream *ix_out,
 
 int32_t
 SortFieldWriter_finish(SortFieldWriter *self) {
+    SortFieldWriterIVARS *const ivars = SortFieldWriter_IVARS(self);
+
     // Bail if there's no data.
     if (!SortFieldWriter_Peek(self)) { return 0; }
 
-    int32_t  field_num = self->field_num;
-    Folder  *folder    = PolyReader_Get_Folder(self->polyreader);
-    CharBuf *seg_name  = Seg_Get_Name(self->segment);
+    int32_t  field_num = ivars->field_num;
+    Folder  *folder    = PolyReader_Get_Folder(ivars->polyreader);
+    CharBuf *seg_name  = Seg_Get_Name(ivars->segment);
     CharBuf *path      = CB_newf("%o/sort-%i32.ord", seg_name, field_num);
 
     // Open streams.
     OutStream *ord_out = Folder_Open_Out(folder, path);
     if (!ord_out) { RETHROW(INCREF(Err_get_error())); }
     OutStream *ix_out = NULL;
-    if (self->var_width) {
+    if (ivars->var_width) {
         CB_setf(path, "%o/sort-%i32.ix", seg_name, field_num);
         ix_out = Folder_Open_Out(folder, path);
         if (!ix_out) { RETHROW(INCREF(Err_get_error())); }
@@ -628,78 +645,80 @@ SortFieldWriter_finish(SortFieldWriter *self) {
 static void
 S_flip_run(SortFieldWriter *run, size_t sub_thresh, InStream *ord_in,
            InStream *ix_in, InStream *dat_in) {
-    if (run->flipped) { THROW(ERR, "Can't Flip twice"); }
-    run->flipped = true;
+    SortFieldWriterIVARS *const run_ivars = SortFieldWriter_IVARS(run);
+
+    if (run_ivars->flipped) { THROW(ERR, "Can't Flip twice"); }
+    run_ivars->flipped = true;
 
     // Get our own MemoryPool, ZombieKeyedHash, and slice of mem_thresh.
-    DECREF(run->uniq_vals);
-    DECREF(run->mem_pool);
-    run->mem_pool   = MemPool_new(0);
-    run->uniq_vals  = (Hash*)ZKHash_new(run->mem_pool, run->prim_id);
-    run->mem_thresh = sub_thresh;
+    DECREF(run_ivars->uniq_vals);
+    DECREF(run_ivars->mem_pool);
+    run_ivars->mem_pool   = MemPool_new(0);
+    run_ivars->uniq_vals  = (Hash*)ZKHash_new(run_ivars->mem_pool, run_ivars->prim_id);
+    run_ivars->mem_thresh = sub_thresh;
 
     // Done if we already have a SortCache to read from.
-    if (run->sort_cache) { return; }
+    if (run_ivars->sort_cache) { return; }
 
     // Open the temp files for reading.
-    CharBuf *seg_name = Seg_Get_Name(run->segment);
+    CharBuf *seg_name = Seg_Get_Name(run_ivars->segment);
     CharBuf *alias    = CB_newf("%o/sort_ord_temp-%i64-to-%i64", seg_name,
-                                run->ord_start, run->ord_end);
-    InStream *ord_in_dupe = InStream_Reopen(ord_in, alias, run->ord_start,
-                                            run->ord_end - run->ord_start);
+                                run_ivars->ord_start, run_ivars->ord_end);
+    InStream *ord_in_dupe = InStream_Reopen(ord_in, alias, run_ivars->ord_start,
+                                            run_ivars->ord_end - run_ivars->ord_start);
     InStream *ix_in_dupe = NULL;
-    if (run->var_width) {
+    if (run_ivars->var_width) {
         CB_setf(alias, "%o/sort_ix_temp-%i64-to-%i64", seg_name,
-                run->ix_start, run->ix_end);
-        ix_in_dupe = InStream_Reopen(ix_in, alias, run->ix_start,
-                                     run->ix_end - run->ix_start);
+                run_ivars->ix_start, run_ivars->ix_end);
+        ix_in_dupe = InStream_Reopen(ix_in, alias, run_ivars->ix_start,
+                                     run_ivars->ix_end - run_ivars->ix_start);
     }
     CB_setf(alias, "%o/sort_dat_temp-%i64-to-%i64", seg_name,
-            run->dat_start, run->dat_end);
-    InStream *dat_in_dupe = InStream_Reopen(dat_in, alias, run->dat_start,
-                                            run->dat_end - run->dat_start);
+            run_ivars->dat_start, run_ivars->dat_end);
+    InStream *dat_in_dupe = InStream_Reopen(dat_in, alias, run_ivars->dat_start,
+                                            run_ivars->dat_end - run_ivars->dat_start);
     DECREF(alias);
 
     // Get a SortCache.
-    CharBuf *field = Seg_Field_Name(run->segment, run->field_num);
-    switch (run->prim_id & FType_PRIMITIVE_ID_MASK) {
+    CharBuf *field = Seg_Field_Name(run_ivars->segment, run_ivars->field_num);
+    switch (run_ivars->prim_id & FType_PRIMITIVE_ID_MASK) {
         case FType_TEXT:
-            run->sort_cache = (SortCache*)TextSortCache_new(
-                                  field, run->type, run->run_cardinality,
-                                  run->run_max, run->null_ord,
-                                  run->ord_width, ord_in_dupe,
+            run_ivars->sort_cache = (SortCache*)TextSortCache_new(
+                                  field, run_ivars->type, run_ivars->run_cardinality,
+                                  run_ivars->run_max, run_ivars->null_ord,
+                                  run_ivars->ord_width, ord_in_dupe,
                                   ix_in_dupe, dat_in_dupe);
             break;
         case FType_INT32:
-            run->sort_cache = (SortCache*)I32SortCache_new(
-                                  field, run->type, run->run_cardinality,
-                                  run->run_max, run->null_ord,
-                                  run->ord_width, ord_in_dupe,
+            run_ivars->sort_cache = (SortCache*)I32SortCache_new(
+                                  field, run_ivars->type, run_ivars->run_cardinality,
+                                  run_ivars->run_max, run_ivars->null_ord,
+                                  run_ivars->ord_width, ord_in_dupe,
                                   dat_in_dupe);
             break;
         case FType_INT64:
-            run->sort_cache = (SortCache*)I64SortCache_new(
-                                  field, run->type, run->run_cardinality,
-                                  run->run_max, run->null_ord,
-                                  run->ord_width, ord_in_dupe,
+            run_ivars->sort_cache = (SortCache*)I64SortCache_new(
+                                  field, run_ivars->type, run_ivars->run_cardinality,
+                                  run_ivars->run_max, run_ivars->null_ord,
+                                  run_ivars->ord_width, ord_in_dupe,
                                   dat_in_dupe);
             break;
         case FType_FLOAT32:
-            run->sort_cache = (SortCache*)F32SortCache_new(
-                                  field, run->type, run->run_cardinality,
-                                  run->run_max, run->null_ord,
-                                  run->ord_width, ord_in_dupe,
+            run_ivars->sort_cache = (SortCache*)F32SortCache_new(
+                                  field, run_ivars->type, run_ivars->run_cardinality,
+                                  run_ivars->run_max, run_ivars->null_ord,
+                                  run_ivars->ord_width, ord_in_dupe,
                                   dat_in_dupe);
             break;
         case FType_FLOAT64:
-            run->sort_cache = (SortCache*)F64SortCache_new(
-                                  field, run->type, run->run_cardinality,
-                                  run->run_max, run->null_ord,
-                                  run->ord_width, ord_in_dupe,
+            run_ivars->sort_cache = (SortCache*)F64SortCache_new(
+                                  field, run_ivars->type, run_ivars->run_cardinality,
+                                  run_ivars->run_max, run_ivars->null_ord,
+                                  run_ivars->ord_width, ord_in_dupe,
                                   dat_in_dupe);
             break;
         default:
-            THROW(ERR, "No SortCache class for %o", run->type);
+            THROW(ERR, "No SortCache class for %o", run_ivars->type);
     }
 
     DECREF(ord_in_dupe);

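In SortFieldWriter.c above, methods such as SortFieldWriter_add_segment() and
SortFieldWriter_flush() fetch a second IVARS pointer (run_ivars) for the run object they construct,
and TermInfo.c below does the same for the clone (twin_ivars). The accessor is bound to a single
object, so touching another object's members takes another lookup. A minimal sketch under
hypothetical names (Pair, PairIVARS, Pair_IVARS), not actual Lucy code:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical types.  One IVARS lookup per object whose members are
     * accessed, mirroring SortFieldWriter_IVARS(self) plus
     * SortFieldWriter_IVARS(run) in the diff above. */
    typedef struct PairIVARS { int value; } PairIVARS;
    typedef struct Pair { PairIVARS *ivars; } Pair;

    static PairIVARS* Pair_IVARS(Pair *self) { return self->ivars; }

    static Pair*
    Pair_new(int value) {
        Pair *self = malloc(sizeof(Pair));
        self->ivars = calloc(1, sizeof(PairIVARS));
        self->ivars->value = value;
        return self;
    }

    static void
    Pair_copy_value(Pair *self, Pair *other) {
        PairIVARS *const ivars       = Pair_IVARS(self);
        PairIVARS *const other_ivars = Pair_IVARS(other);   /* second lookup */
        ivars->value = other_ivars->value;
    }

    int main(void) {
        Pair *a = Pair_new(1);
        Pair *b = Pair_new(42);
        Pair_copy_value(a, b);
        printf("%d\n", Pair_IVARS(a)->value);   /* prints 42 */
        free(a->ivars); free(a);
        free(b->ivars); free(b);
        return 0;
    }
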
http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/SortReader.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/SortReader.c b/core/Lucy/Index/SortReader.c
index 159d837..25f2df4 100644
--- a/core/Lucy/Index/SortReader.c
+++ b/core/Lucy/Index/SortReader.c
@@ -58,57 +58,56 @@ DefSortReader_new(Schema *schema, Folder *folder, Snapshot *snapshot,
 DefaultSortReader*
 DefSortReader_init(DefaultSortReader *self, Schema *schema, Folder *folder,
                    Snapshot *snapshot, VArray *segments, int32_t seg_tick) {
-    Segment *segment;
-    Hash    *metadata;
     DataReader_init((DataReader*)self, schema, folder, snapshot, segments,
                     seg_tick);
-    segment = DefSortReader_Get_Segment(self);
-    metadata = (Hash*)Seg_Fetch_Metadata_Str(segment, "sort", 4);
+    DefaultSortReaderIVARS *const ivars = DefSortReader_IVARS(self);
+    Segment *segment  = DefSortReader_Get_Segment(self);
+    Hash    *metadata = (Hash*)Seg_Fetch_Metadata_Str(segment, "sort", 4);
 
     // Check format.
-    self->format = 0;
+    ivars->format = 0;
     if (metadata) {
         Obj *format = Hash_Fetch_Str(metadata, "format", 6);
         if (!format) { THROW(ERR, "Missing 'format' var"); }
         else {
-            self->format = (int32_t)Obj_To_I64(format);
-            if (self->format < 2 || self->format > 3) {
+            ivars->format = (int32_t)Obj_To_I64(format);
+            if (ivars->format < 2 || ivars->format > 3) {
                 THROW(ERR, "Unsupported sort cache format: %i32",
-                      self->format);
+                      ivars->format);
             }
         }
     }
 
     // Init.
-    self->caches = Hash_new(0);
+    ivars->caches = Hash_new(0);
 
     // Either extract or fake up the "counts", "null_ords", and "ord_widths"
     // hashes.
     if (metadata) {
-        self->counts
+        ivars->counts
             = (Hash*)INCREF(CERTIFY(Hash_Fetch_Str(metadata, "counts", 6),
                                     HASH));
-        self->null_ords = (Hash*)Hash_Fetch_Str(metadata, "null_ords", 9);
-        if (self->null_ords) {
-            CERTIFY(self->null_ords, HASH);
-            INCREF(self->null_ords);
+        ivars->null_ords = (Hash*)Hash_Fetch_Str(metadata, "null_ords", 9);
+        if (ivars->null_ords) {
+            CERTIFY(ivars->null_ords, HASH);
+            INCREF(ivars->null_ords);
         }
         else {
-            self->null_ords = Hash_new(0);
+            ivars->null_ords = Hash_new(0);
         }
-        self->ord_widths = (Hash*)Hash_Fetch_Str(metadata, "ord_widths", 10);
-        if (self->ord_widths) {
-            CERTIFY(self->ord_widths, HASH);
-            INCREF(self->ord_widths);
+        ivars->ord_widths = (Hash*)Hash_Fetch_Str(metadata, "ord_widths", 10);
+        if (ivars->ord_widths) {
+            CERTIFY(ivars->ord_widths, HASH);
+            INCREF(ivars->ord_widths);
         }
         else {
-            self->ord_widths = Hash_new(0);
+            ivars->ord_widths = Hash_new(0);
         }
     }
     else {
-        self->counts     = Hash_new(0);
-        self->null_ords  = Hash_new(0);
-        self->ord_widths = Hash_new(0);
+        ivars->counts     = Hash_new(0);
+        ivars->null_ords  = Hash_new(0);
+        ivars->ord_widths = Hash_new(0);
     }
 
     return self;
@@ -116,30 +115,32 @@ DefSortReader_init(DefaultSortReader *self, Schema *schema, Folder *folder,
 
 void
 DefSortReader_close(DefaultSortReader *self) {
-    if (self->caches) {
-        Hash_Dec_RefCount(self->caches);
-        self->caches = NULL;
+    DefaultSortReaderIVARS *const ivars = DefSortReader_IVARS(self);
+    if (ivars->caches) {
+        Hash_Dec_RefCount(ivars->caches);
+        ivars->caches = NULL;
     }
-    if (self->counts) {
-        Hash_Dec_RefCount(self->counts);
-        self->counts = NULL;
+    if (ivars->counts) {
+        Hash_Dec_RefCount(ivars->counts);
+        ivars->counts = NULL;
     }
-    if (self->null_ords) {
-        Hash_Dec_RefCount(self->null_ords);
-        self->null_ords = NULL;
+    if (ivars->null_ords) {
+        Hash_Dec_RefCount(ivars->null_ords);
+        ivars->null_ords = NULL;
     }
-    if (self->ord_widths) {
-        Hash_Dec_RefCount(self->ord_widths);
-        self->ord_widths = NULL;
+    if (ivars->ord_widths) {
+        Hash_Dec_RefCount(ivars->ord_widths);
+        ivars->ord_widths = NULL;
     }
 }
 
 void
 DefSortReader_destroy(DefaultSortReader *self) {
-    DECREF(self->caches);
-    DECREF(self->counts);
-    DECREF(self->null_ords);
-    DECREF(self->ord_widths);
+    DefaultSortReaderIVARS *const ivars = DefSortReader_IVARS(self);
+    DECREF(ivars->caches);
+    DECREF(ivars->counts);
+    DECREF(ivars->null_ords);
+    DECREF(ivars->ord_widths);
     SUPER_DESTROY(self, DEFAULTSORTREADER);
 }
 
@@ -155,8 +156,10 @@ S_calc_ord_width(int32_t cardinality) {
 
 static SortCache*
 S_lazy_init_sort_cache(DefaultSortReader *self, const CharBuf *field) {
+    DefaultSortReaderIVARS *const ivars = DefSortReader_IVARS(self);
+
     // See if we have any values.
-    Obj *count_obj = Hash_Fetch(self->counts, (Obj*)field);
+    Obj *count_obj = Hash_Fetch(ivars->counts, (Obj*)field);
     int32_t count = count_obj ? (int32_t)Obj_To_I64(count_obj) : 0;
     if (!count) { return NULL; }
 
@@ -203,9 +206,9 @@ S_lazy_init_sort_cache(DefaultSortReader *self, const CharBuf *field) {
     }
     DECREF(path);
 
-    Obj     *null_ord_obj = Hash_Fetch(self->null_ords, (Obj*)field);
+    Obj     *null_ord_obj = Hash_Fetch(ivars->null_ords, (Obj*)field);
     int32_t  null_ord = null_ord_obj ? (int32_t)Obj_To_I64(null_ord_obj) : -1;
-    Obj     *ord_width_obj = Hash_Fetch(self->ord_widths, (Obj*)field);
+    Obj     *ord_width_obj = Hash_Fetch(ivars->ord_widths, (Obj*)field);
     int32_t  ord_width = ord_width_obj
                          ? (int32_t)Obj_To_I64(ord_width_obj)
                          : S_calc_ord_width(count);
@@ -241,9 +244,9 @@ S_lazy_init_sort_cache(DefaultSortReader *self, const CharBuf *field) {
         default:
             THROW(ERR, "No SortCache class for %o", type);
     }
-    Hash_Store(self->caches, (Obj*)field, (Obj*)cache);
+    Hash_Store(ivars->caches, (Obj*)field, (Obj*)cache);
 
-    if (self->format == 2) { // bug compatibility
+    if (ivars->format == 2) { // bug compatibility
         SortCache_Set_Native_Ords(cache, true);
     }
 
@@ -259,7 +262,8 @@ DefSortReader_fetch_sort_cache(DefaultSortReader *self, const CharBuf *field) {
     SortCache *cache = NULL;
 
     if (field) {
-        cache = (SortCache*)Hash_Fetch(self->caches, (Obj*)field);
+        DefaultSortReaderIVARS *const ivars = DefSortReader_IVARS(self);
+        cache = (SortCache*)Hash_Fetch(ivars->caches, (Obj*)field);
         if (!cache) {
             cache = S_lazy_init_sort_cache(self, field);
         }

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/SortWriter.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/SortWriter.c b/core/Lucy/Index/SortWriter.c
index fedde9a..28a88b7 100644
--- a/core/Lucy/Index/SortWriter.c
+++ b/core/Lucy/Index/SortWriter.c
@@ -52,32 +52,34 @@ SortWriter_init(SortWriter *self, Schema *schema, Snapshot *snapshot,
                 Segment *segment, PolyReader *polyreader) {
     uint32_t field_max = Schema_Num_Fields(schema) + 1;
     DataWriter_init((DataWriter*)self, schema, snapshot, segment, polyreader);
+    SortWriterIVARS *const ivars = SortWriter_IVARS(self);
 
     // Init.
-    self->field_writers   = VA_new(field_max);
-    self->counts          = Hash_new(0);
-    self->null_ords       = Hash_new(0);
-    self->ord_widths      = Hash_new(0);
-    self->temp_ord_out    = NULL;
-    self->temp_ix_out     = NULL;
-    self->temp_dat_out    = NULL;
-    self->mem_pool        = MemPool_new(0);
-    self->mem_thresh      = default_mem_thresh;
-    self->flush_at_finish = false;
+    ivars->field_writers   = VA_new(field_max);
+    ivars->counts          = Hash_new(0);
+    ivars->null_ords       = Hash_new(0);
+    ivars->ord_widths      = Hash_new(0);
+    ivars->temp_ord_out    = NULL;
+    ivars->temp_ix_out     = NULL;
+    ivars->temp_dat_out    = NULL;
+    ivars->mem_pool        = MemPool_new(0);
+    ivars->mem_thresh      = default_mem_thresh;
+    ivars->flush_at_finish = false;
 
     return self;
 }
 
 void
 SortWriter_destroy(SortWriter *self) {
-    DECREF(self->field_writers);
-    DECREF(self->counts);
-    DECREF(self->null_ords);
-    DECREF(self->ord_widths);
-    DECREF(self->temp_ord_out);
-    DECREF(self->temp_ix_out);
-    DECREF(self->temp_dat_out);
-    DECREF(self->mem_pool);
+    SortWriterIVARS *const ivars = SortWriter_IVARS(self);
+    DECREF(ivars->field_writers);
+    DECREF(ivars->counts);
+    DECREF(ivars->null_ords);
+    DECREF(ivars->ord_widths);
+    DECREF(ivars->temp_ord_out);
+    DECREF(ivars->temp_ix_out);
+    DECREF(ivars->temp_dat_out);
+    DECREF(ivars->mem_pool);
     SUPER_DESTROY(self, SORTWRITER);
 }
 
@@ -88,42 +90,44 @@ SortWriter_set_default_mem_thresh(size_t mem_thresh) {
 
 static SortFieldWriter*
 S_lazy_init_field_writer(SortWriter *self, int32_t field_num) {
+    SortWriterIVARS *const ivars = SortWriter_IVARS(self);
+
     SortFieldWriter *field_writer
-        = (SortFieldWriter*)VA_Fetch(self->field_writers, field_num);
+        = (SortFieldWriter*)VA_Fetch(ivars->field_writers, field_num);
     if (!field_writer) {
 
         // Open temp files.
-        if (!self->temp_ord_out) {
-            Folder  *folder   = self->folder;
-            CharBuf *seg_name = Seg_Get_Name(self->segment);
+        if (!ivars->temp_ord_out) {
+            Folder  *folder   = ivars->folder;
+            CharBuf *seg_name = Seg_Get_Name(ivars->segment);
             CharBuf *path     = CB_newf("%o/sort_ord_temp", seg_name);
-            self->temp_ord_out = Folder_Open_Out(folder, path);
-            if (!self->temp_ord_out) {
+            ivars->temp_ord_out = Folder_Open_Out(folder, path);
+            if (!ivars->temp_ord_out) {
                 DECREF(path);
                 RETHROW(INCREF(Err_get_error()));
             }
             CB_setf(path, "%o/sort_ix_temp", seg_name);
-            self->temp_ix_out = Folder_Open_Out(folder, path);
-            if (!self->temp_ix_out) {
+            ivars->temp_ix_out = Folder_Open_Out(folder, path);
+            if (!ivars->temp_ix_out) {
                 DECREF(path);
                 RETHROW(INCREF(Err_get_error()));
             }
             CB_setf(path, "%o/sort_dat_temp", seg_name);
-            self->temp_dat_out = Folder_Open_Out(folder, path);
-            if (!self->temp_dat_out) {
+            ivars->temp_dat_out = Folder_Open_Out(folder, path);
+            if (!ivars->temp_dat_out) {
                 DECREF(path);
                 RETHROW(INCREF(Err_get_error()));
             }
             DECREF(path);
         }
 
-        CharBuf *field = Seg_Field_Name(self->segment, field_num);
+        CharBuf *field = Seg_Field_Name(ivars->segment, field_num);
         field_writer
-            = SortFieldWriter_new(self->schema, self->snapshot, self->segment,
-                                  self->polyreader, field, self->mem_pool,
-                                  self->mem_thresh, self->temp_ord_out,
-                                  self->temp_ix_out, self->temp_dat_out);
-        VA_Store(self->field_writers, field_num, (Obj*)field_writer);
+            = SortFieldWriter_new(ivars->schema, ivars->snapshot, ivars->segment,
+                                  ivars->polyreader, field, ivars->mem_pool,
+                                  ivars->mem_thresh, ivars->temp_ord_out,
+                                  ivars->temp_ix_out, ivars->temp_dat_out);
+        VA_Store(ivars->field_writers, field_num, (Obj*)field_writer);
     }
     return field_writer;
 }
@@ -131,6 +135,7 @@ S_lazy_init_field_writer(SortWriter *self, int32_t field_num) {
 void
 SortWriter_add_inverted_doc(SortWriter *self, Inverter *inverter,
                             int32_t doc_id) {
+    SortWriterIVARS *const ivars = SortWriter_IVARS(self);
     int32_t field_num;
 
     Inverter_Iterate(inverter);
@@ -146,21 +151,22 @@ SortWriter_add_inverted_doc(SortWriter *self, Inverter *inverter,
 
     // If our SortFieldWriters have collectively passed the memory threshold,
     // flush all of them, then release all unique values with a single action.
-    if (MemPool_Get_Consumed(self->mem_pool) > self->mem_thresh) {
-        for (uint32_t i = 0; i < VA_Get_Size(self->field_writers); i++) {
+    if (MemPool_Get_Consumed(ivars->mem_pool) > ivars->mem_thresh) {
+        for (uint32_t i = 0; i < VA_Get_Size(ivars->field_writers); i++) {
             SortFieldWriter *const field_writer
-                = (SortFieldWriter*)VA_Fetch(self->field_writers, i);
+                = (SortFieldWriter*)VA_Fetch(ivars->field_writers, i);
             if (field_writer) { SortFieldWriter_Flush(field_writer); }
         }
-        MemPool_Release_All(self->mem_pool);
-        self->flush_at_finish = true;
+        MemPool_Release_All(ivars->mem_pool);
+        ivars->flush_at_finish = true;
     }
 }
 
 void
 SortWriter_add_segment(SortWriter *self, SegReader *reader,
                        I32Array *doc_map) {
-    VArray *fields = Schema_All_Fields(self->schema);
+    SortWriterIVARS *const ivars = SortWriter_IVARS(self);
+    VArray *fields = Schema_All_Fields(ivars->schema);
 
     // Proceed field-at-a-time, rather than doc-at-a-time.
     for (uint32_t i = 0, max = VA_Get_Size(fields); i < max; i++) {
@@ -171,11 +177,11 @@ SortWriter_add_segment(SortWriter *self, SegReader *reader,
                            ? SortReader_Fetch_Sort_Cache(sort_reader, field)
                            : NULL;
         if (cache) {
-            int32_t field_num = Seg_Field_Num(self->segment, field);
+            int32_t field_num = Seg_Field_Num(ivars->segment, field);
             SortFieldWriter *field_writer
                 = S_lazy_init_field_writer(self, field_num);
             SortFieldWriter_Add_Segment(field_writer, reader, doc_map, cache);
-            self->flush_at_finish = true;
+            ivars->flush_at_finish = true;
         }
     }
 
@@ -184,14 +190,15 @@ SortWriter_add_segment(SortWriter *self, SegReader *reader,
 
 void
 SortWriter_finish(SortWriter *self) {
-    VArray *const field_writers = self->field_writers;
+    SortWriterIVARS *const ivars = SortWriter_IVARS(self);
+    VArray *const field_writers = ivars->field_writers;
 
     // If we have no data, bail out.
-    if (!self->temp_ord_out) { return; }
+    if (!ivars->temp_ord_out) { return; }
 
     // If we've either flushed or added segments, flush everything so that any
     // one field can use the entire margin up to mem_thresh.
-    if (self->flush_at_finish) {
+    if (ivars->flush_at_finish) {
         for (uint32_t i = 1, max = VA_Get_Size(field_writers); i < max; i++) {
             SortFieldWriter *field_writer
                 = (SortFieldWriter*)VA_Fetch(field_writers, i);
@@ -202,26 +209,26 @@ SortWriter_finish(SortWriter *self) {
     }
 
     // Close down temp streams.
-    OutStream_Close(self->temp_ord_out);
-    OutStream_Close(self->temp_ix_out);
-    OutStream_Close(self->temp_dat_out);
+    OutStream_Close(ivars->temp_ord_out);
+    OutStream_Close(ivars->temp_ix_out);
+    OutStream_Close(ivars->temp_dat_out);
 
     for (uint32_t i = 1, max = VA_Get_Size(field_writers); i < max; i++) {
         SortFieldWriter *field_writer
             = (SortFieldWriter*)VA_Delete(field_writers, i);
         if (field_writer) {
-            CharBuf *field = Seg_Field_Name(self->segment, i);
+            CharBuf *field = Seg_Field_Name(ivars->segment, i);
             SortFieldWriter_Flip(field_writer);
             int32_t count = SortFieldWriter_Finish(field_writer);
-            Hash_Store(self->counts, (Obj*)field,
+            Hash_Store(ivars->counts, (Obj*)field,
                        (Obj*)CB_newf("%i32", count));
             int32_t null_ord = SortFieldWriter_Get_Null_Ord(field_writer);
             if (null_ord != -1) {
-                Hash_Store(self->null_ords, (Obj*)field,
+                Hash_Store(ivars->null_ords, (Obj*)field,
                            (Obj*)CB_newf("%i32", null_ord));
             }
             int32_t ord_width = SortFieldWriter_Get_Ord_Width(field_writer);
-            Hash_Store(self->ord_widths, (Obj*)field,
+            Hash_Store(ivars->ord_widths, (Obj*)field,
                        (Obj*)CB_newf("%i32", ord_width));
         }
 
@@ -230,12 +237,12 @@ SortWriter_finish(SortWriter *self) {
     VA_Clear(field_writers);
 
     // Store metadata.
-    Seg_Store_Metadata_Str(self->segment, "sort", 4,
+    Seg_Store_Metadata_Str(ivars->segment, "sort", 4,
                            (Obj*)SortWriter_Metadata(self));
 
     // Clean up.
-    Folder  *folder   = self->folder;
-    CharBuf *seg_name = Seg_Get_Name(self->segment);
+    Folder  *folder   = ivars->folder;
+    CharBuf *seg_name = Seg_Get_Name(ivars->segment);
     CharBuf *path     = CB_newf("%o/sort_ord_temp", seg_name);
     Folder_Delete(folder, path);
     CB_setf(path, "%o/sort_ix_temp", seg_name);
@@ -247,10 +254,11 @@ SortWriter_finish(SortWriter *self) {
 
 Hash*
 SortWriter_metadata(SortWriter *self) {
+    SortWriterIVARS *const ivars = SortWriter_IVARS(self);
     Hash *const metadata  = DataWriter_metadata((DataWriter*)self);
-    Hash_Store_Str(metadata, "counts", 6, INCREF(self->counts));
-    Hash_Store_Str(metadata, "null_ords", 9, INCREF(self->null_ords));
-    Hash_Store_Str(metadata, "ord_widths", 10, INCREF(self->ord_widths));
+    Hash_Store_Str(metadata, "counts", 6, INCREF(ivars->counts));
+    Hash_Store_Str(metadata, "null_ords", 9, INCREF(ivars->null_ords));
+    Hash_Store_Str(metadata, "ord_widths", 10, INCREF(ivars->ord_widths));
     return metadata;
 }
 

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/TermInfo.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/TermInfo.c b/core/Lucy/Index/TermInfo.c
index ef40da2..b4fc2a9 100644
--- a/core/Lucy/Index/TermInfo.c
+++ b/core/Lucy/Index/TermInfo.c
@@ -28,91 +28,98 @@ TInfo_new(int32_t doc_freq) {
 
 TermInfo*
 TInfo_init(TermInfo *self, int32_t doc_freq) {
-    self->doc_freq      = doc_freq;
-    self->post_filepos  = 0;
-    self->skip_filepos  = 0;
-    self->lex_filepos   = 0;
+    TermInfoIVARS *const ivars = TInfo_IVARS(self);
+    ivars->doc_freq      = doc_freq;
+    ivars->post_filepos  = 0;
+    ivars->skip_filepos  = 0;
+    ivars->lex_filepos   = 0;
     return self;
 }
 
 TermInfo*
 TInfo_clone(TermInfo *self) {
-    TermInfo *twin = TInfo_new(self->doc_freq);
-    twin->post_filepos = self->post_filepos;
-    twin->skip_filepos = self->skip_filepos;
-    twin->lex_filepos  = self->lex_filepos;
+    TermInfoIVARS *const ivars = TInfo_IVARS(self);
+    TermInfo *twin = TInfo_new(ivars->doc_freq);
+    TermInfoIVARS *const twin_ivars = TInfo_IVARS(twin);
+    twin_ivars->post_filepos = ivars->post_filepos;
+    twin_ivars->skip_filepos = ivars->skip_filepos;
+    twin_ivars->lex_filepos  = ivars->lex_filepos;
     return twin;
 }
 
 int32_t
 TInfo_get_doc_freq(TermInfo *self) {
-    return self->doc_freq;
+    return TInfo_IVARS(self)->doc_freq;
 }
 
 int64_t
 TInfo_get_lex_filepos(TermInfo *self) {
-    return self->lex_filepos;
+    return TInfo_IVARS(self)->lex_filepos;
 }
 
 int64_t
 TInfo_get_post_filepos(TermInfo *self) {
-    return self->post_filepos;
+    return TInfo_IVARS(self)->post_filepos;
 }
 
 int64_t
 TInfo_get_skip_filepos(TermInfo *self) {
-    return self->skip_filepos;
+    return TInfo_IVARS(self)->skip_filepos;
 }
 
 void
 TInfo_set_doc_freq(TermInfo *self, int32_t doc_freq) {
-    self->doc_freq = doc_freq;
+    TInfo_IVARS(self)->doc_freq = doc_freq;
 }
 
 void
 TInfo_set_lex_filepos(TermInfo *self, int64_t filepos) {
-    self->lex_filepos = filepos;
+    TInfo_IVARS(self)->lex_filepos = filepos;
 }
 
 void
 TInfo_set_post_filepos(TermInfo *self, int64_t filepos) {
-    self->post_filepos = filepos;
+    TInfo_IVARS(self)->post_filepos = filepos;
 }
 
 void
 TInfo_set_skip_filepos(TermInfo *self, int64_t filepos) {
-    self->skip_filepos = filepos;
+    TInfo_IVARS(self)->skip_filepos = filepos;
 }
 
 // TODO: this should probably be some sort of Dump variant rather than
 // To_String.
 CharBuf*
 TInfo_to_string(TermInfo *self) {
+    TermInfoIVARS *const ivars = TInfo_IVARS(self);
     return CB_newf(
                "doc freq:      %i32\n"
                "post filepos:  %i64\n"
                "skip filepos:  %i64\n"
                "index filepos: %i64",
-               self->doc_freq, self->post_filepos,
-               self->skip_filepos, self->lex_filepos
+               ivars->doc_freq, ivars->post_filepos,
+               ivars->skip_filepos, ivars->lex_filepos
            );
 }
 
 void
 TInfo_mimic(TermInfo *self, Obj *other) {
-    TermInfo *twin = (TermInfo*)CERTIFY(other, TERMINFO);
-    self->doc_freq     = twin->doc_freq;
-    self->post_filepos = twin->post_filepos;
-    self->skip_filepos = twin->skip_filepos;
-    self->lex_filepos  = twin->lex_filepos;
+    CERTIFY(other, TERMINFO);
+    TermInfoIVARS *const ivars = TInfo_IVARS(self);
+    TermInfoIVARS *const ovars = TInfo_IVARS((TermInfo*)other);
+    ivars->doc_freq     = ovars->doc_freq;
+    ivars->post_filepos = ovars->post_filepos;
+    ivars->skip_filepos = ovars->skip_filepos;
+    ivars->lex_filepos  = ovars->lex_filepos;
 }
 
 void
 TInfo_reset(TermInfo *self) {
-    self->doc_freq      = 0;
-    self->post_filepos  = 0;
-    self->skip_filepos  = 0;
-    self->lex_filepos   = 0;
+    TermInfoIVARS *const ivars = TInfo_IVARS(self);
+    ivars->doc_freq      = 0;
+    ivars->post_filepos  = 0;
+    ivars->skip_filepos  = 0;
+    ivars->lex_filepos   = 0;
 }
 
 

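For reference, the shape of the change these hunks apply: instance variables move out of direct self-> access and behind a per-class IVARS accessor, so a getter like TInfo_get_doc_freq() collapses to a one-line lookup. The standalone sketch below mirrors that pattern with hypothetical names (Counter, CounterIVARS, Counter_IVARS); the real accessors are generated by Clownfish and hide the ivars layout, whereas here the ivars struct is simply embedded for brevity.

#include <stdio.h>
#include <stdlib.h>

typedef struct CounterIVARS {
    int doc_freq;
} CounterIVARS;

typedef struct Counter {
    CounterIVARS ivars;   /* hypothetical: real code hides this layout */
} Counter;

static CounterIVARS*
Counter_IVARS(Counter *self) {
    return &self->ivars;
}

/* Getters and setters shrink to one-line IVARS lookups, as in
 * TInfo_get_doc_freq() / TInfo_set_doc_freq() above. */
static int
Counter_get_doc_freq(Counter *self) {
    return Counter_IVARS(self)->doc_freq;
}

static void
Counter_set_doc_freq(Counter *self, int doc_freq) {
    Counter_IVARS(self)->doc_freq = doc_freq;
}

int
main(void) {
    Counter *counter = calloc(1, sizeof(Counter));
    Counter_set_doc_freq(counter, 42);
    printf("doc_freq: %d\n", Counter_get_doc_freq(counter));
    free(counter);
    return 0;
}

The indirection means callers no longer bake in the object's memory layout; presumably only the generated accessor needs to know where a class's ivars actually live.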
http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/TermStepper.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/TermStepper.c b/core/Lucy/Index/TermStepper.c
index b78030e..466d7da 100644
--- a/core/Lucy/Index/TermStepper.c
+++ b/core/Lucy/Index/TermStepper.c
@@ -26,31 +26,35 @@
 TermStepper*
 TermStepper_init(TermStepper *self) {
     Stepper_init((Stepper*)self);
-    self->value = NULL;
+    TermStepperIVARS *const ivars = TermStepper_IVARS(self);
+    ivars->value = NULL;
     return self;
 }
 
 void
 TermStepper_destroy(TermStepper *self) {
-    DECREF(self->value);
+    TermStepperIVARS *const ivars = TermStepper_IVARS(self);
+    DECREF(ivars->value);
     SUPER_DESTROY(self, TERMSTEPPER);
 }
 
 void
 TermStepper_reset(TermStepper *self) {
-    DECREF(self->value);
-    self->value = NULL;
+    TermStepperIVARS *const ivars = TermStepper_IVARS(self);
+    DECREF(ivars->value);
+    ivars->value = NULL;
 }
 
 Obj*
 TermStepper_get_value(TermStepper *self) {
-    return self->value;
+    return TermStepper_IVARS(self)->value;
 }
 
 void
 TermStepper_set_value(TermStepper *self, Obj *value) {
-    DECREF(self->value);
-    self->value = value ? INCREF(value) : NULL;
+    TermStepperIVARS *const ivars = TermStepper_IVARS(self);
+    DECREF(ivars->value);
+    ivars->value = value ? INCREF(value) : NULL;
 }
 
 

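TermStepper_set_value() above also shows how ownership handling reads once the ivars pointer is fetched up front: release the old value, then store an incref'd copy of the new one (or NULL). A standalone sketch of that pattern, with hypothetical Holder and Blob names standing in for the Clownfish refcounting macros:

#include <stdio.h>
#include <stdlib.h>

typedef struct Blob {
    int         refcount;
    const char *text;
} Blob;

static Blob*
Blob_incref(Blob *blob) {
    if (blob) { blob->refcount++; }
    return blob;
}

static void
Blob_decref(Blob *blob) {
    if (blob && --blob->refcount == 0) { free(blob); }
}

typedef struct HolderIVARS {
    Blob *value;
} HolderIVARS;

typedef struct Holder {
    HolderIVARS ivars;
} Holder;

static HolderIVARS*
Holder_IVARS(Holder *self) {
    return &self->ivars;
}

/* Mirrors TermStepper_set_value(): drop the old value, own the new one. */
static void
Holder_set_value(Holder *self, Blob *value) {
    HolderIVARS *const ivars = Holder_IVARS(self);
    Blob_decref(ivars->value);
    ivars->value = value ? Blob_incref(value) : NULL;
}

int
main(void) {
    Blob *blob = malloc(sizeof(Blob));
    blob->refcount = 1;
    blob->text     = "term";
    Holder holder = {{ NULL }};
    Holder_set_value(&holder, blob);
    printf("held: %s (refcount %d)\n", Holder_IVARS(&holder)->value->text,
           blob->refcount);
    Blob_decref(blob);               /* caller's reference */
    Holder_set_value(&holder, NULL); /* holder's reference; frees the Blob */
    return 0;
}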
http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/TermVector.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/TermVector.c b/core/Lucy/Index/TermVector.c
index 4c3216b..4b0ee04 100644
--- a/core/Lucy/Index/TermVector.c
+++ b/core/Lucy/Index/TermVector.c
@@ -33,18 +33,20 @@ TV_new(const CharBuf *field, const CharBuf *text, I32Array *positions,
 TermVector*
 TV_init(TermVector *self, const CharBuf *field, const CharBuf *text,
         I32Array *positions, I32Array *start_offsets, I32Array *end_offsets) {
+    TermVectorIVARS *const ivars = TV_IVARS(self);
+
     // Assign.
-    self->field          = CB_Clone(field);
-    self->text           = CB_Clone(text);
-    self->num_pos        = I32Arr_Get_Size(positions);
-    self->positions      = (I32Array*)INCREF(positions);
-    self->start_offsets  = (I32Array*)INCREF(start_offsets);
-    self->end_offsets    = (I32Array*)INCREF(end_offsets);
-
-    if (I32Arr_Get_Size(start_offsets) != self->num_pos
-        || I32Arr_Get_Size(end_offsets) != self->num_pos
+    ivars->field          = CB_Clone(field);
+    ivars->text           = CB_Clone(text);
+    ivars->num_pos        = I32Arr_Get_Size(positions);
+    ivars->positions      = (I32Array*)INCREF(positions);
+    ivars->start_offsets  = (I32Array*)INCREF(start_offsets);
+    ivars->end_offsets    = (I32Array*)INCREF(end_offsets);
+
+    if (I32Arr_Get_Size(start_offsets) != ivars->num_pos
+        || I32Arr_Get_Size(end_offsets) != ivars->num_pos
        ) {
-        THROW(ERR, "Unbalanced arrays: %u32 %u32 %u32", self->num_pos,
+        THROW(ERR, "Unbalanced arrays: %u32 %u32 %u32", ivars->num_pos,
               I32Arr_Get_Size(start_offsets), I32Arr_Get_Size(end_offsets));
     }
 
@@ -53,40 +55,42 @@ TV_init(TermVector *self, const CharBuf *field, const CharBuf *text,
 
 void
 TV_destroy(TermVector *self) {
-    DECREF(self->field);
-    DECREF(self->text);
-    DECREF(self->positions);
-    DECREF(self->start_offsets);
-    DECREF(self->end_offsets);
+    TermVectorIVARS *const ivars = TV_IVARS(self);
+    DECREF(ivars->field);
+    DECREF(ivars->text);
+    DECREF(ivars->positions);
+    DECREF(ivars->start_offsets);
+    DECREF(ivars->end_offsets);
     SUPER_DESTROY(self, TERMVECTOR);
 }
 
 I32Array*
 TV_get_positions(TermVector *self) {
-    return self->positions;
+    return TV_IVARS(self)->positions;
 }
 
 I32Array*
 TV_get_start_offsets(TermVector *self) {
-    return self->start_offsets;
+    return TV_IVARS(self)->start_offsets;
 }
 
 I32Array*
 TV_get_end_offsets(TermVector *self) {
-    return self->end_offsets;
+    return TV_IVARS(self)->end_offsets;
 }
 
 void
 TV_serialize(TermVector *self, OutStream *target) {
-    int32_t *posits = self->positions->ints;
-    int32_t *starts = self->start_offsets->ints;
-    int32_t *ends   = self->start_offsets->ints;
+    TermVectorIVARS *const ivars = TV_IVARS(self);
+    int32_t *posits = I32Arr_IVARS(ivars->positions)->ints;
+    int32_t *starts = I32Arr_IVARS(ivars->start_offsets)->ints;
+    int32_t *ends   = I32Arr_IVARS(ivars->start_offsets)->ints;
 
-    Freezer_serialize_charbuf(self->field, target);
-    Freezer_serialize_charbuf(self->text, target);
-    OutStream_Write_C32(target, self->num_pos);
+    Freezer_serialize_charbuf(ivars->field, target);
+    Freezer_serialize_charbuf(ivars->text, target);
+    OutStream_Write_C32(target, ivars->num_pos);
 
-    for (uint32_t i = 0; i < self->num_pos; i++) {
+    for (uint32_t i = 0; i < ivars->num_pos; i++) {
         OutStream_Write_C32(target, posits[i]);
         OutStream_Write_C32(target, starts[i]);
         OutStream_Write_C32(target, ends[i]);
@@ -125,21 +129,20 @@ TV_deserialize(TermVector *self, InStream *instream) {
 
 bool
 TV_equals(TermVector *self, Obj *other) {
-    TermVector *const twin = (TermVector*)other;
-    int32_t *const posits       = self->positions->ints;
-    int32_t *const starts       = self->start_offsets->ints;
-    int32_t *const ends         = self->start_offsets->ints;
-    int32_t *const other_posits = twin->positions->ints;
-    int32_t *const other_starts = twin->start_offsets->ints;
-    int32_t *const other_ends   = twin->start_offsets->ints;
-
-    if (twin == self) { return true; }
-
-    if (!CB_Equals(self->field, (Obj*)twin->field)) { return false; }
-    if (!CB_Equals(self->text, (Obj*)twin->text))   { return false; }
-    if (self->num_pos != twin->num_pos)             { return false; }
-
-    for (uint32_t i = 0; i < self->num_pos; i++) {
+    if ((TermVector*)other == self) { return true; }
+    TermVectorIVARS *const ivars = TV_IVARS(self);
+    TermVectorIVARS *const ovars = TV_IVARS((TermVector*)other);
+    if (!CB_Equals(ivars->field, (Obj*)ovars->field)) { return false; }
+    if (!CB_Equals(ivars->text, (Obj*)ovars->text))   { return false; }
+    if (ivars->num_pos != ovars->num_pos)             { return false; }
+
+    int32_t *const posits       = I32Arr_IVARS(ivars->positions)->ints;
+    int32_t *const starts       = I32Arr_IVARS(ivars->start_offsets)->ints;
+    int32_t *const ends         = I32Arr_IVARS(ivars->start_offsets)->ints;
+    int32_t *const other_posits = I32Arr_IVARS(ovars->positions)->ints;
+    int32_t *const other_starts = I32Arr_IVARS(ovars->start_offsets)->ints;
+    int32_t *const other_ends   = I32Arr_IVARS(ovars->start_offsets)->ints;
+    for (uint32_t i = 0; i < ivars->num_pos; i++) {
         if (posits[i] != other_posits[i]) { return false; }
         if (starts[i] != other_starts[i]) { return false; }
         if (ends[i]   != other_ends[i])   { return false; }

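TInfo_mimic() and TV_equals() show the two-instance form of the idiom: fetch one IVARS pointer for self and one for the other object, then copy or compare field by field. A minimal sketch with hypothetical Point names:

#include <stdbool.h>
#include <stdio.h>

typedef struct PointIVARS {
    int x;
    int y;
} PointIVARS;

typedef struct Point {
    PointIVARS ivars;
} Point;

static PointIVARS*
Point_IVARS(Point *self) {
    return &self->ivars;
}

/* Same shape as TV_equals(): identity check first, then one IVARS pointer
 * per instance, then field-by-field comparison. */
static bool
Point_equals(Point *self, Point *other) {
    if (other == self) { return true; }
    PointIVARS *const ivars = Point_IVARS(self);
    PointIVARS *const ovars = Point_IVARS(other);
    return ivars->x == ovars->x && ivars->y == ovars->y;
}

int
main(void) {
    Point a = {{ 1, 2 }};
    Point b = {{ 1, 2 }};
    printf("equal: %s\n", Point_equals(&a, &b) ? "true" : "false");
    return 0;
}

Note that the rewritten TV_equals() performs the cheap identity and field checks before fetching the int arrays, which the old version pulled out up front.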
http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/ZombieKeyedHash.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/ZombieKeyedHash.c b/core/Lucy/Index/ZombieKeyedHash.c
index a69a5a7..512e2e5 100644
--- a/core/Lucy/Index/ZombieKeyedHash.c
+++ b/core/Lucy/Index/ZombieKeyedHash.c
@@ -26,33 +26,36 @@ ZKHash_new(MemoryPool *memory_pool, uint8_t primitive_id) {
     ZombieKeyedHash *self
         = (ZombieKeyedHash*)VTable_Make_Obj(ZOMBIEKEYEDHASH);
     Hash_init((Hash*)self, 0);
-    self->mem_pool = (MemoryPool*)INCREF(memory_pool);
-    self->prim_id  = primitive_id;
+    ZombieKeyedHashIVARS *const ivars = ZKHash_IVARS(self);
+    ivars->mem_pool = (MemoryPool*)INCREF(memory_pool);
+    ivars->prim_id  = primitive_id;
     return self;
 }
 
 void
 ZKHash_destroy(ZombieKeyedHash *self) {
-    DECREF(self->mem_pool);
+    ZombieKeyedHashIVARS *const ivars = ZKHash_IVARS(self);
+    DECREF(ivars->mem_pool);
     SUPER_DESTROY(self, ZOMBIEKEYEDHASH);
 }
 
 Obj*
 ZKHash_make_key(ZombieKeyedHash *self, Obj *key, int32_t hash_sum) {
+    ZombieKeyedHashIVARS *const ivars = ZKHash_IVARS(self);
     UNUSED_VAR(hash_sum);
     Obj *retval = NULL;
-    switch (self->prim_id & FType_PRIMITIVE_ID_MASK) {
+    switch (ivars->prim_id & FType_PRIMITIVE_ID_MASK) {
         case FType_TEXT: {
                 CharBuf *source = (CharBuf*)key;
                 size_t size = ZCB_size() + CB_Get_Size(source) + 1;
-                void *allocation = MemPool_grab(self->mem_pool, size);
+                void *allocation = MemPool_grab(ivars->mem_pool, size);
                 retval = (Obj*)ZCB_newf(allocation, size, "%o", source);
             }
             break;
         case FType_INT32: {
                 size_t size = VTable_Get_Obj_Alloc_Size(INTEGER32);
                 Integer32 *copy
-                    = (Integer32*)MemPool_grab(self->mem_pool, size);
+                    = (Integer32*)MemPool_grab(ivars->mem_pool, size);
                 VTable_Init_Obj(INTEGER32, copy);
                 Int32_init(copy, 0);
                 Int32_Mimic(copy, key);
@@ -62,7 +65,7 @@ ZKHash_make_key(ZombieKeyedHash *self, Obj *key, int32_t hash_sum) {
         case FType_INT64: {
                 size_t size = VTable_Get_Obj_Alloc_Size(INTEGER64);
                 Integer64 *copy
-                    = (Integer64*)MemPool_Grab(self->mem_pool, size);
+                    = (Integer64*)MemPool_Grab(ivars->mem_pool, size);
                 VTable_Init_Obj(INTEGER64, copy);
                 Int64_init(copy, 0);
                 Int64_Mimic(copy, key);
@@ -71,7 +74,7 @@ ZKHash_make_key(ZombieKeyedHash *self, Obj *key, int32_t hash_sum) {
             break;
         case FType_FLOAT32: {
                 size_t size = VTable_Get_Obj_Alloc_Size(FLOAT32);
-                Float32 *copy = (Float32*)MemPool_Grab(self->mem_pool, size);
+                Float32 *copy = (Float32*)MemPool_Grab(ivars->mem_pool, size);
                 VTable_Init_Obj(FLOAT32, copy);
                 Float32_init(copy, 0);
                 Float32_Mimic(copy, key);
@@ -80,7 +83,7 @@ ZKHash_make_key(ZombieKeyedHash *self, Obj *key, int32_t hash_sum) {
             break;
         case FType_FLOAT64: {
                 size_t size = VTable_Get_Obj_Alloc_Size(FLOAT64);
-                Float64 *copy = (Float64*)MemPool_Grab(self->mem_pool, size);
+                Float64 *copy = (Float64*)MemPool_Grab(ivars->mem_pool, size);
                 VTable_Init_Obj(FLOAT64, copy);
                 Float64_init(copy, 0);
                 Float64_Mimic(copy, key);
@@ -88,7 +91,7 @@ ZKHash_make_key(ZombieKeyedHash *self, Obj *key, int32_t hash_sum) {
             }
             break;
         default:
-            THROW(ERR, "Unrecognized primitive id: %i8", self->prim_id);
+            THROW(ERR, "Unrecognized primitive id: %i8", ivars->prim_id);
     }
 
     /* FIXME This is a hack.  It will leak memory if host objects get cached,


[lucy-commits] [2/9] Migrate Lucy's index classes to IVARS.

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/PostingPool.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/PostingPool.c b/core/Lucy/Index/PostingPool.c
index f2c1146..1580a6a 100644
--- a/core/Lucy/Index/PostingPool.c
+++ b/core/Lucy/Index/PostingPool.c
@@ -76,61 +76,63 @@ PostPool_init(PostingPool *self, Schema *schema, Snapshot *snapshot,
               OutStream *skip_out) {
     // Init.
     SortEx_init((SortExternal*)self, sizeof(Obj*));
-    self->doc_base         = 0;
-    self->last_doc_id      = 0;
-    self->doc_map          = NULL;
-    self->post_count       = 0;
-    self->lexicon          = NULL;
-    self->plist            = NULL;
-    self->lex_temp_in      = NULL;
-    self->post_temp_in     = NULL;
-    self->lex_start        = INT64_MAX;
-    self->post_start       = INT64_MAX;
-    self->lex_end          = 0;
-    self->post_end         = 0;
-    self->skip_stepper     = SkipStepper_new();
+    PostingPoolIVARS *const ivars = PostPool_IVARS(self);
+    ivars->doc_base         = 0;
+    ivars->last_doc_id      = 0;
+    ivars->doc_map          = NULL;
+    ivars->post_count       = 0;
+    ivars->lexicon          = NULL;
+    ivars->plist            = NULL;
+    ivars->lex_temp_in      = NULL;
+    ivars->post_temp_in     = NULL;
+    ivars->lex_start        = INT64_MAX;
+    ivars->post_start       = INT64_MAX;
+    ivars->lex_end          = 0;
+    ivars->post_end         = 0;
+    ivars->skip_stepper     = SkipStepper_new();
 
     // Assign.
-    self->schema         = (Schema*)INCREF(schema);
-    self->snapshot       = (Snapshot*)INCREF(snapshot);
-    self->segment        = (Segment*)INCREF(segment);
-    self->polyreader     = (PolyReader*)INCREF(polyreader);
-    self->lex_writer     = (LexiconWriter*)INCREF(lex_writer);
-    self->mem_pool       = (MemoryPool*)INCREF(mem_pool);
-    self->field          = CB_Clone(field);
-    self->lex_temp_out   = (OutStream*)INCREF(lex_temp_out);
-    self->post_temp_out  = (OutStream*)INCREF(post_temp_out);
-    self->skip_out       = (OutStream*)INCREF(skip_out);
+    ivars->schema         = (Schema*)INCREF(schema);
+    ivars->snapshot       = (Snapshot*)INCREF(snapshot);
+    ivars->segment        = (Segment*)INCREF(segment);
+    ivars->polyreader     = (PolyReader*)INCREF(polyreader);
+    ivars->lex_writer     = (LexiconWriter*)INCREF(lex_writer);
+    ivars->mem_pool       = (MemoryPool*)INCREF(mem_pool);
+    ivars->field          = CB_Clone(field);
+    ivars->lex_temp_out   = (OutStream*)INCREF(lex_temp_out);
+    ivars->post_temp_out  = (OutStream*)INCREF(post_temp_out);
+    ivars->skip_out       = (OutStream*)INCREF(skip_out);
 
     // Derive.
     Similarity *sim = Schema_Fetch_Sim(schema, field);
-    self->posting   = Sim_Make_Posting(sim);
-    self->type      = (FieldType*)INCREF(Schema_Fetch_Type(schema, field));
-    self->field_num = Seg_Field_Num(segment, field);
+    ivars->posting   = Sim_Make_Posting(sim);
+    ivars->type      = (FieldType*)INCREF(Schema_Fetch_Type(schema, field));
+    ivars->field_num = Seg_Field_Num(segment, field);
 
     return self;
 }
 
 void
 PostPool_destroy(PostingPool *self) {
-    DECREF(self->schema);
-    DECREF(self->snapshot);
-    DECREF(self->segment);
-    DECREF(self->polyreader);
-    DECREF(self->lex_writer);
-    DECREF(self->mem_pool);
-    DECREF(self->field);
-    DECREF(self->doc_map);
-    DECREF(self->lexicon);
-    DECREF(self->plist);
-    DECREF(self->lex_temp_out);
-    DECREF(self->post_temp_out);
-    DECREF(self->skip_out);
-    DECREF(self->lex_temp_in);
-    DECREF(self->post_temp_in);
-    DECREF(self->posting);
-    DECREF(self->skip_stepper);
-    DECREF(self->type);
+    PostingPoolIVARS *const ivars = PostPool_IVARS(self);
+    DECREF(ivars->schema);
+    DECREF(ivars->snapshot);
+    DECREF(ivars->segment);
+    DECREF(ivars->polyreader);
+    DECREF(ivars->lex_writer);
+    DECREF(ivars->mem_pool);
+    DECREF(ivars->field);
+    DECREF(ivars->doc_map);
+    DECREF(ivars->lexicon);
+    DECREF(ivars->plist);
+    DECREF(ivars->lex_temp_out);
+    DECREF(ivars->post_temp_out);
+    DECREF(ivars->skip_out);
+    DECREF(ivars->lex_temp_in);
+    DECREF(ivars->post_temp_in);
+    DECREF(ivars->posting);
+    DECREF(ivars->skip_stepper);
+    DECREF(ivars->type);
     SUPER_DESTROY(self, POSTINGPOOL);
 }
 
@@ -159,27 +161,28 @@ PostPool_compare(PostingPool *self, void *va, void *vb) {
 
 MemoryPool*
 PostPool_get_mem_pool(PostingPool *self) {
-    return self->mem_pool;
+    return PostPool_IVARS(self)->mem_pool;
 }
 
 void
 PostPool_flip(PostingPool *self) {
-    uint32_t num_runs   = VA_Get_Size(self->runs);
+    PostingPoolIVARS *const ivars = PostPool_IVARS(self);
+    uint32_t num_runs   = VA_Get_Size(ivars->runs);
     uint32_t sub_thresh = num_runs > 0
-                          ? self->mem_thresh / num_runs
-                          : self->mem_thresh;
+                          ? ivars->mem_thresh / num_runs
+                          : ivars->mem_thresh;
 
     if (num_runs) {
-        Folder  *folder = PolyReader_Get_Folder(self->polyreader);
-        CharBuf *seg_name = Seg_Get_Name(self->segment);
+        Folder  *folder = PolyReader_Get_Folder(ivars->polyreader);
+        CharBuf *seg_name = Seg_Get_Name(ivars->segment);
         CharBuf *lex_temp_path  = CB_newf("%o/lextemp", seg_name);
         CharBuf *post_temp_path = CB_newf("%o/ptemp", seg_name);
-        self->lex_temp_in = Folder_Open_In(folder, lex_temp_path);
-        if (!self->lex_temp_in) {
+        ivars->lex_temp_in = Folder_Open_In(folder, lex_temp_path);
+        if (!ivars->lex_temp_in) {
             RETHROW(INCREF(Err_get_error()));
         }
-        self->post_temp_in = Folder_Open_In(folder, post_temp_path);
-        if (!self->post_temp_in) {
+        ivars->post_temp_in = Folder_Open_In(folder, post_temp_path);
+        if (!ivars->post_temp_in) {
             RETHROW(INCREF(Err_get_error()));
         }
         DECREF(lex_temp_path);
@@ -187,44 +190,45 @@ PostPool_flip(PostingPool *self) {
     }
 
     PostPool_Sort_Cache(self);
-    if (num_runs && (self->cache_max - self->cache_tick) > 0) {
+    if (num_runs && (ivars->cache_max - ivars->cache_tick) > 0) {
         uint32_t num_items = PostPool_Cache_Count(self);
         // Cheap imitation of flush. FIXME.
         PostingPool *run
-            = PostPool_new(self->schema, self->snapshot, self->segment,
-                           self->polyreader, self->field, self->lex_writer,
-                           self->mem_pool, self->lex_temp_out,
-                           self->post_temp_out, self->skip_out);
+            = PostPool_new(ivars->schema, ivars->snapshot, ivars->segment,
+                           ivars->polyreader, ivars->field, ivars->lex_writer,
+                           ivars->mem_pool, ivars->lex_temp_out,
+                           ivars->post_temp_out, ivars->skip_out);
         PostPool_Grow_Cache(run, num_items);
-        memcpy(run->cache, ((Obj**)self->cache) + self->cache_tick,
+        memcpy(run->cache, ((Obj**)ivars->cache) + ivars->cache_tick,
                num_items * sizeof(Obj*));
         run->cache_max = num_items;
         PostPool_Add_Run(self, (SortExternal*)run);
-        self->cache_tick = 0;
-        self->cache_max = 0;
+        ivars->cache_tick = 0;
+        ivars->cache_max = 0;
     }
 
     // Assign.
     for (uint32_t i = 0; i < num_runs; i++) {
-        PostingPool *run = (PostingPool*)VA_Fetch(self->runs, i);
+        PostingPool *run = (PostingPool*)VA_Fetch(ivars->runs, i);
         if (run != NULL) {
             PostPool_Set_Mem_Thresh(run, sub_thresh);
             if (!run->lexicon) {
-                S_fresh_flip(run, self->lex_temp_in, self->post_temp_in);
+                S_fresh_flip(run, ivars->lex_temp_in, ivars->post_temp_in);
             }
         }
     }
 
-    self->flipped = true;
+    ivars->flipped = true;
 }
 
 void
 PostPool_add_segment(PostingPool *self, SegReader *reader, I32Array *doc_map,
                      int32_t doc_base) {
+    PostingPoolIVARS *const ivars = PostPool_IVARS(self);
     LexiconReader *lex_reader = (LexiconReader*)SegReader_Fetch(
                                     reader, VTable_Get_Name(LEXICONREADER));
     Lexicon *lexicon = lex_reader
-                       ? LexReader_Lexicon(lex_reader, self->field, NULL)
+                       ? LexReader_Lexicon(lex_reader, ivars->field, NULL)
                        : NULL;
 
     if (lexicon) {
@@ -232,17 +236,17 @@ PostPool_add_segment(PostingPool *self, SegReader *reader, I32Array *doc_map,
             = (PostingListReader*)SegReader_Fetch(
                   reader, VTable_Get_Name(POSTINGLISTREADER));
         PostingList *plist = plist_reader
-                             ? PListReader_Posting_List(plist_reader, self->field, NULL)
+                             ? PListReader_Posting_List(plist_reader, ivars->field, NULL)
                              : NULL;
         if (!plist) {
             THROW(ERR, "Got a Lexicon but no PostingList for '%o' in '%o'",
-                  self->field, SegReader_Get_Seg_Name(reader));
+                  ivars->field, SegReader_Get_Seg_Name(reader));
         }
         PostingPool *run
-            = PostPool_new(self->schema, self->snapshot, self->segment,
-                           self->polyreader, self->field, self->lex_writer,
-                           self->mem_pool, self->lex_temp_out,
-                           self->post_temp_out, self->skip_out);
+            = PostPool_new(ivars->schema, ivars->snapshot, ivars->segment,
+                           ivars->polyreader, ivars->field, ivars->lex_writer,
+                           ivars->mem_pool, ivars->lex_temp_out,
+                           ivars->post_temp_out, ivars->skip_out);
         run->lexicon  = lexicon;
         run->plist    = plist;
         run->doc_base = doc_base;
@@ -253,28 +257,29 @@ PostPool_add_segment(PostingPool *self, SegReader *reader, I32Array *doc_map,
 
 void
 PostPool_shrink(PostingPool *self) {
-    if (self->cache_max - self->cache_tick > 0) {
+    PostingPoolIVARS *const ivars = PostPool_IVARS(self);
+    if (ivars->cache_max - ivars->cache_tick > 0) {
         size_t cache_count = PostPool_Cache_Count(self);
         size_t size        = cache_count * sizeof(Obj*);
-        if (self->cache_tick > 0) {
-            Obj **start = ((Obj**)self->cache) + self->cache_tick;
-            memmove(self->cache, start, size);
+        if (ivars->cache_tick > 0) {
+            Obj **start = ((Obj**)ivars->cache) + ivars->cache_tick;
+            memmove(ivars->cache, start, size);
         }
-        self->cache      = (uint8_t*)REALLOCATE(self->cache, size);
-        self->cache_tick = 0;
-        self->cache_max  = cache_count;
-        self->cache_cap  = cache_count;
+        ivars->cache      = (uint8_t*)REALLOCATE(ivars->cache, size);
+        ivars->cache_tick = 0;
+        ivars->cache_max  = cache_count;
+        ivars->cache_cap  = cache_count;
     }
     else {
-        FREEMEM(self->cache);
-        self->cache      = NULL;
-        self->cache_tick = 0;
-        self->cache_max  = 0;
-        self->cache_cap  = 0;
+        FREEMEM(ivars->cache);
+        ivars->cache      = NULL;
+        ivars->cache_tick = 0;
+        ivars->cache_max  = 0;
+        ivars->cache_cap  = 0;
     }
-    self->scratch_cap = 0;
-    FREEMEM(self->scratch);
-    self->scratch = NULL;
+    ivars->scratch_cap = 0;
+    FREEMEM(ivars->scratch);
+    ivars->scratch = NULL;
 
     // It's not necessary to iterate over the runs, because they don't have
     // any cache costs until Refill() gets called.
@@ -282,36 +287,38 @@ PostPool_shrink(PostingPool *self) {
 
 void
 PostPool_flush(PostingPool *self) {
+    PostingPoolIVARS *const ivars = PostPool_IVARS(self);
+
     // Don't add a run unless we have data to put in it.
     if (PostPool_Cache_Count(self) == 0) { return; }
 
     PostingPool *run
-        = PostPool_new(self->schema, self->snapshot, self->segment,
-                       self->polyreader, self->field, self->lex_writer,
-                       self->mem_pool, self->lex_temp_out,
-                       self->post_temp_out, self->skip_out);
+        = PostPool_new(ivars->schema, ivars->snapshot, ivars->segment,
+                       ivars->polyreader, ivars->field, ivars->lex_writer,
+                       ivars->mem_pool, ivars->lex_temp_out,
+                       ivars->post_temp_out, ivars->skip_out);
     PostingWriter *post_writer
-        = (PostingWriter*)RawPostWriter_new(self->schema, self->snapshot,
-                                            self->segment, self->polyreader,
-                                            self->post_temp_out);
+        = (PostingWriter*)RawPostWriter_new(ivars->schema, ivars->snapshot,
+                                            ivars->segment, ivars->polyreader,
+                                            ivars->post_temp_out);
 
     // Borrow the cache.
-    run->cache      = self->cache;
-    run->cache_tick = self->cache_tick;
-    run->cache_max  = self->cache_max;
-    run->cache_cap  = self->cache_cap;
+    run->cache      = ivars->cache;
+    run->cache_tick = ivars->cache_tick;
+    run->cache_max  = ivars->cache_max;
+    run->cache_cap  = ivars->cache_cap;
 
     // Write to temp files.
-    LexWriter_Enter_Temp_Mode(self->lex_writer, self->field,
-                              self->lex_temp_out);
-    run->lex_start  = OutStream_Tell(self->lex_temp_out);
-    run->post_start = OutStream_Tell(self->post_temp_out);
+    LexWriter_Enter_Temp_Mode(ivars->lex_writer, ivars->field,
+                              ivars->lex_temp_out);
+    run->lex_start  = OutStream_Tell(ivars->lex_temp_out);
+    run->post_start = OutStream_Tell(ivars->post_temp_out);
     PostPool_Sort_Cache(self);
     S_write_terms_and_postings(run, post_writer, NULL);
 
-    run->lex_end  = OutStream_Tell(self->lex_temp_out);
-    run->post_end = OutStream_Tell(self->post_temp_out);
-    LexWriter_Leave_Temp_Mode(self->lex_writer);
+    run->lex_end  = OutStream_Tell(ivars->lex_temp_out);
+    run->post_end = OutStream_Tell(ivars->post_temp_out);
+    LexWriter_Leave_Temp_Mode(ivars->lex_writer);
 
     // Return the cache and empty it.
     run->cache      = NULL;
@@ -328,32 +335,39 @@ PostPool_flush(PostingPool *self) {
 
 void
 PostPool_finish(PostingPool *self) {
+    PostingPoolIVARS *const ivars = PostPool_IVARS(self);
+
     // Bail if there's no data.
     if (!PostPool_Peek(self)) { return; }
 
-    Similarity *sim = Schema_Fetch_Sim(self->schema, self->field);
+    Similarity *sim = Schema_Fetch_Sim(ivars->schema, ivars->field);
     PostingWriter *post_writer
-        = Sim_Make_Posting_Writer(sim, self->schema, self->snapshot,
-                                  self->segment, self->polyreader,
-                                  self->field_num);
-    LexWriter_Start_Field(self->lex_writer, self->field_num);
-    S_write_terms_and_postings(self, post_writer, self->skip_out);
-    LexWriter_Finish_Field(self->lex_writer, self->field_num);
+        = Sim_Make_Posting_Writer(sim, ivars->schema, ivars->snapshot,
+                                  ivars->segment, ivars->polyreader,
+                                  ivars->field_num);
+    LexWriter_Start_Field(ivars->lex_writer, ivars->field_num);
+    S_write_terms_and_postings(self, post_writer, ivars->skip_out);
+    LexWriter_Finish_Field(ivars->lex_writer, ivars->field_num);
     DECREF(post_writer);
 }
 
 static void
 S_write_terms_and_postings(PostingPool *self, PostingWriter *post_writer,
                            OutStream *skip_stream) {
-    TermInfo      *const tinfo          = TInfo_new(0);
-    TermInfo      *const skip_tinfo     = TInfo_new(0);
-    CharBuf       *const last_term_text = CB_new(0);
-    LexiconWriter *const lex_writer     = self->lex_writer;
-    SkipStepper   *const skip_stepper   = self->skip_stepper;
-    int32_t        last_skip_doc        = 0;
-    int64_t        last_skip_filepos    = 0;
+    PostingPoolIVARS *const ivars = PostPool_IVARS(self);
+    TermInfo      *const tinfo            = TInfo_new(0);
+    TermInfo      *const skip_tinfo       = TInfo_new(0);
+    TermInfoIVARS *const tinfo_ivars      = TInfo_IVARS(tinfo);
+    TermInfoIVARS *const skip_tinfo_ivars = TInfo_IVARS(skip_tinfo);
+    CharBuf       *const last_term_text   = CB_new(0);
+    LexiconWriter *const lex_writer       = ivars->lex_writer;
+    SkipStepper   *const skip_stepper     = ivars->skip_stepper;
+    SkipStepperIVARS *const skip_stepper_ivars
+        = SkipStepper_IVARS(skip_stepper);
+    int32_t        last_skip_doc          = 0;
+    int64_t        last_skip_filepos      = 0;
     const int32_t  skip_interval
-        = Arch_Skip_Interval(Schema_Get_Architecture(self->schema));
+        = Arch_Skip_Interval(Schema_Get_Architecture(ivars->schema));
 
     // Prime heldover variables.
     RawPosting *posting = (RawPosting*)CERTIFY(
@@ -400,10 +414,10 @@ S_write_terms_and_postings(PostingPool *self, PostingWriter *post_writer,
             PostWriter_Start_Term(post_writer, tinfo);
 
             // Init skip data in preparation for the next term.
-            skip_stepper->doc_id  = 0;
-            skip_stepper->filepos = tinfo->post_filepos;
+            skip_stepper_ivars->doc_id  = 0;
+            skip_stepper_ivars->filepos = tinfo_ivars->post_filepos;
             last_skip_doc         = 0;
-            last_skip_filepos     = tinfo->post_filepos;
+            last_skip_filepos     = tinfo_ivars->post_filepos;
 
             // Remember the term_text so we can write string diffs.
             CB_Mimic_Str(last_term_text, posting->blob,
@@ -419,24 +433,24 @@ S_write_terms_and_postings(PostingPool *self, PostingWriter *post_writer,
         PostWriter_Write_Posting(post_writer, posting);
 
         // Doc freq lags by one iter.
-        tinfo->doc_freq++;
+        tinfo_ivars->doc_freq++;
 
         //  Write skip data.
         if (skip_stream != NULL
             && same_text_as_last
-            && tinfo->doc_freq % skip_interval == 0
-            && tinfo->doc_freq != 0
+            && tinfo_ivars->doc_freq % skip_interval == 0
+            && tinfo_ivars->doc_freq != 0
            ) {
             // If first skip group, save skip stream pos for term info.
-            if (tinfo->doc_freq == skip_interval) {
-                tinfo->skip_filepos = OutStream_Tell(skip_stream);
+            if (tinfo_ivars->doc_freq == skip_interval) {
+                tinfo_ivars->skip_filepos = OutStream_Tell(skip_stream);
             }
             // Write deltas.
-            last_skip_doc         = skip_stepper->doc_id;
-            last_skip_filepos     = skip_stepper->filepos;
-            skip_stepper->doc_id  = posting->doc_id;
+            last_skip_doc               = skip_stepper_ivars->doc_id;
+            last_skip_filepos           = skip_stepper_ivars->filepos;
+            skip_stepper_ivars->doc_id  = posting->doc_id;
             PostWriter_Update_Skip_Info(post_writer, skip_tinfo);
-            skip_stepper->filepos = skip_tinfo->post_filepos;
+            skip_stepper_ivars->filepos = skip_tinfo_ivars->post_filepos;
             SkipStepper_Write_Record(skip_stepper, skip_stream,
                                      last_skip_doc, last_skip_filepos);
         }
@@ -458,45 +472,47 @@ S_write_terms_and_postings(PostingPool *self, PostingWriter *post_writer,
 
 uint32_t
 PostPool_refill(PostingPool *self) {
-    Lexicon *const     lexicon     = self->lexicon;
-    PostingList *const plist       = self->plist;
-    I32Array    *const doc_map     = self->doc_map;
-    const uint32_t     mem_thresh  = self->mem_thresh;
-    const int32_t      doc_base    = self->doc_base;
+    PostingPoolIVARS *const ivars = PostPool_IVARS(self);
+    Lexicon *const     lexicon     = ivars->lexicon;
+    PostingList *const plist       = ivars->plist;
+    I32Array    *const doc_map     = ivars->doc_map;
+    const uint32_t     mem_thresh  = ivars->mem_thresh;
+    const int32_t      doc_base    = ivars->doc_base;
     uint32_t           num_elems   = 0; // number of items recovered
-    MemoryPool        *mem_pool;
     CharBuf           *term_text   = NULL;
 
-    if (self->lexicon == NULL) { return 0; }
+    if (ivars->lexicon == NULL) { return 0; }
     else { term_text = (CharBuf*)Lex_Get_Term(lexicon); }
 
     // Make sure cache is empty.
-    if (self->cache_max - self->cache_tick > 0) {
+    if (ivars->cache_max - ivars->cache_tick > 0) {
         THROW(ERR, "Refill called but cache contains %u32 items",
-              self->cache_max - self->cache_tick);
+              ivars->cache_max - ivars->cache_tick);
     }
-    self->cache_max  = 0;
-    self->cache_tick = 0;
+    ivars->cache_max  = 0;
+    ivars->cache_tick = 0;
 
     // Ditch old MemoryPool and get another.
-    DECREF(self->mem_pool);
-    self->mem_pool = MemPool_new(0);
-    mem_pool       = self->mem_pool;
+    DECREF(ivars->mem_pool);
+    ivars->mem_pool = MemPool_new(0);
+    MemoryPool *const mem_pool = ivars->mem_pool;
+    MemoryPoolIVARS *const mem_pool_ivars = MemPool_IVARS(mem_pool);
+
 
     while (1) {
         RawPosting *raw_posting;
 
-        if (self->post_count == 0) {
+        if (ivars->post_count == 0) {
             // Read a term.
             if (Lex_Next(lexicon)) {
-                self->post_count = Lex_Doc_Freq(lexicon);
+                ivars->post_count = Lex_Doc_Freq(lexicon);
                 term_text = (CharBuf*)Lex_Get_Term(lexicon);
                 if (term_text && !Obj_Is_A((Obj*)term_text, CHARBUF)) {
                     THROW(ERR, "Only CharBuf terms are supported for now");
                 }
                 Posting *posting = PList_Get_Posting(plist);
                 Post_Set_Doc_ID(posting, doc_base);
-                self->last_doc_id = doc_base;
+                ivars->last_doc_id = doc_base;
             }
             // Bail if we've read everything in this run.
             else {
@@ -505,15 +521,15 @@ PostPool_refill(PostingPool *self) {
         }
 
         // Bail if we've hit the ceiling for this run's cache.
-        if (mem_pool->consumed >= mem_thresh && num_elems > 0) {
+        if (mem_pool_ivars->consumed >= mem_thresh && num_elems > 0) {
             break;
         }
 
         // Read a posting from the input stream.
-        raw_posting = PList_Read_Raw(plist, self->last_doc_id, term_text,
+        raw_posting = PList_Read_Raw(plist, ivars->last_doc_id, term_text,
                                      mem_pool);
-        self->last_doc_id = raw_posting->doc_id;
-        self->post_count--;
+        ivars->last_doc_id = raw_posting->doc_id;
+        ivars->post_count--;
 
         // Skip deletions.
         if (doc_map != NULL) {
@@ -526,18 +542,18 @@ PostPool_refill(PostingPool *self) {
         }
 
         // Add to the run's cache.
-        if (num_elems >= self->cache_cap) {
+        if (num_elems >= ivars->cache_cap) {
             size_t new_cap = Memory_oversize(num_elems + 1, sizeof(Obj*));
             PostPool_Grow_Cache(self, new_cap);
         }
-        Obj **cache = (Obj**)self->cache;
+        Obj **cache = (Obj**)ivars->cache;
         cache[num_elems] = (Obj*)raw_posting;
         num_elems++;
     }
 
     // Reset the cache array position and length; remember file pos.
-    self->cache_max   = num_elems;
-    self->cache_tick  = 0;
+    ivars->cache_max   = num_elems;
+    ivars->cache_tick  = 0;
 
     return num_elems;
 }
@@ -545,46 +561,48 @@ PostPool_refill(PostingPool *self) {
 void
 PostPool_add_inversion(PostingPool *self, Inversion *inversion, int32_t doc_id,
                        float doc_boost, float length_norm) {
-    Post_Add_Inversion_To_Pool(self->posting, self, inversion, self->type,
+    PostingPoolIVARS *const ivars = PostPool_IVARS(self);
+    Post_Add_Inversion_To_Pool(ivars->posting, self, inversion, ivars->type,
                                doc_id, doc_boost, length_norm);
 }
 
 static void
 S_fresh_flip(PostingPool *self, InStream *lex_temp_in,
              InStream *post_temp_in) {
-    if (self->flipped) { THROW(ERR, "Can't Flip twice"); }
-    self->flipped = true;
+    PostingPoolIVARS *const ivars = PostPool_IVARS(self);
+    if (ivars->flipped) { THROW(ERR, "Can't Flip twice"); }
+    ivars->flipped = true;
 
     // Sort RawPostings in cache, if any.
     PostPool_Sort_Cache(self);
 
     // Bail if never flushed.
-    if (self->lex_end == 0) { return; }
+    if (ivars->lex_end == 0) { return; }
 
     // Get a Lexicon.
     CharBuf *lex_alias = CB_newf("%o-%i64-to-%i64",
                                  InStream_Get_Filename(lex_temp_in),
-                                 self->lex_start, self->lex_end);
+                                 ivars->lex_start, ivars->lex_end);
     InStream *lex_temp_in_dupe = InStream_Reopen(
-                                     lex_temp_in, lex_alias, self->lex_start,
-                                     self->lex_end - self->lex_start);
-    self->lexicon = (Lexicon*)RawLex_new(
-                        self->schema, self->field, lex_temp_in_dupe, 0,
-                        self->lex_end - self->lex_start);
+                                     lex_temp_in, lex_alias, ivars->lex_start,
+                                     ivars->lex_end - ivars->lex_start);
+    ivars->lexicon = (Lexicon*)RawLex_new(
+                        ivars->schema, ivars->field, lex_temp_in_dupe, 0,
+                        ivars->lex_end - ivars->lex_start);
     DECREF(lex_alias);
     DECREF(lex_temp_in_dupe);
 
     // Get a PostingList.
     CharBuf *post_alias
         = CB_newf("%o-%i64-to-%i64", InStream_Get_Filename(post_temp_in),
-                  self->post_start, self->post_end);
+                  ivars->post_start, ivars->post_end);
     InStream *post_temp_in_dupe
-        = InStream_Reopen(post_temp_in, post_alias, self->post_start,
-                          self->post_end - self->post_start);
-    self->plist
-        = (PostingList*)RawPList_new(self->schema, self->field,
+        = InStream_Reopen(post_temp_in, post_alias, ivars->post_start,
+                          ivars->post_end - ivars->post_start);
+    ivars->plist
+        = (PostingList*)RawPList_new(ivars->schema, ivars->field,
                                      post_temp_in_dupe, 0,
-                                     self->post_end - self->post_start);
+                                     ivars->post_end - ivars->post_start);
     DECREF(post_alias);
     DECREF(post_temp_in_dupe);
 }

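The PostingPool hunks add two more habits worth noting: IVARS pointers that will be used repeatedly get looked up once at the top of a function (S_write_terms_and_postings() caches tinfo_ivars and skip_stepper_ivars before its main loop), and a field of another object is read through that class's accessor rather than a bare pointer dereference (PostPool_refill() checks MemPool_IVARS(mem_pool)->consumed). A standalone sketch of the hoisting habit, with hypothetical Accum names:

#include <stdio.h>

typedef struct AccumIVARS {
    long total;
} AccumIVARS;

typedef struct Accum {
    AccumIVARS ivars;
} Accum;

static AccumIVARS*
Accum_IVARS(Accum *self) {
    return &self->ivars;
}

static void
Accum_add_many(Accum *self, const int *values, int count) {
    /* One IVARS lookup up front, reused inside the loop, as the PostingPool
     * code does with tinfo_ivars and skip_stepper_ivars. */
    AccumIVARS *const ivars = Accum_IVARS(self);
    for (int i = 0; i < count; i++) {
        ivars->total += values[i];
    }
}

int
main(void) {
    Accum acc = {{ 0 }};
    int values[] = { 1, 2, 3, 4 };
    Accum_add_many(&acc, values, 4);
    printf("total: %ld\n", Accum_IVARS(&acc)->total);
    return 0;
}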
http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/RawLexicon.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/RawLexicon.c b/core/Lucy/Index/RawLexicon.c
index eef6eac..49e4e20 100644
--- a/core/Lucy/Index/RawLexicon.c
+++ b/core/Lucy/Index/RawLexicon.c
@@ -37,47 +37,52 @@ RawLex_init(RawLexicon *self, Schema *schema, const CharBuf *field,
             InStream *instream, int64_t start, int64_t end) {
     FieldType *type = Schema_Fetch_Type(schema, field);
     Lex_init((Lexicon*)self, field);
+    RawLexiconIVARS *const ivars = RawLex_IVARS(self);
 
     // Assign
-    self->start = start;
-    self->end   = end;
-    self->len   = end - start;
-    self->instream = (InStream*)INCREF(instream);
+    ivars->start = start;
+    ivars->end   = end;
+    ivars->len   = end - start;
+    ivars->instream = (InStream*)INCREF(instream);
 
     // Get ready to begin.
-    InStream_Seek(self->instream, self->start);
+    InStream_Seek(ivars->instream, ivars->start);
 
     // Get steppers.
-    self->term_stepper  = FType_Make_Term_Stepper(type);
-    self->tinfo_stepper = (TermStepper*)MatchTInfoStepper_new(schema);
+    ivars->term_stepper  = FType_Make_Term_Stepper(type);
+    ivars->tinfo_stepper = (TermStepper*)MatchTInfoStepper_new(schema);
 
     return self;
 }
 
 void
 RawLex_destroy(RawLexicon *self) {
-    DECREF(self->instream);
-    DECREF(self->term_stepper);
-    DECREF(self->tinfo_stepper);
+    RawLexiconIVARS *const ivars = RawLex_IVARS(self);
+    DECREF(ivars->instream);
+    DECREF(ivars->term_stepper);
+    DECREF(ivars->tinfo_stepper);
     SUPER_DESTROY(self, RAWLEXICON);
 }
 
 bool
 RawLex_next(RawLexicon *self) {
-    if (InStream_Tell(self->instream) >= self->len) { return false; }
-    TermStepper_Read_Delta(self->term_stepper, self->instream);
-    TermStepper_Read_Delta(self->tinfo_stepper, self->instream);
+    RawLexiconIVARS *const ivars = RawLex_IVARS(self);
+    if (InStream_Tell(ivars->instream) >= ivars->len) { return false; }
+    TermStepper_Read_Delta(ivars->term_stepper, ivars->instream);
+    TermStepper_Read_Delta(ivars->tinfo_stepper, ivars->instream);
     return true;
 }
 
 Obj*
 RawLex_get_term(RawLexicon *self) {
-    return TermStepper_Get_Value(self->term_stepper);
+    RawLexiconIVARS *const ivars = RawLex_IVARS(self);
+    return TermStepper_Get_Value(ivars->term_stepper);
 }
 
 int32_t
 RawLex_doc_freq(RawLexicon *self) {
-    TermInfo *tinfo = (TermInfo*)TermStepper_Get_Value(self->tinfo_stepper);
+    RawLexiconIVARS *const ivars = RawLex_IVARS(self);
+    TermInfo *tinfo = (TermInfo*)TermStepper_Get_Value(ivars->tinfo_stepper);
     return tinfo ? TInfo_Get_Doc_Freq(tinfo) : 0;
 }
 

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/RawPostingList.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/RawPostingList.c b/core/Lucy/Index/RawPostingList.c
index a22a015..9d5dd0a 100644
--- a/core/Lucy/Index/RawPostingList.c
+++ b/core/Lucy/Index/RawPostingList.c
@@ -36,32 +36,35 @@ RawPostingList*
 RawPList_init(RawPostingList *self, Schema *schema, const CharBuf *field,
               InStream *instream, int64_t start, int64_t end) {
     PList_init((PostingList*)self);
-    self->start     = start;
-    self->end       = end;
-    self->len       = end - start;
-    self->instream  = (InStream*)INCREF(instream);
-    Similarity *sim = Schema_Fetch_Sim(schema, field);
-    self->posting   = Sim_Make_Posting(sim);
-    InStream_Seek(self->instream, self->start);
+    RawPostingListIVARS *const ivars = RawPList_IVARS(self);
+    ivars->start     = start;
+    ivars->end       = end;
+    ivars->len       = end - start;
+    ivars->instream  = (InStream*)INCREF(instream);
+    Similarity *sim  = Schema_Fetch_Sim(schema, field);
+    ivars->posting   = Sim_Make_Posting(sim);
+    InStream_Seek(ivars->instream, ivars->start);
     return self;
 }
 
 void
 RawPList_destroy(RawPostingList *self) {
-    DECREF(self->instream);
-    DECREF(self->posting);
+    RawPostingListIVARS *const ivars = RawPList_IVARS(self);
+    DECREF(ivars->instream);
+    DECREF(ivars->posting);
     SUPER_DESTROY(self, RAWPOSTINGLIST);
 }
 
 Posting*
 RawPList_get_posting(RawPostingList *self) {
-    return self->posting;
+    return RawPList_IVARS(self)->posting;
 }
 
 RawPosting*
 RawPList_read_raw(RawPostingList *self, int32_t last_doc_id, CharBuf *term_text,
                   MemoryPool *mem_pool) {
-    return Post_Read_Raw(self->posting, self->instream,
+    RawPostingListIVARS *const ivars = RawPList_IVARS(self);
+    return Post_Read_Raw(ivars->posting, ivars->instream,
                          last_doc_id, term_text, mem_pool);
 }
 

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/SegLexicon.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/SegLexicon.c b/core/Lucy/Index/SegLexicon.c
index 06704f2..3a6523f 100644
--- a/core/Lucy/Index/SegLexicon.c
+++ b/core/Lucy/Index/SegLexicon.c
@@ -58,6 +58,7 @@ SegLex_init(SegLexicon *self, Schema *schema, Folder *folder,
     CharBuf *filename = CB_newf("%o/lexicon-%i32.dat", seg_name, field_num);
 
     Lex_init((Lexicon*)self, field);
+    SegLexiconIVARS *const ivars = SegLex_IVARS(self);
 
     // Check format.
     if (!format) { THROW(ERR, "Missing 'format'"); }
@@ -72,19 +73,19 @@ SegLex_init(SegLexicon *self, Schema *schema, Folder *folder,
     if (!counts) { THROW(ERR, "Failed to extract 'counts'"); }
     else {
         Obj *count = CERTIFY(Hash_Fetch(counts, (Obj*)field), OBJ);
-        self->size = (int32_t)Obj_To_I64(count);
+        ivars->size = (int32_t)Obj_To_I64(count);
     }
 
     // Assign.
-    self->segment        = (Segment*)INCREF(segment);
+    ivars->segment        = (Segment*)INCREF(segment);
 
     // Derive.
-    self->lex_index      = LexIndex_new(schema, folder, segment, field);
-    self->field_num      = field_num;
-    self->index_interval = Arch_Index_Interval(arch);
-    self->skip_interval  = Arch_Skip_Interval(arch);
-    self->instream       = Folder_Open_In(folder, filename);
-    if (!self->instream) {
+    ivars->lex_index      = LexIndex_new(schema, folder, segment, field);
+    ivars->field_num      = field_num;
+    ivars->index_interval = Arch_Index_Interval(arch);
+    ivars->skip_interval  = Arch_Skip_Interval(arch);
+    ivars->instream       = Folder_Open_In(folder, filename);
+    if (!ivars->instream) {
         Err *error = (Err*)INCREF(Err_get_error());
         DECREF(filename);
         DECREF(self);
@@ -93,28 +94,30 @@ SegLex_init(SegLexicon *self, Schema *schema, Folder *folder,
     DECREF(filename);
 
     // Define the term_num as "not yet started".
-    self->term_num = -1;
+    ivars->term_num = -1;
 
     // Get steppers.
-    self->term_stepper  = FType_Make_Term_Stepper(type);
-    self->tinfo_stepper = (TermStepper*)MatchTInfoStepper_new(schema);
+    ivars->term_stepper  = FType_Make_Term_Stepper(type);
+    ivars->tinfo_stepper = (TermStepper*)MatchTInfoStepper_new(schema);
 
     return self;
 }
 
 void
 SegLex_destroy(SegLexicon *self) {
-    DECREF(self->segment);
-    DECREF(self->term_stepper);
-    DECREF(self->tinfo_stepper);
-    DECREF(self->lex_index);
-    DECREF(self->instream);
+    SegLexiconIVARS *const ivars = SegLex_IVARS(self);
+    DECREF(ivars->segment);
+    DECREF(ivars->term_stepper);
+    DECREF(ivars->tinfo_stepper);
+    DECREF(ivars->lex_index);
+    DECREF(ivars->instream);
     SUPER_DESTROY(self, SEGLEXICON);
 }
 
 void
 SegLex_seek(SegLexicon *self, Obj *target) {
-    LexIndex *const lex_index = self->lex_index;
+    SegLexiconIVARS *const ivars = SegLex_IVARS(self);
+    LexIndex *const lex_index = ivars->lex_index;
 
     // Reset upon null term.
     if (target == NULL) {
@@ -126,13 +129,13 @@ SegLex_seek(SegLexicon *self, Obj *target) {
     LexIndex_Seek(lex_index, target);
     TermInfo *target_tinfo = LexIndex_Get_Term_Info(lex_index);
     TermInfo *my_tinfo
-        = (TermInfo*)TermStepper_Get_Value(self->tinfo_stepper);
+        = (TermInfo*)TermStepper_Get_Value(ivars->tinfo_stepper);
     Obj *lex_index_term = Obj_Clone(LexIndex_Get_Term(lex_index));
     TInfo_Mimic(my_tinfo, (Obj*)target_tinfo);
-    TermStepper_Set_Value(self->term_stepper, lex_index_term);
+    TermStepper_Set_Value(ivars->term_stepper, lex_index_term);
     DECREF(lex_index_term);
-    InStream_Seek(self->instream, TInfo_Get_Lex_FilePos(target_tinfo));
-    self->term_num = LexIndex_Get_Term_Num(lex_index);
+    InStream_Seek(ivars->instream, TInfo_Get_Lex_FilePos(target_tinfo));
+    ivars->term_num = LexIndex_Get_Term_Num(lex_index);
 
     // Scan to the precise location.
     S_scan_to(self, target);
@@ -140,59 +143,67 @@ SegLex_seek(SegLexicon *self, Obj *target) {
 
 void
 SegLex_reset(SegLexicon* self) {
-    self->term_num = -1;
-    InStream_Seek(self->instream, 0);
-    TermStepper_Reset(self->term_stepper);
-    TermStepper_Reset(self->tinfo_stepper);
+    SegLexiconIVARS *const ivars = SegLex_IVARS(self);
+    ivars->term_num = -1;
+    InStream_Seek(ivars->instream, 0);
+    TermStepper_Reset(ivars->term_stepper);
+    TermStepper_Reset(ivars->tinfo_stepper);
 }
 
 int32_t
 SegLex_get_field_num(SegLexicon *self) {
-    return self->field_num;
+    return SegLex_IVARS(self)->field_num;
 }
 
 Obj*
 SegLex_get_term(SegLexicon *self) {
-    return TermStepper_Get_Value(self->term_stepper);
+    SegLexiconIVARS *const ivars = SegLex_IVARS(self);
+    return TermStepper_Get_Value(ivars->term_stepper);
 }
 
 int32_t
 SegLex_doc_freq(SegLexicon *self) {
-    TermInfo *tinfo = (TermInfo*)TermStepper_Get_Value(self->tinfo_stepper);
+    SegLexiconIVARS *const ivars = SegLex_IVARS(self);
+    TermInfo *tinfo = (TermInfo*)TermStepper_Get_Value(ivars->tinfo_stepper);
     return tinfo ? TInfo_Get_Doc_Freq(tinfo) : 0;
 }
 
 TermInfo*
 SegLex_get_term_info(SegLexicon *self) {
-    return (TermInfo*)TermStepper_Get_Value(self->tinfo_stepper);
+    SegLexiconIVARS *const ivars = SegLex_IVARS(self);
+    return (TermInfo*)TermStepper_Get_Value(ivars->tinfo_stepper);
 }
 
 Segment*
 SegLex_get_segment(SegLexicon *self) {
-    return self->segment;
+    return SegLex_IVARS(self)->segment;
 }
 
 bool
 SegLex_next(SegLexicon *self) {
+    SegLexiconIVARS *const ivars = SegLex_IVARS(self);
+
     // If we've run out of terms, null out and return.
-    if (++self->term_num >= self->size) {
-        self->term_num = self->size; // don't keep growing
-        TermStepper_Reset(self->term_stepper);
-        TermStepper_Reset(self->tinfo_stepper);
+    if (++ivars->term_num >= ivars->size) {
+        ivars->term_num = ivars->size; // don't keep growing
+        TermStepper_Reset(ivars->term_stepper);
+        TermStepper_Reset(ivars->tinfo_stepper);
         return false;
     }
 
     // Read next term/terminfo.
-    TermStepper_Read_Delta(self->term_stepper, self->instream);
-    TermStepper_Read_Delta(self->tinfo_stepper, self->instream);
+    TermStepper_Read_Delta(ivars->term_stepper, ivars->instream);
+    TermStepper_Read_Delta(ivars->tinfo_stepper, ivars->instream);
 
     return true;
 }
 
 static void
 S_scan_to(SegLexicon *self, Obj *target) {
+    SegLexiconIVARS *const ivars = SegLex_IVARS(self);
+
     // (mildly evil encapsulation violation, since value can be null)
-    Obj *current = TermStepper_Get_Value(self->term_stepper);
+    Obj *current = TermStepper_Get_Value(ivars->term_stepper);
     if (!Obj_Is_A(target, Obj_Get_VTable(current))) {
         THROW(ERR, "Target is a %o, and not comparable to a %o",
               Obj_Get_Class_Name(target), Obj_Get_Class_Name(current));
@@ -201,7 +212,7 @@ S_scan_to(SegLexicon *self, Obj *target) {
     // Keep looping until the term text is ge target.
     do {
         const int32_t comparison = Obj_Compare_To(current, target);
-        if (comparison >= 0 &&  self->term_num != -1) { break; }
+        if (comparison >= 0 && ivars->term_num != -1) { break; }
     } while (SegLex_Next(self));
 }
 

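The SegPostingList hunks below make the cross-class case explicit: SegPList_get_doc_id() reads Post_IVARS(posting)->doc_id, just as TV_serialize() earlier reads I32Arr_IVARS(...)->ints. A minimal sketch of that shape, with hypothetical Outer and Inner names:

#include <stdio.h>

typedef struct InnerIVARS {
    int doc_id;
} InnerIVARS;

typedef struct Inner {
    InnerIVARS ivars;
} Inner;

static InnerIVARS*
Inner_IVARS(Inner *self) {
    return &self->ivars;
}

typedef struct OuterIVARS {
    Inner *inner;
} OuterIVARS;

typedef struct Outer {
    OuterIVARS ivars;
} Outer;

static OuterIVARS*
Outer_IVARS(Outer *self) {
    return &self->ivars;
}

/* Same shape as SegPList_get_doc_id(): go through the member class's own
 * IVARS accessor instead of dereferencing its pointer directly. */
static int
Outer_get_doc_id(Outer *self) {
    OuterIVARS *const ivars = Outer_IVARS(self);
    return Inner_IVARS(ivars->inner)->doc_id;
}

int
main(void) {
    Inner inner = {{ 7 }};
    Outer outer = {{ &inner }};
    printf("doc_id: %d\n", Outer_get_doc_id(&outer));
    return 0;
}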
http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/SegPostingList.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/SegPostingList.c b/core/Lucy/Index/SegPostingList.c
index ae9122e..bebd52d 100644
--- a/core/Lucy/Index/SegPostingList.c
+++ b/core/Lucy/Index/SegPostingList.c
@@ -51,6 +51,7 @@ SegPList_new(PostingListReader *plist_reader, const CharBuf *field) {
 SegPostingList*
 SegPList_init(SegPostingList *self, PostingListReader *plist_reader,
               const CharBuf *field) {
+    SegPostingListIVARS *const ivars = SegPList_IVARS(self);
     Schema       *const schema   = PListReader_Get_Schema(plist_reader);
     Folder       *const folder   = PListReader_Get_Folder(plist_reader);
     Segment      *const segment  = PListReader_Get_Segment(plist_reader);
@@ -62,36 +63,36 @@ SegPList_init(SegPostingList *self, PostingListReader *plist_reader,
     CharBuf      *skip_file      = CB_newf("%o/postings.skip", seg_name);
 
     // Init.
-    self->doc_freq        = 0;
-    self->count           = 0;
+    ivars->doc_freq        = 0;
+    ivars->count           = 0;
 
     // Init skipping vars.
-    self->skip_stepper    = SkipStepper_new();
-    self->skip_count      = 0;
-    self->num_skips       = 0;
+    ivars->skip_stepper    = SkipStepper_new();
+    ivars->skip_count      = 0;
+    ivars->num_skips       = 0;
 
     // Assign.
-    self->plist_reader    = (PostingListReader*)INCREF(plist_reader);
-    self->field           = CB_Clone(field);
-    self->skip_interval   = Arch_Skip_Interval(arch);
+    ivars->plist_reader    = (PostingListReader*)INCREF(plist_reader);
+    ivars->field           = CB_Clone(field);
+    ivars->skip_interval   = Arch_Skip_Interval(arch);
 
     // Derive.
-    Similarity *sim = Schema_Fetch_Sim(schema, field);
-    self->posting   = Sim_Make_Posting(sim);
-    self->field_num = field_num;
+    Similarity *sim  = Schema_Fetch_Sim(schema, field);
+    ivars->posting   = Sim_Make_Posting(sim);
+    ivars->field_num = field_num;
 
     // Open both a main stream and a skip stream if the field exists.
     if (Folder_Exists(folder, post_file)) {
-        self->post_stream = Folder_Open_In(folder, post_file);
-        if (!self->post_stream) {
+        ivars->post_stream = Folder_Open_In(folder, post_file);
+        if (!ivars->post_stream) {
             Err *error = (Err*)INCREF(Err_get_error());
             DECREF(post_file);
             DECREF(skip_file);
             DECREF(self);
             RETHROW(error);
         }
-        self->skip_stream = Folder_Open_In(folder, skip_file);
-        if (!self->skip_stream) {
+        ivars->skip_stream = Folder_Open_In(folder, skip_file);
+        if (!ivars->skip_stream) {
             Err *error = (Err*)INCREF(Err_get_error());
             DECREF(post_file);
             DECREF(skip_file);
@@ -101,8 +102,8 @@ SegPList_init(SegPostingList *self, PostingListReader *plist_reader,
     }
     else {
         //  Empty, so don't bother with these.
-        self->post_stream = NULL;
-        self->skip_stream = NULL;
+        ivars->post_stream = NULL;
+        ivars->skip_stream = NULL;
     }
     DECREF(post_file);
     DECREF(skip_file);
@@ -112,16 +113,17 @@ SegPList_init(SegPostingList *self, PostingListReader *plist_reader,
 
 void
 SegPList_destroy(SegPostingList *self) {
-    DECREF(self->plist_reader);
-    DECREF(self->posting);
-    DECREF(self->skip_stepper);
-    DECREF(self->field);
-
-    if (self->post_stream != NULL) {
-        InStream_Close(self->post_stream);
-        InStream_Close(self->skip_stream);
-        DECREF(self->post_stream);
-        DECREF(self->skip_stream);
+    SegPostingListIVARS *const ivars = SegPList_IVARS(self);
+    DECREF(ivars->plist_reader);
+    DECREF(ivars->posting);
+    DECREF(ivars->skip_stepper);
+    DECREF(ivars->field);
+
+    if (ivars->post_stream != NULL) {
+        InStream_Close(ivars->post_stream);
+        InStream_Close(ivars->skip_stream);
+        DECREF(ivars->post_stream);
+        DECREF(ivars->skip_stream);
     }
 
     SUPER_DESTROY(self, SEGPOSTINGLIST);
@@ -129,56 +131,61 @@ SegPList_destroy(SegPostingList *self) {
 
 Posting*
 SegPList_get_posting(SegPostingList *self) {
-    return self->posting;
+    return SegPList_IVARS(self)->posting;
 }
 
 uint32_t
 SegPList_get_doc_freq(SegPostingList *self) {
-    return self->doc_freq;
+    return SegPList_IVARS(self)->doc_freq;
 }
 
 int32_t
 SegPList_get_doc_id(SegPostingList *self) {
-    return self->posting->doc_id;
+    SegPostingListIVARS *const ivars = SegPList_IVARS(self);
+    return Post_IVARS(ivars->posting)->doc_id;
 }
 
 uint32_t
 SegPList_get_count(SegPostingList *self) {
-    return self->count;
+    return SegPList_IVARS(self)->count;
 }
 
 InStream*
 SegPList_get_post_stream(SegPostingList *self) {
-    return self->post_stream;
+    return SegPList_IVARS(self)->post_stream;
 }
 
 int32_t
 SegPList_next(SegPostingList *self) {
-    InStream *const post_stream = self->post_stream;
-    Posting  *const posting     = self->posting;
+    SegPostingListIVARS *const ivars = SegPList_IVARS(self);
+    InStream *const post_stream = ivars->post_stream;
+    Posting  *const posting     = ivars->posting;
 
     // Bail if we're out of docs.
-    if (self->count >= self->doc_freq) {
+    if (ivars->count >= ivars->doc_freq) {
         Post_Reset(posting);
         return 0;
     }
-    self->count++;
+    ivars->count++;
 
     Post_Read_Record(posting, post_stream);
 
-    return posting->doc_id;
+    return Post_IVARS(posting)->doc_id;
 }
 
 int32_t
 SegPList_advance(SegPostingList *self, int32_t target) {
-    Posting *posting          = self->posting;
-    const uint32_t skip_interval = self->skip_interval;
-
-    if (self->doc_freq >= skip_interval) {
-        InStream *post_stream           = self->post_stream;
-        InStream *skip_stream           = self->skip_stream;
-        SkipStepper *const skip_stepper = self->skip_stepper;
-        uint32_t new_doc_id             = skip_stepper->doc_id;
+    SegPostingListIVARS *const ivars = SegPList_IVARS(self);
+    PostingIVARS *const posting_ivars = Post_IVARS(ivars->posting);
+    const uint32_t skip_interval = ivars->skip_interval;
+
+    if (ivars->doc_freq >= skip_interval) {
+        InStream *post_stream           = ivars->post_stream;
+        InStream *skip_stream           = ivars->skip_stream;
+        SkipStepper *const skip_stepper = ivars->skip_stepper;
+        SkipStepperIVARS *const skip_stepper_ivars
+            = SkipStepper_IVARS(skip_stepper);
+        uint32_t new_doc_id             = skip_stepper_ivars->doc_id;
         int64_t new_filepos             = InStream_Tell(post_stream);
 
         /* Assuming the default skip_interval of 16...
@@ -188,28 +195,28 @@ SegPList_advance(SegPostingList *self, int32_t target) {
          * yet, but we'll have already gone past 5 of the 16 skip docs --
          * ergo, the modulus in the following formula.
          */
-        int32_t num_skipped = 0 - (self->count % skip_interval);
-        if (num_skipped == 0 && self->count != 0) {
+        int32_t num_skipped = 0 - (ivars->count % skip_interval);
+        if (num_skipped == 0 && ivars->count != 0) {
             num_skipped = 0 - skip_interval;
         }
 
         // See if there's anything to skip.
-        while (target > skip_stepper->doc_id) {
-            new_doc_id  = skip_stepper->doc_id;
-            new_filepos = skip_stepper->filepos;
+        while (target > skip_stepper_ivars->doc_id) {
+            new_doc_id  = skip_stepper_ivars->doc_id;
+            new_filepos = skip_stepper_ivars->filepos;
 
-            if (skip_stepper->doc_id != 0
-                && skip_stepper->doc_id >= posting->doc_id
+            if (skip_stepper_ivars->doc_id != 0
+                && skip_stepper_ivars->doc_id >= posting_ivars->doc_id
                ) {
                 num_skipped += skip_interval;
             }
 
-            if (self->skip_count >= self->num_skips) {
+            if (ivars->skip_count >= ivars->num_skips) {
                 break;
             }
 
             SkipStepper_Read_Record(skip_stepper, skip_stream);
-            self->skip_count++;
+            ivars->skip_count++;
         }
 
         // If we found something to skip, skip it.
@@ -219,10 +226,10 @@ SegPList_advance(SegPostingList *self, int32_t target) {
             InStream_Seek(post_stream, new_filepos);
 
             // Jump to the new doc id.
-            posting->doc_id = new_doc_id;
+            posting_ivars->doc_id = new_doc_id;
 
             // Increase count by the number of docs we skipped over.
-            self->count += num_skipped;
+            ivars->count += num_skipped;
         }
     }
 
@@ -237,22 +244,25 @@ SegPList_advance(SegPostingList *self, int32_t target) {
 
 void
 SegPList_seek(SegPostingList *self, Obj *target) {
-    LexiconReader *lex_reader = PListReader_Get_Lex_Reader(self->plist_reader);
+    SegPostingListIVARS *const ivars = SegPList_IVARS(self);
+    LexiconReader *lex_reader = PListReader_Get_Lex_Reader(ivars->plist_reader);
     TermInfo      *tinfo      = LexReader_Fetch_Term_Info(lex_reader,
-                                                          self->field, target);
+                                                          ivars->field, target);
     S_seek_tinfo(self, tinfo);
     DECREF(tinfo);
 }
 
 void
 SegPList_seek_lex(SegPostingList *self, Lexicon *lexicon) {
+    SegPostingListIVARS *const ivars = SegPList_IVARS(self);
+
     // Maybe true, maybe not.
     SegLexicon *const seg_lexicon = (SegLexicon*)lexicon;
 
     // Optimized case.
     if (Obj_Is_A((Obj*)lexicon, SEGLEXICON)
         && (SegLex_Get_Segment(seg_lexicon)
-            == PListReader_Get_Segment(self->plist_reader)) // i.e. same segment
+            == PListReader_Get_Segment(ivars->plist_reader)) // i.e. same segment
        ) {
         S_seek_tinfo(self, SegLex_Get_Term_Info(seg_lexicon));
     }
@@ -266,40 +276,43 @@ SegPList_seek_lex(SegPostingList *self, Lexicon *lexicon) {
 
 static void
 S_seek_tinfo(SegPostingList *self, TermInfo *tinfo) {
-    self->count = 0;
+    SegPostingListIVARS *const ivars = SegPList_IVARS(self);
+    ivars->count = 0;
 
     if (tinfo == NULL) {
         // Next will return false; other methods invalid now.
-        self->doc_freq = 0;
+        ivars->doc_freq = 0;
     }
     else {
         // Transfer doc_freq, seek main stream.
         int64_t post_filepos = TInfo_Get_Post_FilePos(tinfo);
-        self->doc_freq       = TInfo_Get_Doc_Freq(tinfo);
-        InStream_Seek(self->post_stream, post_filepos);
+        ivars->doc_freq      = TInfo_Get_Doc_Freq(tinfo);
+        InStream_Seek(ivars->post_stream, post_filepos);
 
         // Prepare posting.
-        Post_Reset(self->posting);
+        Post_Reset(ivars->posting);
 
         // Prepare to skip.
-        self->skip_count = 0;
-        self->num_skips  = self->doc_freq / self->skip_interval;
-        SkipStepper_Set_ID_And_Filepos(self->skip_stepper, 0, post_filepos);
-        InStream_Seek(self->skip_stream, TInfo_Get_Skip_FilePos(tinfo));
+        ivars->skip_count = 0;
+        ivars->num_skips  = ivars->doc_freq / ivars->skip_interval;
+        SkipStepper_Set_ID_And_Filepos(ivars->skip_stepper, 0, post_filepos);
+        InStream_Seek(ivars->skip_stream, TInfo_Get_Skip_FilePos(tinfo));
     }
 }
 
 Matcher*
 SegPList_make_matcher(SegPostingList *self, Similarity *sim,
                       Compiler *compiler, bool need_score) {
-    return Post_Make_Matcher(self->posting, sim, (PostingList*)self, compiler,
+    SegPostingListIVARS *const ivars = SegPList_IVARS(self);
+    return Post_Make_Matcher(ivars->posting, sim, (PostingList*)self, compiler,
                              need_score);
 }
 
 RawPosting*
 SegPList_read_raw(SegPostingList *self, int32_t last_doc_id, CharBuf *term_text,
                   MemoryPool *mem_pool) {
-    return Post_Read_Raw(self->posting, self->post_stream,
+    SegPostingListIVARS *const ivars = SegPList_IVARS(self);
+    return Post_Read_Raw(ivars->posting, ivars->post_stream,
                          last_doc_id, term_text, mem_pool);
 }
 

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/SegReader.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/SegReader.c b/core/Lucy/Index/SegReader.c
index ad803b2..e9a1ba6 100644
--- a/core/Lucy/Index/SegReader.c
+++ b/core/Lucy/Index/SegReader.c
@@ -47,11 +47,12 @@ SegReader_init(SegReader *self, Schema *schema, Folder *folder,
 
     IxReader_init((IndexReader*)self, schema, folder, snapshot, segments,
                   seg_tick, NULL);
+    SegReaderIVARS *const ivars = SegReader_IVARS(self);
     segment = SegReader_Get_Segment(self);
 
-    self->doc_max    = (int32_t)Seg_Get_Count(segment);
-    self->seg_name   = (CharBuf*)INCREF(Seg_Get_Name(segment));
-    self->seg_num    = Seg_Get_Number(segment);
+    ivars->doc_max    = (int32_t)Seg_Get_Count(segment);
+    ivars->seg_name   = (CharBuf*)INCREF(Seg_Get_Name(segment));
+    ivars->seg_num    = Seg_Get_Number(segment);
     Err *error = Err_trap(S_try_init_components, self);
     if (error) {
         // An error occurred, so clean up self and rethrow the exception.
@@ -61,8 +62,8 @@ SegReader_init(SegReader *self, Schema *schema, Folder *folder,
 
     DeletionsReader *del_reader
         = (DeletionsReader*)Hash_Fetch(
-              self->components, (Obj*)VTable_Get_Name(DELETIONSREADER));
-    self->del_count = del_reader ? DelReader_Del_Count(del_reader) : 0;
+              ivars->components, (Obj*)VTable_Get_Name(DELETIONSREADER));
+    ivars->del_count = del_reader ? DelReader_Del_Count(del_reader) : 0;
 
     return self;
 }
@@ -77,43 +78,46 @@ S_try_init_components(void *context) {
 
 void
 SegReader_destroy(SegReader *self) {
-    DECREF(self->seg_name);
+    SegReaderIVARS *const ivars = SegReader_IVARS(self);
+    DECREF(ivars->seg_name);
     SUPER_DESTROY(self, SEGREADER);
 }
 
 void
 SegReader_register(SegReader *self, const CharBuf *api,
                    DataReader *component) {
-    if (Hash_Fetch(self->components, (Obj*)api)) {
+    SegReaderIVARS *const ivars = SegReader_IVARS(self);
+    if (Hash_Fetch(ivars->components, (Obj*)api)) {
         THROW(ERR, "Interface '%o' already registered");
     }
     CERTIFY(component, DATAREADER);
-    Hash_Store(self->components, (Obj*)api, (Obj*)component);
+    Hash_Store(ivars->components, (Obj*)api, (Obj*)component);
 }
 
 CharBuf*
 SegReader_get_seg_name(SegReader *self) {
-    return self->seg_name;
+    return SegReader_IVARS(self)->seg_name;
 }
 
 int64_t
 SegReader_get_seg_num(SegReader *self) {
-    return self->seg_num;
+    return SegReader_IVARS(self)->seg_num;
 }
 
 int32_t
 SegReader_del_count(SegReader *self) {
-    return self->del_count;
+    return SegReader_IVARS(self)->del_count;
 }
 
 int32_t
 SegReader_doc_max(SegReader *self) {
-    return self->doc_max;
+    return SegReader_IVARS(self)->doc_max;
 }
 
 int32_t
 SegReader_doc_count(SegReader *self) {
-    return self->doc_max - self->del_count;
+    SegReaderIVARS *const ivars = SegReader_IVARS(self);
+    return ivars->doc_max - ivars->del_count;
 }
 
 I32Array*

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/SegWriter.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/SegWriter.c b/core/Lucy/Index/SegWriter.c
index 02fe391..6280664 100644
--- a/core/Lucy/Index/SegWriter.c
+++ b/core/Lucy/Index/SegWriter.c
@@ -42,46 +42,52 @@ SegWriter_init(SegWriter *self, Schema *schema, Snapshot *snapshot,
                Segment *segment, PolyReader *polyreader) {
     Architecture *arch   = Schema_Get_Architecture(schema);
     DataWriter_init((DataWriter*)self, schema, snapshot, segment, polyreader);
-    self->by_api   = Hash_new(0);
-    self->inverter = Inverter_new(schema, segment);
-    self->writers  = VA_new(16);
+    SegWriterIVARS *const ivars = SegWriter_IVARS(self);
+    ivars->by_api   = Hash_new(0);
+    ivars->inverter = Inverter_new(schema, segment);
+    ivars->writers  = VA_new(16);
     Arch_Init_Seg_Writer(arch, self);
     return self;
 }
 
 void
 SegWriter_destroy(SegWriter *self) {
-    DECREF(self->inverter);
-    DECREF(self->writers);
-    DECREF(self->by_api);
-    DECREF(self->del_writer);
+    SegWriterIVARS *const ivars = SegWriter_IVARS(self);
+    DECREF(ivars->inverter);
+    DECREF(ivars->writers);
+    DECREF(ivars->by_api);
+    DECREF(ivars->del_writer);
     SUPER_DESTROY(self, SEGWRITER);
 }
 
 void
 SegWriter_register(SegWriter *self, const CharBuf *api,
                    DataWriter *component) {
+    SegWriterIVARS *const ivars = SegWriter_IVARS(self);
     CERTIFY(component, DATAWRITER);
-    if (Hash_Fetch(self->by_api, (Obj*)api)) {
+    if (Hash_Fetch(ivars->by_api, (Obj*)api)) {
         THROW(ERR, "API %o already registered", api);
     }
-    Hash_Store(self->by_api, (Obj*)api, (Obj*)component);
+    Hash_Store(ivars->by_api, (Obj*)api, (Obj*)component);
 }
 
 Obj*
 SegWriter_fetch(SegWriter *self, const CharBuf *api) {
-    return Hash_Fetch(self->by_api, (Obj*)api);
+    SegWriterIVARS *const ivars = SegWriter_IVARS(self);
+    return Hash_Fetch(ivars->by_api, (Obj*)api);
 }
 
 void
 SegWriter_add_writer(SegWriter *self, DataWriter *writer) {
-    VA_Push(self->writers, (Obj*)writer);
+    SegWriterIVARS *const ivars = SegWriter_IVARS(self);
+    VA_Push(ivars->writers, (Obj*)writer);
 }
 
 void
 SegWriter_prep_seg_dir(SegWriter *self) {
+    SegWriterIVARS *const ivars = SegWriter_IVARS(self);
     Folder  *folder   = SegWriter_Get_Folder(self);
-    CharBuf *seg_name = Seg_Get_Name(self->segment);
+    CharBuf *seg_name = Seg_Get_Name(ivars->segment);
 
     // Clear stale segment files from crashed indexing sessions.
     if (Folder_Exists(folder, seg_name)) {
@@ -98,17 +104,19 @@ SegWriter_prep_seg_dir(SegWriter *self) {
 
 void
 SegWriter_add_doc(SegWriter *self, Doc *doc, float boost) {
-    int32_t doc_id = (int32_t)Seg_Increment_Count(self->segment, 1);
-    Inverter_Invert_Doc(self->inverter, doc);
-    Inverter_Set_Boost(self->inverter, boost);
-    SegWriter_Add_Inverted_Doc(self, self->inverter, doc_id);
+    SegWriterIVARS *const ivars = SegWriter_IVARS(self);
+    int32_t doc_id = (int32_t)Seg_Increment_Count(ivars->segment, 1);
+    Inverter_Invert_Doc(ivars->inverter, doc);
+    Inverter_Set_Boost(ivars->inverter, boost);
+    SegWriter_Add_Inverted_Doc(self, ivars->inverter, doc_id);
 }
 
 void
 SegWriter_add_inverted_doc(SegWriter *self, Inverter *inverter,
                            int32_t doc_id) {
-    for (uint32_t i = 0, max = VA_Get_Size(self->writers); i < max; i++) {
-        DataWriter *writer = (DataWriter*)VA_Fetch(self->writers, i);
+    SegWriterIVARS *const ivars = SegWriter_IVARS(self);
+    for (uint32_t i = 0, max = VA_Get_Size(ivars->writers); i < max; i++) {
+        DataWriter *writer = (DataWriter*)VA_Fetch(ivars->writers, i);
         DataWriter_Add_Inverted_Doc(writer, inverter, doc_id);
     }
 }
@@ -118,24 +126,27 @@ SegWriter_add_inverted_doc(SegWriter *self, Inverter *inverter,
 // probably out of sync.
 static void
 S_adjust_doc_id(SegWriter *self, SegReader *reader, I32Array *doc_map) {
+    SegWriterIVARS *const ivars = SegWriter_IVARS(self);
     uint32_t doc_count = SegReader_Doc_Max(reader);
     for (uint32_t i = 1, max = I32Arr_Get_Size(doc_map); i < max; i++) {
         if (I32Arr_Get(doc_map, i) == 0) { doc_count--; }
     }
-    Seg_Increment_Count(self->segment, doc_count);
+    Seg_Increment_Count(ivars->segment, doc_count);
 }
 
 void
 SegWriter_add_segment(SegWriter *self, SegReader *reader, I32Array *doc_map) {
+    SegWriterIVARS *const ivars = SegWriter_IVARS(self);
+
     // Bulk add the slab of documents to the various writers.
-    for (uint32_t i = 0, max = VA_Get_Size(self->writers); i < max; i++) {
-        DataWriter *writer = (DataWriter*)VA_Fetch(self->writers, i);
+    for (uint32_t i = 0, max = VA_Get_Size(ivars->writers); i < max; i++) {
+        DataWriter *writer = (DataWriter*)VA_Fetch(ivars->writers, i);
         DataWriter_Add_Segment(writer, reader, doc_map);
     }
 
     // Bulk add the segment to the DeletionsWriter, so that it can merge
     // previous segment files as necessary.
-    DelWriter_Add_Segment(self->del_writer, reader, doc_map);
+    DelWriter_Add_Segment(ivars->del_writer, reader, doc_map);
 
     // Adjust the document id.
     S_adjust_doc_id(self, reader, doc_map);
@@ -144,15 +155,16 @@ SegWriter_add_segment(SegWriter *self, SegReader *reader, I32Array *doc_map) {
 void
 SegWriter_merge_segment(SegWriter *self, SegReader *reader,
                         I32Array *doc_map) {
+    SegWriterIVARS *const ivars = SegWriter_IVARS(self);
     Snapshot *snapshot = SegWriter_Get_Snapshot(self);
     CharBuf  *seg_name = Seg_Get_Name(SegReader_Get_Segment(reader));
 
     // Have all the sub-writers merge the segment.
-    for (uint32_t i = 0, max = VA_Get_Size(self->writers); i < max; i++) {
-        DataWriter *writer = (DataWriter*)VA_Fetch(self->writers, i);
+    for (uint32_t i = 0, max = VA_Get_Size(ivars->writers); i < max; i++) {
+        DataWriter *writer = (DataWriter*)VA_Fetch(ivars->writers, i);
         DataWriter_Merge_Segment(writer, reader, doc_map);
     }
-    DelWriter_Merge_Segment(self->del_writer, reader, doc_map);
+    DelWriter_Merge_Segment(ivars->del_writer, reader, doc_map);
 
     // Remove seg directory from snapshot.
     Snapshot_Delete_Entry(snapshot, seg_name);
@@ -163,15 +175,16 @@ SegWriter_merge_segment(SegWriter *self, SegReader *reader,
 
 void
 SegWriter_delete_segment(SegWriter *self, SegReader *reader) {
+    SegWriterIVARS *const ivars = SegWriter_IVARS(self);
     Snapshot *snapshot = SegWriter_Get_Snapshot(self);
     CharBuf  *seg_name = Seg_Get_Name(SegReader_Get_Segment(reader));
 
     // Have all the sub-writers delete the segment.
-    for (uint32_t i = 0, max = VA_Get_Size(self->writers); i < max; i++) {
-        DataWriter *writer = (DataWriter*)VA_Fetch(self->writers, i);
+    for (uint32_t i = 0, max = VA_Get_Size(ivars->writers); i < max; i++) {
+        DataWriter *writer = (DataWriter*)VA_Fetch(ivars->writers, i);
         DataWriter_Delete_Segment(writer, reader);
     }
-    DelWriter_Delete_Segment(self->del_writer, reader);
+    DelWriter_Delete_Segment(ivars->del_writer, reader);
 
     // Remove seg directory from snapshot.
     Snapshot_Delete_Entry(snapshot, seg_name);
@@ -179,39 +192,42 @@ SegWriter_delete_segment(SegWriter *self, SegReader *reader) {
 
 void
 SegWriter_finish(SegWriter *self) {
-    CharBuf *seg_name = Seg_Get_Name(self->segment);
+    SegWriterIVARS *const ivars = SegWriter_IVARS(self);
+    CharBuf *seg_name = Seg_Get_Name(ivars->segment);
 
     // Finish off children.
-    for (uint32_t i = 0, max = VA_Get_Size(self->writers); i < max; i++) {
-        DataWriter *writer = (DataWriter*)VA_Fetch(self->writers, i);
+    for (uint32_t i = 0, max = VA_Get_Size(ivars->writers); i < max; i++) {
+        DataWriter *writer = (DataWriter*)VA_Fetch(ivars->writers, i);
         DataWriter_Finish(writer);
     }
 
     // Write segment metadata and add the segment directory to the snapshot.
     Snapshot *snapshot = SegWriter_Get_Snapshot(self);
     CharBuf *segmeta_filename = CB_newf("%o/segmeta.json", seg_name);
-    Seg_Write_File(self->segment, self->folder);
+    Seg_Write_File(ivars->segment, ivars->folder);
     Snapshot_Add_Entry(snapshot, seg_name);
     DECREF(segmeta_filename);
 
     // Collapse segment files into compound file.
-    Folder_Consolidate(self->folder, seg_name);
+    Folder_Consolidate(ivars->folder, seg_name);
 }
 
 void
 SegWriter_add_data_writer(SegWriter *self, DataWriter *writer) {
-    VA_Push(self->writers, (Obj*)writer);
+    SegWriterIVARS *const ivars = SegWriter_IVARS(self);
+    VA_Push(ivars->writers, (Obj*)writer);
 }
 
 void
 SegWriter_set_del_writer(SegWriter *self, DeletionsWriter *del_writer) {
-    DECREF(self->del_writer);
-    self->del_writer = (DeletionsWriter*)INCREF(del_writer);
+    SegWriterIVARS *const ivars = SegWriter_IVARS(self);
+    DECREF(ivars->del_writer);
+    ivars->del_writer = (DeletionsWriter*)INCREF(del_writer);
 }
 
 DeletionsWriter*
 SegWriter_get_del_writer(SegWriter *self) {
-    return self->del_writer;
+    return SegWriter_IVARS(self)->del_writer;
 }
 
 

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/Segment.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/Segment.c b/core/Lucy/Index/Segment.c
index 69365ed..ddcebee 100644
--- a/core/Lucy/Index/Segment.c
+++ b/core/Lucy/Index/Segment.c
@@ -33,23 +33,25 @@ Seg_new(int64_t number) {
 
 Segment*
 Seg_init(Segment *self, int64_t number) {
+    SegmentIVARS *const ivars = Seg_IVARS(self);
+
     // Validate.
     if (number < 0) { THROW(ERR, "Segment number %i64 less than 0", number); }
 
     // Init.
-    self->metadata  = Hash_new(0);
-    self->count     = 0;
-    self->by_num    = VA_new(2);
-    self->by_name   = Hash_new(0);
+    ivars->metadata  = Hash_new(0);
+    ivars->count     = 0;
+    ivars->by_num    = VA_new(2);
+    ivars->by_name   = Hash_new(0);
 
     // Start field numbers at 1, not 0.
-    VA_Push(self->by_num, (Obj*)CB_newf(""));
+    VA_Push(ivars->by_num, (Obj*)CB_newf(""));
 
     // Assign.
-    self->number = number;
+    ivars->number = number;
 
     // Derive.
-    self->name = Seg_num_to_name(number);
+    ivars->name = Seg_num_to_name(number);
 
     return self;
 }
@@ -77,16 +79,18 @@ Seg_valid_seg_name(const CharBuf *name) {
 
 void
 Seg_destroy(Segment *self) {
-    DECREF(self->name);
-    DECREF(self->metadata);
-    DECREF(self->by_name);
-    DECREF(self->by_num);
+    SegmentIVARS *const ivars = Seg_IVARS(self);
+    DECREF(ivars->name);
+    DECREF(ivars->metadata);
+    DECREF(ivars->by_name);
+    DECREF(ivars->by_num);
     SUPER_DESTROY(self, SEGMENT);
 }
 
 bool
 Seg_read_file(Segment *self, Folder *folder) {
-    CharBuf *filename = CB_newf("%o/segmeta.json", self->name);
+    SegmentIVARS *const ivars = Seg_IVARS(self);
+    CharBuf *filename = CB_newf("%o/segmeta.json", ivars->name);
     Hash    *metadata = (Hash*)Json_slurp_json(folder, filename);
     Hash    *my_metadata;
 
@@ -96,16 +100,16 @@ Seg_read_file(Segment *self, Folder *folder) {
     CERTIFY(metadata, HASH);
 
     // Grab metadata for the Segment object itself.
-    DECREF(self->metadata);
-    self->metadata = metadata;
+    DECREF(ivars->metadata);
+    ivars->metadata = metadata;
     my_metadata
-        = (Hash*)CERTIFY(Hash_Fetch_Str(self->metadata, "segmeta", 7), HASH);
+        = (Hash*)CERTIFY(Hash_Fetch_Str(ivars->metadata, "segmeta", 7), HASH);
 
     // Assign.
     Obj *count = Hash_Fetch_Str(my_metadata, "count", 5);
     if (!count) { count = Hash_Fetch_Str(my_metadata, "doc_count", 9); }
     if (!count) { THROW(ERR, "Missing 'count'"); }
-    else { self->count = Obj_To_I64(count); }
+    else { ivars->count = Obj_To_I64(count); }
 
     // Get list of field nums.
     VArray *source_by_num = (VArray*)Hash_Fetch_Str(my_metadata,
@@ -116,10 +120,10 @@ Seg_read_file(Segment *self, Folder *folder) {
     }
 
     // Init.
-    DECREF(self->by_num);
-    DECREF(self->by_name);
-    self->by_num  = VA_new(num_fields);
-    self->by_name = Hash_new(num_fields);
+    DECREF(ivars->by_num);
+    DECREF(ivars->by_name);
+    ivars->by_num  = VA_new(num_fields);
+    ivars->by_name = Hash_new(num_fields);
 
     // Copy the list of fields from the source.
     for (uint32_t i = 0; i < num_fields; i++) {
@@ -132,68 +136,72 @@ Seg_read_file(Segment *self, Folder *folder) {
 
 void
 Seg_write_file(Segment *self, Folder *folder) {
+    SegmentIVARS *const ivars = Seg_IVARS(self);
     Hash *my_metadata = Hash_new(16);
 
     // Store metadata specific to this Segment object.
     Hash_Store_Str(my_metadata, "count", 5,
-                   (Obj*)CB_newf("%i64", self->count));
-    Hash_Store_Str(my_metadata, "name", 4, (Obj*)CB_Clone(self->name));
-    Hash_Store_Str(my_metadata, "field_names", 11, INCREF(self->by_num));
+                   (Obj*)CB_newf("%i64", ivars->count));
+    Hash_Store_Str(my_metadata, "name", 4, (Obj*)CB_Clone(ivars->name));
+    Hash_Store_Str(my_metadata, "field_names", 11, INCREF(ivars->by_num));
     Hash_Store_Str(my_metadata, "format", 6, (Obj*)CB_newf("%i32", 1));
-    Hash_Store_Str(self->metadata, "segmeta", 7, (Obj*)my_metadata);
+    Hash_Store_Str(ivars->metadata, "segmeta", 7, (Obj*)my_metadata);
 
-    CharBuf *filename = CB_newf("%o/segmeta.json", self->name);
-    bool result = Json_spew_json((Obj*)self->metadata, folder, filename);
+    CharBuf *filename = CB_newf("%o/segmeta.json", ivars->name);
+    bool result = Json_spew_json((Obj*)ivars->metadata, folder, filename);
     DECREF(filename);
     if (!result) { RETHROW(INCREF(Err_get_error())); }
 }
 
 int32_t
 Seg_add_field(Segment *self, const CharBuf *field) {
-    Integer32 *num = (Integer32*)Hash_Fetch(self->by_name, (Obj*)field);
+    SegmentIVARS *const ivars = Seg_IVARS(self);
+    Integer32 *num = (Integer32*)Hash_Fetch(ivars->by_name, (Obj*)field);
     if (num) {
         return Int32_Get_Value(num);
     }
     else {
-        int32_t field_num = VA_Get_Size(self->by_num);
-        Hash_Store(self->by_name, (Obj*)field, (Obj*)Int32_new(field_num));
-        VA_Push(self->by_num, (Obj*)CB_Clone(field));
+        int32_t field_num = VA_Get_Size(ivars->by_num);
+        Hash_Store(ivars->by_name, (Obj*)field, (Obj*)Int32_new(field_num));
+        VA_Push(ivars->by_num, (Obj*)CB_Clone(field));
         return field_num;
     }
 }
 
 CharBuf*
 Seg_get_name(Segment *self) {
-    return self->name;
+    return Seg_IVARS(self)->name;
 }
 
 int64_t
 Seg_get_number(Segment *self) {
-    return self->number;
+    return Seg_IVARS(self)->number;
 }
 
 void
 Seg_set_count(Segment *self, int64_t count) {
-    self->count = count;
+    Seg_IVARS(self)->count = count;
 }
 
 int64_t
 Seg_get_count(Segment *self) {
-    return self->count;
+    return Seg_IVARS(self)->count;
 }
 
 int64_t
 Seg_increment_count(Segment *self, int64_t increment) {
-    self->count += increment;
-    return self->count;
+    SegmentIVARS *const ivars = Seg_IVARS(self);
+    ivars->count += increment;
+    return ivars->count;
 }
 
 void
 Seg_store_metadata(Segment *self, const CharBuf *key, Obj *value) {
-    if (Hash_Fetch(self->metadata, (Obj*)key)) {
+    SegmentIVARS *const ivars = Seg_IVARS(self);
+    if (Hash_Fetch(ivars->metadata, (Obj*)key)) {
         THROW(ERR, "Metadata key '%o' already registered", key);
     }
-    Hash_Store(self->metadata, (Obj*)key, value);
+    Hash_Store(ivars->metadata, (Obj*)key, value);
 }
 
 void
@@ -205,31 +213,36 @@ Seg_store_metadata_str(Segment *self, const char *key, size_t key_len,
 
 Obj*
 Seg_fetch_metadata(Segment *self, const CharBuf *key) {
-    return Hash_Fetch(self->metadata, (Obj*)key);
+    SegmentIVARS *const ivars = Seg_IVARS(self);
+    return Hash_Fetch(ivars->metadata, (Obj*)key);
 }
 
 Obj*
 Seg_fetch_metadata_str(Segment *self, const char *key, size_t len) {
-    return Hash_Fetch_Str(self->metadata, key, len);
+    SegmentIVARS *const ivars = Seg_IVARS(self);
+    return Hash_Fetch_Str(ivars->metadata, key, len);
 }
 
 Hash*
 Seg_get_metadata(Segment *self) {
-    return self->metadata;
+    return Seg_IVARS(self)->metadata;
 }
 
 int32_t
 Seg_compare_to(Segment *self, Obj *other) {
     Segment *other_seg = (Segment*)CERTIFY(other, SEGMENT);
-    if (self->number <  other_seg->number)      { return -1; }
-    else if (self->number == other_seg->number) { return 0;  }
-    else                                        { return 1;  }
+    SegmentIVARS *const ivars = Seg_IVARS(self);
+    SegmentIVARS *const ovars = Seg_IVARS(other_seg);
+    if (ivars->number < ovars->number)       { return -1; }
+    else if (ivars->number == ovars->number) { return 0;  }
+    else                                     { return 1;  }
 }
 
 CharBuf*
 Seg_field_name(Segment *self, int32_t field_num) {
+    SegmentIVARS *const ivars = Seg_IVARS(self);
     return field_num
-           ? (CharBuf*)VA_Fetch(self->by_num, field_num)
+           ? (CharBuf*)VA_Fetch(ivars->by_num, field_num)
            : NULL;
 }
 
@@ -239,7 +252,8 @@ Seg_field_num(Segment *self, const CharBuf *field) {
         return 0;
     }
     else {
-        Integer32 *num = (Integer32*)Hash_Fetch(self->by_name, (Obj*)field);
+        SegmentIVARS *const ivars = Seg_IVARS(self);
+        Integer32 *num = (Integer32*)Hash_Fetch(ivars->by_name, (Obj*)field);
         return num ? Int32_Get_Value(num) : 0;
     }
 }

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/Similarity.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/Similarity.c b/core/Lucy/Index/Similarity.c
index 2118cdc..a583ef1 100644
--- a/core/Lucy/Index/Similarity.c
+++ b/core/Lucy/Index/Similarity.c
@@ -39,14 +39,16 @@ Sim_new() {
 
 Similarity*
 Sim_init(Similarity *self) {
-    self->norm_decoder = NULL;
+    SimilarityIVARS *const ivars = Sim_IVARS(self);
+    ivars->norm_decoder = NULL;
     return self;
 }
 
 void
 Sim_destroy(Similarity *self) {
-    if (self->norm_decoder) {
-        FREEMEM(self->norm_decoder);
+    SimilarityIVARS *const ivars = Sim_IVARS(self);
+    if (ivars->norm_decoder) {
+        FREEMEM(ivars->norm_decoder);
     }
     SUPER_DESTROY(self, SIMILARITY);
 }
@@ -67,14 +69,15 @@ Sim_make_posting_writer(Similarity *self, Schema *schema, Snapshot *snapshot,
 
 float*
 Sim_get_norm_decoder(Similarity *self) {
-    if (!self->norm_decoder) {
+    SimilarityIVARS *const ivars = Sim_IVARS(self);
+    if (!ivars->norm_decoder) {
         // Cache decoded boost bytes.
-        self->norm_decoder = (float*)MALLOCATE(256 * sizeof(float));
+        ivars->norm_decoder = (float*)MALLOCATE(256 * sizeof(float));
         for (uint32_t i = 0; i < 256; i++) {
-            self->norm_decoder[i] = Sim_Decode_Norm(self, i);
+            ivars->norm_decoder[i] = Sim_Decode_Norm(self, i);
         }
     }
-    return self->norm_decoder;
+    return ivars->norm_decoder;
 }
 
 Obj*

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/SkipStepper.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/SkipStepper.c b/core/Lucy/Index/SkipStepper.c
index f07b9f3..33ce450 100644
--- a/core/Lucy/Index/SkipStepper.c
+++ b/core/Lucy/Index/SkipStepper.c
@@ -27,10 +27,11 @@
 SkipStepper*
 SkipStepper_new() {
     SkipStepper *self = (SkipStepper*)VTable_Make_Obj(SKIPSTEPPER);
+    SkipStepperIVARS *const ivars = SkipStepper_IVARS(self);
 
     // Init.
-    self->doc_id   = 0;
-    self->filepos  = 0;
+    ivars->doc_id   = 0;
+    ivars->filepos  = 0;
 
     return self;
 }
@@ -38,29 +39,33 @@ SkipStepper_new() {
 void
 SkipStepper_set_id_and_filepos(SkipStepper *self, int32_t doc_id,
                                int64_t filepos) {
-    self->doc_id  = doc_id;
-    self->filepos = filepos;
+    SkipStepperIVARS *const ivars = SkipStepper_IVARS(self);
+    ivars->doc_id  = doc_id;
+    ivars->filepos = filepos;
 }
 
 void
 SkipStepper_read_record(SkipStepper *self, InStream *instream) {
-    self->doc_id   += InStream_Read_C32(instream);
-    self->filepos  += InStream_Read_C64(instream);
+    SkipStepperIVARS *const ivars = SkipStepper_IVARS(self);
+    ivars->doc_id   += InStream_Read_C32(instream);
+    ivars->filepos  += InStream_Read_C64(instream);
 }
 
 CharBuf*
 SkipStepper_to_string(SkipStepper *self) {
+    SkipStepperIVARS *const ivars = SkipStepper_IVARS(self);
     char *ptr = (char*)MALLOCATE(60);
     size_t len = sprintf(ptr, "skip doc: %u file pointer: %" PRId64,
-                         self->doc_id, self->filepos);
+                         ivars->doc_id, ivars->filepos);
     return CB_new_steal_from_trusted_str(ptr, len, 60);
 }
 
 void
 SkipStepper_write_record(SkipStepper *self, OutStream *outstream,
                          int32_t last_doc_id, int64_t last_filepos) {
-    const int32_t delta_doc_id = self->doc_id - last_doc_id;
-    const int64_t delta_filepos = self->filepos - last_filepos;
+    SkipStepperIVARS *const ivars = SkipStepper_IVARS(self);
+    const int32_t delta_doc_id = ivars->doc_id - last_doc_id;
+    const int64_t delta_filepos = ivars->filepos - last_filepos;
 
     // Write delta doc id.
     OutStream_Write_C32(outstream, delta_doc_id);

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/Snapshot.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/Snapshot.c b/core/Lucy/Index/Snapshot.c
index beca285..4bae50f 100644
--- a/core/Lucy/Index/Snapshot.c
+++ b/core/Lucy/Index/Snapshot.c
@@ -38,10 +38,11 @@ Snapshot_new() {
 
 static void
 S_zero_out(Snapshot *self) {
-    DECREF(self->entries);
-    DECREF(self->path);
-    self->entries  = Hash_new(0);
-    self->path = NULL;
+    SnapshotIVARS *const ivars = Snapshot_IVARS(self);
+    DECREF(ivars->entries);
+    DECREF(ivars->path);
+    ivars->entries  = Hash_new(0);
+    ivars->path = NULL;
 }
 
 Snapshot*
@@ -52,19 +53,22 @@ Snapshot_init(Snapshot *self) {
 
 void
 Snapshot_destroy(Snapshot *self) {
-    DECREF(self->entries);
-    DECREF(self->path);
+    SnapshotIVARS *const ivars = Snapshot_IVARS(self);
+    DECREF(ivars->entries);
+    DECREF(ivars->path);
     SUPER_DESTROY(self, SNAPSHOT);
 }
 
 void
 Snapshot_add_entry(Snapshot *self, const CharBuf *entry) {
-    Hash_Store(self->entries, (Obj*)entry, (Obj*)CFISH_TRUE);
+    SnapshotIVARS *const ivars = Snapshot_IVARS(self);
+    Hash_Store(ivars->entries, (Obj*)entry, (Obj*)CFISH_TRUE);
 }
 
 bool
 Snapshot_delete_entry(Snapshot *self, const CharBuf *entry) {
-    Obj *val = Hash_Delete(self->entries, (Obj*)entry);
+    SnapshotIVARS *const ivars = Snapshot_IVARS(self);
+    Obj *val = Hash_Delete(ivars->entries, (Obj*)entry);
     if (val) {
         Obj_Dec_RefCount(val);
         return true;
@@ -76,34 +80,39 @@ Snapshot_delete_entry(Snapshot *self, const CharBuf *entry) {
 
 VArray*
 Snapshot_list(Snapshot *self) {
-    return Hash_Keys(self->entries);
+    SnapshotIVARS *const ivars = Snapshot_IVARS(self);
+    return Hash_Keys(ivars->entries);
 }
 
 uint32_t
 Snapshot_num_entries(Snapshot *self) {
-    return Hash_Get_Size(self->entries);
+    SnapshotIVARS *const ivars = Snapshot_IVARS(self);
+    return Hash_Get_Size(ivars->entries);
 }
 
 void
 Snapshot_set_path(Snapshot *self, const CharBuf *path) {
-    DECREF(self->path);
-    self->path = path ? CB_Clone(path) : NULL;
+    SnapshotIVARS *const ivars = Snapshot_IVARS(self);
+    DECREF(ivars->path);
+    ivars->path = path ? CB_Clone(path) : NULL;
 }
 
 CharBuf*
 Snapshot_get_path(Snapshot *self) {
-    return self->path;
+    return Snapshot_IVARS(self)->path;
 }
 
 Snapshot*
 Snapshot_read_file(Snapshot *self, Folder *folder, const CharBuf *path) {
+    SnapshotIVARS *const ivars = Snapshot_IVARS(self);
+
     // Eliminate all prior data. Pick a snapshot file.
     S_zero_out(self);
-    self->path = path ? CB_Clone(path) : IxFileNames_latest_snapshot(folder);
+    ivars->path = path ? CB_Clone(path) : IxFileNames_latest_snapshot(folder);
 
-    if (self->path) {
+    if (ivars->path) {
         Hash *snap_data
-            = (Hash*)CERTIFY(Json_slurp_json(folder, self->path), HASH);
+            = (Hash*)CERTIFY(Json_slurp_json(folder, ivars->path), HASH);
         Obj *format_obj
             = CERTIFY(Hash_Fetch_Str(snap_data, "format", 6), OBJ);
         int32_t format = (int32_t)Obj_To_I64(format_obj);
@@ -128,11 +137,11 @@ Snapshot_read_file(Snapshot *self, Folder *folder, const CharBuf *path) {
             DECREF(list);
             list = cleaned;
         }
-        Hash_Clear(self->entries);
+        Hash_Clear(ivars->entries);
         for (uint32_t i = 0, max = VA_Get_Size(list); i < max; i++) {
             CharBuf *entry
                 = (CharBuf*)CERTIFY(VA_Fetch(list, i), CHARBUF);
-            Hash_Store(self->entries, (Obj*)entry, (Obj*)CFISH_TRUE);
+            Hash_Store(ivars->entries, (Obj*)entry, (Obj*)CFISH_TRUE);
         }
 
         DECREF(list);
@@ -163,26 +172,27 @@ S_clean_segment_contents(VArray *orig) {
 
 void
 Snapshot_write_file(Snapshot *self, Folder *folder, const CharBuf *path) {
+    SnapshotIVARS *const ivars = Snapshot_IVARS(self);
     Hash   *all_data = Hash_new(0);
     VArray *list     = Snapshot_List(self);
 
     // Update path.
-    DECREF(self->path);
+    DECREF(ivars->path);
     if (path) {
-        self->path = CB_Clone(path);
+        ivars->path = CB_Clone(path);
     }
     else {
         CharBuf *latest = IxFileNames_latest_snapshot(folder);
         uint64_t gen = latest ? IxFileNames_extract_gen(latest) + 1 : 1;
         char base36[StrHelp_MAX_BASE36_BYTES];
         StrHelp_to_base36(gen, &base36);
-        self->path = CB_newf("snapshot_%s.json", &base36);
+        ivars->path = CB_newf("snapshot_%s.json", &base36);
         DECREF(latest);
     }
 
     // Don't overwrite.
-    if (Folder_Exists(folder, self->path)) {
-        THROW(ERR, "Snapshot file '%o' already exists", self->path);
+    if (Folder_Exists(folder, ivars->path)) {
+        THROW(ERR, "Snapshot file '%o' already exists", ivars->path);
     }
 
     // Sort, then store file names.
@@ -196,7 +206,7 @@ Snapshot_write_file(Snapshot *self, Folder *folder, const CharBuf *path) {
                    (Obj*)CB_newf("%i32", (int32_t)Snapshot_current_file_subformat));
 
     // Write out JSON-ized data to the new file.
-    Json_spew_json((Obj*)all_data, folder, self->path);
+    Json_spew_json((Obj*)all_data, folder, ivars->path);
 
     DECREF(all_data);
 }

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/SortCache.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/SortCache.c b/core/Lucy/Index/SortCache.c
index 00d41b2..31975ad 100644
--- a/core/Lucy/Index/SortCache.c
+++ b/core/Lucy/Index/SortCache.c
@@ -24,20 +24,22 @@ SortCache*
 SortCache_init(SortCache *self, const CharBuf *field, FieldType *type,
                void *ords, int32_t cardinality, int32_t doc_max, int32_t null_ord,
                int32_t ord_width) {
+    SortCacheIVARS *const ivars = SortCache_IVARS(self);
+
     // Init.
-    self->native_ords = false;
+    ivars->native_ords = false;
 
     // Assign.
     if (!FType_Sortable(type)) {
         THROW(ERR, "Non-sortable FieldType for %o", field);
     }
-    self->field       = CB_Clone(field);
-    self->type        = (FieldType*)INCREF(type);
-    self->ords        = ords;
-    self->cardinality = cardinality;
-    self->doc_max     = doc_max;
-    self->null_ord    = null_ord;
-    self->ord_width   = ord_width;
+    ivars->field       = CB_Clone(field);
+    ivars->type        = (FieldType*)INCREF(type);
+    ivars->ords        = ords;
+    ivars->cardinality = cardinality;
+    ivars->doc_max     = doc_max;
+    ivars->null_ord    = null_ord;
+    ivars->ord_width   = ord_width;
 
     ABSTRACT_CLASS_CHECK(self, SORTCACHE);
     return self;
@@ -45,56 +47,58 @@ SortCache_init(SortCache *self, const CharBuf *field, FieldType *type,
 
 void
 SortCache_destroy(SortCache *self) {
-    DECREF(self->field);
-    DECREF(self->type);
+    SortCacheIVARS *const ivars = SortCache_IVARS(self);
+    DECREF(ivars->field);
+    DECREF(ivars->type);
     SUPER_DESTROY(self, SORTCACHE);
 }
 
 bool
 SortCache_get_native_ords(SortCache *self) {
-    return self->native_ords;
+    return SortCache_IVARS(self)->native_ords;
 }
 
 void
 SortCache_set_native_ords(SortCache *self, bool native_ords) {
-    self->native_ords = native_ords;
+    SortCache_IVARS(self)->native_ords = native_ords;
 }
 
 int32_t
 SortCache_ordinal(SortCache *self, int32_t doc_id) {
-    if ((uint32_t)doc_id > (uint32_t)self->doc_max) {
-        THROW(ERR, "Out of range: %i32 > %i32", doc_id, self->doc_max);
+    SortCacheIVARS *const ivars = SortCache_IVARS(self);
+    if ((uint32_t)doc_id > (uint32_t)ivars->doc_max) {
+        THROW(ERR, "Out of range: %i32 > %i32", doc_id, ivars->doc_max);
     }
-    switch (self->ord_width) {
-        case 1: return NumUtil_u1get(self->ords, doc_id);
-        case 2: return NumUtil_u2get(self->ords, doc_id);
-        case 4: return NumUtil_u4get(self->ords, doc_id);
+    switch (ivars->ord_width) {
+        case 1: return NumUtil_u1get(ivars->ords, doc_id);
+        case 2: return NumUtil_u2get(ivars->ords, doc_id);
+        case 4: return NumUtil_u4get(ivars->ords, doc_id);
         case 8: {
-                uint8_t *ints = (uint8_t*)self->ords;
+                uint8_t *ints = (uint8_t*)ivars->ords;
                 return ints[doc_id];
             }
         case 16:
-            if (self->native_ords) {
-                uint16_t *ints = (uint16_t*)self->ords;
+            if (ivars->native_ords) {
+                uint16_t *ints = (uint16_t*)ivars->ords;
                 return ints[doc_id];
             }
             else {
-                uint8_t *bytes = (uint8_t*)self->ords;
+                uint8_t *bytes = (uint8_t*)ivars->ords;
                 bytes += doc_id * sizeof(uint16_t);
                 return NumUtil_decode_bigend_u16(bytes);
             }
         case 32:
-            if (self->native_ords) {
-                uint32_t *ints = (uint32_t*)self->ords;
+            if (ivars->native_ords) {
+                uint32_t *ints = (uint32_t*)ivars->ords;
                 return ints[doc_id];
             }
             else {
-                uint8_t *bytes = (uint8_t*)self->ords;
+                uint8_t *bytes = (uint8_t*)ivars->ords;
                 bytes += doc_id * sizeof(uint32_t);
                 return NumUtil_decode_bigend_u32(bytes);
             }
         default: {
-                THROW(ERR, "Invalid ord width: %i32", self->ord_width);
+                THROW(ERR, "Invalid ord width: %i32", ivars->ord_width);
                 UNREACHABLE_RETURN(int32_t);
             }
     }
@@ -102,9 +106,10 @@ SortCache_ordinal(SortCache *self, int32_t doc_id) {
 
 int32_t
 SortCache_find(SortCache *self, Obj *term) {
-    FieldType *const type   = self->type;
+    SortCacheIVARS *const ivars = SortCache_IVARS(self);
+    FieldType *const type   = ivars->type;
     int32_t          lo     = 0;
-    int32_t          hi     = self->cardinality - 1;
+    int32_t          hi     = ivars->cardinality - 1;
     int32_t          result = -100;
     Obj             *blank  = SortCache_Make_Blank(self);
 
@@ -113,7 +118,7 @@ SortCache_find(SortCache *self, Obj *term) {
         && !Obj_Is_A(blank, Obj_Get_VTable(term))
        ) {
         THROW(ERR, "SortCache error for field %o: term is a %o, and not "
-              "comparable to a %o", self->field, Obj_Get_Class_Name(term),
+              "comparable to a %o", ivars->field, Obj_Get_Class_Name(term),
               Obj_Get_Class_Name(blank));
     }
 
@@ -151,22 +156,22 @@ SortCache_find(SortCache *self, Obj *term) {
 
 void*
 SortCache_get_ords(SortCache *self) {
-    return self->ords;
+    return SortCache_IVARS(self)->ords;
 }
 
 int32_t
 SortCache_get_cardinality(SortCache *self) {
-    return self->cardinality;
+    return SortCache_IVARS(self)->cardinality;
 }
 
 int32_t
 SortCache_get_null_ord(SortCache *self) {
-    return self->null_ord;
+    return SortCache_IVARS(self)->null_ord;
 }
 
 int32_t
 SortCache_get_ord_width(SortCache *self) {
-    return self->ord_width;
+    return SortCache_IVARS(self)->ord_width;
 }
 
 


[lucy-commits] [8/9] git commit: refs/heads/ivars-wip1 - Fix prefix capitalization bug.

Posted by ma...@apache.org.
Fix prefix capitalization bug.


Project: http://git-wip-us.apache.org/repos/asf/lucy/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucy/commit/edf18e49
Tree: http://git-wip-us.apache.org/repos/asf/lucy/tree/edf18e49
Diff: http://git-wip-us.apache.org/repos/asf/lucy/diff/edf18e49

Branch: refs/heads/ivars-wip1
Commit: edf18e49be1459010f09cc5502bb88697885832e
Parents: d3edeee
Author: Marvin Humphrey <ma...@rectangular.com>
Authored: Mon Jul 1 08:41:38 2013 -0700
Committer: Marvin Humphrey <ma...@rectangular.com>
Committed: Mon Jul 1 08:46:22 2013 -0700

----------------------------------------------------------------------
 clownfish/compiler/src/CFCBindClass.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucy/blob/edf18e49/clownfish/compiler/src/CFCBindClass.c
----------------------------------------------------------------------
diff --git a/clownfish/compiler/src/CFCBindClass.c b/clownfish/compiler/src/CFCBindClass.c
index a1b3ac6..33b4d71 100644
--- a/clownfish/compiler/src/CFCBindClass.c
+++ b/clownfish/compiler/src/CFCBindClass.c
@@ -156,7 +156,7 @@ S_ivars_hack(CFCBindClass *self) {
     const char *full_struct = CFCClass_full_struct_sym(self->client);
     const char *full_ivars  = CFCClass_full_ivars_name(self->client);
     const char *short_ivars = CFCClass_short_ivars_name(self->client);
-    const char *prefix      = CFCClass_get_PREFIX(self->client);
+    const char *prefix      = CFCClass_get_prefix(self->client);
     const char *class_cnick = CFCClass_get_cnick(self->client);
     char pattern[] =
         "typedef struct %s %s;\n"


[lucy-commits] [5/9] git commit: refs/heads/ivars-wip1 - Migrate Lucy's posting classes to IVARS.

Posted by ma...@apache.org.
Migrate Lucy's posting classes to IVARS.

Change all of Lucy's posting classes to access instance vars via an IVARS
struct rather than via `self`.


Project: http://git-wip-us.apache.org/repos/asf/lucy/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucy/commit/188574cc
Tree: http://git-wip-us.apache.org/repos/asf/lucy/tree/188574cc
Diff: http://git-wip-us.apache.org/repos/asf/lucy/diff/188574cc

Branch: refs/heads/ivars-wip1
Commit: 188574cc97e759421aa1c9fcc907f548e0573822
Parents: 7c23ce2
Author: Marvin Humphrey <ma...@rectangular.com>
Authored: Sun Jun 30 20:34:50 2013 -0700
Committer: Marvin Humphrey <ma...@rectangular.com>
Committed: Mon Jul 1 08:09:37 2013 -0700

----------------------------------------------------------------------
 core/Lucy/Index/Posting/MatchPosting.c | 118 ++++++++++++++++------------
 core/Lucy/Index/Posting/RawPosting.c   |  43 ++++++----
 core/Lucy/Index/Posting/RichPosting.c  |  60 +++++++-------
 core/Lucy/Index/Posting/ScorePosting.c |  84 +++++++++++---------
 4 files changed, 175 insertions(+), 130 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucy/blob/188574cc/core/Lucy/Index/Posting/MatchPosting.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/Posting/MatchPosting.c b/core/Lucy/Index/Posting/MatchPosting.c
index 3ff887f..f12a4c8 100644
--- a/core/Lucy/Index/Posting/MatchPosting.c
+++ b/core/Lucy/Index/Posting/MatchPosting.c
@@ -56,38 +56,41 @@ MatchPost_new(Similarity *sim) {
 
 MatchPosting*
 MatchPost_init(MatchPosting *self, Similarity *sim) {
-    self->sim = (Similarity*)INCREF(sim);
+    MatchPostingIVARS *const ivars = MatchPost_IVARS(self);
+    ivars->sim = (Similarity*)INCREF(sim);
     return (MatchPosting*)Post_init((Posting*)self);
 }
 
 void
 MatchPost_destroy(MatchPosting *self) {
-    DECREF(self->sim);
+    MatchPostingIVARS *const ivars = MatchPost_IVARS(self);
+    DECREF(ivars->sim);
     SUPER_DESTROY(self, MATCHPOSTING);
 }
 
 int32_t
 MatchPost_get_freq(MatchPosting *self) {
-    return self->freq;
+    return MatchPost_IVARS(self)->freq;
 }
 
 void
 MatchPost_reset(MatchPosting *self) {
-    self->doc_id = 0;
+    MatchPost_IVARS(self)->doc_id = 0;
 }
 
 void
 MatchPost_read_record(MatchPosting *self, InStream *instream) {
+    MatchPostingIVARS *const ivars = MatchPost_IVARS(self);
     const uint32_t doc_code = InStream_Read_C32(instream);
     const uint32_t doc_delta = doc_code >> 1;
 
     // Apply delta doc and retrieve freq.
-    self->doc_id   += doc_delta;
+    ivars->doc_id   += doc_delta;
     if (doc_code & 1) {
-        self->freq = 1;
+        ivars->freq = 1;
     }
     else {
-        self->freq = InStream_Read_C32(instream);
+        ivars->freq = InStream_Read_C32(instream);
     }
 }
 
@@ -125,11 +128,11 @@ MatchPost_add_inversion_to_pool(MatchPosting *self, PostingPool *post_pool,
 
     Inversion_Reset(inversion);
     while ((tokens = Inversion_Next_Cluster(inversion, &freq)) != NULL) {
-        Token   *token          = *tokens;
-        uint32_t raw_post_bytes = MAX_RAW_POSTING_LEN(token->len);
+        TokenIVARS *const token_ivars = Token_IVARS(*tokens);
+        uint32_t raw_post_bytes = MAX_RAW_POSTING_LEN(token_ivars->len);
         RawPosting *raw_posting
             = RawPost_new(MemPool_Grab(mem_pool, raw_post_bytes), doc_id,
-                          freq, token->text, token->len);
+                          freq, token_ivars->text, token_ivars->len);
         PostPool_Feed(post_pool, &raw_posting);
     }
 }
@@ -156,7 +159,7 @@ MatchPostMatcher_init(MatchPostingMatcher *self, Similarity *sim,
 
 float
 MatchPostMatcher_score(MatchPostingMatcher* self) {
-    return self->weight;
+    return MatchPostMatcher_IVARS(self)->weight;
 }
 
 /***************************************************************************/
@@ -179,46 +182,55 @@ MatchPostWriter_init(MatchPostingWriter *self, Schema *schema,
         = CB_newf("%o/postings-%i32.dat", Seg_Get_Name(segment), field_num);
     PostWriter_init((PostingWriter*)self, schema, snapshot, segment,
                     polyreader, field_num);
-    self->outstream = Folder_Open_Out(folder, filename);
-    if (!self->outstream) { RETHROW(INCREF(Err_get_error())); }
+    MatchPostingWriterIVARS *const ivars = MatchPostWriter_IVARS(self);
+    ivars->outstream = Folder_Open_Out(folder, filename);
+    if (!ivars->outstream) { RETHROW(INCREF(Err_get_error())); }
     DECREF(filename);
     return self;
 }
 
 void
 MatchPostWriter_destroy(MatchPostingWriter *self) {
-    DECREF(self->outstream);
+    MatchPostingWriterIVARS *const ivars = MatchPostWriter_IVARS(self);
+    DECREF(ivars->outstream);
     SUPER_DESTROY(self, MATCHPOSTINGWRITER);
 }
 
 void
 MatchPostWriter_write_posting(MatchPostingWriter *self, RawPosting *posting) {
-    OutStream *const outstream   = self->outstream;
-    const int32_t    doc_id      = posting->doc_id;
-    const uint32_t   delta_doc   = doc_id - self->last_doc_id;
-    char  *const     aux_content = posting->blob + posting->content_len;
-    if (posting->freq == 1) {
+    MatchPostingWriterIVARS *const ivars = MatchPostWriter_IVARS(self);
+    RawPostingIVARS *const posting_ivars = RawPost_IVARS(posting);
+    OutStream *const outstream   = ivars->outstream;
+    const int32_t    doc_id      = posting_ivars->doc_id;
+    const uint32_t   delta_doc   = doc_id - ivars->last_doc_id;
+    char  *const     aux_content = posting_ivars->blob
+                                   + posting_ivars->content_len;
+    if (posting_ivars->freq == 1) {
         const uint32_t doc_code = (delta_doc << 1) | 1;
         OutStream_Write_C32(outstream, doc_code);
     }
     else {
         const uint32_t doc_code = delta_doc << 1;
         OutStream_Write_C32(outstream, doc_code);
-        OutStream_Write_C32(outstream, posting->freq);
+        OutStream_Write_C32(outstream, posting_ivars->freq);
     }
-    OutStream_Write_Bytes(outstream, aux_content, posting->aux_len);
-    self->last_doc_id = doc_id;
+    OutStream_Write_Bytes(outstream, aux_content, posting_ivars->aux_len);
+    ivars->last_doc_id = doc_id;
 }
 
 void
 MatchPostWriter_start_term(MatchPostingWriter *self, TermInfo *tinfo) {
-    self->last_doc_id   = 0;
-    tinfo->post_filepos = OutStream_Tell(self->outstream);
+    MatchPostingWriterIVARS *const ivars = MatchPostWriter_IVARS(self);
+    TermInfoIVARS *const tinfo_ivars = TInfo_IVARS(tinfo);
+    ivars->last_doc_id   = 0;
+    tinfo_ivars->post_filepos = OutStream_Tell(ivars->outstream);
 }
 
 void
 MatchPostWriter_update_skip_info(MatchPostingWriter *self, TermInfo *tinfo) {
-    tinfo->post_filepos = OutStream_Tell(self->outstream);
+    MatchPostingWriterIVARS *const ivars = MatchPostWriter_IVARS(self);
+    TermInfoIVARS *const tinfo_ivars = TInfo_IVARS(tinfo);
+    tinfo_ivars->post_filepos = OutStream_Tell(ivars->outstream);
 }
 
 /***************************************************************************/
@@ -234,43 +246,49 @@ MatchTermInfoStepper*
 MatchTInfoStepper_init(MatchTermInfoStepper *self, Schema *schema) {
     Architecture *arch = Schema_Get_Architecture(schema);
     TermStepper_init((TermStepper*)self);
-    self->skip_interval = Arch_Skip_Interval(arch);
-    self->value = (Obj*)TInfo_new(0);
+    MatchTermInfoStepperIVARS *const ivars = MatchTInfoStepper_IVARS(self);
+    ivars->skip_interval = Arch_Skip_Interval(arch);
+    ivars->value = (Obj*)TInfo_new(0);
     return self;
 }
 
 void
 MatchTInfoStepper_reset(MatchTermInfoStepper *self) {
-    TInfo_Reset((TermInfo*)self->value);
+    MatchTermInfoStepperIVARS *const ivars = MatchTInfoStepper_IVARS(self);
+    TInfo_Reset((TermInfo*)ivars->value);
 }
 
 void
 MatchTInfoStepper_write_key_frame(MatchTermInfoStepper *self,
                                   OutStream *outstream, Obj *value) {
+    MatchTermInfoStepperIVARS *const ivars = MatchTInfoStepper_IVARS(self);
     TermInfo *tinfo    = (TermInfo*)CERTIFY(value, TERMINFO);
     int32_t   doc_freq = TInfo_Get_Doc_Freq(tinfo);
+    TermInfoIVARS *const tinfo_ivars = TInfo_IVARS((TermInfo*)value);
 
     // Write doc_freq.
     OutStream_Write_C32(outstream, doc_freq);
 
     // Write postings file pointer.
-    OutStream_Write_C64(outstream, tinfo->post_filepos);
+    OutStream_Write_C64(outstream, tinfo_ivars->post_filepos);
 
     // Write skip file pointer (maybe).
-    if (doc_freq >= self->skip_interval) {
-        OutStream_Write_C64(outstream, tinfo->skip_filepos);
+    if (doc_freq >= ivars->skip_interval) {
+        OutStream_Write_C64(outstream, tinfo_ivars->skip_filepos);
     }
 
-    TInfo_Mimic((TermInfo*)self->value, (Obj*)tinfo);
+    TInfo_Mimic((TermInfo*)ivars->value, (Obj*)tinfo);
 }
 
 void
 MatchTInfoStepper_write_delta(MatchTermInfoStepper *self,
                               OutStream *outstream, Obj *value) {
+    MatchTermInfoStepperIVARS *const ivars = MatchTInfoStepper_IVARS(self);
     TermInfo *tinfo      = (TermInfo*)CERTIFY(value, TERMINFO);
-    TermInfo *last_tinfo = (TermInfo*)self->value;
+    TermInfo *last_tinfo = (TermInfo*)ivars->value;
     int32_t   doc_freq   = TInfo_Get_Doc_Freq(tinfo);
-    int64_t   post_delta = tinfo->post_filepos - last_tinfo->post_filepos;
+    int64_t   post_delta = TInfo_IVARS(tinfo)->post_filepos
+                           - TInfo_IVARS(last_tinfo)->post_filepos;
 
     // Write doc_freq.
     OutStream_Write_C32(outstream, doc_freq);
@@ -279,49 +297,51 @@ MatchTInfoStepper_write_delta(MatchTermInfoStepper *self,
     OutStream_Write_C64(outstream, post_delta);
 
     // Write skip file pointer (maybe).
-    if (doc_freq >= self->skip_interval) {
-        OutStream_Write_C64(outstream, tinfo->skip_filepos);
+    if (doc_freq >= ivars->skip_interval) {
+        OutStream_Write_C64(outstream, TInfo_IVARS(tinfo)->skip_filepos);
     }
 
-    TInfo_Mimic((TermInfo*)self->value, (Obj*)tinfo);
+    TInfo_Mimic((TermInfo*)ivars->value, (Obj*)tinfo);
 }
 
 void
 MatchTInfoStepper_read_key_frame(MatchTermInfoStepper *self,
                                  InStream *instream) {
-    TermInfo *const tinfo = (TermInfo*)self->value;
+    MatchTermInfoStepperIVARS *const ivars = MatchTInfoStepper_IVARS(self);
+    TermInfoIVARS *const tinfo_ivars = TInfo_IVARS((TermInfo*)ivars->value);
 
     // Read doc freq.
-    tinfo->doc_freq = InStream_Read_C32(instream);
+    tinfo_ivars->doc_freq = InStream_Read_C32(instream);
 
     // Read postings file pointer.
-    tinfo->post_filepos = InStream_Read_C64(instream);
+    tinfo_ivars->post_filepos = InStream_Read_C64(instream);
 
     // Maybe read skip pointer.
-    if (tinfo->doc_freq >= self->skip_interval) {
-        tinfo->skip_filepos = InStream_Read_C64(instream);
+    if (tinfo_ivars->doc_freq >= ivars->skip_interval) {
+        tinfo_ivars->skip_filepos = InStream_Read_C64(instream);
     }
     else {
-        tinfo->skip_filepos = 0;
+        tinfo_ivars->skip_filepos = 0;
     }
 }
 
 void
 MatchTInfoStepper_read_delta(MatchTermInfoStepper *self, InStream *instream) {
-    TermInfo *const tinfo = (TermInfo*)self->value;
+    MatchTermInfoStepperIVARS *const ivars = MatchTInfoStepper_IVARS(self);
+    TermInfoIVARS *const tinfo_ivars = TInfo_IVARS((TermInfo*)ivars->value);
 
     // Read doc freq.
-    tinfo->doc_freq = InStream_Read_C32(instream);
+    tinfo_ivars->doc_freq = InStream_Read_C32(instream);
 
     // Adjust postings file pointer.
-    tinfo->post_filepos += InStream_Read_C64(instream);
+    tinfo_ivars->post_filepos += InStream_Read_C64(instream);
 
     // Maybe read skip pointer.
-    if (tinfo->doc_freq >= self->skip_interval) {
-        tinfo->skip_filepos = InStream_Read_C64(instream);
+    if (tinfo_ivars->doc_freq >= ivars->skip_interval) {
+        tinfo_ivars->skip_filepos = InStream_Read_C64(instream);
     }
     else {
-        tinfo->skip_filepos = 0;
+        tinfo_ivars->skip_filepos = 0;
     }
 }
 

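The stepper hunks above keep the existing key-frame/delta layout while switching to TInfo_IVARS: doc_freq goes out as a C32, the postings file position is written absolute in a key frame and as a delta from the previous TermInfo otherwise, and a skip-file pointer follows only when doc_freq reaches skip_interval. A standalone sketch of that layout; TermInfoLite and the printf output are stand-ins for Lucy's TermInfo and OutStream, and the values are invented:

    /* Model of the key-frame / delta layout written above.  TermInfoLite
     * and plain printf stand in for Lucy's TermInfo and OutStream. */
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        int32_t doc_freq;
        int64_t post_filepos;
        int64_t skip_filepos;
    } TermInfoLite;

    /* Key frame: absolute postings file pointer. */
    static void
    write_key_frame(const TermInfoLite *tinfo, int32_t skip_interval) {
        printf("C32 doc_freq     = %d\n",   (int)tinfo->doc_freq);
        printf("C64 post_filepos = %lld\n", (long long)tinfo->post_filepos);
        if (tinfo->doc_freq >= skip_interval) {
            printf("C64 skip_filepos = %lld\n", (long long)tinfo->skip_filepos);
        }
    }

    /* Delta frame: postings pointer relative to the previous TermInfo. */
    static void
    write_delta(const TermInfoLite *tinfo, const TermInfoLite *last,
                int32_t skip_interval) {
        printf("C32 doc_freq     = %d\n", (int)tinfo->doc_freq);
        printf("C64 post_delta   = %lld\n",
               (long long)(tinfo->post_filepos - last->post_filepos));
        if (tinfo->doc_freq >= skip_interval) {
            printf("C64 skip_filepos = %lld\n", (long long)tinfo->skip_filepos);
        }
    }

    int
    main(void) {
        TermInfoLite last = { 3, 100, 0 };
        TermInfoLite next = { 12, 260, 4096 };
        write_key_frame(&last, 8);    /* no skip pointer: 3 < 8    */
        write_delta(&next, &last, 8); /* skip pointer:    12 >= 8  */
        return 0;
    }

The matching read functions above mirror this, which is why MatchTInfoStepper_read_key_frame and _read_delta zero skip_filepos when the pointer is absent.
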
http://git-wip-us.apache.org/repos/asf/lucy/blob/188574cc/core/Lucy/Index/Posting/RawPosting.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/Posting/RawPosting.c b/core/Lucy/Index/Posting/RawPosting.c
index eb87fb2..624beda 100644
--- a/core/Lucy/Index/Posting/RawPosting.c
+++ b/core/Lucy/Index/Posting/RawPosting.c
@@ -35,11 +35,12 @@ RawPost_new(void *pre_allocated_memory, int32_t doc_id, uint32_t freq,
             char *term_text, size_t term_text_len) {
     RawPosting *self
         = (RawPosting*)VTable_Init_Obj(RAWPOSTING, pre_allocated_memory);
-    self->doc_id      = doc_id;
-    self->freq        = freq;
-    self->content_len = term_text_len;
-    self->aux_len     = 0;
-    memcpy(&self->blob, term_text, term_text_len);
+    RawPostingIVARS *const ivars = RawPost_IVARS(self);
+    ivars->doc_id      = doc_id;
+    ivars->freq        = freq;
+    ivars->content_len = term_text_len;
+    ivars->aux_len     = 0;
+    memcpy(&ivars->blob, term_text, term_text_len);
 
     return self;
 }
@@ -85,34 +86,42 @@ RawPostWriter_init(RawPostingWriter *self, Schema *schema,
     const int32_t invalid_field_num = 0;
     PostWriter_init((PostingWriter*)self, schema, snapshot, segment,
                     polyreader, invalid_field_num);
-    self->outstream = (OutStream*)INCREF(outstream);
-    self->last_doc_id = 0;
+    RawPostingWriterIVARS *const ivars = RawPostWriter_IVARS(self);
+    ivars->outstream = (OutStream*)INCREF(outstream);
+    ivars->last_doc_id = 0;
     return self;
 }
 
 void
 RawPostWriter_start_term(RawPostingWriter *self, TermInfo *tinfo) {
-    self->last_doc_id   = 0;
-    tinfo->post_filepos = OutStream_Tell(self->outstream);
+    RawPostingWriterIVARS *const ivars = RawPostWriter_IVARS(self);
+    ivars->last_doc_id   = 0;
+    tinfo->post_filepos = OutStream_Tell(ivars->outstream);
 }
 
 void
 RawPostWriter_update_skip_info(RawPostingWriter *self, TermInfo *tinfo) {
-    tinfo->post_filepos = OutStream_Tell(self->outstream);
+    RawPostingWriterIVARS *const ivars = RawPostWriter_IVARS(self);
+    TermInfoIVARS *const tinfo_ivars = TInfo_IVARS(tinfo);
+    tinfo_ivars->post_filepos = OutStream_Tell(ivars->outstream);
 }
 
 void
 RawPostWriter_destroy(RawPostingWriter *self) {
-    DECREF(self->outstream);
+    RawPostingWriterIVARS *const ivars = RawPostWriter_IVARS(self);
+    DECREF(ivars->outstream);
     SUPER_DESTROY(self, RAWPOSTINGWRITER);
 }
 
 void
 RawPostWriter_write_posting(RawPostingWriter *self, RawPosting *posting) {
-    OutStream *const outstream   = self->outstream;
-    const int32_t    doc_id      = posting->doc_id;
-    const uint32_t   delta_doc   = doc_id - self->last_doc_id;
-    char  *const     aux_content = posting->blob + posting->content_len;
+    RawPostingWriterIVARS *const ivars = RawPostWriter_IVARS(self);
+    RawPostingIVARS *const posting_ivars = RawPost_IVARS(posting);
+    OutStream *const outstream   = ivars->outstream;
+    const int32_t    doc_id      = posting_ivars->doc_id;
+    const uint32_t   delta_doc   = doc_id - ivars->last_doc_id;
+    char  *const     aux_content = posting_ivars->blob
+                                   + posting_ivars->content_len;
     if (posting->freq == 1) {
         const uint32_t doc_code = (delta_doc << 1) | 1;
         OutStream_Write_C32(outstream, doc_code);
@@ -122,8 +131,8 @@ RawPostWriter_write_posting(RawPostingWriter *self, RawPosting *posting) {
         OutStream_Write_C32(outstream, doc_code);
         OutStream_Write_C32(outstream, posting->freq);
     }
-    OutStream_Write_Bytes(outstream, aux_content, posting->aux_len);
-    self->last_doc_id = doc_id;
+    OutStream_Write_Bytes(outstream, aux_content, posting_ivars->aux_len);
+    ivars->last_doc_id = doc_id;
 }
 
 

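RawPostWriter_write_posting above, and the record readers later in this message, share one doc_code convention: the doc-id delta is shifted left by one bit, the low bit flags "freq is 1", and a separate freq value follows only when that flag is clear. A standalone round-trip sketch of the convention; PostingRecord is an invented stand-in, and plain integers replace Lucy's C32 varints:

    /* Round-trip sketch of the doc_code convention used by the posting
     * writers/readers above; plain integers stand in for C32 varints. */
    #include <assert.h>
    #include <stdint.h>

    typedef struct {
        uint32_t doc_code;   /* (delta_doc << 1) | (freq == 1) */
        uint32_t freq;       /* only consulted when the low bit is clear */
    } PostingRecord;

    static PostingRecord
    encode(uint32_t delta_doc, uint32_t freq) {
        PostingRecord rec;
        rec.doc_code = (freq == 1) ? ((delta_doc << 1) | 1) : (delta_doc << 1);
        rec.freq     = freq;
        return rec;
    }

    static void
    decode(const PostingRecord *rec, uint32_t *delta_doc, uint32_t *freq) {
        *delta_doc = rec->doc_code >> 1;
        *freq      = (rec->doc_code & 1) ? 1 : rec->freq;
    }

    int
    main(void) {
        uint32_t delta, freq;
        PostingRecord one  = encode(7, 1);   /* freq folded into the flag bit */
        PostingRecord many = encode(7, 5);   /* freq stored separately        */
        decode(&one, &delta, &freq);
        assert(delta == 7 && freq == 1);
        decode(&many, &delta, &freq);
        assert(delta == 7 && freq == 5);
        return 0;
    }

Folding the common freq == 1 case into the flag bit saves one stored value per single-occurrence posting.
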
http://git-wip-us.apache.org/repos/asf/lucy/blob/188574cc/core/Lucy/Index/Posting/RichPosting.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/Posting/RichPosting.c b/core/Lucy/Index/Posting/RichPosting.c
index 30b4f39..40b23b6 100644
--- a/core/Lucy/Index/Posting/RichPosting.c
+++ b/core/Lucy/Index/Posting/RichPosting.c
@@ -50,46 +50,49 @@ RichPost_new(Similarity *sim) {
 RichPosting*
 RichPost_init(RichPosting *self, Similarity *sim) {
     ScorePost_init((ScorePosting*)self, sim);
-    self->prox_boosts     = NULL;
+    RichPostingIVARS *const ivars = RichPost_IVARS(self);
+    ivars->prox_boosts     = NULL;
     return self;
 }
 
 void
 RichPost_destroy(RichPosting *self) {
-    FREEMEM(self->prox_boosts);
+    RichPostingIVARS *const ivars = RichPost_IVARS(self);
+    FREEMEM(ivars->prox_boosts);
     SUPER_DESTROY(self, RICHPOSTING);
 }
 
 void
 RichPost_read_record(RichPosting *self, InStream *instream) {
-    float *const norm_decoder = self->norm_decoder;
+    RichPostingIVARS *const ivars = RichPost_IVARS(self);
+    float *const norm_decoder = ivars->norm_decoder;
     uint32_t  num_prox = 0;
     uint32_t  position = 0;
     float     aggregate_weight = 0.0;
 
     // Decode delta doc.
     uint32_t doc_code = InStream_Read_C32(instream);
-    self->doc_id += doc_code >> 1;
+    ivars->doc_id += doc_code >> 1;
 
     // If the stored num was odd, the freq is 1.
     if (doc_code & 1) {
-        self->freq = 1;
+        ivars->freq = 1;
     }
     // Otherwise, freq was stored as a C32.
     else {
-        self->freq = InStream_Read_C32(instream);
+        ivars->freq = InStream_Read_C32(instream);
     }
 
     // Read positions, aggregate per-position boost byte into weight.
-    num_prox = self->freq;
-    if (num_prox > self->prox_cap) {
-        self->prox
-            = (uint32_t*)REALLOCATE(self->prox, num_prox * sizeof(uint32_t));
-        self->prox_boosts
-            = (float*)REALLOCATE(self->prox_boosts, num_prox * sizeof(float));
+    num_prox = ivars->freq;
+    if (num_prox > ivars->prox_cap) {
+        ivars->prox
+            = (uint32_t*)REALLOCATE(ivars->prox, num_prox * sizeof(uint32_t));
+        ivars->prox_boosts
+            = (float*)REALLOCATE(ivars->prox_boosts, num_prox * sizeof(float));
     }
-    uint32_t *positions    = self->prox;
-    float    *prox_boosts  = self->prox_boosts;
+    uint32_t *positions    = ivars->prox;
+    float    *prox_boosts  = ivars->prox_boosts;
 
     while (num_prox--) {
         position += InStream_Read_C32(instream);
@@ -98,7 +101,7 @@ RichPost_read_record(RichPosting *self, InStream *instream) {
         aggregate_weight += *prox_boosts;
         prox_boosts++;
     }
-    self->weight = aggregate_weight / self->freq;
+    ivars->weight = aggregate_weight / ivars->freq;
 }
 
 void
@@ -106,38 +109,40 @@ RichPost_add_inversion_to_pool(RichPosting *self, PostingPool *post_pool,
                                Inversion *inversion, FieldType *type,
                                int32_t doc_id, float doc_boost,
                                float length_norm) {
+    RichPostingIVARS *const ivars = RichPost_IVARS(self);
     MemoryPool *mem_pool = PostPool_Get_Mem_Pool(post_pool);
-    Similarity *sim = self->sim;
+    Similarity *sim = ivars->sim;
     float       field_boost = doc_boost * FType_Get_Boost(type) * length_norm;
     Token     **tokens;
     uint32_t    freq;
 
     Inversion_Reset(inversion);
     while ((tokens = Inversion_Next_Cluster(inversion, &freq)) != NULL) {
-        Token   *token          = *tokens;
-        uint32_t raw_post_bytes = MAX_RAW_POSTING_LEN(token->len, freq);
+        TokenIVARS *const token_ivars = Token_IVARS(*tokens);
+        uint32_t raw_post_bytes = MAX_RAW_POSTING_LEN(token_ivars->len, freq);
         RawPosting *raw_posting
             = RawPost_new(MemPool_Grab(mem_pool, raw_post_bytes), doc_id,
-                          freq, token->text, token->len);
-        char *const start = raw_posting->blob + token->len;
+                          freq, token_ivars->text, token_ivars->len);
+        RawPostingIVARS *const raw_post_ivars = RawPost_IVARS(raw_posting);
+        char *const start = raw_post_ivars->blob + token_ivars->len;
         char *dest = start;
         uint32_t last_prox = 0;
 
         // Positions and boosts.
         for (uint32_t i = 0; i < freq; i++) {
-            Token *const t = tokens[i];
-            const uint32_t prox_delta = t->pos - last_prox;
-            const float boost = field_boost * t->boost;
+            TokenIVARS *const t_ivars = Token_IVARS(tokens[i]);
+            const uint32_t prox_delta = t_ivars->pos - last_prox;
+            const float boost = field_boost * t_ivars->boost;
 
             NumUtil_encode_c32(prox_delta, &dest);
-            last_prox = t->pos;
+            last_prox = t_ivars->pos;
 
             *((uint8_t*)dest) = Sim_Encode_Norm(sim, boost);
             dest++;
         }
 
         // Resize raw posting memory allocation.
-        raw_posting->aux_len = dest - start;
+        raw_post_ivars->aux_len = dest - start;
         raw_post_bytes = dest - (char*)raw_posting;
         MemPool_Resize(mem_pool, raw_posting, raw_post_bytes);
         PostPool_Feed(post_pool, &raw_posting);
@@ -159,8 +164,9 @@ RichPost_read_raw(RichPosting *self, InStream *instream, int32_t last_doc_id,
     void *const allocation        = MemPool_Grab(mem_pool, raw_post_bytes);
     RawPosting *const raw_posting
         = RawPost_new(allocation, doc_id, freq, text_buf, text_size);
+    RawPostingIVARS *const raw_post_ivars = RawPost_IVARS(raw_posting);
     uint32_t num_prox = freq;
-    char *const start = raw_posting->blob + text_size;
+    char *const start = raw_post_ivars->blob + text_size;
     char *      dest  = start;
     UNUSED_VAR(self);
 
@@ -172,7 +178,7 @@ RichPost_read_raw(RichPosting *self, InStream *instream, int32_t last_doc_id,
     }
 
     // Resize raw posting memory allocation.
-    raw_posting->aux_len = dest - start;
+    raw_post_ivars->aux_len = dest - start;
     raw_post_bytes       = dest - (char*)raw_posting;
     MemPool_Resize(mem_pool, raw_posting, raw_post_bytes);
 

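The RichPost_add_inversion_to_pool hunk above packs, after the term text, one (position delta, boost byte) pair per occurrence and records the total as aux_len. A reduced sketch of that packing; pack_positions is invented, plain bytes replace Lucy's C32 varints, and precomputed boost bytes stand in for Sim_Encode_Norm():

    /* Sketch of the per-position payload RichPosting appends after the
     * term text: a position delta followed by one encoded boost byte. */
    #include <stdint.h>
    #include <stdio.h>

    static size_t
    pack_positions(uint8_t *dest, const uint32_t *positions,
                   const uint8_t *boost_bytes, uint32_t freq) {
        uint8_t *cursor = dest;
        uint32_t last_prox = 0;
        for (uint32_t i = 0; i < freq; i++) {
            *cursor++ = (uint8_t)(positions[i] - last_prox);  /* prox delta */
            last_prox = positions[i];
            *cursor++ = boost_bytes[i];                       /* norm byte  */
        }
        return (size_t)(cursor - dest);                       /* "aux_len"  */
    }

    int
    main(void) {
        uint32_t positions[] = { 3, 10, 11 };
        uint8_t  boosts[]    = { 200, 180, 180 };
        uint8_t  aux[16];
        size_t aux_len = pack_positions(aux, positions, boosts, 3);
        printf("aux_len = %zu\n", aux_len);   /* 6: three (delta, boost) pairs */
        return 0;
    }

MAX_RAW_POSTING_LEN sizes the initial grab generously; the MemPool_Resize call afterwards trims the allocation to the bytes actually written.
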
http://git-wip-us.apache.org/repos/asf/lucy/blob/188574cc/core/Lucy/Index/Posting/ScorePosting.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/Posting/ScorePosting.c b/core/Lucy/Index/Posting/ScorePosting.c
index 08dee59..1cba64a 100644
--- a/core/Lucy/Index/Posting/ScorePosting.c
+++ b/core/Lucy/Index/Posting/ScorePosting.c
@@ -52,23 +52,25 @@ ScorePost_new(Similarity *sim) {
 ScorePosting*
 ScorePost_init(ScorePosting *self, Similarity *sim) {
     MatchPost_init((MatchPosting*)self, sim);
-    self->norm_decoder = Sim_Get_Norm_Decoder(sim);
-    self->freq         = 0;
-    self->weight       = 0.0;
-    self->prox         = NULL;
-    self->prox_cap     = 0;
+    ScorePostingIVARS *const ivars = ScorePost_IVARS(self);
+    ivars->norm_decoder = Sim_Get_Norm_Decoder(sim);
+    ivars->freq         = 0;
+    ivars->weight       = 0.0;
+    ivars->prox         = NULL;
+    ivars->prox_cap     = 0;
     return self;
 }
 
 void
 ScorePost_destroy(ScorePosting *self) {
-    FREEMEM(self->prox);
+    ScorePostingIVARS *const ivars = ScorePost_IVARS(self);
+    FREEMEM(ivars->prox);
     SUPER_DESTROY(self, SCOREPOSTING);
 }
 
 uint32_t*
 ScorePost_get_prox(ScorePosting *self) {
-    return self->prox;
+    return ScorePost_IVARS(self)->prox;
 }
 
 void
@@ -76,8 +78,9 @@ ScorePost_add_inversion_to_pool(ScorePosting *self, PostingPool *post_pool,
                                 Inversion *inversion, FieldType *type,
                                 int32_t doc_id, float doc_boost,
                                 float length_norm) {
+    ScorePostingIVARS *const ivars = ScorePost_IVARS(self);
     MemoryPool     *mem_pool = PostPool_Get_Mem_Pool(post_pool);
-    Similarity     *sim = self->sim;
+    Similarity     *sim = ivars->sim;
     float           field_boost = doc_boost * FType_Get_Boost(type) * length_norm;
     const uint8_t   field_boost_byte  = Sim_Encode_Norm(sim, field_boost);
     Token         **tokens;
@@ -85,12 +88,13 @@ ScorePost_add_inversion_to_pool(ScorePosting *self, PostingPool *post_pool,
 
     Inversion_Reset(inversion);
     while ((tokens = Inversion_Next_Cluster(inversion, &freq)) != NULL) {
-        Token   *token          = *tokens;
-        uint32_t raw_post_bytes = MAX_RAW_POSTING_LEN(token->len, freq);
+        TokenIVARS *const token_ivars = Token_IVARS(*tokens);
+        uint32_t raw_post_bytes = MAX_RAW_POSTING_LEN(token_ivars->len, freq);
         RawPosting *raw_posting
             = RawPost_new(MemPool_Grab(mem_pool, raw_post_bytes), doc_id,
-                          freq, token->text, token->len);
-        char *const start  = raw_posting->blob + token->len;
+                          freq, token_ivars->text, token_ivars->len);
+        RawPostingIVARS *const raw_post_ivars = RawPost_IVARS(raw_posting);
+        char *const start  = raw_post_ivars->blob + token_ivars->len;
         char *dest         = start;
         uint32_t last_prox = 0;
 
@@ -100,14 +104,14 @@ ScorePost_add_inversion_to_pool(ScorePosting *self, PostingPool *post_pool,
 
         // Positions.
         for (uint32_t i = 0; i < freq; i++) {
-            Token *const t = tokens[i];
-            const uint32_t prox_delta = t->pos - last_prox;
+            TokenIVARS *const t_ivars = Token_IVARS(tokens[i]);
+            const uint32_t prox_delta = t_ivars->pos - last_prox;
             NumUtil_encode_c32(prox_delta, &dest);
-            last_prox = t->pos;
+            last_prox = t_ivars->pos;
         }
 
         // Resize raw posting memory allocation.
-        raw_posting->aux_len = dest - start;
+        raw_post_ivars->aux_len = dest - start;
         raw_post_bytes = dest - (char*)raw_posting;
         MemPool_Resize(mem_pool, raw_posting, raw_post_bytes);
         PostPool_Feed(post_pool, &raw_posting);
@@ -116,13 +120,15 @@ ScorePost_add_inversion_to_pool(ScorePosting *self, PostingPool *post_pool,
 
 void
 ScorePost_reset(ScorePosting *self) {
-    self->doc_id = 0;
-    self->freq   = 0;
-    self->weight = 0.0;
+    ScorePostingIVARS *const ivars = ScorePost_IVARS(self);
+    ivars->doc_id = 0;
+    ivars->freq   = 0;
+    ivars->weight = 0.0;
 }
 
 void
 ScorePost_read_record(ScorePosting *self, InStream *instream) {
+    ScorePostingIVARS *const ivars = ScorePost_IVARS(self);
     uint32_t  position = 0;
     const size_t max_start_bytes = (C32_MAX_BYTES * 2) + 1;
     char *buf = InStream_Buf(instream, max_start_bytes);
@@ -130,26 +136,26 @@ ScorePost_read_record(ScorePosting *self, InStream *instream) {
     const uint32_t doc_delta = doc_code >> 1;
 
     // Apply delta doc and retrieve freq.
-    self->doc_id   += doc_delta;
+    ivars->doc_id   += doc_delta;
     if (doc_code & 1) {
-        self->freq = 1;
+        ivars->freq = 1;
     }
     else {
-        self->freq = NumUtil_decode_c32(&buf);
+        ivars->freq = NumUtil_decode_c32(&buf);
     }
 
     // Decode boost/norm byte.
-    self->weight = self->norm_decoder[*(uint8_t*)buf];
+    ivars->weight = ivars->norm_decoder[*(uint8_t*)buf];
     buf++;
 
     // Read positions.
-    uint32_t num_prox = self->freq;
-    if (num_prox > self->prox_cap) {
-        self->prox = (uint32_t*)REALLOCATE(
-                         self->prox, num_prox * sizeof(uint32_t));
-        self->prox_cap = num_prox;
+    uint32_t num_prox = ivars->freq;
+    if (num_prox > ivars->prox_cap) {
+        ivars->prox = (uint32_t*)REALLOCATE(
+                         ivars->prox, num_prox * sizeof(uint32_t));
+        ivars->prox_cap = num_prox;
     }
-    uint32_t *positions = self->prox;
+    uint32_t *positions = ivars->prox;
 
     InStream_Advance_Buf(instream, buf);
     buf = InStream_Buf(instream, num_prox * C32_MAX_BYTES);
@@ -177,8 +183,9 @@ ScorePost_read_raw(ScorePosting *self, InStream *instream,
     void *const allocation        = MemPool_Grab(mem_pool, raw_post_bytes);
     RawPosting *const raw_posting
         = RawPost_new(allocation, doc_id, freq, text_buf, text_size);
+    RawPostingIVARS *const raw_post_ivars = RawPost_IVARS(raw_posting);
     uint32_t num_prox = freq;
-    char *const start = raw_posting->blob + text_size;
+    char *const start = raw_post_ivars->blob + text_size;
     char *dest        = start;
     UNUSED_VAR(self);
 
@@ -192,7 +199,7 @@ ScorePost_read_raw(ScorePosting *self, InStream *instream,
     }
 
     // Resize raw posting memory allocation.
-    raw_posting->aux_len = dest - start;
+    raw_post_ivars->aux_len = dest - start;
     raw_post_bytes       = dest - (char*)raw_posting;
     MemPool_Resize(mem_pool, raw_posting, raw_post_bytes);
 
@@ -215,11 +222,12 @@ ScorePostMatcher_init(ScorePostingMatcher *self, Similarity *sim,
                       PostingList *plist, Compiler *compiler) {
     // Init.
     TermMatcher_init((TermMatcher*)self, sim, plist, compiler);
+    ScorePostingMatcherIVARS *const ivars = ScorePostMatcher_IVARS(self);
 
     // Fill score cache.
-    self->score_cache = (float*)MALLOCATE(TERMMATCHER_SCORE_CACHE_SIZE * sizeof(float));
+    ivars->score_cache = (float*)MALLOCATE(TERMMATCHER_SCORE_CACHE_SIZE * sizeof(float));
     for (uint32_t i = 0; i < TERMMATCHER_SCORE_CACHE_SIZE; i++) {
-        self->score_cache[i] = Sim_TF(sim, (float)i) * self->weight;
+        ivars->score_cache[i] = Sim_TF(sim, (float)i) * ivars->weight;
     }
 
     return self;
@@ -227,13 +235,14 @@ ScorePostMatcher_init(ScorePostingMatcher *self, Similarity *sim,
 
 float
 ScorePostMatcher_score(ScorePostingMatcher* self) {
-    ScorePosting *const posting = (ScorePosting*)self->posting;
+    ScorePostingMatcherIVARS *const ivars = ScorePostMatcher_IVARS(self);
+    ScorePosting *const posting = (ScorePosting*)ivars->posting;
     const uint32_t freq = posting->freq;
 
     // Calculate initial score based on frequency of term.
     float score = (freq < TERMMATCHER_SCORE_CACHE_SIZE)
-                  ? self->score_cache[freq] // cache hit
-                  : Sim_TF(self->sim, (float)freq) * self->weight;
+                  ? ivars->score_cache[freq] // cache hit
+                  : Sim_TF(ivars->sim, (float)freq) * ivars->weight;
 
     // Factor in field-length normalization and doc/field/prox boost.
     score *= posting->weight;
@@ -243,7 +252,8 @@ ScorePostMatcher_score(ScorePostingMatcher* self) {
 
 void
 ScorePostMatcher_destroy(ScorePostingMatcher *self) {
-    FREEMEM(self->score_cache);
+    ScorePostingMatcherIVARS *const ivars = ScorePostMatcher_IVARS(self);
+    FREEMEM(ivars->score_cache);
     SUPER_DESTROY(self, SCOREPOSTINGMATCHER);
 }
 

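ScorePostMatcher_init above fills a small term-frequency score cache so that ScorePostMatcher_score can skip the Sim_TF call for common low frequencies. A sketch of that trade-off; sqrtf stands in for Sim_TF, and the cache size and weight here are arbitrary:

    /* Sketch of the term-frequency score cache filled at init time;
     * sqrtf() is a stand-in for Sim_TF(). */
    #include <math.h>
    #include <stdio.h>

    #define SCORE_CACHE_SIZE 32

    int
    main(void) {
        const float weight = 1.5f;
        float cache[SCORE_CACHE_SIZE];
        for (unsigned i = 0; i < SCORE_CACHE_SIZE; i++) {
            cache[i] = sqrtf((float)i) * weight;          /* precompute */
        }

        unsigned freqs[] = { 3, 100 };
        for (int j = 0; j < 2; j++) {
            unsigned freq = freqs[j];
            float score = (freq < SCORE_CACHE_SIZE)
                          ? cache[freq]                     /* cache hit  */
                          : sqrtf((float)freq) * weight;    /* cache miss */
            printf("freq %u -> %.3f\n", freq, score);
        }
        return 0;
    }

Frequencies at or above the cache size simply fall through to the direct computation, so the cache is purely an optimization.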

[lucy-commits] [3/9] Migrate Lucy's index classes to IVARS.

Posted by ma...@apache.org.
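
The hunks in this message apply the same mechanical change as the files above: every direct self->member access is replaced with a pointer fetched once per function from the class's generated IVARS accessor. A standalone model of the pattern; Counter and Counter_IVARS are invented stand-ins for the Clownfish-generated struct and macro, which locate the ivars through a class-specific offset rather than an embedded member:

    /* Stand-alone model of the IVARS access pattern; Counter and
     * Counter_IVARS are invented, not the generated Clownfish API. */
    #include <stdio.h>

    typedef struct CounterIVARS {
        int last_doc_id;
    } CounterIVARS;

    typedef struct Counter {
        CounterIVARS ivars;   /* generated code reaches this via an offset */
    } Counter;

    static CounterIVARS*
    Counter_IVARS(Counter *self) {
        return &self->ivars;
    }

    static void
    Counter_bump(Counter *self, int doc_id) {
        /* Before: self->last_doc_id = doc_id;  After: */
        CounterIVARS *const ivars = Counter_IVARS(self);
        ivars->last_doc_id = doc_id;
    }

    int
    main(void) {
        Counter c = { { 0 } };
        Counter_bump(&c, 42);
        printf("%d\n", Counter_IVARS(&c)->last_doc_id);
        return 0;
    }

Fetching the pointer once at the top of each function, as the hunks below do, keeps each subsequent access a plain struct dereference.
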
http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/IndexReader.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/IndexReader.c b/core/Lucy/Index/IndexReader.c
index 99250c2..9f81db2 100644
--- a/core/Lucy/Index/IndexReader.c
+++ b/core/Lucy/Index/IndexReader.c
@@ -52,62 +52,66 @@ IxReader_init(IndexReader *self, Schema *schema, Folder *folder,
     DataReader_init((DataReader*)self, schema, folder, snapshot, segments,
                     seg_tick);
     DECREF(snapshot);
-    self->components     = Hash_new(0);
-    self->read_lock      = NULL;
-    self->deletion_lock  = NULL;
+    IndexReaderIVARS *const ivars = IxReader_IVARS(self);
+    ivars->components     = Hash_new(0);
+    ivars->read_lock      = NULL;
+    ivars->deletion_lock  = NULL;
     if (manager) {
-        self->manager = (IndexManager*)INCREF(manager);
-        IxManager_Set_Folder(self->manager, self->folder);
+        ivars->manager = (IndexManager*)INCREF(manager);
+        IxManager_Set_Folder(ivars->manager, ivars->folder);
     }
     else {
-        self->manager = NULL;
+        ivars->manager = NULL;
     }
     return self;
 }
 
 void
 IxReader_close(IndexReader *self) {
-    if (self->components) {
+    IndexReaderIVARS *const ivars = IxReader_IVARS(self);
+    if (ivars->components) {
         CharBuf *key;
         DataReader *component;
-        Hash_Iterate(self->components);
-        while (Hash_Next(self->components, (Obj**)&key,
+        Hash_Iterate(ivars->components);
+        while (Hash_Next(ivars->components, (Obj**)&key,
                          (Obj**)&component)
               ) {
             if (Obj_Is_A((Obj*)component, DATAREADER)) {
                 DataReader_Close(component);
             }
         }
-        Hash_Clear(self->components);
+        Hash_Clear(ivars->components);
     }
-    if (self->read_lock) {
-        Lock_Release(self->read_lock);
-        DECREF(self->read_lock);
-        self->read_lock = NULL;
+    if (ivars->read_lock) {
+        Lock_Release(ivars->read_lock);
+        DECREF(ivars->read_lock);
+        ivars->read_lock = NULL;
     }
 }
 
 void
 IxReader_destroy(IndexReader *self) {
-    DECREF(self->components);
-    if (self->read_lock) {
-        Lock_Release(self->read_lock);
-        DECREF(self->read_lock);
+    IndexReaderIVARS *const ivars = IxReader_IVARS(self);
+    DECREF(ivars->components);
+    if (ivars->read_lock) {
+        Lock_Release(ivars->read_lock);
+        DECREF(ivars->read_lock);
     }
-    DECREF(self->manager);
-    DECREF(self->deletion_lock);
+    DECREF(ivars->manager);
+    DECREF(ivars->deletion_lock);
     SUPER_DESTROY(self, INDEXREADER);
 }
 
 Hash*
 IxReader_get_components(IndexReader *self) {
-    return self->components;
+    return IxReader_IVARS(self)->components;
 }
 
 DataReader*
 IxReader_obtain(IndexReader *self, const CharBuf *api) {
+    IndexReaderIVARS *const ivars = IxReader_IVARS(self);
     DataReader *component
-        = (DataReader*)Hash_Fetch(self->components, (Obj*)api);
+        = (DataReader*)Hash_Fetch(ivars->components, (Obj*)api);
     if (!component) {
         THROW(ERR, "No component registered for '%o'", api);
     }
@@ -116,7 +120,8 @@ IxReader_obtain(IndexReader *self, const CharBuf *api) {
 
 DataReader*
 IxReader_fetch(IndexReader *self, const CharBuf *api) {
-    return (DataReader*)Hash_Fetch(self->components, (Obj*)api);
+    IndexReaderIVARS *const ivars = IxReader_IVARS(self);
+    return (DataReader*)Hash_Fetch(ivars->components, (Obj*)api);
 }
 
 

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/Indexer.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/Indexer.c b/core/Lucy/Index/Indexer.c
index 1a9076d..803b7db 100644
--- a/core/Lucy/Index/Indexer.c
+++ b/core/Lucy/Index/Indexer.c
@@ -70,33 +70,34 @@ Indexer_new(Schema *schema, Obj *index, IndexManager *manager, int32_t flags) {
 Indexer*
 Indexer_init(Indexer *self, Schema *schema, Obj *index,
              IndexManager *manager, int32_t flags) {
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
     bool      create   = (flags & Indexer_CREATE)   ? true : false;
     bool      truncate = (flags & Indexer_TRUNCATE) ? true : false;
     Folder   *folder   = S_init_folder(index, create);
     Snapshot *latest_snapshot = Snapshot_new();
 
     // Init.
-    self->stock_doc     = Doc_new(NULL, 0);
-    self->truncate      = false;
-    self->optimize      = false;
-    self->prepared      = false;
-    self->needs_commit  = false;
-    self->snapfile      = NULL;
-    self->merge_lock    = NULL;
+    ivars->stock_doc     = Doc_new(NULL, 0);
+    ivars->truncate      = false;
+    ivars->optimize      = false;
+    ivars->prepared      = false;
+    ivars->needs_commit  = false;
+    ivars->snapfile      = NULL;
+    ivars->merge_lock    = NULL;
 
     // Assign.
-    self->folder       = folder;
-    self->manager      = manager
+    ivars->folder       = folder;
+    ivars->manager      = manager
                          ? (IndexManager*)INCREF(manager)
                          : IxManager_new(NULL, NULL);
-    IxManager_Set_Folder(self->manager, folder);
+    IxManager_Set_Folder(ivars->manager, folder);
 
     // Get a write lock for this folder.
-    Lock *write_lock = IxManager_Make_Write_Lock(self->manager);
+    Lock *write_lock = IxManager_Make_Write_Lock(ivars->manager);
     Lock_Clear_Stale(write_lock);
     if (Lock_Obtain(write_lock)) {
         // Only assign if successful, otherwise DESTROY unlocks -- bad!
-        self->write_lock = write_lock;
+        ivars->write_lock = write_lock;
     }
     else {
         DECREF(write_lock);
@@ -112,7 +113,7 @@ Indexer_init(Indexer *self, Schema *schema, Obj *index,
 
     // Look for an existing Schema if one wasn't supplied.
     if (schema) {
-        self->schema = (Schema*)INCREF(schema);
+        ivars->schema = (Schema*)INCREF(schema);
     }
     else {
         if (!latest_snapfile) {
@@ -123,10 +124,10 @@ Indexer_init(Indexer *self, Schema *schema, Obj *index,
             CharBuf *schema_file = S_find_schema_file(latest_snapshot);
             Hash *dump = (Hash*)Json_slurp_json(folder, schema_file);
             if (dump) { // read file successfully
-                self->schema = (Schema*)CERTIFY(
+                ivars->schema = (Schema*)CERTIFY(
                                    VTable_Load_Obj(SCHEMA, (Obj*)dump),
                                    SCHEMA);
-                schema = self->schema;
+                schema = ivars->schema;
                 DECREF(dump);
                 schema_file = NULL;
             }
@@ -140,21 +141,21 @@ Indexer_init(Indexer *self, Schema *schema, Obj *index,
     // PolyReader.  Otherwise, start with the most recent Snapshot and an
     // up-to-date PolyReader.
     if (truncate) {
-        self->snapshot = Snapshot_new();
-        self->polyreader = PolyReader_new(schema, folder, NULL, NULL, NULL);
-        self->truncate = true;
+        ivars->snapshot = Snapshot_new();
+        ivars->polyreader = PolyReader_new(schema, folder, NULL, NULL, NULL);
+        ivars->truncate = true;
     }
     else {
         // TODO: clone most recent snapshot rather than read it twice.
-        self->snapshot = (Snapshot*)INCREF(latest_snapshot);
-        self->polyreader = latest_snapfile
+        ivars->snapshot = (Snapshot*)INCREF(latest_snapshot);
+        ivars->polyreader = latest_snapfile
                            ? PolyReader_open((Obj*)folder, NULL, NULL)
                            : PolyReader_new(schema, folder, NULL, NULL, NULL);
 
         if (latest_snapfile) {
             // Make sure that any existing fields which may have been
             // dynamically added during past indexing sessions get added.
-            Schema *old_schema = PolyReader_Get_Schema(self->polyreader);
+            Schema *old_schema = PolyReader_Get_Schema(ivars->polyreader);
             Schema_Eat(schema, old_schema);
         }
     }
@@ -163,18 +164,18 @@ Indexer_init(Indexer *self, Schema *schema, Obj *index,
     // Note: we have to feed FilePurger with the most recent snapshot file
     // now, but with the Indexer's snapshot later.
     FilePurger *file_purger
-        = FilePurger_new(folder, latest_snapshot, self->manager);
+        = FilePurger_new(folder, latest_snapshot, ivars->manager);
     FilePurger_Purge(file_purger);
     DECREF(file_purger);
 
     // Create a new segment.
     int64_t new_seg_num
-        = IxManager_Highest_Seg_Num(self->manager, latest_snapshot) + 1;
-    Lock *merge_lock = IxManager_Make_Merge_Lock(self->manager);
+        = IxManager_Highest_Seg_Num(ivars->manager, latest_snapshot) + 1;
+    Lock *merge_lock = IxManager_Make_Merge_Lock(ivars->manager);
     if (Lock_Is_Locked(merge_lock)) {
         // If there's a background merge process going on, stay out of its
         // way.
-        Hash *merge_data = IxManager_Read_Merge_Data(self->manager);
+        Hash *merge_data = IxManager_Read_Merge_Data(ivars->manager);
         Obj *cutoff_obj = merge_data
                           ? Hash_Fetch_Str(merge_data, "cutoff", 6)
                           : NULL;
@@ -191,27 +192,27 @@ Indexer_init(Indexer *self, Schema *schema, Obj *index,
         }
         DECREF(merge_data);
     }
-    self->segment = Seg_new(new_seg_num);
+    ivars->segment = Seg_new(new_seg_num);
 
     // Add all known fields to Segment.
     VArray *fields = Schema_All_Fields(schema);
     for (uint32_t i = 0, max = VA_Get_Size(fields); i < max; i++) {
-        Seg_Add_Field(self->segment, (CharBuf*)VA_Fetch(fields, i));
+        Seg_Add_Field(ivars->segment, (CharBuf*)VA_Fetch(fields, i));
     }
     DECREF(fields);
 
     DECREF(merge_lock);
 
     // Create new SegWriter and FilePurger.
-    self->file_purger
-        = FilePurger_new(folder, self->snapshot, self->manager);
-    self->seg_writer = SegWriter_new(self->schema, self->snapshot,
-                                     self->segment, self->polyreader);
-    SegWriter_Prep_Seg_Dir(self->seg_writer);
+    ivars->file_purger
+        = FilePurger_new(folder, ivars->snapshot, ivars->manager);
+    ivars->seg_writer = SegWriter_new(ivars->schema, ivars->snapshot,
+                                     ivars->segment, ivars->polyreader);
+    SegWriter_Prep_Seg_Dir(ivars->seg_writer);
 
     // Grab a local ref to the DeletionsWriter.
-    self->del_writer = (DeletionsWriter*)INCREF(
-                           SegWriter_Get_Del_Writer(self->seg_writer));
+    ivars->del_writer = (DeletionsWriter*)INCREF(
+                           SegWriter_Get_Del_Writer(ivars->seg_writer));
 
     DECREF(latest_snapfile);
     DECREF(latest_snapshot);
@@ -221,20 +222,21 @@ Indexer_init(Indexer *self, Schema *schema, Obj *index,
 
 void
 Indexer_destroy(Indexer *self) {
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
     S_release_merge_lock(self);
     S_release_write_lock(self);
-    DECREF(self->schema);
-    DECREF(self->folder);
-    DECREF(self->segment);
-    DECREF(self->manager);
-    DECREF(self->stock_doc);
-    DECREF(self->polyreader);
-    DECREF(self->del_writer);
-    DECREF(self->snapshot);
-    DECREF(self->seg_writer);
-    DECREF(self->file_purger);
-    DECREF(self->write_lock);
-    DECREF(self->snapfile);
+    DECREF(ivars->schema);
+    DECREF(ivars->folder);
+    DECREF(ivars->segment);
+    DECREF(ivars->manager);
+    DECREF(ivars->stock_doc);
+    DECREF(ivars->polyreader);
+    DECREF(ivars->del_writer);
+    DECREF(ivars->snapshot);
+    DECREF(ivars->seg_writer);
+    DECREF(ivars->file_purger);
+    DECREF(ivars->write_lock);
+    DECREF(ivars->snapfile);
     SUPER_DESTROY(self, INDEXER);
 }
 
@@ -268,12 +270,14 @@ S_init_folder(Obj *index, bool create) {
 
 void
 Indexer_add_doc(Indexer *self, Doc *doc, float boost) {
-    SegWriter_Add_Doc(self->seg_writer, doc, boost);
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
+    SegWriter_Add_Doc(ivars->seg_writer, doc, boost);
 }
 
 void
 Indexer_delete_by_term(Indexer *self, CharBuf *field, Obj *term) {
-    Schema    *schema = self->schema;
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
+    Schema    *schema = ivars->schema;
     FieldType *type   = Schema_Fetch_Type(schema, field);
 
     // Raise exception if the field isn't indexed.
@@ -288,28 +292,31 @@ Indexer_delete_by_term(Indexer *self, CharBuf *field, Obj *term) {
         VArray *terms = Analyzer_Split(analyzer, (CharBuf*)term);
         Obj *analyzed_term = VA_Fetch(terms, 0);
         if (analyzed_term) {
-            DelWriter_Delete_By_Term(self->del_writer, field,
+            DelWriter_Delete_By_Term(ivars->del_writer, field,
                                      analyzed_term);
         }
         DECREF(terms);
     }
     else {
-        DelWriter_Delete_By_Term(self->del_writer, field, term);
+        DelWriter_Delete_By_Term(ivars->del_writer, field, term);
     }
 }
 
 void
 Indexer_delete_by_query(Indexer *self, Query *query) {
-    DelWriter_Delete_By_Query(self->del_writer, query);
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
+    DelWriter_Delete_By_Query(ivars->del_writer, query);
 }
 
 void
 Indexer_delete_by_doc_id(Indexer *self, int32_t doc_id) {
-    DelWriter_Delete_By_Doc_ID(self->del_writer, doc_id);
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
+    DelWriter_Delete_By_Doc_ID(ivars->del_writer, doc_id);
 }
 
 void
 Indexer_add_index(Indexer *self, Obj *index) {
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
     Folder *other_folder = NULL;
     IndexReader *reader  = NULL;
 
@@ -328,7 +335,7 @@ Indexer_add_index(Indexer *self, Obj *index) {
         THROW(ERR, "Index doesn't seem to contain any data");
     }
     else {
-        Schema *schema       = self->schema;
+        Schema *schema       = ivars->schema;
         Schema *other_schema = IxReader_Get_Schema(reader);
         VArray *other_fields = Schema_All_Fields(other_schema);
         VArray *seg_readers  = IxReader_Seg_Readers(reader);
@@ -339,7 +346,7 @@ Indexer_add_index(Indexer *self, Obj *index) {
         // Add fields to Segment.
         for (uint32_t i = 0, max = VA_Get_Size(other_fields); i < max; i++) {
             CharBuf *other_field = (CharBuf*)VA_Fetch(other_fields, i);
-            Seg_Add_Field(self->segment, other_field);
+            Seg_Add_Field(ivars->segment, other_field);
         }
         DECREF(other_fields);
 
@@ -353,10 +360,10 @@ Indexer_add_index(Indexer *self, Obj *index) {
                                  ? DelReader_Iterator(del_reader)
                                  : NULL;
             I32Array *doc_map = DelWriter_Generate_Doc_Map(
-                                    self->del_writer, deletions,
+                                    ivars->del_writer, deletions,
                                     SegReader_Doc_Max(seg_reader),
-                                    (int32_t)Seg_Get_Count(self->segment));
-            SegWriter_Add_Segment(self->seg_writer, seg_reader, doc_map);
+                                    (int32_t)Seg_Get_Count(ivars->segment));
+            SegWriter_Add_Segment(ivars->seg_writer, seg_reader, doc_map);
             DECREF(deletions);
             DECREF(doc_map);
         }
@@ -369,7 +376,7 @@ Indexer_add_index(Indexer *self, Obj *index) {
 
 void
 Indexer_optimize(Indexer *self) {
-    self->optimize = true;
+    Indexer_IVARS(self)->optimize = true;
 }
 
 static CharBuf*
@@ -391,19 +398,20 @@ S_find_schema_file(Snapshot *snapshot) {
 
 static bool
 S_maybe_merge(Indexer *self, VArray *seg_readers) {
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
     bool      merge_happened  = false;
     uint32_t  num_seg_readers = VA_Get_Size(seg_readers);
-    Lock     *merge_lock      = IxManager_Make_Merge_Lock(self->manager);
+    Lock     *merge_lock      = IxManager_Make_Merge_Lock(ivars->manager);
     bool      got_merge_lock  = Lock_Obtain(merge_lock);
     int64_t   cutoff;
 
     if (got_merge_lock) {
-        self->merge_lock = merge_lock;
+        ivars->merge_lock = merge_lock;
         cutoff = 0;
     }
     else {
         // If something else holds the merge lock, don't interfere.
-        Hash *merge_data = IxManager_Read_Merge_Data(self->manager);
+        Hash *merge_data = IxManager_Read_Merge_Data(ivars->manager);
         if (merge_data) {
             Obj *cutoff_obj = Hash_Fetch_Str(merge_data, "cutoff", 6);
             if (cutoff_obj) {
@@ -422,8 +430,8 @@ S_maybe_merge(Indexer *self, VArray *seg_readers) {
 
     // Get a list of segments to recycle.  Validate and confirm that there are
     // no dupes in the list.
-    VArray *to_merge = IxManager_Recycle(self->manager, self->polyreader,
-                                         self->del_writer, cutoff, self->optimize);
+    VArray *to_merge = IxManager_Recycle(ivars->manager, ivars->polyreader,
+                                         ivars->del_writer, cutoff, ivars->optimize);
 
     Hash *seen = Hash_new(VA_Get_Size(to_merge));
     for (uint32_t i = 0, max = VA_Get_Size(to_merge); i < max; i++) {
@@ -445,26 +453,26 @@ S_maybe_merge(Indexer *self, VArray *seg_readers) {
         SegReader *seg_reader = (SegReader*)VA_Fetch(to_merge, i);
         int64_t seg_num = SegReader_Get_Seg_Num(seg_reader);
         Matcher *deletions
-            = DelWriter_Seg_Deletions(self->del_writer, seg_reader);
+            = DelWriter_Seg_Deletions(ivars->del_writer, seg_reader);
         I32Array *doc_map = DelWriter_Generate_Doc_Map(
-                                self->del_writer, deletions,
+                                ivars->del_writer, deletions,
                                 SegReader_Doc_Max(seg_reader),
-                                (int32_t)Seg_Get_Count(self->segment));
+                                (int32_t)Seg_Get_Count(ivars->segment));
         if (seg_num <= cutoff) {
             THROW(ERR, "Segment %o violates cutoff (%i64 <= %i64)",
                   SegReader_Get_Seg_Name(seg_reader), seg_num, cutoff);
         }
-        SegWriter_Merge_Segment(self->seg_writer, seg_reader, doc_map);
+        SegWriter_Merge_Segment(ivars->seg_writer, seg_reader, doc_map);
         merge_happened = true;
         DECREF(deletions);
         DECREF(doc_map);
     }
 
     // Write out new deletions.
-    if (DelWriter_Updated(self->del_writer)) {
+    if (DelWriter_Updated(ivars->del_writer)) {
         // Only write out if they haven't all been applied.
         if (VA_Get_Size(to_merge) != num_seg_readers) {
-            DelWriter_Finish(self->del_writer);
+            DelWriter_Finish(ivars->del_writer);
         }
     }
 
@@ -474,11 +482,12 @@ S_maybe_merge(Indexer *self, VArray *seg_readers) {
 
 void
 Indexer_prepare_commit(Indexer *self) {
-    VArray   *seg_readers     = PolyReader_Get_Seg_Readers(self->polyreader);
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
+    VArray   *seg_readers     = PolyReader_Get_Seg_Readers(ivars->polyreader);
     uint32_t  num_seg_readers = VA_Get_Size(seg_readers);
     bool      merge_happened  = false;
 
-    if (!self->write_lock || self->prepared) {
+    if (!ivars->write_lock || ivars->prepared) {
         THROW(ERR, "Can't call Prepare_Commit() more than once");
     }
 
@@ -488,26 +497,26 @@ Indexer_prepare_commit(Indexer *self) {
     }
 
     // Add a new segment and write a new snapshot file if...
-    if (Seg_Get_Count(self->segment)             // Docs/segs added.
+    if (Seg_Get_Count(ivars->segment)             // Docs/segs added.
         || merge_happened                        // Some segs merged.
-        || !Snapshot_Num_Entries(self->snapshot) // Initializing index.
-        || DelWriter_Updated(self->del_writer)
+        || !Snapshot_Num_Entries(ivars->snapshot) // Initializing index.
+        || DelWriter_Updated(ivars->del_writer)
        ) {
-        Folder   *folder   = self->folder;
-        Schema   *schema   = self->schema;
-        Snapshot *snapshot = self->snapshot;
+        Folder   *folder   = ivars->folder;
+        Schema   *schema   = ivars->schema;
+        Snapshot *snapshot = ivars->snapshot;
 
         // Derive snapshot and schema file names.
-        DECREF(self->snapfile);
-        self->snapfile = IxManager_Make_Snapshot_Filename(self->manager);
-        CB_Cat_Trusted_Str(self->snapfile, ".temp", 5);
-        uint64_t schema_gen = IxFileNames_extract_gen(self->snapfile);
+        DECREF(ivars->snapfile);
+        ivars->snapfile = IxManager_Make_Snapshot_Filename(ivars->manager);
+        CB_Cat_Trusted_Str(ivars->snapfile, ".temp", 5);
+        uint64_t schema_gen = IxFileNames_extract_gen(ivars->snapfile);
         char base36[StrHelp_MAX_BASE36_BYTES];
         StrHelp_to_base36(schema_gen, &base36);
         CharBuf *new_schema_name = CB_newf("schema_%s.json", base36);
 
         // Finish the segment, write schema file.
-        SegWriter_Finish(self->seg_writer);
+        SegWriter_Finish(ivars->seg_writer);
         Schema_Write(schema, folder, new_schema_name);
         CharBuf *old_schema_name = S_find_schema_file(snapshot);
         if (old_schema_name) {
@@ -517,42 +526,44 @@ Indexer_prepare_commit(Indexer *self) {
         DECREF(new_schema_name);
 
         // Write temporary snapshot file.
-        Folder_Delete(folder, self->snapfile);
-        Snapshot_Write_File(snapshot, folder, self->snapfile);
+        Folder_Delete(folder, ivars->snapfile);
+        Snapshot_Write_File(snapshot, folder, ivars->snapfile);
 
-        self->needs_commit = true;
+        ivars->needs_commit = true;
     }
 
     // Close reader, so that we can delete its files if appropriate.
-    PolyReader_Close(self->polyreader);
+    PolyReader_Close(ivars->polyreader);
 
-    self->prepared = true;
+    ivars->prepared = true;
 }
 
 void
 Indexer_commit(Indexer *self) {
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
+
     // Safety check.
-    if (!self->write_lock) {
+    if (!ivars->write_lock) {
         THROW(ERR, "Can't call commit() more than once");
     }
 
-    if (!self->prepared) {
+    if (!ivars->prepared) {
         Indexer_Prepare_Commit(self);
     }
 
-    if (self->needs_commit) {
+    if (ivars->needs_commit) {
         bool success;
 
         // Rename temp snapshot file.
-        CharBuf *temp_snapfile = CB_Clone(self->snapfile);
-        CB_Chop(self->snapfile, sizeof(".temp") - 1);
-        Snapshot_Set_Path(self->snapshot, self->snapfile);
-        success = Folder_Rename(self->folder, temp_snapfile, self->snapfile);
+        CharBuf *temp_snapfile = CB_Clone(ivars->snapfile);
+        CB_Chop(ivars->snapfile, sizeof(".temp") - 1);
+        Snapshot_Set_Path(ivars->snapshot, ivars->snapfile);
+        success = Folder_Rename(ivars->folder, temp_snapfile, ivars->snapfile);
         DECREF(temp_snapfile);
         if (!success) { RETHROW(INCREF(Err_get_error())); }
 
         // Purge obsolete files.
-        FilePurger_Purge(self->file_purger);
+        FilePurger_Purge(ivars->file_purger);
     }
 
     // Release locks, invalidating the Indexer.
@@ -562,34 +573,36 @@ Indexer_commit(Indexer *self) {
 
 Schema*
 Indexer_get_schema(Indexer *self) {
-    return self->schema;
+    return Indexer_IVARS(self)->schema;
 }
 
 SegWriter*
 Indexer_get_seg_writer(Indexer *self) {
-    return self->seg_writer;
+    return Indexer_IVARS(self)->seg_writer;
 }
 
 Doc*
 Indexer_get_stock_doc(Indexer *self) {
-    return self->stock_doc;
+    return Indexer_IVARS(self)->stock_doc;
 }
 
 static void
 S_release_write_lock(Indexer *self) {
-    if (self->write_lock) {
-        Lock_Release(self->write_lock);
-        DECREF(self->write_lock);
-        self->write_lock = NULL;
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
+    if (ivars->write_lock) {
+        Lock_Release(ivars->write_lock);
+        DECREF(ivars->write_lock);
+        ivars->write_lock = NULL;
     }
 }
 
 static void
 S_release_merge_lock(Indexer *self) {
-    if (self->merge_lock) {
-        Lock_Release(self->merge_lock);
-        DECREF(self->merge_lock);
-        self->merge_lock = NULL;
+    IndexerIVARS *const ivars = Indexer_IVARS(self);
+    if (ivars->merge_lock) {
+        Lock_Release(ivars->merge_lock);
+        DECREF(ivars->merge_lock);
+        ivars->merge_lock = NULL;
     }
 }
 

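The Indexer hunks above keep the two-phase commit: Prepare_Commit writes the new snapshot under a ".temp" suffix, and Commit chops the suffix by renaming the file, so readers only ever see a complete snapshot. A reduced sketch of that sequence using standard C stdio; the file name and JSON payload are invented, and error handling is trimmed to perror:

    /* Sketch of the prepare/commit split: write the snapshot under a
     * ".temp" name, then rename it into place. */
    #include <stdio.h>

    int
    main(void) {
        const char *final_name = "snapshot_5.json";
        char temp_name[64];
        snprintf(temp_name, sizeof(temp_name), "%s.temp", final_name);

        /* "Prepare_Commit": write the full snapshot under the temp name. */
        FILE *fh = fopen(temp_name, "w");
        if (!fh) { perror("fopen"); return 1; }
        fputs("{ \"entries\": [] }\n", fh);
        fclose(fh);

        /* "Commit": chop the .temp suffix by renaming. */
        if (rename(temp_name, final_name) != 0) {
            perror("rename");
            return 1;
        }
        return 0;
    }

The rename is what makes the new snapshot visible, so an interrupted run leaves at worst a stray .temp file rather than a half-written snapshot.
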
http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/Inverter.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/Inverter.c b/core/Lucy/Index/Inverter.c
index 10077ed..7bf5807 100644
--- a/core/Lucy/Index/Inverter.c
+++ b/core/Lucy/Index/Inverter.c
@@ -40,138 +40,155 @@ Inverter_new(Schema *schema, Segment *segment) {
 
 Inverter*
 Inverter_init(Inverter *self, Schema *schema, Segment *segment) {
+    InverterIVARS *const ivars = Inverter_IVARS(self);
+
     // Init.
-    self->tick       = -1;
-    self->doc        = NULL;
-    self->sorted     = false;
-    self->blank      = InvEntry_new(NULL, NULL, 0);
-    self->current    = self->blank;
+    ivars->tick       = -1;
+    ivars->doc        = NULL;
+    ivars->sorted     = false;
+    ivars->blank      = InvEntry_new(NULL, NULL, 0);
+    ivars->current    = ivars->blank;
 
     // Derive.
-    self->entry_pool = VA_new(Schema_Num_Fields(schema));
-    self->entries    = VA_new(Schema_Num_Fields(schema));
+    ivars->entry_pool = VA_new(Schema_Num_Fields(schema));
+    ivars->entries    = VA_new(Schema_Num_Fields(schema));
 
     // Assign.
-    self->schema  = (Schema*)INCREF(schema);
-    self->segment = (Segment*)INCREF(segment);
+    ivars->schema  = (Schema*)INCREF(schema);
+    ivars->segment = (Segment*)INCREF(segment);
 
     return self;
 }
 
 void
 Inverter_destroy(Inverter *self) {
+    InverterIVARS *const ivars = Inverter_IVARS(self);
     Inverter_Clear(self);
-    DECREF(self->blank);
-    DECREF(self->entries);
-    DECREF(self->entry_pool);
-    DECREF(self->schema);
-    DECREF(self->segment);
+    DECREF(ivars->blank);
+    DECREF(ivars->entries);
+    DECREF(ivars->entry_pool);
+    DECREF(ivars->schema);
+    DECREF(ivars->segment);
     SUPER_DESTROY(self, INVERTER);
 }
 
 uint32_t
 Inverter_iterate(Inverter *self) {
-    self->tick = -1;
-    if (!self->sorted) {
-        VA_Sort(self->entries, NULL, NULL);
-        self->sorted = true;
+    InverterIVARS *const ivars = Inverter_IVARS(self);
+    ivars->tick = -1;
+    if (!ivars->sorted) {
+        VA_Sort(ivars->entries, NULL, NULL);
+        ivars->sorted = true;
     }
-    return VA_Get_Size(self->entries);
+    return VA_Get_Size(ivars->entries);
 }
 
 int32_t
 Inverter_next(Inverter *self) {
-    self->current = (InverterEntry*)VA_Fetch(self->entries, ++self->tick);
-    if (!self->current) { self->current = self->blank; } // Exhausted.
-    return self->current->field_num;
+    InverterIVARS *const ivars = Inverter_IVARS(self);
+    ivars->current = (InverterEntry*)VA_Fetch(ivars->entries, ++ivars->tick);
+    if (!ivars->current) { ivars->current = ivars->blank; } // Exhausted.
+    return ivars->current->field_num;
 }
 
 void
 Inverter_set_doc(Inverter *self, Doc *doc) {
+    InverterIVARS *const ivars = Inverter_IVARS(self);
     Inverter_Clear(self); // Zap all cached field values and Inversions.
-    self->doc = (Doc*)INCREF(doc);
+    ivars->doc = (Doc*)INCREF(doc);
 }
 
 void
 Inverter_set_boost(Inverter *self, float boost) {
-    self->boost = boost;
+    Inverter_IVARS(self)->boost = boost;
 }
 
 float
 Inverter_get_boost(Inverter *self) {
-    return self->boost;
+    return Inverter_IVARS(self)->boost;
 }
 
 Doc*
 Inverter_get_doc(Inverter *self) {
-    return self->doc;
+    return Inverter_IVARS(self)->doc;
 }
 
 CharBuf*
 Inverter_get_field_name(Inverter *self) {
-    return self->current->field;
+    InverterEntry *current = Inverter_IVARS(self)->current;
+    return InvEntry_IVARS(current)->field;
 }
 
 Obj*
 Inverter_get_value(Inverter *self) {
-    return self->current->value;
+    InverterEntry *current = Inverter_IVARS(self)->current;
+    return InvEntry_IVARS(current)->value;
 }
 
 FieldType*
 Inverter_get_type(Inverter *self) {
-    return self->current->type;
+    InverterEntry *current = Inverter_IVARS(self)->current;
+    return InvEntry_IVARS(current)->type;
 }
 
 Analyzer*
 Inverter_get_analyzer(Inverter *self) {
-    return self->current->analyzer;
+    InverterEntry *current = Inverter_IVARS(self)->current;
+    return InvEntry_IVARS(current)->analyzer;
 }
 
 Similarity*
 Inverter_get_similarity(Inverter *self) {
-    return self->current->sim;
+    InverterEntry *current = Inverter_IVARS(self)->current;
+    return InvEntry_IVARS(current)->sim;
 }
 
 Inversion*
 Inverter_get_inversion(Inverter *self) {
-    return self->current->inversion;
+    InverterEntry *current = Inverter_IVARS(self)->current;
+    return InvEntry_IVARS(current)->inversion;
 }
 
 
 void
 Inverter_add_field(Inverter *self, InverterEntry *entry) {
+    InverterIVARS *const ivars = Inverter_IVARS(self);
+    InverterEntryIVARS *const entry_ivars = InvEntry_IVARS(entry);
+
     // Get an Inversion, going through analyzer if appropriate.
-    if (entry->analyzer) {
-        DECREF(entry->inversion);
-        entry->inversion = Analyzer_Transform_Text(entry->analyzer,
-                                                   (CharBuf*)entry->value);
-        Inversion_Invert(entry->inversion);
+    if (entry_ivars->analyzer) {
+        DECREF(entry_ivars->inversion);
+        entry_ivars->inversion
+            = Analyzer_Transform_Text(entry_ivars->analyzer,
+                                      (CharBuf*)entry_ivars->value);
+        Inversion_Invert(entry_ivars->inversion);
     }
-    else if (entry->indexed || entry->highlightable) {
-        ViewCharBuf *value = (ViewCharBuf*)entry->value;
+    else if (entry_ivars->indexed || entry_ivars->highlightable) {
+        ViewCharBuf *value = (ViewCharBuf*)entry_ivars->value;
         size_t token_len = ViewCB_Get_Size(value);
         Token *seed = Token_new((char*)ViewCB_Get_Ptr8(value),
                                 token_len, 0, token_len, 1.0f, 1);
-        DECREF(entry->inversion);
-        entry->inversion = Inversion_new(seed);
+        DECREF(entry_ivars->inversion);
+        entry_ivars->inversion = Inversion_new(seed);
         DECREF(seed);
-        Inversion_Invert(entry->inversion); // Nearly a no-op.
+        Inversion_Invert(entry_ivars->inversion); // Nearly a no-op.
     }
 
     // Prime the iterator.
-    VA_Push(self->entries, INCREF(entry));
-    self->sorted = false;
+    VA_Push(ivars->entries, INCREF(entry));
+    ivars->sorted = false;
 }
 
 void
 Inverter_clear(Inverter *self) {
-    for (uint32_t i = 0, max = VA_Get_Size(self->entries); i < max; i++) {
-        InvEntry_Clear(VA_Fetch(self->entries, i));
+    InverterIVARS *const ivars = Inverter_IVARS(self);
+    for (uint32_t i = 0, max = VA_Get_Size(ivars->entries); i < max; i++) {
+        InvEntry_Clear(VA_Fetch(ivars->entries, i));
     }
-    VA_Clear(self->entries);
-    self->tick = -1;
-    DECREF(self->doc);
-    self->doc = NULL;
+    VA_Clear(ivars->entries);
+    ivars->tick = -1;
+    DECREF(ivars->doc);
+    ivars->doc = NULL;
 }
 
 InverterEntry*
@@ -183,49 +200,50 @@ InvEntry_new(Schema *schema, const CharBuf *field, int32_t field_num) {
 InverterEntry*
 InvEntry_init(InverterEntry *self, Schema *schema, const CharBuf *field,
               int32_t field_num) {
-    self->field_num  = field_num;
-    self->field      = field ? CB_Clone(field) : NULL;
-    self->inversion  = NULL;
+    InverterEntryIVARS *const ivars = InvEntry_IVARS(self);
+    ivars->field_num  = field_num;
+    ivars->field      = field ? CB_Clone(field) : NULL;
+    ivars->inversion  = NULL;
 
     if (schema) {
-        self->analyzer
+        ivars->analyzer
             = (Analyzer*)INCREF(Schema_Fetch_Analyzer(schema, field));
-        self->sim  = (Similarity*)INCREF(Schema_Fetch_Sim(schema, field));
-        self->type = (FieldType*)INCREF(Schema_Fetch_Type(schema, field));
-        if (!self->type) { THROW(ERR, "Unknown field: '%o'", field); }
+        ivars->sim  = (Similarity*)INCREF(Schema_Fetch_Sim(schema, field));
+        ivars->type = (FieldType*)INCREF(Schema_Fetch_Type(schema, field));
+        if (!ivars->type) { THROW(ERR, "Unknown field: '%o'", field); }
 
-        uint8_t prim_id = FType_Primitive_ID(self->type);
+        uint8_t prim_id = FType_Primitive_ID(ivars->type);
         switch (prim_id & FType_PRIMITIVE_ID_MASK) {
             case FType_TEXT:
-                self->value = (Obj*)ViewCB_new_from_trusted_utf8(NULL, 0);
+                ivars->value = (Obj*)ViewCB_new_from_trusted_utf8(NULL, 0);
                 break;
             case FType_BLOB:
-                self->value = (Obj*)ViewBB_new(NULL, 0);
+                ivars->value = (Obj*)ViewBB_new(NULL, 0);
                 break;
             case FType_INT32:
-                self->value = (Obj*)Int32_new(0);
+                ivars->value = (Obj*)Int32_new(0);
                 break;
             case FType_INT64:
-                self->value = (Obj*)Int64_new(0);
+                ivars->value = (Obj*)Int64_new(0);
                 break;
             case FType_FLOAT32:
-                self->value = (Obj*)Float32_new(0);
+                ivars->value = (Obj*)Float32_new(0);
                 break;
             case FType_FLOAT64:
-                self->value = (Obj*)Float64_new(0);
+                ivars->value = (Obj*)Float64_new(0);
                 break;
             default:
                 THROW(ERR, "Unrecognized primitive id: %i8", prim_id);
         }
 
-        self->indexed = FType_Indexed(self->type);
-        if (self->indexed && FType_Is_A(self->type, NUMERICTYPE)) {
+        ivars->indexed = FType_Indexed(ivars->type);
+        if (ivars->indexed && FType_Is_A(ivars->type, NUMERICTYPE)) {
             THROW(ERR, "Field '%o' spec'd as indexed, but numerical types cannot "
                   "be indexed yet", field);
         }
-        if (FType_Is_A(self->type, FULLTEXTTYPE)) {
-            self->highlightable
-                = FullTextType_Highlightable((FullTextType*)self->type);
+        if (FType_Is_A(ivars->type, FULLTEXTTYPE)) {
+            ivars->highlightable
+                = FullTextType_Highlightable((FullTextType*)ivars->type);
         }
     }
     return self;
@@ -233,26 +251,29 @@ InvEntry_init(InverterEntry *self, Schema *schema, const CharBuf *field,
 
 void
 InvEntry_destroy(InverterEntry *self) {
-    DECREF(self->field);
-    DECREF(self->value);
-    DECREF(self->analyzer);
-    DECREF(self->type);
-    DECREF(self->sim);
-    DECREF(self->inversion);
+    InverterEntryIVARS *const ivars = InvEntry_IVARS(self);
+    DECREF(ivars->field);
+    DECREF(ivars->value);
+    DECREF(ivars->analyzer);
+    DECREF(ivars->type);
+    DECREF(ivars->sim);
+    DECREF(ivars->inversion);
     SUPER_DESTROY(self, INVERTERENTRY);
 }
 
 void
 InvEntry_clear(InverterEntry *self) {
-    DECREF(self->inversion);
-    self->inversion = NULL;
+    InverterEntryIVARS *const ivars = InvEntry_IVARS(self);
+    DECREF(ivars->inversion);
+    ivars->inversion = NULL;
 }
 
 int32_t
 InvEntry_compare_to(InverterEntry *self, Obj *other) {
-    InverterEntry *competitor
-        = (InverterEntry*)CERTIFY(other, INVERTERENTRY);
-    return self->field_num - competitor->field_num;
+    CERTIFY(other, INVERTERENTRY);
+    InverterEntryIVARS *const ivars = InvEntry_IVARS(self);
+    InverterEntryIVARS *const ovars = InvEntry_IVARS((InverterEntry*)other);
+    return ivars->field_num - ovars->field_num;
 }
 
 

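The Inverter hunks above preserve its iteration shape: Iterate() sorts the collected entries and rewinds the tick to -1, and Next() pre-increments the tick, falling back to the blank entry once the array is exhausted. A reduced sketch over a plain int array; MiniInverter and its field numbers are invented, with 0 playing the role of the blank entry:

    /* Sketch of the tick-and-blank iteration, reduced to a sorted array. */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
        int field_nums[8];
        int size;
        int tick;
        int blank;      /* returned once the entries are exhausted */
    } MiniInverter;

    static int
    cmp_int(const void *a, const void *b) {
        return *(const int*)a - *(const int*)b;
    }

    static int
    mini_iterate(MiniInverter *self) {
        self->tick = -1;
        qsort(self->field_nums, self->size, sizeof(int), cmp_int);
        return self->size;
    }

    static int
    mini_next(MiniInverter *self) {
        ++self->tick;
        return (self->tick < self->size) ? self->field_nums[self->tick]
                                         : self->blank;
    }

    int
    main(void) {
        MiniInverter inv = { { 3, 1, 2 }, 3, -1, 0 };
        mini_iterate(&inv);
        for (int f = mini_next(&inv); f != inv.blank; f = mini_next(&inv)) {
            printf("field %d\n", f);
        }
        return 0;
    }

Returning a blank entry instead of NULL lets callers such as Inverter_get_field_name dereference the current entry unconditionally.
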
http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/LexIndex.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/LexIndex.c b/core/Lucy/Index/LexIndex.c
index dca9adf..bc252f4 100644
--- a/core/Lucy/Index/LexIndex.c
+++ b/core/Lucy/Index/LexIndex.c
@@ -50,41 +50,42 @@ LexIndex_init(LexIndex *self, Schema *schema, Folder *folder,
 
     // Init.
     Lex_init((Lexicon*)self, field);
-    self->tinfo        = TInfo_new(0);
-    self->tick         = 0;
+    LexIndexIVARS *const ivars = LexIndex_IVARS(self);
+    ivars->tinfo        = TInfo_new(0);
+    ivars->tick         = 0;
 
     // Derive
-    self->field_type = Schema_Fetch_Type(schema, field);
-    if (!self->field_type) {
+    ivars->field_type = Schema_Fetch_Type(schema, field);
+    if (!ivars->field_type) {
         CharBuf *mess = MAKE_MESS("Unknown field: '%o'", field);
         DECREF(ix_file);
         DECREF(ixix_file);
         DECREF(self);
         Err_throw_mess(ERR, mess);
     }
-    INCREF(self->field_type);
-    self->term_stepper = FType_Make_Term_Stepper(self->field_type);
-    self->ixix_in = Folder_Open_In(folder, ixix_file);
-    if (!self->ixix_in) {
+    INCREF(ivars->field_type);
+    ivars->term_stepper = FType_Make_Term_Stepper(ivars->field_type);
+    ivars->ixix_in = Folder_Open_In(folder, ixix_file);
+    if (!ivars->ixix_in) {
         Err *error = (Err*)INCREF(Err_get_error());
         DECREF(ix_file);
         DECREF(ixix_file);
         DECREF(self);
         RETHROW(error);
     }
-    self->ix_in = Folder_Open_In(folder, ix_file);
-    if (!self->ix_in) {
+    ivars->ix_in = Folder_Open_In(folder, ix_file);
+    if (!ivars->ix_in) {
         Err *error = (Err*)INCREF(Err_get_error());
         DECREF(ix_file);
         DECREF(ixix_file);
         DECREF(self);
         RETHROW(error);
     }
-    self->index_interval = Arch_Index_Interval(arch);
-    self->skip_interval  = Arch_Skip_Interval(arch);
-    self->size    = (int32_t)(InStream_Length(self->ixix_in) / sizeof(int64_t));
-    self->offsets = (int64_t*)InStream_Buf(self->ixix_in,
-                                           (size_t)InStream_Length(self->ixix_in));
+    ivars->index_interval = Arch_Index_Interval(arch);
+    ivars->skip_interval  = Arch_Skip_Interval(arch);
+    ivars->size    = (int32_t)(InStream_Length(ivars->ixix_in) / sizeof(int64_t));
+    ivars->offsets = (int64_t*)InStream_Buf(ivars->ixix_in,
+                                           (size_t)InStream_Length(ivars->ixix_in));
 
     DECREF(ixix_file);
     DECREF(ix_file);
@@ -94,39 +95,43 @@ LexIndex_init(LexIndex *self, Schema *schema, Folder *folder,
 
 void
 LexIndex_destroy(LexIndex *self) {
-    DECREF(self->field_type);
-    DECREF(self->ixix_in);
-    DECREF(self->ix_in);
-    DECREF(self->term_stepper);
-    DECREF(self->tinfo);
+    LexIndexIVARS *const ivars = LexIndex_IVARS(self);
+    DECREF(ivars->field_type);
+    DECREF(ivars->ixix_in);
+    DECREF(ivars->ix_in);
+    DECREF(ivars->term_stepper);
+    DECREF(ivars->tinfo);
     SUPER_DESTROY(self, LEXINDEX);
 }
 
 int32_t
 LexIndex_get_term_num(LexIndex *self) {
-    return (self->index_interval * self->tick) - 1;
+    LexIndexIVARS *const ivars = LexIndex_IVARS(self);
+    return (ivars->index_interval * ivars->tick) - 1;
 }
 
 Obj*
 LexIndex_get_term(LexIndex *self) {
-    return TermStepper_Get_Value(self->term_stepper);
+    LexIndexIVARS *const ivars = LexIndex_IVARS(self);
+    return TermStepper_Get_Value(ivars->term_stepper);
 }
 
 TermInfo*
 LexIndex_get_term_info(LexIndex *self) {
-    return self->tinfo;
+    return LexIndex_IVARS(self)->tinfo;
 }
 
 static void
 S_read_entry(LexIndex *self) {
-    InStream *ix_in  = self->ix_in;
-    TermInfo *tinfo  = self->tinfo;
-    int64_t offset = (int64_t)NumUtil_decode_bigend_u64(self->offsets + self->tick);
+    LexIndexIVARS *const ivars = LexIndex_IVARS(self);
+    InStream *ix_in  = ivars->ix_in;
+    TermInfo *tinfo  = ivars->tinfo;
+    int64_t offset = (int64_t)NumUtil_decode_bigend_u64(ivars->offsets + ivars->tick);
     InStream_Seek(ix_in, offset);
-    TermStepper_Read_Key_Frame(self->term_stepper, ix_in);
+    TermStepper_Read_Key_Frame(ivars->term_stepper, ix_in);
     tinfo->doc_freq     = InStream_Read_C32(ix_in);
     tinfo->post_filepos = InStream_Read_C64(ix_in);
-    tinfo->skip_filepos = tinfo->doc_freq >= self->skip_interval
+    tinfo->skip_filepos = tinfo->doc_freq >= ivars->skip_interval
                           ? InStream_Read_C64(ix_in)
                           : 0;
     tinfo->lex_filepos  = InStream_Read_C64(ix_in);
@@ -134,15 +139,16 @@ S_read_entry(LexIndex *self) {
 
 void
 LexIndex_seek(LexIndex *self, Obj *target) {
-    TermStepper *term_stepper = self->term_stepper;
-    InStream    *ix_in        = self->ix_in;
-    FieldType   *type         = self->field_type;
+    LexIndexIVARS *const ivars = LexIndex_IVARS(self);
+    TermStepper *term_stepper = ivars->term_stepper;
+    InStream    *ix_in        = ivars->ix_in;
+    FieldType   *type         = ivars->field_type;
     int32_t      lo           = 0;
-    int32_t      hi           = self->size - 1;
+    int32_t      hi           = ivars->size - 1;
     int32_t      result       = -100;
 
-    if (target == NULL || self->size == 0) {
-        self->tick = 0;
+    if (target == NULL || ivars->size == 0) {
+        ivars->tick = 0;
         return;
     }
     else {
@@ -163,12 +169,12 @@ LexIndex_seek(LexIndex *self, Obj *target) {
     while (hi >= lo) {
         const int32_t mid = lo + ((hi - lo) / 2);
         const int64_t offset
-            = (int64_t)NumUtil_decode_bigend_u64(self->offsets + mid);
+            = (int64_t)NumUtil_decode_bigend_u64(ivars->offsets + mid);
         InStream_Seek(ix_in, offset);
         TermStepper_Read_Key_Frame(term_stepper, ix_in);
 
         // Compare values.  There is no need for a NULL-check because the term
-        // number is alway between 0 and self->size - 1.
+        // number is always between 0 and ivars->size - 1.
         Obj *value = TermStepper_Get_Value(term_stepper);
         int32_t comparison = FType_Compare_Values(type, target, value);
 
@@ -185,7 +191,7 @@ LexIndex_seek(LexIndex *self, Obj *target) {
     }
 
     // Record the index of the entry we've seeked to, then read entry.
-    self->tick = hi == -1 // indicating that target lt first entry
+    ivars->tick = hi == -1 // indicating that target lt first entry
                  ? 0
                  : result == -100 // if result is still -100, it wasn't set
                  ? hi

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/Lexicon.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/Lexicon.c b/core/Lucy/Index/Lexicon.c
index c26b1e0..d1fd76a 100644
--- a/core/Lucy/Index/Lexicon.c
+++ b/core/Lucy/Index/Lexicon.c
@@ -21,19 +21,21 @@
 
 Lexicon*
 Lex_init(Lexicon *self, const CharBuf *field) {
-    self->field = CB_Clone(field);
+    LexiconIVARS *const ivars = Lex_IVARS(self);
+    ivars->field = CB_Clone(field);
     ABSTRACT_CLASS_CHECK(self, LEXICON);
     return self;
 }
 
 CharBuf*
 Lex_get_field(Lexicon *self) {
-    return self->field;
+    return Lex_IVARS(self)->field;
 }
 
 void
 Lex_destroy(Lexicon *self) {
-    DECREF(self->field);
+    LexiconIVARS *const ivars = Lex_IVARS(self);
+    DECREF(ivars->field);
     SUPER_DESTROY(self, LEXICON);
 }
 

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/LexiconReader.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/LexiconReader.c b/core/Lucy/Index/LexiconReader.c
index 9a82ef6..e08bea2 100644
--- a/core/Lucy/Index/LexiconReader.c
+++ b/core/Lucy/Index/LexiconReader.c
@@ -61,27 +61,30 @@ PolyLexReader_init(PolyLexiconReader *self, VArray *readers,
         if (!schema) { schema = LexReader_Get_Schema(reader); }
     }
     LexReader_init((LexiconReader*)self, schema, NULL, NULL, NULL, -1);
-    self->readers = (VArray*)INCREF(readers);
-    self->offsets = (I32Array*)INCREF(offsets);
+    PolyLexiconReaderIVARS *const ivars = PolyLexReader_IVARS(self);
+    ivars->readers = (VArray*)INCREF(readers);
+    ivars->offsets = (I32Array*)INCREF(offsets);
     return self;
 }
 
 void
 PolyLexReader_close(PolyLexiconReader *self) {
-    if (self->readers) {
-        for (uint32_t i = 0, max = VA_Get_Size(self->readers); i < max; i++) {
+    PolyLexiconReaderIVARS *const ivars = PolyLexReader_IVARS(self);
+    if (ivars->readers) {
+        for (uint32_t i = 0, max = VA_Get_Size(ivars->readers); i < max; i++) {
             LexiconReader *reader
-                = (LexiconReader*)VA_Fetch(self->readers, i);
+                = (LexiconReader*)VA_Fetch(ivars->readers, i);
             if (reader) { LexReader_Close(reader); }
         }
-        VA_Clear(self->readers);
+        VA_Clear(ivars->readers);
     }
 }
 
 void
 PolyLexReader_destroy(PolyLexiconReader *self) {
-    DECREF(self->readers);
-    DECREF(self->offsets);
+    PolyLexiconReaderIVARS *const ivars = PolyLexReader_IVARS(self);
+    DECREF(ivars->readers);
+    DECREF(ivars->offsets);
     SUPER_DESTROY(self, POLYLEXICONREADER);
 }
 
@@ -94,7 +97,8 @@ PolyLexReader_lexicon(PolyLexiconReader *self, const CharBuf *field,
         Schema *schema = PolyLexReader_Get_Schema(self);
         FieldType *type = Schema_Fetch_Type(schema, field);
         if (type != NULL) {
-            lexicon = PolyLex_new(field, self->readers);
+            PolyLexiconReaderIVARS *const ivars = PolyLexReader_IVARS(self);
+            lexicon = PolyLex_new(field, ivars->readers);
             if (!PolyLex_Get_Num_Seg_Lexicons(lexicon)) {
                 DECREF(lexicon);
                 return NULL;
@@ -109,9 +113,10 @@ PolyLexReader_lexicon(PolyLexiconReader *self, const CharBuf *field,
 uint32_t
 PolyLexReader_doc_freq(PolyLexiconReader *self, const CharBuf *field,
                        Obj *term) {
+    PolyLexiconReaderIVARS *const ivars = PolyLexReader_IVARS(self);
     uint32_t doc_freq = 0;
-    for (uint32_t i = 0, max = VA_Get_Size(self->readers); i < max; i++) {
-        LexiconReader *reader = (LexiconReader*)VA_Fetch(self->readers, i);
+    for (uint32_t i = 0, max = VA_Get_Size(ivars->readers); i < max; i++) {
+        LexiconReader *reader = (LexiconReader*)VA_Fetch(ivars->readers, i);
         if (reader) {
             doc_freq += LexReader_Doc_Freq(reader, field, term);
         }
@@ -157,15 +162,16 @@ DefLexReader_init(DefaultLexiconReader *self, Schema *schema, Folder *folder,
     // Init.
     LexReader_init((LexiconReader*)self, schema, folder, snapshot, segments,
                    seg_tick);
+    DefaultLexiconReaderIVARS *const ivars = DefLexReader_IVARS(self);
     Segment *segment = DefLexReader_Get_Segment(self);
 
     // Build an array of SegLexicon objects.
-    self->lexicons = VA_new(Schema_Num_Fields(schema));
+    ivars->lexicons = VA_new(Schema_Num_Fields(schema));
     for (uint32_t i = 1, max = Schema_Num_Fields(schema) + 1; i < max; i++) {
         CharBuf *field = Seg_Field_Name(segment, i);
         if (field && S_has_data(schema, folder, segment, field)) {
             SegLexicon *lexicon = SegLex_new(schema, folder, segment, field);
-            VA_Store(self->lexicons, i, (Obj*)lexicon);
+            VA_Store(ivars->lexicons, i, (Obj*)lexicon);
         }
     }
 
@@ -174,26 +180,29 @@ DefLexReader_init(DefaultLexiconReader *self, Schema *schema, Folder *folder,
 
 void
 DefLexReader_close(DefaultLexiconReader *self) {
-    DECREF(self->lexicons);
-    self->lexicons = NULL;
+    DefaultLexiconReaderIVARS *const ivars = DefLexReader_IVARS(self);
+    DECREF(ivars->lexicons);
+    ivars->lexicons = NULL;
 }
 
 void
 DefLexReader_destroy(DefaultLexiconReader *self) {
-    DECREF(self->lexicons);
+    DefaultLexiconReaderIVARS *const ivars = DefLexReader_IVARS(self);
+    DECREF(ivars->lexicons);
     SUPER_DESTROY(self, DEFAULTLEXICONREADER);
 }
 
 Lexicon*
 DefLexReader_lexicon(DefaultLexiconReader *self, const CharBuf *field,
                      Obj *term) {
-    int32_t     field_num = Seg_Field_Num(self->segment, field);
-    SegLexicon *orig      = (SegLexicon*)VA_Fetch(self->lexicons, field_num);
+    DefaultLexiconReaderIVARS *const ivars = DefLexReader_IVARS(self);
+    int32_t     field_num = Seg_Field_Num(ivars->segment, field);
+    SegLexicon *orig      = (SegLexicon*)VA_Fetch(ivars->lexicons, field_num);
     SegLexicon *lexicon   = NULL;
 
     if (orig) { // i.e. has data
         lexicon
-            = SegLex_new(self->schema, self->folder, self->segment, field);
+            = SegLex_new(ivars->schema, ivars->folder, ivars->segment, field);
         SegLex_Seek(lexicon, term);
     }
 
@@ -202,10 +211,11 @@ DefLexReader_lexicon(DefaultLexiconReader *self, const CharBuf *field,
 
 static TermInfo*
 S_find_tinfo(DefaultLexiconReader *self, const CharBuf *field, Obj *target) {
+    DefaultLexiconReaderIVARS *const ivars = DefLexReader_IVARS(self);
     if (field != NULL && target != NULL) {
-        int32_t field_num = Seg_Field_Num(self->segment, field);
+        int32_t field_num = Seg_Field_Num(ivars->segment, field);
         SegLexicon *lexicon
-            = (SegLexicon*)VA_Fetch(self->lexicons, field_num);
+            = (SegLexicon*)VA_Fetch(ivars->lexicons, field_num);
 
         if (lexicon) {
             // Iterate until the result is ge the term.

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/LexiconWriter.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/LexiconWriter.c b/core/Lucy/Index/LexiconWriter.c
index 9a65fa3..115771e 100644
--- a/core/Lucy/Index/LexiconWriter.c
+++ b/core/Lucy/Index/LexiconWriter.c
@@ -46,79 +46,85 @@ LexWriter_init(LexiconWriter *self, Schema *schema, Snapshot *snapshot,
     Architecture *arch = Schema_Get_Architecture(schema);
 
     DataWriter_init((DataWriter*)self, schema, snapshot, segment, polyreader);
+    LexiconWriterIVARS *const ivars = LexWriter_IVARS(self);
 
     // Assign.
-    self->index_interval = Arch_Index_Interval(arch);
-    self->skip_interval  = Arch_Skip_Interval(arch);
+    ivars->index_interval = Arch_Index_Interval(arch);
+    ivars->skip_interval  = Arch_Skip_Interval(arch);
 
     // Init.
-    self->ix_out             = NULL;
-    self->ixix_out           = NULL;
-    self->dat_out            = NULL;
-    self->count              = 0;
-    self->ix_count           = 0;
-    self->dat_file           = CB_new(30);
-    self->ix_file            = CB_new(30);
-    self->ixix_file          = CB_new(30);
-    self->counts             = Hash_new(0);
-    self->ix_counts          = Hash_new(0);
-    self->temp_mode          = false;
-    self->term_stepper       = NULL;
-    self->tinfo_stepper      = (TermStepper*)MatchTInfoStepper_new(schema);
+    ivars->ix_out             = NULL;
+    ivars->ixix_out           = NULL;
+    ivars->dat_out            = NULL;
+    ivars->count              = 0;
+    ivars->ix_count           = 0;
+    ivars->dat_file           = CB_new(30);
+    ivars->ix_file            = CB_new(30);
+    ivars->ixix_file          = CB_new(30);
+    ivars->counts             = Hash_new(0);
+    ivars->ix_counts          = Hash_new(0);
+    ivars->temp_mode          = false;
+    ivars->term_stepper       = NULL;
+    ivars->tinfo_stepper      = (TermStepper*)MatchTInfoStepper_new(schema);
 
     return self;
 }
 
 void
 LexWriter_destroy(LexiconWriter *self) {
-    DECREF(self->term_stepper);
-    DECREF(self->tinfo_stepper);
-    DECREF(self->dat_file);
-    DECREF(self->ix_file);
-    DECREF(self->ixix_file);
-    DECREF(self->dat_out);
-    DECREF(self->ix_out);
-    DECREF(self->ixix_out);
-    DECREF(self->counts);
-    DECREF(self->ix_counts);
+    LexiconWriterIVARS *const ivars = LexWriter_IVARS(self);
+    DECREF(ivars->term_stepper);
+    DECREF(ivars->tinfo_stepper);
+    DECREF(ivars->dat_file);
+    DECREF(ivars->ix_file);
+    DECREF(ivars->ixix_file);
+    DECREF(ivars->dat_out);
+    DECREF(ivars->ix_out);
+    DECREF(ivars->ixix_out);
+    DECREF(ivars->counts);
+    DECREF(ivars->ix_counts);
     SUPER_DESTROY(self, LEXICONWRITER);
 }
 
 static void
 S_add_last_term_to_ix(LexiconWriter *self) {
+    LexiconWriterIVARS *const ivars = LexWriter_IVARS(self);
+
     // Write file pointer to index record.
-    OutStream_Write_I64(self->ixix_out, OutStream_Tell(self->ix_out));
+    OutStream_Write_I64(ivars->ixix_out, OutStream_Tell(ivars->ix_out));
 
     // Write term and file pointer to main record.  Track count of terms added
     // to ix.
-    TermStepper_Write_Key_Frame(self->term_stepper,
-                                self->ix_out, TermStepper_Get_Value(self->term_stepper));
-    TermStepper_Write_Key_Frame(self->tinfo_stepper,
-                                self->ix_out, TermStepper_Get_Value(self->tinfo_stepper));
-    OutStream_Write_C64(self->ix_out, OutStream_Tell(self->dat_out));
-    self->ix_count++;
+    TermStepper_Write_Key_Frame(ivars->term_stepper,
+                                ivars->ix_out, TermStepper_Get_Value(ivars->term_stepper));
+    TermStepper_Write_Key_Frame(ivars->tinfo_stepper,
+                                ivars->ix_out, TermStepper_Get_Value(ivars->tinfo_stepper));
+    OutStream_Write_C64(ivars->ix_out, OutStream_Tell(ivars->dat_out));
+    ivars->ix_count++;
 }
 
 void
 LexWriter_add_term(LexiconWriter* self, CharBuf* term_text, TermInfo* tinfo) {
-    OutStream *dat_out = self->dat_out;
+    LexiconWriterIVARS *const ivars = LexWriter_IVARS(self);
+    OutStream *dat_out = ivars->dat_out;
 
-    if ((self->count % self->index_interval == 0)
-        && !self->temp_mode
+    if ((ivars->count % ivars->index_interval == 0)
+        && !ivars->temp_mode
        ) {
         // Write a subset of entries to lexicon.ix.
         S_add_last_term_to_ix(self);
     }
 
-    TermStepper_Write_Delta(self->term_stepper, dat_out, (Obj*)term_text);
-    TermStepper_Write_Delta(self->tinfo_stepper, dat_out, (Obj*)tinfo);
+    TermStepper_Write_Delta(ivars->term_stepper, dat_out, (Obj*)term_text);
+    TermStepper_Write_Delta(ivars->tinfo_stepper, dat_out, (Obj*)tinfo);
 
     // Track number of terms.
-    self->count++;
+    ivars->count++;
 }
 
 void
 LexWriter_start_field(LexiconWriter *self, int32_t field_num) {
+    LexiconWriterIVARS *const ivars = LexWriter_IVARS(self);
     Segment   *const segment  = LexWriter_Get_Segment(self);
     Folder    *const folder   = LexWriter_Get_Folder(self);
     Schema    *const schema   = LexWriter_Get_Schema(self);
@@ -127,104 +133,110 @@ LexWriter_start_field(LexiconWriter *self, int32_t field_num) {
     FieldType *const type     = Schema_Fetch_Type(schema, field);
 
     // Open outstreams.
-    CB_setf(self->dat_file,  "%o/lexicon-%i32.dat",  seg_name, field_num);
-    CB_setf(self->ix_file,   "%o/lexicon-%i32.ix",   seg_name, field_num);
-    CB_setf(self->ixix_file, "%o/lexicon-%i32.ixix", seg_name, field_num);
-    self->dat_out = Folder_Open_Out(folder, self->dat_file);
-    if (!self->dat_out) { RETHROW(INCREF(Err_get_error())); }
-    self->ix_out = Folder_Open_Out(folder, self->ix_file);
-    if (!self->ix_out) { RETHROW(INCREF(Err_get_error())); }
-    self->ixix_out = Folder_Open_Out(folder, self->ixix_file);
-    if (!self->ixix_out) { RETHROW(INCREF(Err_get_error())); }
+    CB_setf(ivars->dat_file,  "%o/lexicon-%i32.dat",  seg_name, field_num);
+    CB_setf(ivars->ix_file,   "%o/lexicon-%i32.ix",   seg_name, field_num);
+    CB_setf(ivars->ixix_file, "%o/lexicon-%i32.ixix", seg_name, field_num);
+    ivars->dat_out = Folder_Open_Out(folder, ivars->dat_file);
+    if (!ivars->dat_out) { RETHROW(INCREF(Err_get_error())); }
+    ivars->ix_out = Folder_Open_Out(folder, ivars->ix_file);
+    if (!ivars->ix_out) { RETHROW(INCREF(Err_get_error())); }
+    ivars->ixix_out = Folder_Open_Out(folder, ivars->ixix_file);
+    if (!ivars->ixix_out) { RETHROW(INCREF(Err_get_error())); }
 
     // Initialize count and ix_count, term stepper and term info stepper.
-    self->count    = 0;
-    self->ix_count = 0;
-    self->term_stepper = FType_Make_Term_Stepper(type);
-    TermStepper_Reset(self->tinfo_stepper);
+    ivars->count    = 0;
+    ivars->ix_count = 0;
+    ivars->term_stepper = FType_Make_Term_Stepper(type);
+    TermStepper_Reset(ivars->tinfo_stepper);
 }
 
 void
 LexWriter_finish_field(LexiconWriter *self, int32_t field_num) {
-    CharBuf *field = Seg_Field_Name(self->segment, field_num);
+    LexiconWriterIVARS *const ivars = LexWriter_IVARS(self);
+    CharBuf *field = Seg_Field_Name(ivars->segment, field_num);
 
     // Store count of terms for this field as metadata.
-    Hash_Store(self->counts, (Obj*)field,
-               (Obj*)CB_newf("%i32", self->count));
-    Hash_Store(self->ix_counts, (Obj*)field,
-               (Obj*)CB_newf("%i32", self->ix_count));
+    Hash_Store(ivars->counts, (Obj*)field,
+               (Obj*)CB_newf("%i32", ivars->count));
+    Hash_Store(ivars->ix_counts, (Obj*)field,
+               (Obj*)CB_newf("%i32", ivars->ix_count));
 
     // Close streams.
-    OutStream_Close(self->dat_out);
-    OutStream_Close(self->ix_out);
-    OutStream_Close(self->ixix_out);
-    DECREF(self->dat_out);
-    DECREF(self->ix_out);
-    DECREF(self->ixix_out);
-    self->dat_out  = NULL;
-    self->ix_out   = NULL;
-    self->ixix_out = NULL;
+    OutStream_Close(ivars->dat_out);
+    OutStream_Close(ivars->ix_out);
+    OutStream_Close(ivars->ixix_out);
+    DECREF(ivars->dat_out);
+    DECREF(ivars->ix_out);
+    DECREF(ivars->ixix_out);
+    ivars->dat_out  = NULL;
+    ivars->ix_out   = NULL;
+    ivars->ixix_out = NULL;
 
     // Close term stepper.
-    DECREF(self->term_stepper);
-    self->term_stepper = NULL;
+    DECREF(ivars->term_stepper);
+    ivars->term_stepper = NULL;
 }
 
 void
 LexWriter_enter_temp_mode(LexiconWriter *self, const CharBuf *field,
                           OutStream *temp_outstream) {
+    LexiconWriterIVARS *const ivars = LexWriter_IVARS(self);
     Schema    *schema = LexWriter_Get_Schema(self);
     FieldType *type   = Schema_Fetch_Type(schema, field);
 
     // Assign outstream.
-    if (self->dat_out != NULL) {
-        THROW(ERR, "Can't enter temp mode (filename: %o) ", self->dat_file);
+    if (ivars->dat_out != NULL) {
+        THROW(ERR, "Can't enter temp mode (filename: %o) ", ivars->dat_file);
     }
-    self->dat_out = (OutStream*)INCREF(temp_outstream);
+    ivars->dat_out = (OutStream*)INCREF(temp_outstream);
 
     // Initialize count and ix_count, term stepper and term info stepper.
-    self->count    = 0;
-    self->ix_count = 0;
-    self->term_stepper = FType_Make_Term_Stepper(type);
-    TermStepper_Reset(self->tinfo_stepper);
+    ivars->count    = 0;
+    ivars->ix_count = 0;
+    ivars->term_stepper = FType_Make_Term_Stepper(type);
+    TermStepper_Reset(ivars->tinfo_stepper);
 
     // Remember that we're in temp mode.
-    self->temp_mode = true;
+    ivars->temp_mode = true;
 }
 
 void
 LexWriter_leave_temp_mode(LexiconWriter *self) {
-    DECREF(self->term_stepper);
-    self->term_stepper = NULL;
-    DECREF(self->dat_out);
-    self->dat_out   = NULL;
-    self->temp_mode = false;
+    LexiconWriterIVARS *const ivars = LexWriter_IVARS(self);
+    DECREF(ivars->term_stepper);
+    ivars->term_stepper = NULL;
+    DECREF(ivars->dat_out);
+    ivars->dat_out   = NULL;
+    ivars->temp_mode = false;
 }
 
 void
 LexWriter_finish(LexiconWriter *self) {
+    LexiconWriterIVARS *const ivars = LexWriter_IVARS(self);
+
     // Ensure that streams were closed (by calling Finish_Field or
     // Leave_Temp_Mode).
-    if (self->dat_out != NULL) {
-        THROW(ERR, "File '%o' never closed", self->dat_file);
+    if (ivars->dat_out != NULL) {
+        THROW(ERR, "File '%o' never closed", ivars->dat_file);
     }
-    else if (self->ix_out != NULL) {
-        THROW(ERR, "File '%o' never closed", self->ix_file);
+    else if (ivars->ix_out != NULL) {
+        THROW(ERR, "File '%o' never closed", ivars->ix_file);
     }
-    else if (self->ix_out != NULL) {
-        THROW(ERR, "File '%o' never closed", self->ix_file);
+    else if (ivars->ixix_out != NULL) {
+        THROW(ERR, "File '%o' never closed", ivars->ixix_file);
     }
 
     // Store metadata.
-    Seg_Store_Metadata_Str(self->segment, "lexicon", 7,
+    Seg_Store_Metadata_Str(ivars->segment, "lexicon", 7,
                            (Obj*)LexWriter_Metadata(self));
 }
 
 Hash*
 LexWriter_metadata(LexiconWriter *self) {
+    LexiconWriterIVARS *const ivars = LexWriter_IVARS(self);
     Hash *const metadata  = DataWriter_metadata((DataWriter*)self);
-    Hash *const counts    = (Hash*)INCREF(self->counts);
-    Hash *const ix_counts = (Hash*)INCREF(self->ix_counts);
+    Hash *const counts    = (Hash*)INCREF(ivars->counts);
+    Hash *const ix_counts = (Hash*)INCREF(ivars->ix_counts);
 
     // Placeholders.
     if (Hash_Get_Size(counts) == 0) {

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/PolyLexicon.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/PolyLexicon.c b/core/Lucy/Index/PolyLexicon.c
index 52553a2..713fda2 100644
--- a/core/Lucy/Index/PolyLexicon.c
+++ b/core/Lucy/Index/PolyLexicon.c
@@ -41,8 +41,9 @@ PolyLex_init(PolyLexicon *self, const CharBuf *field, VArray *sub_readers) {
 
     // Init.
     Lex_init((Lexicon*)self, field);
-    self->term            = NULL;
-    self->lex_q           = SegLexQ_new(num_sub_readers);
+    PolyLexiconIVARS *const ivars = PolyLex_IVARS(self);
+    ivars->term            = NULL;
+    ivars->lex_q           = SegLexQ_new(num_sub_readers);
 
     // Derive.
     for (uint32_t i = 0; i < num_sub_readers; i++) {
@@ -54,7 +55,7 @@ PolyLex_init(PolyLexicon *self, const CharBuf *field, VArray *sub_readers) {
             }
         }
     }
-    self->seg_lexicons  = seg_lexicons;
+    ivars->seg_lexicons  = seg_lexicons;
 
     PolyLex_Reset(self);
 
@@ -63,9 +64,10 @@ PolyLex_init(PolyLexicon *self, const CharBuf *field, VArray *sub_readers) {
 
 void
 PolyLex_destroy(PolyLexicon *self) {
-    DECREF(self->seg_lexicons);
-    DECREF(self->lex_q);
-    DECREF(self->term);
+    PolyLexiconIVARS *const ivars = PolyLex_IVARS(self);
+    DECREF(ivars->seg_lexicons);
+    DECREF(ivars->lex_q);
+    DECREF(ivars->term);
     SUPER_DESTROY(self, POLYLEXICON);
 }
 
@@ -91,9 +93,10 @@ S_refresh_lex_q(SegLexQueue *lex_q, VArray *seg_lexicons, Obj *target) {
 
 void
 PolyLex_reset(PolyLexicon *self) {
-    VArray *seg_lexicons = self->seg_lexicons;
+    PolyLexiconIVARS *const ivars = PolyLex_IVARS(self);
+    VArray *seg_lexicons = ivars->seg_lexicons;
     uint32_t num_segs = VA_Get_Size(seg_lexicons);
-    SegLexQueue *lex_q = self->lex_q;
+    SegLexQueue *lex_q = ivars->lex_q;
 
     // Empty out the queue.
     while (1) {
@@ -108,30 +111,31 @@ PolyLex_reset(PolyLexicon *self) {
             = (SegLexicon*)VA_Fetch(seg_lexicons, i);
         SegLex_Reset(seg_lexicon);
         if (SegLex_Next(seg_lexicon)) {
-            SegLexQ_Insert(self->lex_q, INCREF(seg_lexicon));
+            SegLexQ_Insert(ivars->lex_q, INCREF(seg_lexicon));
         }
     }
 
-    if (self->term != NULL) {
-        DECREF(self->term);
-        self->term = NULL;
+    if (ivars->term != NULL) {
+        DECREF(ivars->term);
+        ivars->term = NULL;
     }
 }
 
 bool
 PolyLex_next(PolyLexicon *self) {
-    SegLexQueue *lex_q = self->lex_q;
+    PolyLexiconIVARS *const ivars = PolyLex_IVARS(self);
+    SegLexQueue *lex_q = ivars->lex_q;
     SegLexicon *top_seg_lexicon = (SegLexicon*)SegLexQ_Peek(lex_q);
 
     // Churn through queue items with equal terms.
     while (top_seg_lexicon != NULL) {
         Obj *const candidate = SegLex_Get_Term(top_seg_lexicon);
-        if ((candidate && !self->term)
-            || Obj_Compare_To(self->term, candidate) != 0
+        if ((candidate && !ivars->term)
+            || Obj_Compare_To(ivars->term, candidate) != 0
            ) {
             // Succeed if the next item in the queue has a different term.
-            DECREF(self->term);
-            self->term = Obj_Clone(candidate);
+            DECREF(ivars->term);
+            ivars->term = Obj_Clone(candidate);
             return true;
         }
         else {
@@ -145,15 +149,16 @@ PolyLex_next(PolyLexicon *self) {
     }
 
     // If queue is empty, iterator is finished.
-    DECREF(self->term);
-    self->term = NULL;
+    DECREF(ivars->term);
+    ivars->term = NULL;
     return false;
 }
 
 void
 PolyLex_seek(PolyLexicon *self, Obj *target) {
-    VArray *seg_lexicons = self->seg_lexicons;
-    SegLexQueue *lex_q = self->lex_q;
+    PolyLexiconIVARS *const ivars = PolyLex_IVARS(self);
+    VArray *seg_lexicons = ivars->seg_lexicons;
+    SegLexQueue *lex_q = ivars->lex_q;
 
     if (target == NULL) {
         PolyLex_Reset(self);
@@ -163,17 +168,17 @@ PolyLex_seek(PolyLexicon *self, Obj *target) {
     // Refresh the queue, set vars.
     S_refresh_lex_q(lex_q, seg_lexicons, target);
     SegLexicon *least = (SegLexicon*)SegLexQ_Peek(lex_q);
-    DECREF(self->term);
-    self->term = NULL;
+    DECREF(ivars->term);
+    ivars->term = NULL;
     if (least) {
         Obj *least_term = SegLex_Get_Term(least);
-        self->term = least_term ? Obj_Clone(least_term) : NULL;
+        ivars->term = least_term ? Obj_Clone(least_term) : NULL;
     }
 
     // Scan up to the real target.
     do {
-        if (self->term) {
-            const int32_t comparison = Obj_Compare_To(self->term, target);
+        if (ivars->term) {
+            const int32_t comparison = Obj_Compare_To(ivars->term, target);
             if (comparison >= 0) { break; }
         }
     } while (PolyLex_Next(self));
@@ -181,12 +186,13 @@ PolyLex_seek(PolyLexicon *self, Obj *target) {
 
 Obj*
 PolyLex_get_term(PolyLexicon *self) {
-    return self->term;
+    return PolyLex_IVARS(self)->term;
 }
 
 uint32_t
 PolyLex_get_num_seg_lexicons(PolyLexicon *self) {
-    return VA_Get_Size(self->seg_lexicons);
+    PolyLexiconIVARS *const ivars = PolyLex_IVARS(self);
+    return VA_Get_Size(ivars->seg_lexicons);
 }
 
 SegLexQueue*

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/PolyReader.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/PolyReader.c b/core/Lucy/Index/PolyReader.c
index ba64e16..92f198b 100644
--- a/core/Lucy/Index/PolyReader.c
+++ b/core/Lucy/Index/PolyReader.c
@@ -100,23 +100,24 @@ S_first_non_null(VArray *array) {
 
 static void
 S_init_sub_readers(PolyReader *self, VArray *sub_readers) {
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
     uint32_t  num_sub_readers = VA_Get_Size(sub_readers);
     int32_t *starts = (int32_t*)MALLOCATE(num_sub_readers * sizeof(int32_t));
     Hash  *data_readers = Hash_new(0);
 
-    DECREF(self->sub_readers);
-    DECREF(self->offsets);
-    self->sub_readers       = (VArray*)INCREF(sub_readers);
+    DECREF(ivars->sub_readers);
+    DECREF(ivars->offsets);
+    ivars->sub_readers       = (VArray*)INCREF(sub_readers);
 
     // Accumulate doc_max, subreader start offsets, and DataReaders.
-    self->doc_max = 0;
+    ivars->doc_max = 0;
     for (uint32_t i = 0; i < num_sub_readers; i++) {
         SegReader *seg_reader = (SegReader*)VA_Fetch(sub_readers, i);
         Hash *components = SegReader_Get_Components(seg_reader);
         CharBuf *api;
         DataReader *component;
-        starts[i] = self->doc_max;
-        self->doc_max += SegReader_Doc_Max(seg_reader);
+        starts[i] = ivars->doc_max;
+        ivars->doc_max += SegReader_Doc_Max(seg_reader);
         Hash_Iterate(components);
         while (Hash_Next(components, (Obj**)&api, (Obj**)&component)) {
             VArray *readers = (VArray*)Hash_Fetch(data_readers, (Obj*)api);
@@ -127,7 +128,7 @@ S_init_sub_readers(PolyReader *self, VArray *sub_readers) {
             VA_Store(readers, i, INCREF(component));
         }
     }
-    self->offsets = I32Arr_new_steal(starts, num_sub_readers);
+    ivars->offsets = I32Arr_new_steal(starts, num_sub_readers);
 
     CharBuf *api;
     VArray  *readers;
@@ -136,26 +137,27 @@ S_init_sub_readers(PolyReader *self, VArray *sub_readers) {
         DataReader *datareader
             = (DataReader*)CERTIFY(S_first_non_null(readers), DATAREADER);
         DataReader *aggregator
-            = DataReader_Aggregator(datareader, readers, self->offsets);
+            = DataReader_Aggregator(datareader, readers, ivars->offsets);
         if (aggregator) {
             CERTIFY(aggregator, DATAREADER);
-            Hash_Store(self->components, (Obj*)api, (Obj*)aggregator);
+            Hash_Store(ivars->components, (Obj*)api, (Obj*)aggregator);
         }
     }
     DECREF(data_readers);
 
     DeletionsReader *del_reader
         = (DeletionsReader*)Hash_Fetch(
-              self->components, (Obj*)VTable_Get_Name(DELETIONSREADER));
-    self->del_count = del_reader ? DelReader_Del_Count(del_reader) : 0;
+              ivars->components, (Obj*)VTable_Get_Name(DELETIONSREADER));
+    ivars->del_count = del_reader ? DelReader_Del_Count(del_reader) : 0;
 }
 
 PolyReader*
 PolyReader_init(PolyReader *self, Schema *schema, Folder *folder,
                 Snapshot *snapshot, IndexManager *manager,
                 VArray *sub_readers) {
-    self->doc_max    = 0;
-    self->del_count  = 0;
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
+    ivars->doc_max    = 0;
+    ivars->del_count  = 0;
 
     if (sub_readers) {
         uint32_t num_segs = VA_Get_Size(sub_readers);
@@ -173,8 +175,8 @@ PolyReader_init(PolyReader *self, Schema *schema, Folder *folder,
     else {
         IxReader_init((IndexReader*)self, schema, folder, snapshot,
                       NULL, -1, manager);
-        self->sub_readers = VA_new(0);
-        self->offsets = I32Arr_new_steal(NULL, 0);
+        ivars->sub_readers = VA_new(0);
+        ivars->offsets = I32Arr_new_steal(NULL, 0);
     }
 
     return self;
@@ -182,10 +184,11 @@ PolyReader_init(PolyReader *self, Schema *schema, Folder *folder,
 
 void
 PolyReader_close(PolyReader *self) {
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
     PolyReader_Close_t super_close
         = SUPER_METHOD_PTR(POLYREADER, Lucy_PolyReader_Close);
-    for (uint32_t i = 0, max = VA_Get_Size(self->sub_readers); i < max; i++) {
-        SegReader *seg_reader = (SegReader*)VA_Fetch(self->sub_readers, i);
+    for (uint32_t i = 0, max = VA_Get_Size(ivars->sub_readers); i < max; i++) {
+        SegReader *seg_reader = (SegReader*)VA_Fetch(ivars->sub_readers, i);
         SegReader_Close(seg_reader);
     }
     super_close(self);
@@ -193,8 +196,9 @@ PolyReader_close(PolyReader *self) {
 
 void
 PolyReader_destroy(PolyReader *self) {
-    DECREF(self->sub_readers);
-    DECREF(self->offsets);
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
+    DECREF(ivars->sub_readers);
+    DECREF(ivars->offsets);
     SUPER_DESTROY(self, POLYREADER);
 }
 
@@ -218,7 +222,8 @@ S_try_open_elements(void *context) {
     struct try_open_elements_context *args
         = (struct try_open_elements_context*)context;
     PolyReader *self              = args->self;
-    VArray     *files             = Snapshot_List(self->snapshot);
+    PolyReaderIVARS *const ivars  = PolyReader_IVARS(self);
+    VArray     *files             = Snapshot_List(ivars->snapshot);
     Folder     *folder            = PolyReader_Get_Folder(self);
     uint32_t    num_segs          = 0;
     uint64_t    latest_schema_gen = 0;
@@ -251,8 +256,8 @@ S_try_open_elements(void *context) {
     else {
         Hash *dump = (Hash*)Json_slurp_json(folder, schema_file);
         if (dump) { // read file successfully
-            DECREF(self->schema);
-            self->schema = (Schema*)CERTIFY(
+            DECREF(ivars->schema);
+            ivars->schema = (Schema*)CERTIFY(
                                VTable_Load_Obj(SCHEMA, (Obj*)dump), SCHEMA);
             DECREF(dump);
             DECREF(schema_file);
@@ -329,6 +334,7 @@ int32_t  PolyReader_debug1_num_passes     = 0;
 PolyReader*
 PolyReader_do_open(PolyReader *self, Obj *index, Snapshot *snapshot,
                    IndexManager *manager) {
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
     Folder   *folder   = S_derive_folder(index);
     uint64_t  last_gen = 0;
 
@@ -391,7 +397,7 @@ PolyReader_do_open(PolyReader *self, Obj *index, Snapshot *snapshot,
         // If that's not the case, we must read the file we just picked.
         if (!snapshot) {
             struct try_read_snapshot_context context;
-            context.snapshot = self->snapshot;
+            context.snapshot = ivars->snapshot;
             context.folder   = folder;
             context.path     = target_snap_file;
             Err *error = Err_trap(S_try_read_snapshot, &context);
@@ -463,11 +469,12 @@ S_derive_folder(Obj *index) {
 
 static bool 
 S_obtain_deletion_lock(PolyReader *self) {
-    self->deletion_lock = IxManager_Make_Deletion_Lock(self->manager);
-    Lock_Clear_Stale(self->deletion_lock);
-    if (!Lock_Obtain(self->deletion_lock)) {
-        DECREF(self->deletion_lock);
-        self->deletion_lock = NULL;
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
+    ivars->deletion_lock = IxManager_Make_Deletion_Lock(ivars->manager);
+    Lock_Clear_Stale(ivars->deletion_lock);
+    if (!Lock_Obtain(ivars->deletion_lock)) {
+        DECREF(ivars->deletion_lock);
+        ivars->deletion_lock = NULL;
         return false;
     }
     return true;
@@ -475,13 +482,14 @@ S_obtain_deletion_lock(PolyReader *self) {
 
 static bool
 S_obtain_read_lock(PolyReader *self, const CharBuf *snapshot_file_name) {
-    self->read_lock = IxManager_Make_Snapshot_Read_Lock(self->manager,
-                                                        snapshot_file_name);
-
-    Lock_Clear_Stale(self->read_lock);
-    if (!Lock_Obtain(self->read_lock)) {
-        DECREF(self->read_lock);
-        self->read_lock = NULL;
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
+    ivars->read_lock = IxManager_Make_Snapshot_Read_Lock(ivars->manager,
+                                                         snapshot_file_name);
+
+    Lock_Clear_Stale(ivars->read_lock);
+    if (!Lock_Obtain(ivars->read_lock)) {
+        DECREF(ivars->read_lock);
+        ivars->read_lock = NULL;
         return false;
     }
     return true;
@@ -489,50 +497,55 @@ S_obtain_read_lock(PolyReader *self, const CharBuf *snapshot_file_name) {
 
 static void
 S_release_read_lock(PolyReader *self) {
-    if (self->read_lock) {
-        Lock_Release(self->read_lock);
-        DECREF(self->read_lock);
-        self->read_lock = NULL;
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
+    if (ivars->read_lock) {
+        Lock_Release(ivars->read_lock);
+        DECREF(ivars->read_lock);
+        ivars->read_lock = NULL;
     }
 }
 
 static void
 S_release_deletion_lock(PolyReader *self) {
-    if (self->deletion_lock) {
-        Lock_Release(self->deletion_lock);
-        DECREF(self->deletion_lock);
-        self->deletion_lock = NULL;
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
+    if (ivars->deletion_lock) {
+        Lock_Release(ivars->deletion_lock);
+        DECREF(ivars->deletion_lock);
+        ivars->deletion_lock = NULL;
     }
 }
 
 int32_t
 PolyReader_doc_max(PolyReader *self) {
-    return self->doc_max;
+    return PolyReader_IVARS(self)->doc_max;
 }
 
 int32_t
 PolyReader_doc_count(PolyReader *self) {
-    return self->doc_max - self->del_count;
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
+    return ivars->doc_max - ivars->del_count;
 }
 
 int32_t
 PolyReader_del_count(PolyReader *self) {
-    return self->del_count;
+    return PolyReader_IVARS(self)->del_count;
 }
 
 I32Array*
 PolyReader_offsets(PolyReader *self) {
-    return (I32Array*)INCREF(self->offsets);
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
+    return (I32Array*)INCREF(ivars->offsets);
 }
 
 VArray*
 PolyReader_seg_readers(PolyReader *self) {
-    return (VArray*)VA_Shallow_Copy(self->sub_readers);
+    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
+    return (VArray*)VA_Shallow_Copy(ivars->sub_readers);
 }
 
 VArray*
 PolyReader_get_seg_readers(PolyReader *self) {
-    return self->sub_readers;
+    return PolyReader_IVARS(self)->sub_readers;
 }
 
 uint32_t

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/Posting.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/Posting.c b/core/Lucy/Index/Posting.c
index 36cb1e9..98ca0f8 100644
--- a/core/Lucy/Index/Posting.c
+++ b/core/Lucy/Index/Posting.c
@@ -30,18 +30,19 @@
 
 Posting*
 Post_init(Posting *self) {
-    self->doc_id = 0;
+    PostingIVARS *const ivars = Post_IVARS(self);
+    ivars->doc_id = 0;
     return self;
 }
 
 void
 Post_set_doc_id(Posting *self, int32_t doc_id) {
-    self->doc_id = doc_id;
+    Post_IVARS(self)->doc_id = doc_id;
 }
 
 int32_t
 Post_get_doc_id(Posting *self) {
-    return self->doc_id;
+    return Post_IVARS(self)->doc_id;
 }
 
 PostingWriter*
@@ -49,7 +50,8 @@ PostWriter_init(PostingWriter *self, Schema *schema, Snapshot *snapshot,
                 Segment *segment, PolyReader *polyreader, int32_t field_num) {
     DataWriter_init((DataWriter*)self, schema, snapshot, segment,
                     polyreader);
-    self->field_num = field_num;
+    PostingWriterIVARS *const ivars = PostWriter_IVARS(self);
+    ivars->field_num = field_num;
     return self;
 }
 

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/PostingListReader.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/PostingListReader.c b/core/Lucy/Index/PostingListReader.c
index c2d3a49..5cb018e 100644
--- a/core/Lucy/Index/PostingListReader.c
+++ b/core/Lucy/Index/PostingListReader.c
@@ -64,10 +64,11 @@ DefPListReader_init(DefaultPostingListReader *self, Schema *schema,
                     int32_t seg_tick, LexiconReader *lex_reader) {
     PListReader_init((PostingListReader*)self, schema, folder, snapshot,
                      segments, seg_tick);
+    DefaultPostingListReaderIVARS *const ivars = DefPListReader_IVARS(self);
     Segment *segment = DefPListReader_Get_Segment(self);
 
     // Derive.
-    self->lex_reader = (LexiconReader*)INCREF(lex_reader);
+    ivars->lex_reader = (LexiconReader*)INCREF(lex_reader);
 
     // Check format.
     Hash *my_meta = (Hash*)Seg_Fetch_Metadata_Str(segment, "postings", 8);
@@ -91,23 +92,26 @@ DefPListReader_init(DefaultPostingListReader *self, Schema *schema,
 
 void
 DefPListReader_close(DefaultPostingListReader *self) {
-    if (self->lex_reader) {
-        LexReader_Close(self->lex_reader);
-        DECREF(self->lex_reader);
-        self->lex_reader = NULL;
+    DefaultPostingListReaderIVARS *const ivars = DefPListReader_IVARS(self);
+    if (ivars->lex_reader) {
+        LexReader_Close(ivars->lex_reader);
+        DECREF(ivars->lex_reader);
+        ivars->lex_reader = NULL;
     }
 }
 
 void
 DefPListReader_destroy(DefaultPostingListReader *self) {
-    DECREF(self->lex_reader);
+    DefaultPostingListReaderIVARS *const ivars = DefPListReader_IVARS(self);
+    DECREF(ivars->lex_reader);
     SUPER_DESTROY(self, DEFAULTPOSTINGLISTREADER);
 }
 
 SegPostingList*
 DefPListReader_posting_list(DefaultPostingListReader *self,
                             const CharBuf *field, Obj *target) {
-    FieldType *type = Schema_Fetch_Type(self->schema, field);
+    DefaultPostingListReaderIVARS *const ivars = DefPListReader_IVARS(self);
+    FieldType *type = Schema_Fetch_Type(ivars->schema, field);
 
     // Only return an object if we've got an indexed field.
     if (type != NULL && FType_Indexed(type)) {
@@ -122,6 +126,6 @@ DefPListReader_posting_list(DefaultPostingListReader *self,
 
 LexiconReader*
 DefPListReader_get_lex_reader(DefaultPostingListReader *self) {
-    return self->lex_reader;
+    return DefPListReader_IVARS(self)->lex_reader;
 }
 

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/PostingListWriter.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/PostingListWriter.c b/core/Lucy/Index/PostingListWriter.c
index f56e8f8..9436e18 100644
--- a/core/Lucy/Index/PostingListWriter.c
+++ b/core/Lucy/Index/PostingListWriter.c
@@ -62,36 +62,38 @@ PListWriter_init(PostingListWriter *self, Schema *schema, Snapshot *snapshot,
                  Segment *segment, PolyReader *polyreader,
                  LexiconWriter *lex_writer) {
     DataWriter_init((DataWriter*)self, schema, snapshot, segment, polyreader);
+    PostingListWriterIVARS *const ivars = PListWriter_IVARS(self);
 
     // Assign.
-    self->lex_writer = (LexiconWriter*)INCREF(lex_writer);
+    ivars->lex_writer = (LexiconWriter*)INCREF(lex_writer);
 
     // Init.
-    self->pools          = VA_new(Schema_Num_Fields(schema));
-    self->mem_thresh     = default_mem_thresh;
-    self->mem_pool       = MemPool_new(0);
-    self->lex_temp_out   = NULL;
-    self->post_temp_out  = NULL;
+    ivars->pools          = VA_new(Schema_Num_Fields(schema));
+    ivars->mem_thresh     = default_mem_thresh;
+    ivars->mem_pool       = MemPool_new(0);
+    ivars->lex_temp_out   = NULL;
+    ivars->post_temp_out  = NULL;
 
     return self;
 }
 
 static void
 S_lazy_init(PostingListWriter *self) {
-    if (!self->lex_temp_out) {
-        Folder  *folder         = self->folder;
-        CharBuf *seg_name       = Seg_Get_Name(self->segment);
+    PostingListWriterIVARS *const ivars = PListWriter_IVARS(self);
+    if (!ivars->lex_temp_out) {
+        Folder  *folder         = ivars->folder;
+        CharBuf *seg_name       = Seg_Get_Name(ivars->segment);
         CharBuf *lex_temp_path  = CB_newf("%o/lextemp", seg_name);
         CharBuf *post_temp_path = CB_newf("%o/ptemp", seg_name);
         CharBuf *skip_path      = CB_newf("%o/postings.skip", seg_name);
 
         // Open temp streams and final skip stream.
-        self->lex_temp_out  = Folder_Open_Out(folder, lex_temp_path);
-        if (!self->lex_temp_out) { RETHROW(INCREF(Err_get_error())); }
-        self->post_temp_out = Folder_Open_Out(folder, post_temp_path);
-        if (!self->post_temp_out) { RETHROW(INCREF(Err_get_error())); }
-        self->skip_out = Folder_Open_Out(folder, skip_path);
-        if (!self->skip_out) { RETHROW(INCREF(Err_get_error())); }
+        ivars->lex_temp_out  = Folder_Open_Out(folder, lex_temp_path);
+        if (!ivars->lex_temp_out) { RETHROW(INCREF(Err_get_error())); }
+        ivars->post_temp_out = Folder_Open_Out(folder, post_temp_path);
+        if (!ivars->post_temp_out) { RETHROW(INCREF(Err_get_error())); }
+        ivars->skip_out = Folder_Open_Out(folder, skip_path);
+        if (!ivars->skip_out) { RETHROW(INCREF(Err_get_error())); }
 
         DECREF(skip_path);
         DECREF(post_temp_path);
@@ -101,26 +103,28 @@ S_lazy_init(PostingListWriter *self) {
 
 static PostingPool*
 S_lazy_init_posting_pool(PostingListWriter *self, int32_t field_num) {
-    PostingPool *pool = (PostingPool*)VA_Fetch(self->pools, field_num);
+    PostingListWriterIVARS *const ivars = PListWriter_IVARS(self);
+    PostingPool *pool = (PostingPool*)VA_Fetch(ivars->pools, field_num);
     if (!pool && field_num != 0) {
-        CharBuf *field = Seg_Field_Name(self->segment, field_num);
-        pool = PostPool_new(self->schema, self->snapshot, self->segment,
-                            self->polyreader, field, self->lex_writer,
-                            self->mem_pool, self->lex_temp_out,
-                            self->post_temp_out, self->skip_out);
-        VA_Store(self->pools, field_num, (Obj*)pool);
+        CharBuf *field = Seg_Field_Name(ivars->segment, field_num);
+        pool = PostPool_new(ivars->schema, ivars->snapshot, ivars->segment,
+                            ivars->polyreader, field, ivars->lex_writer,
+                            ivars->mem_pool, ivars->lex_temp_out,
+                            ivars->post_temp_out, ivars->skip_out);
+        VA_Store(ivars->pools, field_num, (Obj*)pool);
     }
     return pool;
 }
 
 void
 PListWriter_destroy(PostingListWriter *self) {
-    DECREF(self->lex_writer);
-    DECREF(self->mem_pool);
-    DECREF(self->pools);
-    DECREF(self->lex_temp_out);
-    DECREF(self->post_temp_out);
-    DECREF(self->skip_out);
+    PostingListWriterIVARS *const ivars = PListWriter_IVARS(self);
+    DECREF(ivars->lex_writer);
+    DECREF(ivars->mem_pool);
+    DECREF(ivars->pools);
+    DECREF(ivars->lex_temp_out);
+    DECREF(ivars->post_temp_out);
+    DECREF(ivars->skip_out);
     SUPER_DESTROY(self, POSTINGLISTWRITER);
 }
 
@@ -139,6 +143,7 @@ void
 PListWriter_add_inverted_doc(PostingListWriter *self, Inverter *inverter,
                              int32_t doc_id) {
     S_lazy_init(self);
+    PostingListWriterIVARS *const ivars = PListWriter_IVARS(self);
 
     // Iterate over fields in document, adding the content of indexed fields
     // to their respective PostingPools.
@@ -161,21 +166,22 @@ PListWriter_add_inverted_doc(PostingListWriter *self, Inverter *inverter,
     // If our PostingPools have collectively passed the memory threshold,
     // flush all of them, then release all the RawPostings with a single
     // action.
-    if (MemPool_Get_Consumed(self->mem_pool) > self->mem_thresh) {
-        for (uint32_t i = 0, max = VA_Get_Size(self->pools); i < max; i++) {
-            PostingPool *const pool = (PostingPool*)VA_Fetch(self->pools, i);
+    if (MemPool_Get_Consumed(ivars->mem_pool) > ivars->mem_thresh) {
+        for (uint32_t i = 0, max = VA_Get_Size(ivars->pools); i < max; i++) {
+            PostingPool *const pool = (PostingPool*)VA_Fetch(ivars->pools, i);
             if (pool) { PostPool_Flush(pool); }
         }
-        MemPool_Release_All(self->mem_pool);
+        MemPool_Release_All(ivars->mem_pool);
     }
 }
 
 void
 PListWriter_add_segment(PostingListWriter *self, SegReader *reader,
                         I32Array *doc_map) {
+    PostingListWriterIVARS *const ivars = PListWriter_IVARS(self);
     Segment *other_segment = SegReader_Get_Segment(reader);
-    Schema  *schema        = self->schema;
-    Segment *segment       = self->segment;
+    Schema  *schema        = ivars->schema;
+    Segment *segment       = ivars->segment;
     VArray  *all_fields    = Schema_All_Fields(schema);
     S_lazy_init(self);
 
@@ -202,32 +208,34 @@ PListWriter_add_segment(PostingListWriter *self, SegReader *reader,
 
 void
 PListWriter_finish(PostingListWriter *self) {
+    PostingListWriterIVARS *const ivars = PListWriter_IVARS(self);
+
     // If S_lazy_init was never called, we have no data, so bail out.
-    if (!self->lex_temp_out) { return; }
+    if (!ivars->lex_temp_out) { return; }
 
-    Folder  *folder = self->folder;
-    CharBuf *seg_name = Seg_Get_Name(self->segment);
+    Folder  *folder = ivars->folder;
+    CharBuf *seg_name = Seg_Get_Name(ivars->segment);
     CharBuf *lex_temp_path  = CB_newf("%o/lextemp", seg_name);
     CharBuf *post_temp_path = CB_newf("%o/ptemp", seg_name);
 
     // Close temp streams.
-    OutStream_Close(self->lex_temp_out);
-    OutStream_Close(self->post_temp_out);
+    OutStream_Close(ivars->lex_temp_out);
+    OutStream_Close(ivars->post_temp_out);
 
     // Try to free up some memory.
-    for (uint32_t i = 0, max = VA_Get_Size(self->pools); i < max; i++) {
-        PostingPool *pool = (PostingPool*)VA_Fetch(self->pools, i);
+    for (uint32_t i = 0, max = VA_Get_Size(ivars->pools); i < max; i++) {
+        PostingPool *pool = (PostingPool*)VA_Fetch(ivars->pools, i);
         if (pool) { PostPool_Shrink(pool); }
     }
 
     // Write postings for each field.
-    for (uint32_t i = 0, max = VA_Get_Size(self->pools); i < max; i++) {
-        PostingPool *pool = (PostingPool*)VA_Delete(self->pools, i);
+    for (uint32_t i = 0, max = VA_Get_Size(ivars->pools); i < max; i++) {
+        PostingPool *pool = (PostingPool*)VA_Delete(ivars->pools, i);
         if (pool) {
             // Write out content for each PostingPool.  Let each PostingPool
             // use more RAM while finishing.  (This is a little dicy, because if
             // Shrink() was ineffective, we may double the RAM footprint.)
-            PostPool_Set_Mem_Thresh(pool, self->mem_thresh);
+            PostPool_Set_Mem_Thresh(pool, ivars->mem_thresh);
             PostPool_Flip(pool);
             PostPool_Finish(pool);
             DECREF(pool);
@@ -235,24 +243,24 @@ PListWriter_finish(PostingListWriter *self) {
     }
 
     // Store metadata.
-    Seg_Store_Metadata_Str(self->segment, "postings", 8,
+    Seg_Store_Metadata_Str(ivars->segment, "postings", 8,
                            (Obj*)PListWriter_Metadata(self));
 
     // Close down and clean up.
-    OutStream_Close(self->skip_out);
+    OutStream_Close(ivars->skip_out);
     if (!Folder_Delete(folder, lex_temp_path)) {
         THROW(ERR, "Couldn't delete %o", lex_temp_path);
     }
     if (!Folder_Delete(folder, post_temp_path)) {
         THROW(ERR, "Couldn't delete %o", post_temp_path);
     }
-    DECREF(self->skip_out);
-    self->skip_out = NULL;
+    DECREF(ivars->skip_out);
+    ivars->skip_out = NULL;
     DECREF(post_temp_path);
     DECREF(lex_temp_path);
 
     // Dispatch the LexiconWriter.
-    LexWriter_Finish(self->lex_writer);
+    LexWriter_Finish(ivars->lex_writer);
 }
 
 


[lucy-commits] [7/9] git commit: refs/heads/ivars-wip1 - Migrate C host code to IVARS.

Posted by ma...@apache.org.
Migrate C host code to IVARS.

Migrate host-specific code for C to use IVARS rather than access struct
members through `self`.
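
The pattern is mechanical: each method fetches a pointer to its instance-variable
struct through the per-class Foo_IVARS() accessor (Doc_IVARS, LexWriter_IVARS,
and so on, as in the diffs above) and then reads or writes fields through that
pointer rather than dereferencing `self` directly. Below is a minimal,
self-contained sketch of that calling convention, using hypothetical names
(Widget, WidgetIVARS, Widget_IVARS) rather than the real Clownfish/Lucy types
and generated accessors:

    /* Toy illustration of the IVARS access style.  Here the ivars struct is
     * simply embedded in the object; the point is the convention of grabbing
     * the ivars pointer once and working through it instead of `self`. */

    #include <stdio.h>

    typedef struct WidgetIVARS {
        int    doc_max;
        double boost;
    } WidgetIVARS;

    typedef struct Widget {
        WidgetIVARS ivars;
    } Widget;

    /* Stand-in for the generated Foo_IVARS() accessor. */
    static WidgetIVARS*
    Widget_IVARS(Widget *self) {
        return &self->ivars;
    }

    /* Migrated style: fetch the ivars struct once, then use it. */
    static void
    Widget_bump_doc_max(Widget *self, int delta) {
        WidgetIVARS *const ivars = Widget_IVARS(self);
        ivars->doc_max += delta;     /* instead of self->doc_max += delta */
    }

    int
    main(void) {
        Widget w = { { 0, 1.0 } };
        Widget_bump_doc_max(&w, 42);
        printf("doc_max = %d\n", Widget_IVARS(&w)->doc_max);
        return 0;
    }

In the real code the extra indirection presumably matters because the accessor
keeps the instance layout private to the core while host-language bindings such
as the C host code here go through the same IVARS interface.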


Project: http://git-wip-us.apache.org/repos/asf/lucy/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucy/commit/d3edeee9
Tree: http://git-wip-us.apache.org/repos/asf/lucy/tree/d3edeee9
Diff: http://git-wip-us.apache.org/repos/asf/lucy/diff/d3edeee9

Branch: refs/heads/ivars-wip1
Commit: d3edeee9a80fc60d9e48939153fe75f31d850a4b
Parents: f19a6af
Author: Marvin Humphrey <ma...@rectangular.com>
Authored: Mon Jul 1 08:10:57 2013 -0700
Committer: Marvin Humphrey <ma...@rectangular.com>
Committed: Mon Jul 1 08:46:22 2013 -0700

----------------------------------------------------------------------
 c/src/Lucy/Document/Doc.c    | 40 +++++++++++++++++++++------------------
 c/src/Lucy/Index/DocReader.c |  7 ++++---
 c/src/Lucy/Index/Inverter.c  | 32 ++++++++++++++++---------------
 3 files changed, 43 insertions(+), 36 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucy/blob/d3edeee9/c/src/Lucy/Document/Doc.c
----------------------------------------------------------------------
diff --git a/c/src/Lucy/Document/Doc.c b/c/src/Lucy/Document/Doc.c
index f0fb2d4..74cfef8 100644
--- a/c/src/Lucy/Document/Doc.c
+++ b/c/src/Lucy/Document/Doc.c
@@ -30,6 +30,7 @@
 
 Doc*
 Doc_init(Doc *self, void *fields, int32_t doc_id) {
+    DocIVARS *const ivars = Doc_IVARS(self);
     Hash *hash;
 
     if (fields) {
@@ -39,49 +40,52 @@ Doc_init(Doc *self, void *fields, int32_t doc_id) {
     else {
         hash = Hash_new(0);
     }
-    self->fields = hash;
-    self->doc_id = doc_id;
+    ivars->fields = hash;
+    ivars->doc_id = doc_id;
 
     return self;
 }
 
 void
 Doc_set_fields(Doc *self, void *fields) {
-    DECREF(self->fields);
-    self->fields = CERTIFY(fields, HASH);
+    DocIVARS *const ivars = Doc_IVARS(self);
+    DECREF(ivars->fields);
+    ivars->fields = CERTIFY(fields, HASH);
 }
 
 uint32_t
 Doc_get_size(Doc *self) {
-    Hash *hash = (Hash *)self->fields;
+    Hash *hash = (Hash*)Doc_IVARS(self)->fields;
     return Hash_Get_Size(hash);
 }
 
 void
 Doc_store(Doc *self, const CharBuf *field, Obj *value) {
-    Hash *hash = (Hash *)self->fields;
+    Hash *hash = (Hash*)Doc_IVARS(self)->fields;
     Hash_Store(hash, (Obj *)field, value);
     INCREF(value);
 }
 
 void
 Doc_serialize(Doc *self, OutStream *outstream) {
-    Hash *hash = (Hash *)self->fields;
+    DocIVARS *const ivars = Doc_IVARS(self);
+    Hash *hash = (Hash*)ivars->fields;
     Freezer_serialize_hash(hash, outstream);
-    OutStream_Write_C32(outstream, self->doc_id);
+    OutStream_Write_C32(outstream, ivars->doc_id);
 }
 
 Doc*
 Doc_deserialize(Doc *self, InStream *instream) {
-     self->fields = Freezer_read_hash(instream);
-     self->doc_id = InStream_Read_C32(instream);
-     return self;
+    DocIVARS *const ivars = Doc_IVARS(self);
+    ivars->fields = Freezer_read_hash(instream);
+    ivars->doc_id = InStream_Read_C32(instream);
+    return self;
 }
 
 Obj*
 Doc_extract(Doc *self, CharBuf *field,
                  ViewCharBuf *target) {
-    Hash *hash = (Hash *)self->fields;
+    Hash *hash = (Hash*)Doc_IVARS(self)->fields;
     Obj  *obj  = Hash_Fetch(hash, (Obj *)field);
 
     if (target && obj && Obj_Is_A(obj, CHARBUF)) {
@@ -115,17 +119,17 @@ Doc_load(Doc *self, Obj *dump) {
 
 bool
 Doc_equals(Doc *self, Obj *other) {
-    Doc *twin = (Doc*)other;
-
-    if (twin == self)                    { return true;  }
+    if ((Doc*)other == self)   { return true;  }
     if (!Obj_Is_A(other, DOC)) { return false; }
-
-    return Hash_Equals((Hash*)self->fields, (Obj*)twin->fields);
+    DocIVARS *const ivars = Doc_IVARS(self);
+    DocIVARS *const ovars = Doc_IVARS((Doc*)other);
+    return Hash_Equals((Hash*)ivars->fields, (Obj*)ovars->fields);
 }
 
 void
 Doc_destroy(Doc *self) {
-    DECREF(self->fields);
+    DocIVARS *const ivars = Doc_IVARS(self);
+    DECREF(ivars->fields);
     SUPER_DESTROY(self, DOC);
 }
 

http://git-wip-us.apache.org/repos/asf/lucy/blob/d3edeee9/c/src/Lucy/Index/DocReader.c
----------------------------------------------------------------------
diff --git a/c/src/Lucy/Index/DocReader.c b/c/src/Lucy/Index/DocReader.c
index 79fcd4f..ee56bd1 100644
--- a/c/src/Lucy/Index/DocReader.c
+++ b/c/src/Lucy/Index/DocReader.c
@@ -34,9 +34,10 @@
 
 HitDoc*
 DefDocReader_fetch_doc(DefaultDocReader *self, int32_t doc_id) {
-    Schema   *const schema = self->schema;
-    InStream *const dat_in = self->dat_in;
-    InStream *const ix_in  = self->ix_in;
+    DefaultDocReaderIVARS *const ivars = DefDocReader_IVARS(self);
+    Schema   *const schema = ivars->schema;
+    InStream *const dat_in = ivars->dat_in;
+    InStream *const ix_in  = ivars->ix_in;
     Hash     *const fields = Hash_new(1);
     int64_t   start;
     uint32_t  num_fields;

http://git-wip-us.apache.org/repos/asf/lucy/blob/d3edeee9/c/src/Lucy/Index/Inverter.c
----------------------------------------------------------------------
diff --git a/c/src/Lucy/Index/Inverter.c b/c/src/Lucy/Index/Inverter.c
index 0700075..22c78a1 100644
--- a/c/src/Lucy/Index/Inverter.c
+++ b/c/src/Lucy/Index/Inverter.c
@@ -33,15 +33,15 @@
 #include "Lucy/Plan/Schema.h"
 
 static InverterEntry*
-S_fetch_entry(Inverter *self, CharBuf *field) {
-    Schema *const schema = self->schema;
-    int32_t field_num = Seg_Field_Num(self->segment, field);
+S_fetch_entry(InverterIVARS *ivars, CharBuf *field) {
+    Schema *const schema = ivars->schema;
+    int32_t field_num = Seg_Field_Num(ivars->segment, field);
     if (!field_num) {
         // This field seems not to be in the segment yet.  Try to find it in
         // the Schema.
         if (Schema_Fetch_Type(schema, field)) {
             // The field is in the Schema.  Get a field num from the Segment.
-            field_num = Seg_Add_Field(self->segment, field);
+            field_num = Seg_Add_Field(ivars->segment, field);
         }
         else {
             // We've truly failed to find the field.  The user must
@@ -51,16 +51,17 @@ S_fetch_entry(Inverter *self, CharBuf *field) {
     }
 
     InverterEntry *entry
-        = (InverterEntry*)VA_Fetch(self->entry_pool, field_num);
+        = (InverterEntry*)VA_Fetch(ivars->entry_pool, field_num);
     if (!entry) {
         entry = InvEntry_new(schema, (CharBuf*)field, field_num);
-        VA_Store(self->entry_pool, field_num, (Obj*)entry);
+        VA_Store(ivars->entry_pool, field_num, (Obj*)entry);
     }
     return entry;
 }
 
 void
 Inverter_invert_doc(Inverter *self, Doc *doc) {
+    InverterIVARS *const ivars = Inverter_IVARS(self);
     Hash *const fields = (Hash*)Doc_Get_Fields(doc);
     uint32_t   num_keys     = Hash_Iterate(fields);
 
@@ -72,8 +73,9 @@ Inverter_invert_doc(Inverter *self, Doc *doc) {
         Obj *key, *obj;
         Hash_Next(fields, &key, &obj);
         CharBuf *field = (CharBuf*)CERTIFY(key, CHARBUF);
-        InverterEntry *inv_entry = S_fetch_entry(self, field);
-        FieldType *type = inv_entry->type;
+        InverterEntry *inventry = S_fetch_entry(ivars, field);
+        InverterEntryIVARS *inventry_ivars = InvEntry_IVARS(inventry);
+        FieldType *type = inventry_ivars->type;
 
         // Get the field value.
         switch (FType_Primitive_ID(type) & FType_PRIMITIVE_ID_MASK) {
@@ -81,7 +83,7 @@ Inverter_invert_doc(Inverter *self, Doc *doc) {
                     CharBuf *char_buf
                         = (CharBuf*)CERTIFY(obj, CHARBUF);
                     ViewCharBuf *value
-                        = (ViewCharBuf*)inv_entry->value;
+                        = (ViewCharBuf*)inventry_ivars->value;
                     ViewCB_Assign(value, char_buf);
                     break;
                 }
@@ -89,31 +91,31 @@ Inverter_invert_doc(Inverter *self, Doc *doc) {
                     ByteBuf *byte_buf
                         = (ByteBuf*)CERTIFY(obj, BYTEBUF);
                     ViewByteBuf *value
-                        = (ViewByteBuf*)inv_entry->value;
+                        = (ViewByteBuf*)inventry_ivars->value;
                     ViewBB_Assign(value, byte_buf);
                     break;
                 }
             case FType_INT32: {
                     int32_t int_val = (int32_t)Obj_To_I64(obj);
-                    Integer32* value = (Integer32*)inv_entry->value;
+                    Integer32* value = (Integer32*)inventry_ivars->value;
                     Int32_Set_Value(value, int_val);
                     break;
                 }
             case FType_INT64: {
                     int64_t int_val = Obj_To_I64(obj);
-                    Integer64* value = (Integer64*)inv_entry->value;
+                    Integer64* value = (Integer64*)inventry_ivars->value;
                     Int64_Set_Value(value, int_val);
                     break;
                 }
             case FType_FLOAT32: {
                     float float_val = (float)Obj_To_F64(obj);
-                    Float32* value = (Float32*)inv_entry->value;
+                    Float32* value = (Float32*)inventry_ivars->value;
                     Float32_Set_Value(value, float_val);
                     break;
                 }
             case FType_FLOAT64: {
                     double float_val = Obj_To_F64(obj);
-                    Float64* value = (Float64*)inv_entry->value;
+                    Float64* value = (Float64*)inventry_ivars->value;
                     Float64_Set_Value(value, float_val);
                     break;
                 }
@@ -121,7 +123,7 @@ Inverter_invert_doc(Inverter *self, Doc *doc) {
                 THROW(ERR, "Unrecognized type: %o", type);
         }
 
-        Inverter_Add_Field(self, inv_entry);
+        Inverter_Add_Field(self, inventry);
     }
 }
 


[lucy-commits] [6/9] git commit: refs/heads/ivars-wip1 - Migrate more Lucy search classes to IVARS.

Posted by ma...@apache.org.
Migrate more Lucy search classes to IVARS.
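
One detail worth noting in SortCollector.c below: hot-path static helpers
such as SI_competitive() now take the cached SortCollectorIVARS pointer
directly instead of the object, so the lookup happens once per Collect()
call. A rough sketch of that shape, with hypothetical names standing in
for the real SortCollector fields:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical ivars struct, for illustration only. */
    typedef struct CollIVARS {
        int32_t total_hits;
        int32_t bubble_doc;
    } CollIVARS;

    typedef struct Coll {
        CollIVARS ivars;
    } Coll;

    static CollIVARS*
    Coll_IVARS(Coll *self) {
        return &self->ivars;
    }

    /* The helper receives the ivars pointer, not the object. */
    static bool
    S_competitive(CollIVARS *ivars, int32_t doc_id) {
        return doc_id < ivars->bubble_doc;
    }

    /* The public method derives ivars once and hands it to helpers. */
    void
    Coll_collect(Coll *self, int32_t doc_id) {
        CollIVARS *const ivars = Coll_IVARS(self);
        ivars->total_hits++;
        if (S_competitive(ivars, doc_id)) {
            /* ... insert the hit into the queue ... */
        }
    }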


Project: http://git-wip-us.apache.org/repos/asf/lucy/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucy/commit/f19a6af0
Tree: http://git-wip-us.apache.org/repos/asf/lucy/tree/f19a6af0
Diff: http://git-wip-us.apache.org/repos/asf/lucy/diff/f19a6af0

Branch: refs/heads/ivars-wip1
Commit: f19a6af0858019c0c81b003982167d33dbaa114c
Parents: 188574c
Author: Marvin Humphrey <ma...@rectangular.com>
Authored: Sun Jun 30 21:59:23 2013 -0700
Committer: Marvin Humphrey <ma...@rectangular.com>
Committed: Mon Jul 1 08:10:18 2013 -0700

----------------------------------------------------------------------
 core/Lucy/Search/Collector/SortCollector.c | 300 +++++++++++++-----------
 core/Lucy/Search/QueryParser/ParserElem.c  |  51 ++--
 core/Lucy/Search/QueryParser/QueryLexer.c  |  10 +-
 3 files changed, 193 insertions(+), 168 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucy/blob/f19a6af0/core/Lucy/Search/Collector/SortCollector.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Search/Collector/SortCollector.c b/core/Lucy/Search/Collector/SortCollector.c
index 9c4d182..7c877bf 100644
--- a/core/Lucy/Search/Collector/SortCollector.c
+++ b/core/Lucy/Search/Collector/SortCollector.c
@@ -63,7 +63,7 @@ S_derive_action(SortRule *rule, SortCache *sort_cache);
 
 // Decide whether a doc should be inserted into the HitQueue.
 static INLINE bool
-SI_competitive(SortCollector *self, int32_t doc_id);
+SI_competitive(SortCollectorIVARS *ivars, int32_t doc_id);
 
 SortCollector*
 SortColl_new(Schema *schema, SortSpec *sort_spec, uint32_t wanted) {
@@ -98,33 +98,34 @@ SortColl_init(SortCollector *self, Schema *schema, SortSpec *sort_spec,
 
     // Init.
     Coll_init((Collector*)self);
-    self->total_hits    = 0;
-    self->bubble_doc    = INT32_MAX;
-    self->bubble_score  = F32_NEGINF;
-    self->seg_doc_max   = 0;
+    SortCollectorIVARS *const ivars = SortColl_IVARS(self);
+    ivars->total_hits    = 0;
+    ivars->bubble_doc    = INT32_MAX;
+    ivars->bubble_score  = F32_NEGINF;
+    ivars->seg_doc_max   = 0;
 
     // Assign.
-    self->wanted        = wanted;
+    ivars->wanted        = wanted;
 
     // Derive.
-    self->hit_q         = HitQ_new(schema, sort_spec, wanted);
-    self->rules         = rules; // absorb refcount.
-    self->num_rules     = num_rules;
-    self->sort_caches   = (SortCache**)CALLOCATE(num_rules, sizeof(SortCache*));
-    self->ord_arrays    = (void**)CALLOCATE(num_rules, sizeof(void*));
-    self->actions       = (uint8_t*)CALLOCATE(num_rules, sizeof(uint8_t));
+    ivars->hit_q         = HitQ_new(schema, sort_spec, wanted);
+    ivars->rules         = rules; // absorb refcount.
+    ivars->num_rules     = num_rules;
+    ivars->sort_caches   = (SortCache**)CALLOCATE(num_rules, sizeof(SortCache*));
+    ivars->ord_arrays    = (void**)CALLOCATE(num_rules, sizeof(void*));
+    ivars->actions       = (uint8_t*)CALLOCATE(num_rules, sizeof(uint8_t));
 
     // Build up an array of "actions" which we will execute during each call
     // to Collect(). Determine whether we need to track scores and field
     // values.
-    self->need_score  = false;
-    self->need_values = false;
+    ivars->need_score  = false;
+    ivars->need_values = false;
     for (uint32_t i = 0; i < num_rules; i++) {
         SortRule *rule   = (SortRule*)VA_Fetch(rules, i);
         int32_t rule_type  = SortRule_Get_Type(rule);
-        self->actions[i] = S_derive_action(rule, NULL);
+        ivars->actions[i] = S_derive_action(rule, NULL);
         if (rule_type == SortRule_SCORE) {
-            self->need_score = true;
+            ivars->need_score = true;
         }
         else if (rule_type == SortRule_FIELD) {
             CharBuf *field = SortRule_Get_Field(rule);
@@ -132,30 +133,30 @@ SortColl_init(SortCollector *self, Schema *schema, SortSpec *sort_spec,
             if (!type || !FType_Sortable(type)) {
                 THROW(ERR, "'%o' isn't a sortable field", field);
             }
-            self->need_values = true;
+            ivars->need_values = true;
         }
     }
 
     // Perform an optimization.  So long as we always collect docs in
     // ascending order, Collect() will favor lower doc numbers -- so we may
     // not need to execute a final COMPARE_BY_DOC_ID action.
-    self->num_actions = num_rules;
-    if (self->actions[num_rules - 1] == COMPARE_BY_DOC_ID) {
-        self->num_actions--;
+    ivars->num_actions = num_rules;
+    if (ivars->actions[num_rules - 1] == COMPARE_BY_DOC_ID) {
+        ivars->num_actions--;
     }
 
     // Override our derived actions with an action which will be executed
     // automatically until the queue fills up.
-    self->auto_actions    = (uint8_t*)MALLOCATE(1);
-    self->auto_actions[0] = wanted ? AUTO_ACCEPT : AUTO_REJECT;
-    self->derived_actions = self->actions;
-    self->actions         = self->auto_actions;
+    ivars->auto_actions    = (uint8_t*)MALLOCATE(1);
+    ivars->auto_actions[0] = wanted ? AUTO_ACCEPT : AUTO_REJECT;
+    ivars->derived_actions = ivars->actions;
+    ivars->actions         = ivars->auto_actions;
 
 
     // Prepare a MatchDoc-in-waiting.
-    VArray *values = self->need_values ? VA_new(num_rules) : NULL;
-    float   score  = self->need_score  ? F32_NEGINF : F32_NAN;
-    self->bumped = MatchDoc_new(INT32_MAX, score, values);
+    VArray *values = ivars->need_values ? VA_new(num_rules) : NULL;
+    float   score  = ivars->need_score  ? F32_NEGINF : F32_NAN;
+    ivars->bumped = MatchDoc_new(INT32_MAX, score, values);
     DECREF(values);
 
     return self;
@@ -163,13 +164,14 @@ SortColl_init(SortCollector *self, Schema *schema, SortSpec *sort_spec,
 
 void
 SortColl_destroy(SortCollector *self) {
-    DECREF(self->hit_q);
-    DECREF(self->rules);
-    DECREF(self->bumped);
-    FREEMEM(self->sort_caches);
-    FREEMEM(self->ord_arrays);
-    FREEMEM(self->auto_actions);
-    FREEMEM(self->derived_actions);
+    SortCollectorIVARS *const ivars = SortColl_IVARS(self);
+    DECREF(ivars->hit_q);
+    DECREF(ivars->rules);
+    DECREF(ivars->bumped);
+    FREEMEM(ivars->sort_caches);
+    FREEMEM(ivars->ord_arrays);
+    FREEMEM(ivars->auto_actions);
+    FREEMEM(ivars->derived_actions);
     SUPER_DESTROY(self, SORTCOLLECTOR);
 }
 
@@ -221,69 +223,75 @@ S_derive_action(SortRule *rule, SortCache *cache) {
 
 void
 SortColl_set_reader(SortCollector *self, SegReader *reader) {
+    SortCollectorIVARS *const ivars = SortColl_IVARS(self);
     SortReader *sort_reader
         = (SortReader*)SegReader_Fetch(reader, VTable_Get_Name(SORTREADER));
 
     // Reset threshold variables and trigger auto-action behavior.
-    self->bumped->doc_id = INT32_MAX;
-    self->bubble_doc     = INT32_MAX;
-    self->bumped->score  = self->need_score ? F32_NEGINF : F32_NAN;
-    self->bubble_score   = self->need_score ? F32_NEGINF : F32_NAN;
-    self->actions        = self->auto_actions;
+    MatchDocIVARS *const bumped_ivars = MatchDoc_IVARS(ivars->bumped);
+    bumped_ivars->doc_id = INT32_MAX;
+    ivars->bubble_doc    = INT32_MAX;
+    bumped_ivars->score  = ivars->need_score ? F32_NEGINF : F32_NAN;
+    ivars->bubble_score  = ivars->need_score ? F32_NEGINF : F32_NAN;
+    ivars->actions       = ivars->auto_actions;
 
     // Obtain sort caches. Derive actions array for this segment.
-    if (self->need_values && sort_reader) {
-        for (uint32_t i = 0, max = self->num_rules; i < max; i++) {
-            SortRule  *rule  = (SortRule*)VA_Fetch(self->rules, i);
+    if (ivars->need_values && sort_reader) {
+        for (uint32_t i = 0, max = ivars->num_rules; i < max; i++) {
+            SortRule  *rule  = (SortRule*)VA_Fetch(ivars->rules, i);
             CharBuf   *field = SortRule_Get_Field(rule);
             SortCache *cache = field
                                ? SortReader_Fetch_Sort_Cache(sort_reader, field)
                                : NULL;
-            self->sort_caches[i] = cache;
-            self->derived_actions[i] = S_derive_action(rule, cache);
-            if (cache) { self->ord_arrays[i] = SortCache_Get_Ords(cache); }
-            else       { self->ord_arrays[i] = NULL; }
+            ivars->sort_caches[i] = cache;
+            ivars->derived_actions[i] = S_derive_action(rule, cache);
+            if (cache) { ivars->ord_arrays[i] = SortCache_Get_Ords(cache); }
+            else       { ivars->ord_arrays[i] = NULL; }
         }
     }
-    self->seg_doc_max = reader ? SegReader_Doc_Max(reader) : 0;
+    ivars->seg_doc_max = reader ? SegReader_Doc_Max(reader) : 0;
     Coll_set_reader((Collector*)self, reader);
 }
 
 VArray*
 SortColl_pop_match_docs(SortCollector *self) {
-    return HitQ_Pop_All(self->hit_q);
+    SortCollectorIVARS *const ivars = SortColl_IVARS(self);
+    return HitQ_Pop_All(ivars->hit_q);
 }
 
 uint32_t
 SortColl_get_total_hits(SortCollector *self) {
-    return self->total_hits;
+    return SortColl_IVARS(self)->total_hits;
 }
 
 bool
 SortColl_need_score(SortCollector *self) {
-    return self->need_score;
+    return SortColl_IVARS(self)->need_score;
 }
 
 void
 SortColl_collect(SortCollector *self, int32_t doc_id) {
+    SortCollectorIVARS *const ivars = SortColl_IVARS(self);
+
     // Add to the total number of hits.
-    self->total_hits++;
+    ivars->total_hits++;
 
     // Collect this hit if it's competitive.
-    if (SI_competitive(self, doc_id)) {
-        MatchDoc *const match_doc = self->bumped;
-        match_doc->doc_id = doc_id + self->base;
+    if (SI_competitive(ivars, doc_id)) {
+        MatchDoc *const match_doc = ivars->bumped;
+        MatchDocIVARS *const match_doc_ivars = MatchDoc_IVARS(match_doc);
+        match_doc_ivars->doc_id = doc_id + ivars->base;
 
-        if (self->need_score && match_doc->score == F32_NEGINF) {
-            match_doc->score = Matcher_Score(self->matcher);
+        if (ivars->need_score && match_doc_ivars->score == F32_NEGINF) {
+            match_doc_ivars->score = Matcher_Score(ivars->matcher);
         }
 
         // Fetch values so that cross-segment sorting can work.
-        if (self->need_values) {
-            VArray *values = match_doc->values;
+        if (ivars->need_values) {
+            VArray *values = match_doc_ivars->values;
 
-            for (uint32_t i = 0, max = self->num_rules; i < max; i++) {
-                SortCache *cache   = self->sort_caches[i];
+            for (uint32_t i = 0, max = ivars->num_rules; i < max; i++) {
+                SortCache *cache   = ivars->sort_caches[i];
                 Obj       *old_val = (Obj*)VA_Delete(values, i);
                 if (cache) {
                     int32_t ord = SortCache_Ordinal(cache, doc_id);
@@ -298,29 +306,31 @@ SortColl_collect(SortCollector *self, int32_t doc_id) {
         }
 
         // Insert the new MatchDoc.
-        self->bumped = (MatchDoc*)HitQ_Jostle(self->hit_q, (Obj*)match_doc);
+        ivars->bumped = (MatchDoc*)HitQ_Jostle(ivars->hit_q, (Obj*)match_doc);
 
-        if (self->bumped) {
-            if (self->bumped == match_doc) {
+        if (ivars->bumped) {
+            if (ivars->bumped == match_doc) {
                 /* The queue is full, and we have established a threshold for
                  * this segment as to what sort of document is definitely not
                  * acceptable.  Turn off AUTO_ACCEPT and start actually
                  * testing whether hits are competitive. */
-                self->bubble_score  = match_doc->score;
-                self->bubble_doc    = doc_id;
-                self->actions       = self->derived_actions;
+                ivars->bubble_score  = match_doc_ivars->score;
+                ivars->bubble_doc    = doc_id;
+                ivars->actions       = ivars->derived_actions;
             }
 
             // Recycle.
-            self->bumped->score = self->need_score ? F32_NEGINF : F32_NAN;
+            MatchDoc_IVARS(ivars->bumped)->score = ivars->need_score
+                                                   ? F32_NEGINF
+                                                   : F32_NAN;
         }
         else {
             // The queue isn't full yet, so create a fresh MatchDoc.
-            VArray *values = self->need_values
-                             ? VA_new(self->num_rules)
+            VArray *values = ivars->need_values
+                             ? VA_new(ivars->num_rules)
                              : NULL;
-            float fake_score = self->need_score ? F32_NEGINF : F32_NAN;
-            self->bumped = MatchDoc_new(INT32_MAX, fake_score, values);
+            float fake_score = ivars->need_score ? F32_NEGINF : F32_NAN;
+            ivars->bumped = MatchDoc_new(INT32_MAX, fake_score, values);
             DECREF(values);
         }
 
@@ -328,36 +338,41 @@ SortColl_collect(SortCollector *self, int32_t doc_id) {
 }
 
 static INLINE int32_t
-SI_compare_by_ord1(SortCollector *self, uint32_t tick, int32_t a, int32_t b) {
-    void *const ords = self->ord_arrays[tick];
+SI_compare_by_ord1(SortCollectorIVARS *ivars, uint32_t tick,
+                   int32_t a, int32_t b) {
+    void *const ords = ivars->ord_arrays[tick];
     int32_t a_ord = NumUtil_u1get(ords, a);
     int32_t b_ord = NumUtil_u1get(ords, b);
     return a_ord - b_ord;
 }
 static INLINE int32_t
-SI_compare_by_ord2(SortCollector *self, uint32_t tick, int32_t a, int32_t b) {
-    void *const ords = self->ord_arrays[tick];
+SI_compare_by_ord2(SortCollectorIVARS *ivars, uint32_t tick,
+                   int32_t a, int32_t b) {
+    void *const ords = ivars->ord_arrays[tick];
     int32_t a_ord = NumUtil_u2get(ords, a);
     int32_t b_ord = NumUtil_u2get(ords, b);
     return a_ord - b_ord;
 }
 static INLINE int32_t
-SI_compare_by_ord4(SortCollector *self, uint32_t tick, int32_t a, int32_t b) {
-    void *const ords = self->ord_arrays[tick];
+SI_compare_by_ord4(SortCollectorIVARS *ivars, uint32_t tick,
+                   int32_t a, int32_t b) {
+    void *const ords = ivars->ord_arrays[tick];
     int32_t a_ord = NumUtil_u4get(ords, a);
     int32_t b_ord = NumUtil_u4get(ords, b);
     return a_ord - b_ord;
 }
 static INLINE int32_t
-SI_compare_by_ord8(SortCollector *self, uint32_t tick, int32_t a, int32_t b) {
-    uint8_t *ords = (uint8_t*)self->ord_arrays[tick];
+SI_compare_by_ord8(SortCollectorIVARS *ivars, uint32_t tick,
+                   int32_t a, int32_t b) {
+    uint8_t *ords = (uint8_t*)ivars->ord_arrays[tick];
     int32_t a_ord = ords[a];
     int32_t b_ord = ords[b];
     return a_ord - b_ord;
 }
 static INLINE int32_t
-SI_compare_by_ord16(SortCollector *self, uint32_t tick, int32_t a, int32_t b) {
-    uint8_t *ord_bytes = (uint8_t*)self->ord_arrays[tick];
+SI_compare_by_ord16(SortCollectorIVARS *ivars, uint32_t tick,
+                    int32_t a, int32_t b) {
+    uint8_t *ord_bytes = (uint8_t*)ivars->ord_arrays[tick];
     uint8_t *address_a = ord_bytes + a * sizeof(uint16_t);
     uint8_t *address_b = ord_bytes + b * sizeof(uint16_t);
     int32_t  ord_a = NumUtil_decode_bigend_u16(address_a);
@@ -365,8 +380,9 @@ SI_compare_by_ord16(SortCollector *self, uint32_t tick, int32_t a, int32_t b) {
     return ord_a - ord_b;
 }
 static INLINE int32_t
-SI_compare_by_ord32(SortCollector *self, uint32_t tick, int32_t a, int32_t b) {
-    uint8_t *ord_bytes = (uint8_t*)self->ord_arrays[tick];
+SI_compare_by_ord32(SortCollectorIVARS *ivars, uint32_t tick,
+                    int32_t a, int32_t b) {
+    uint8_t *ord_bytes = (uint8_t*)ivars->ord_arrays[tick];
     uint8_t *address_a = ord_bytes + a * sizeof(uint32_t);
     uint8_t *address_b = ord_bytes + b * sizeof(uint32_t);
     int32_t  ord_a = NumUtil_decode_bigend_u32(address_a);
@@ -374,42 +390,42 @@ SI_compare_by_ord32(SortCollector *self, uint32_t tick, int32_t a, int32_t b) {
     return ord_a - ord_b;
 }
 static INLINE int32_t
-SI_compare_by_native_ord16(SortCollector *self, uint32_t tick,
+SI_compare_by_native_ord16(SortCollectorIVARS *ivars, uint32_t tick,
                            int32_t a, int32_t b) {
-    uint16_t *ords = (uint16_t*)self->ord_arrays[tick];
+    uint16_t *ords = (uint16_t*)ivars->ord_arrays[tick];
     int32_t a_ord = ords[a];
     int32_t b_ord = ords[b];
     return a_ord - b_ord;
 }
 static INLINE int32_t
-SI_compare_by_native_ord32(SortCollector *self, uint32_t tick,
+SI_compare_by_native_ord32(SortCollectorIVARS *ivars, uint32_t tick,
                            int32_t a, int32_t b) {
-    int32_t *ords = (int32_t*)self->ord_arrays[tick];
+    int32_t *ords = (int32_t*)ivars->ord_arrays[tick];
     return ords[a] - ords[b];
 }
 
 // Bounds checking for doc id against the segment doc_max.  We assume that any
 // sort cache ord arrays can accommodate lookups up to this number.
 static INLINE int32_t
-SI_validate_doc_id(SortCollector *self, int32_t doc_id) {
+SI_validate_doc_id(SortCollectorIVARS *ivars, int32_t doc_id) {
     // Check as uint32_t since we're using these doc ids as array indexes.
-    if ((uint32_t)doc_id > (uint32_t)self->seg_doc_max) {
+    if ((uint32_t)doc_id > (uint32_t)ivars->seg_doc_max) {
         THROW(ERR, "Doc ID %i32 greater than doc max %i32", doc_id,
-              self->seg_doc_max);
+              ivars->seg_doc_max);
     }
     return doc_id;
 }
 
 static INLINE bool
-SI_competitive(SortCollector *self, int32_t doc_id) {
+SI_competitive(SortCollectorIVARS *ivars, int32_t doc_id) {
     /* Ordinarily, we would cache local copies of more member variables in
      * const automatic variables in order to improve code clarity and provide
      * more hints to the compiler about what variables are actually invariant
      * for the duration of this routine:
      *
-     *     uint8_t *const actions    = self->actions;
-     *     const uint32_t num_rules  = self->num_rules;
-     *     const int32_t bubble_doc = self->bubble_doc;
+     *     uint8_t *const actions    = ivars->actions;
+     *     const uint32_t num_rules  = ivars->num_rules;
+     *     const int32_t bubble_doc = ivars->bubble_doc;
      *
      * However, our major goal is to return as quickly as possible, and the
      * common case is that we'll have our answer before the first loop iter
@@ -420,7 +436,7 @@ SI_competitive(SortCollector *self, int32_t doc_id) {
      * loop instead of a "for" loop, and the switch statement optimized for
      * compilation to a jump table.
      */
-    uint8_t *const actions = self->actions;
+    uint8_t *const actions = ivars->actions;
     uint32_t i = 0;
 
     // Iterate through our array of actions, returning as quickly as possible.
@@ -433,46 +449,46 @@ SI_competitive(SortCollector *self, int32_t doc_id) {
             case AUTO_TIE:
                 break;
             case COMPARE_BY_SCORE: {
-                    float score = Matcher_Score(self->matcher);
-                    if (*(int32_t*)&score == *(int32_t*)&self->bubble_score) {
+                    float score = Matcher_Score(ivars->matcher);
+                    if (*(int32_t*)&score == *(int32_t*)&ivars->bubble_score) {
                         break;
                     }
-                    if (score > self->bubble_score) {
-                        self->bumped->score = score;
+                    if (score > ivars->bubble_score) {
+                        MatchDoc_IVARS(ivars->bumped)->score = score;
                         return true;
                     }
-                    else if (score < self->bubble_score) {
+                    else if (score < ivars->bubble_score) {
                         return false;
                     }
                 }
                 break;
             case COMPARE_BY_SCORE_REV: {
-                    float score = Matcher_Score(self->matcher);
-                    if (*(int32_t*)&score == *(int32_t*)&self->bubble_score) {
+                    float score = Matcher_Score(ivars->matcher);
+                    if (*(int32_t*)&score == *(int32_t*)&ivars->bubble_score) {
                         break;
                     }
-                    if (score < self->bubble_score) {
-                        self->bumped->score = score;
+                    if (score < ivars->bubble_score) {
+                        MatchDoc_IVARS(ivars->bumped)->score = score;
                         return true;
                     }
-                    else if (score > self->bubble_score) {
+                    else if (score > ivars->bubble_score) {
                         return false;
                     }
                 }
                 break;
             case COMPARE_BY_DOC_ID:
-                if (doc_id > self->bubble_doc)      { return false; }
-                else if (doc_id < self->bubble_doc) { return true; }
+                if (doc_id > ivars->bubble_doc)      { return false; }
+                else if (doc_id < ivars->bubble_doc) { return true; }
                 break;
             case COMPARE_BY_DOC_ID_REV:
-                if (doc_id > self->bubble_doc)      { return true; }
-                else if (doc_id < self->bubble_doc) { return false; }
+                if (doc_id > ivars->bubble_doc)      { return true; }
+                else if (doc_id < ivars->bubble_doc) { return false; }
                 break;
             case COMPARE_BY_ORD1: {
                     int32_t comparison
                         = SI_compare_by_ord1(
-                              self, i, SI_validate_doc_id(self, doc_id),
-                              self->bubble_doc);
+                              ivars, i, SI_validate_doc_id(ivars, doc_id),
+                              ivars->bubble_doc);
                     if (comparison < 0)      { return true; }
                     else if (comparison > 0) { return false; }
                 }
@@ -480,8 +496,8 @@ SI_competitive(SortCollector *self, int32_t doc_id) {
             case COMPARE_BY_ORD1_REV: {
                     int32_t comparison
                         = SI_compare_by_ord1(
-                              self, i, self->bubble_doc,
-                              SI_validate_doc_id(self, doc_id));
+                              ivars, i, ivars->bubble_doc,
+                              SI_validate_doc_id(ivars, doc_id));
                     if (comparison < 0)      { return true; }
                     else if (comparison > 0) { return false; }
                 }
@@ -489,8 +505,8 @@ SI_competitive(SortCollector *self, int32_t doc_id) {
             case COMPARE_BY_ORD2: {
                     int32_t comparison
                         = SI_compare_by_ord2(
-                              self, i, SI_validate_doc_id(self, doc_id),
-                              self->bubble_doc);
+                              ivars, i, SI_validate_doc_id(ivars, doc_id),
+                              ivars->bubble_doc);
                     if (comparison < 0)      { return true; }
                     else if (comparison > 0) { return false; }
                 }
@@ -498,8 +514,8 @@ SI_competitive(SortCollector *self, int32_t doc_id) {
             case COMPARE_BY_ORD2_REV: {
                     int32_t comparison
                         = SI_compare_by_ord2(
-                              self, i, self->bubble_doc,
-                              SI_validate_doc_id(self, doc_id));
+                              ivars, i, ivars->bubble_doc,
+                              SI_validate_doc_id(ivars, doc_id));
                     if (comparison < 0)      { return true; }
                     else if (comparison > 0) { return false; }
                 }
@@ -507,8 +523,8 @@ SI_competitive(SortCollector *self, int32_t doc_id) {
             case COMPARE_BY_ORD4: {
                     int32_t comparison
                         = SI_compare_by_ord4(
-                              self, i, SI_validate_doc_id(self, doc_id),
-                              self->bubble_doc);
+                              ivars, i, SI_validate_doc_id(ivars, doc_id),
+                              ivars->bubble_doc);
                     if (comparison < 0)      { return true; }
                     else if (comparison > 0) { return false; }
                 }
@@ -516,8 +532,8 @@ SI_competitive(SortCollector *self, int32_t doc_id) {
             case COMPARE_BY_ORD4_REV: {
                     int32_t comparison
                         = SI_compare_by_ord4(
-                              self, i, self->bubble_doc,
-                              SI_validate_doc_id(self, doc_id));
+                              ivars, i, ivars->bubble_doc,
+                              SI_validate_doc_id(ivars, doc_id));
                     if (comparison < 0)      { return true; }
                     else if (comparison > 0) { return false; }
                 }
@@ -525,8 +541,8 @@ SI_competitive(SortCollector *self, int32_t doc_id) {
             case COMPARE_BY_ORD8: {
                     int32_t comparison
                         = SI_compare_by_ord8(
-                              self, i, SI_validate_doc_id(self, doc_id),
-                              self->bubble_doc);
+                              ivars, i, SI_validate_doc_id(ivars, doc_id),
+                              ivars->bubble_doc);
                     if (comparison < 0)      { return true; }
                     else if (comparison > 0) { return false; }
                 }
@@ -534,8 +550,8 @@ SI_competitive(SortCollector *self, int32_t doc_id) {
             case COMPARE_BY_ORD8_REV: {
                     int32_t comparison
                         = SI_compare_by_ord8(
-                              self, i, self->bubble_doc,
-                              SI_validate_doc_id(self, doc_id));
+                              ivars, i, ivars->bubble_doc,
+                              SI_validate_doc_id(ivars, doc_id));
                     if (comparison < 0)      { return true; }
                     else if (comparison > 0) { return false; }
                 }
@@ -543,8 +559,8 @@ SI_competitive(SortCollector *self, int32_t doc_id) {
             case COMPARE_BY_ORD16: {
                     int32_t comparison
                         = SI_compare_by_ord16(
-                              self, i, SI_validate_doc_id(self, doc_id),
-                              self->bubble_doc);
+                              ivars, i, SI_validate_doc_id(ivars, doc_id),
+                              ivars->bubble_doc);
                     if (comparison < 0)      { return true; }
                     else if (comparison > 0) { return false; }
                 }
@@ -552,8 +568,8 @@ SI_competitive(SortCollector *self, int32_t doc_id) {
             case COMPARE_BY_ORD16_REV: {
                     int32_t comparison
                         = SI_compare_by_ord16(
-                              self, i, self->bubble_doc,
-                              SI_validate_doc_id(self, doc_id));
+                              ivars, i, ivars->bubble_doc,
+                              SI_validate_doc_id(ivars, doc_id));
                     if (comparison < 0)      { return true; }
                     else if (comparison > 0) { return false; }
                 }
@@ -561,8 +577,8 @@ SI_competitive(SortCollector *self, int32_t doc_id) {
             case COMPARE_BY_ORD32: {
                     int32_t comparison
                         = SI_compare_by_ord32(
-                              self, i, SI_validate_doc_id(self, doc_id),
-                              self->bubble_doc);
+                              ivars, i, SI_validate_doc_id(ivars, doc_id),
+                              ivars->bubble_doc);
                     if (comparison < 0)      { return true; }
                     else if (comparison > 0) { return false; }
                 }
@@ -570,8 +586,8 @@ SI_competitive(SortCollector *self, int32_t doc_id) {
             case COMPARE_BY_ORD32_REV: {
                     int32_t comparison
                         = SI_compare_by_ord32(
-                              self, i, self->bubble_doc,
-                              SI_validate_doc_id(self, doc_id));
+                              ivars, i, ivars->bubble_doc,
+                              SI_validate_doc_id(ivars, doc_id));
                     if (comparison < 0)      { return true; }
                     else if (comparison > 0) { return false; }
                 }
@@ -579,8 +595,8 @@ SI_competitive(SortCollector *self, int32_t doc_id) {
             case COMPARE_BY_NATIVE_ORD16: {
                     int32_t comparison
                         = SI_compare_by_native_ord16(
-                              self, i, SI_validate_doc_id(self, doc_id),
-                              self->bubble_doc);
+                              ivars, i, SI_validate_doc_id(ivars, doc_id),
+                              ivars->bubble_doc);
                     if (comparison < 0)      { return true; }
                     else if (comparison > 0) { return false; }
                 }
@@ -588,8 +604,8 @@ SI_competitive(SortCollector *self, int32_t doc_id) {
             case COMPARE_BY_NATIVE_ORD16_REV: {
                     int32_t comparison
                         = SI_compare_by_native_ord16(
-                              self, i, self->bubble_doc,
-                              SI_validate_doc_id(self, doc_id));
+                              ivars, i, ivars->bubble_doc,
+                              SI_validate_doc_id(ivars, doc_id));
                     if (comparison < 0)      { return true; }
                     else if (comparison > 0) { return false; }
                 }
@@ -597,8 +613,8 @@ SI_competitive(SortCollector *self, int32_t doc_id) {
             case COMPARE_BY_NATIVE_ORD32: {
                     int32_t comparison
                         = SI_compare_by_native_ord32(
-                              self, i, SI_validate_doc_id(self, doc_id),
-                              self->bubble_doc);
+                              ivars, i, SI_validate_doc_id(ivars, doc_id),
+                              ivars->bubble_doc);
                     if (comparison < 0)      { return true; }
                     else if (comparison > 0) { return false; }
                 }
@@ -606,8 +622,8 @@ SI_competitive(SortCollector *self, int32_t doc_id) {
             case COMPARE_BY_NATIVE_ORD32_REV: {
                     int32_t comparison
                         = SI_compare_by_native_ord32(
-                              self, i, self->bubble_doc,
-                              SI_validate_doc_id(self, doc_id));
+                              ivars, i, ivars->bubble_doc,
+                              SI_validate_doc_id(ivars, doc_id));
                     if (comparison < 0)      { return true; }
                     else if (comparison > 0) { return false; }
                 }
@@ -615,7 +631,7 @@ SI_competitive(SortCollector *self, int32_t doc_id) {
             default:
                 THROW(ERR, "UNEXPECTED action %u8", actions[i]);
         }
-    } while (++i < self->num_actions);
+    } while (++i < ivars->num_actions);
 
     // If we've made it this far and we're still tied, reject the doc so that
     // we prefer items already in the queue.  This has the effect of

http://git-wip-us.apache.org/repos/asf/lucy/blob/f19a6af0/core/Lucy/Search/QueryParser/ParserElem.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Search/QueryParser/ParserElem.c b/core/Lucy/Search/QueryParser/ParserElem.c
index 2bedc2d..a0c4e8c 100644
--- a/core/Lucy/Search/QueryParser/ParserElem.c
+++ b/core/Lucy/Search/QueryParser/ParserElem.c
@@ -27,93 +27,100 @@ ParserElem_new(uint32_t type, Obj *value) {
 
 ParserElem*
 ParserElem_init(ParserElem *self, uint32_t type, Obj *value) {
-    self->type  = type;
-    self->value = value;
-    self->occur = LUCY_QPARSER_SHOULD;
+    ParserElemIVARS *const ivars = ParserElem_IVARS(self);
+    ivars->type  = type;
+    ivars->value = value;
+    ivars->occur = LUCY_QPARSER_SHOULD;
     return self;
 }
 
 void
 ParserElem_destroy(ParserElem *self) {
-    DECREF(self->value);
+    ParserElemIVARS *const ivars = ParserElem_IVARS(self);
+    DECREF(ivars->value);
     SUPER_DESTROY(self, PARSERELEM);
 }
 
 void
 ParserElem_set_value(ParserElem *self, Obj *value) {
+    ParserElemIVARS *const ivars = ParserElem_IVARS(self);
     INCREF(value);
-    DECREF(self->value);
-    self->value = value;
+    DECREF(ivars->value);
+    ivars->value = value;
 }
 
 Obj*
 ParserElem_as(ParserElem *self, VTable *metaclass) {
-    if (self->value && Obj_Is_A(self->value, metaclass)) {
-        return self->value;
+    ParserElemIVARS *const ivars = ParserElem_IVARS(self);
+    if (ivars->value && Obj_Is_A(ivars->value, metaclass)) {
+        return ivars->value;
     }
     return NULL;
 }
 
 uint32_t
 ParserElem_get_type(ParserElem *self) {
-    return self->type;
+    return ParserElem_IVARS(self)->type;
 }
 
 void
 ParserElem_require(ParserElem *self) {
-    switch (self->occur) {
+    ParserElemIVARS *const ivars = ParserElem_IVARS(self);
+    switch (ivars->occur) {
         case LUCY_QPARSER_SHOULD:
-            self->occur = LUCY_QPARSER_MUST;
+            ivars->occur = LUCY_QPARSER_MUST;
             break;
         case LUCY_QPARSER_MUST_NOT:
         case LUCY_QPARSER_MUST:
             break;
         default:
-            THROW(ERR, "Internal error in value of occur: %u32", self->occur);
+            THROW(ERR, "Internal error in value of occur: %u32", ivars->occur);
     }
 }
 
 void
 ParserElem_unrequire(ParserElem *self) {
-    switch (self->occur) {
+    ParserElemIVARS *const ivars = ParserElem_IVARS(self);
+    switch (ivars->occur) {
         case LUCY_QPARSER_MUST:
-            self->occur = LUCY_QPARSER_SHOULD;
+            ivars->occur = LUCY_QPARSER_SHOULD;
             break;
         case LUCY_QPARSER_MUST_NOT:
         case LUCY_QPARSER_SHOULD:
             break;
         default:
-            THROW(ERR, "Internal error in value of occur: %u32", self->occur);
+            THROW(ERR, "Internal error in value of occur: %u32", ivars->occur);
     }
 }
 
 void
 ParserElem_negate(ParserElem *self) {
-    switch (self->occur) {
+    ParserElemIVARS *const ivars = ParserElem_IVARS(self);
+    switch (ivars->occur) {
         case LUCY_QPARSER_SHOULD:
         case LUCY_QPARSER_MUST:
-            self->occur = LUCY_QPARSER_MUST_NOT;
+            ivars->occur = LUCY_QPARSER_MUST_NOT;
             break;
         case LUCY_QPARSER_MUST_NOT:
-            self->occur = LUCY_QPARSER_MUST; // Apply double negative.
+            ivars->occur = LUCY_QPARSER_MUST; // Apply double negative.
             break;
         default:
-            THROW(ERR, "Internal error in value of occur: %u32", self->occur);
+            THROW(ERR, "Internal error in value of occur: %u32", ivars->occur);
     }
 }
 
 bool
 ParserElem_optional(ParserElem *self) {
-    return self->occur == LUCY_QPARSER_SHOULD;
+    return ParserElem_IVARS(self)->occur == LUCY_QPARSER_SHOULD;
 }
 
 bool
 ParserElem_required(ParserElem *self) {
-    return self->occur == LUCY_QPARSER_MUST;
+    return ParserElem_IVARS(self)->occur == LUCY_QPARSER_MUST;
 }
 
 bool
 ParserElem_negated(ParserElem *self) {
-    return self->occur == LUCY_QPARSER_MUST_NOT;
+    return ParserElem_IVARS(self)->occur == LUCY_QPARSER_MUST_NOT;
 }
 

http://git-wip-us.apache.org/repos/asf/lucy/blob/f19a6af0/core/Lucy/Search/QueryParser/QueryLexer.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Search/QueryParser/QueryLexer.c b/core/Lucy/Search/QueryParser/QueryLexer.c
index 851380c..847a69f 100644
--- a/core/Lucy/Search/QueryParser/QueryLexer.c
+++ b/core/Lucy/Search/QueryParser/QueryLexer.c
@@ -55,22 +55,24 @@ QueryLexer_new() {
 
 QueryLexer*
 QueryLexer_init(QueryLexer *self) {
-    self->heed_colons = false;
+    QueryLexerIVARS *const ivars = QueryLexer_IVARS(self);
+    ivars->heed_colons = false;
     return self;
 }
 
 bool
 QueryLexer_heed_colons(QueryLexer *self) {
-    return self->heed_colons;
+    return QueryLexer_IVARS(self)->heed_colons;
 }
 
 void
 QueryLexer_set_heed_colons(QueryLexer *self, bool heed_colons) {
-    self->heed_colons = heed_colons;
+    QueryLexer_IVARS(self)->heed_colons = heed_colons;
 }
 
 VArray*
 QueryLexer_tokenize(QueryLexer *self, const CharBuf *query_string) {
+    QueryLexerIVARS *const ivars = QueryLexer_IVARS(self);
     CharBuf *copy = query_string
                     ? CB_Clone(query_string)
                     : CB_new_from_trusted_utf8("", 0);
@@ -86,7 +88,7 @@ QueryLexer_tokenize(QueryLexer *self, const CharBuf *query_string) {
             continue;
         }
 
-        if (self->heed_colons) {
+        if (ivars->heed_colons) {
             ParserElem *elem = S_consume_field(qstring);
             if (elem) {
                 VA_Push(elems, (Obj*)elem);


[lucy-commits] [4/9] git commit: refs/heads/ivars-wip1 - Migrate Lucy's index classes to IVARS.

Posted by ma...@apache.org.
Migrate Lucy's index classes to IVARS.

Change all of Lucy's index classes to access instance vars via an IVARS struct
rather than via `self`.
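
Two idioms recur throughout the diff: trivial getters call the IVARS
accessor inline, while longer method bodies cache the pointer in a `const`
local up front. A minimal sketch of both, with hypothetical types standing
in for the generated ones:

    #include <stdint.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins; not the generated Segment types. */
    typedef struct SegIVARS {
        int64_t  count;
        char    *name;
    } SegIVARS;

    typedef struct Seg {
        SegIVARS ivars;
    } Seg;

    static SegIVARS*
    Seg_IVARS(Seg *self) {
        return &self->ivars;
    }

    /* One-liner: call the accessor inline. */
    int64_t
    Seg_get_count(Seg *self) {
        return Seg_IVARS(self)->count;
    }

    /* Longer body: cache the pointer once, then use it throughout. */
    void
    Seg_reset(Seg *self) {
        SegIVARS *const ivars = Seg_IVARS(self);
        free(ivars->name);
        ivars->name  = NULL;
        ivars->count = 0;
    }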


Project: http://git-wip-us.apache.org/repos/asf/lucy/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucy/commit/7c23ce21
Tree: http://git-wip-us.apache.org/repos/asf/lucy/tree/7c23ce21
Diff: http://git-wip-us.apache.org/repos/asf/lucy/diff/7c23ce21

Branch: refs/heads/ivars-wip1
Commit: 7c23ce21e7901ed0b110d62b3e624da68dc72d51
Parents: 445409a
Author: Marvin Humphrey <ma...@rectangular.com>
Authored: Sat Jun 29 17:49:23 2013 -0700
Committer: Marvin Humphrey <ma...@rectangular.com>
Committed: Sun Jun 30 17:35:36 2013 -0700

----------------------------------------------------------------------
 core/Lucy/Index/BackgroundMerger.c           | 227 ++++++------
 core/Lucy/Index/BitVecDelDocs.c              |  26 +-
 core/Lucy/Index/DataReader.c                 |  38 +-
 core/Lucy/Index/DataWriter.c                 |  32 +-
 core/Lucy/Index/DeletionsReader.c            |  67 ++--
 core/Lucy/Index/DeletionsWriter.c            | 112 +++---
 core/Lucy/Index/DocReader.c                  |  69 ++--
 core/Lucy/Index/DocVector.c                  |  36 +-
 core/Lucy/Index/DocWriter.c                  |  43 ++-
 core/Lucy/Index/FilePurger.c                 |  47 +--
 core/Lucy/Index/HighlightReader.c            |  73 ++--
 core/Lucy/Index/HighlightWriter.c            |  43 ++-
 core/Lucy/Index/IndexManager.c               |  97 +++---
 core/Lucy/Index/IndexReader.c                |  51 +--
 core/Lucy/Index/Indexer.c                    | 225 ++++++------
 core/Lucy/Index/Inverter.c                   | 181 +++++-----
 core/Lucy/Index/LexIndex.c                   |  80 +++--
 core/Lucy/Index/Lexicon.c                    |   8 +-
 core/Lucy/Index/LexiconReader.c              |  52 +--
 core/Lucy/Index/LexiconWriter.c              | 190 +++++-----
 core/Lucy/Index/PolyLexicon.c                |  62 ++--
 core/Lucy/Index/PolyReader.c                 | 111 +++---
 core/Lucy/Index/Posting.c                    |  10 +-
 core/Lucy/Index/PostingListReader.c          |  20 +-
 core/Lucy/Index/PostingListWriter.c          | 106 +++---
 core/Lucy/Index/PostingPool.c                | 366 ++++++++++----------
 core/Lucy/Index/RawLexicon.c                 |  35 +-
 core/Lucy/Index/RawPostingList.c             |  25 +-
 core/Lucy/Index/SegLexicon.c                 |  87 +++--
 core/Lucy/Index/SegPostingList.c             | 153 +++++----
 core/Lucy/Index/SegReader.c                  |  30 +-
 core/Lucy/Index/SegWriter.c                  |  90 +++--
 core/Lucy/Index/Segment.c                    | 106 +++---
 core/Lucy/Index/Similarity.c                 |  17 +-
 core/Lucy/Index/SkipStepper.c                |  23 +-
 core/Lucy/Index/Snapshot.c                   |  58 ++--
 core/Lucy/Index/SortCache.c                  |  71 ++--
 core/Lucy/Index/SortCache/NumericSortCache.c |  60 ++--
 core/Lucy/Index/SortCache/TextSortCache.c    |  49 +--
 core/Lucy/Index/SortFieldWriter.c            | 401 +++++++++++-----------
 core/Lucy/Index/SortReader.c                 |  94 ++---
 core/Lucy/Index/SortWriter.c                 | 124 +++----
 core/Lucy/Index/TermInfo.c                   |  61 ++--
 core/Lucy/Index/TermStepper.c                |  18 +-
 core/Lucy/Index/TermVector.c                 |  83 ++---
 core/Lucy/Index/ZombieKeyedHash.c            |  23 +-
 46 files changed, 2167 insertions(+), 1813 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/BackgroundMerger.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/BackgroundMerger.c b/core/Lucy/Index/BackgroundMerger.c
index db80f68..a32f215 100644
--- a/core/Lucy/Index/BackgroundMerger.c
+++ b/core/Lucy/Index/BackgroundMerger.c
@@ -65,86 +65,87 @@ BGMerger_new(Obj *index, IndexManager *manager) {
 
 BackgroundMerger*
 BGMerger_init(BackgroundMerger *self, Obj *index, IndexManager *manager) {
+    BackgroundMergerIVARS *const ivars = BGMerger_IVARS(self);
     Folder *folder = S_init_folder(index);
 
     // Init.
-    self->optimize      = false;
-    self->prepared      = false;
-    self->needs_commit  = false;
-    self->snapfile      = NULL;
-    self->doc_maps      = Hash_new(0);
+    ivars->optimize      = false;
+    ivars->prepared      = false;
+    ivars->needs_commit  = false;
+    ivars->snapfile      = NULL;
+    ivars->doc_maps      = Hash_new(0);
 
     // Assign.
-    self->folder = folder;
+    ivars->folder = folder;
     if (manager) {
-        self->manager = (IndexManager*)INCREF(manager);
+        ivars->manager = (IndexManager*)INCREF(manager);
     }
     else {
-        self->manager = IxManager_new(NULL, NULL);
-        IxManager_Set_Write_Lock_Timeout(self->manager, 10000);
+        ivars->manager = IxManager_new(NULL, NULL);
+        IxManager_Set_Write_Lock_Timeout(ivars->manager, 10000);
     }
-    IxManager_Set_Folder(self->manager, folder);
+    IxManager_Set_Folder(ivars->manager, folder);
 
     // Obtain write lock (which we'll only hold briefly), then merge lock.
     S_obtain_write_lock(self);
-    if (!self->write_lock) {
+    if (!ivars->write_lock) {
         DECREF(self);
         RETHROW(INCREF(Err_get_error()));
     }
     S_obtain_merge_lock(self);
-    if (!self->merge_lock) {
+    if (!ivars->merge_lock) {
         DECREF(self);
         RETHROW(INCREF(Err_get_error()));
     }
 
     // Find the latest snapshot.  If there's no index content, bail early.
-    self->snapshot = Snapshot_Read_File(Snapshot_new(), folder, NULL);
-    if (!Snapshot_Get_Path(self->snapshot)) {
+    ivars->snapshot = Snapshot_Read_File(Snapshot_new(), folder, NULL);
+    if (!Snapshot_Get_Path(ivars->snapshot)) {
         S_release_write_lock(self);
         S_release_merge_lock(self);
         return self;
     }
 
     // Create FilePurger. Zap detritus from previous sessions.
-    self->file_purger = FilePurger_new(folder, self->snapshot, self->manager);
-    FilePurger_Purge(self->file_purger);
+    ivars->file_purger = FilePurger_new(folder, ivars->snapshot, ivars->manager);
+    FilePurger_Purge(ivars->file_purger);
 
     // Open a PolyReader, passing in the IndexManager so we get a read lock on
     // the Snapshot's files -- so that Indexers don't zap our files while
     // we're operating in the background.
-    self->polyreader = PolyReader_open((Obj*)folder, NULL, self->manager);
+    ivars->polyreader = PolyReader_open((Obj*)folder, NULL, ivars->manager);
 
     // Clone the PolyReader's schema.
-    Hash *dump = Schema_Dump(PolyReader_Get_Schema(self->polyreader));
-    self->schema = (Schema*)CERTIFY(VTable_Load_Obj(SCHEMA, (Obj*)dump),
+    Hash *dump = Schema_Dump(PolyReader_Get_Schema(ivars->polyreader));
+    ivars->schema = (Schema*)CERTIFY(VTable_Load_Obj(SCHEMA, (Obj*)dump),
                                     SCHEMA);
     DECREF(dump);
 
     // Create new Segment.
     int64_t new_seg_num
-        = IxManager_Highest_Seg_Num(self->manager, self->snapshot) + 1;
-    VArray *fields = Schema_All_Fields(self->schema);
-    self->segment = Seg_new(new_seg_num);
+        = IxManager_Highest_Seg_Num(ivars->manager, ivars->snapshot) + 1;
+    VArray *fields = Schema_All_Fields(ivars->schema);
+    ivars->segment = Seg_new(new_seg_num);
     for (uint32_t i = 0, max = VA_Get_Size(fields); i < max; i++) {
-        Seg_Add_Field(self->segment, (CharBuf*)VA_Fetch(fields, i));
+        Seg_Add_Field(ivars->segment, (CharBuf*)VA_Fetch(fields, i));
     }
     DECREF(fields);
 
     // Our "cutoff" is the segment this BackgroundMerger will write.  Now that
     // we've determined the cutoff, write the merge data file.
-    self->cutoff = Seg_Get_Number(self->segment);
-    IxManager_Write_Merge_Data(self->manager, self->cutoff);
+    ivars->cutoff = Seg_Get_Number(ivars->segment);
+    IxManager_Write_Merge_Data(ivars->manager, ivars->cutoff);
 
     /* Create the SegWriter but hold off on preparing the new segment
      * directory -- because if we don't need to merge any segments we don't
      * need it.  (We've reserved the dir by plopping down the merge.json
      * file.) */
-    self->seg_writer = SegWriter_new(self->schema, self->snapshot,
-                                     self->segment, self->polyreader);
+    ivars->seg_writer = SegWriter_new(ivars->schema, ivars->snapshot,
+                                      ivars->segment, ivars->polyreader);
 
     // Grab a local ref to the DeletionsWriter.
-    self->del_writer
-        = (DeletionsWriter*)INCREF(SegWriter_Get_Del_Writer(self->seg_writer));
+    ivars->del_writer
+        = (DeletionsWriter*)INCREF(SegWriter_Get_Del_Writer(ivars->seg_writer));
 
     // Release the write lock.  Now new Indexers can start while we work in
     // the background.
@@ -155,20 +156,21 @@ BGMerger_init(BackgroundMerger *self, Obj *index, IndexManager *manager) {
 
 void
 BGMerger_destroy(BackgroundMerger *self) {
+    BackgroundMergerIVARS *const ivars = BGMerger_IVARS(self);
     S_release_merge_lock(self);
     S_release_write_lock(self);
-    DECREF(self->schema);
-    DECREF(self->folder);
-    DECREF(self->segment);
-    DECREF(self->manager);
-    DECREF(self->polyreader);
-    DECREF(self->del_writer);
-    DECREF(self->snapshot);
-    DECREF(self->seg_writer);
-    DECREF(self->file_purger);
-    DECREF(self->write_lock);
-    DECREF(self->snapfile);
-    DECREF(self->doc_maps);
+    DECREF(ivars->schema);
+    DECREF(ivars->folder);
+    DECREF(ivars->segment);
+    DECREF(ivars->manager);
+    DECREF(ivars->polyreader);
+    DECREF(ivars->del_writer);
+    DECREF(ivars->snapshot);
+    DECREF(ivars->seg_writer);
+    DECREF(ivars->file_purger);
+    DECREF(ivars->write_lock);
+    DECREF(ivars->snapfile);
+    DECREF(ivars->doc_maps);
     SUPER_DESTROY(self, BACKGROUNDMERGER);
 }
 
@@ -197,13 +199,14 @@ S_init_folder(Obj *index) {
 
 void
 BGMerger_optimize(BackgroundMerger *self) {
-    self->optimize = true;
+    BGMerger_IVARS(self)->optimize = true;
 }
 
 static uint32_t
 S_maybe_merge(BackgroundMerger *self) {
-    VArray *to_merge = IxManager_Recycle(self->manager, self->polyreader,
-                                         self->del_writer, 0, self->optimize);
+    BackgroundMergerIVARS *const ivars = BGMerger_IVARS(self);
+    VArray *to_merge = IxManager_Recycle(ivars->manager, ivars->polyreader,
+                                         ivars->del_writer, 0, ivars->optimize);
     int32_t num_to_merge = VA_Get_Size(to_merge);
 
     // There's no point in merging one segment if it has no deletions, because
@@ -221,22 +224,22 @@ S_maybe_merge(BackgroundMerger *self) {
     }
 
     // Now that we're sure we're writing a new segment, prep the seg dir.
-    SegWriter_Prep_Seg_Dir(self->seg_writer);
+    SegWriter_Prep_Seg_Dir(ivars->seg_writer);
 
     // Consolidate segments.
     for (uint32_t i = 0, max = num_to_merge; i < max; i++) {
         SegReader *seg_reader = (SegReader*)VA_Fetch(to_merge, i);
         CharBuf   *seg_name   = SegReader_Get_Seg_Name(seg_reader);
-        int64_t    doc_count  = Seg_Get_Count(self->segment);
+        int64_t    doc_count  = Seg_Get_Count(ivars->segment);
         Matcher *deletions
-            = DelWriter_Seg_Deletions(self->del_writer, seg_reader);
+            = DelWriter_Seg_Deletions(ivars->del_writer, seg_reader);
         I32Array *doc_map = DelWriter_Generate_Doc_Map(
-                                self->del_writer, deletions,
+                                ivars->del_writer, deletions,
                                 SegReader_Doc_Max(seg_reader),
                                 (int32_t)doc_count);
 
-        Hash_Store(self->doc_maps, (Obj*)seg_name, (Obj*)doc_map);
-        SegWriter_Merge_Segment(self->seg_writer, seg_reader, doc_map);
+        Hash_Store(ivars->doc_maps, (Obj*)seg_name, (Obj*)doc_map);
+        SegWriter_Merge_Segment(ivars->seg_writer, seg_reader, doc_map);
         DECREF(deletions);
     }
 
@@ -246,14 +249,15 @@ S_maybe_merge(BackgroundMerger *self) {
 
 static bool
 S_merge_updated_deletions(BackgroundMerger *self) {
+    BackgroundMergerIVARS *const ivars = BGMerger_IVARS(self);
     Hash *updated_deletions = NULL;
 
     PolyReader *new_polyreader
-        = PolyReader_open((Obj*)self->folder, NULL, NULL);
+        = PolyReader_open((Obj*)ivars->folder, NULL, NULL);
     VArray *new_seg_readers
         = PolyReader_Get_Seg_Readers(new_polyreader);
     VArray *old_seg_readers
-        = PolyReader_Get_Seg_Readers(self->polyreader);
+        = PolyReader_Get_Seg_Readers(ivars->polyreader);
     Hash *new_segs = Hash_new(VA_Get_Size(new_seg_readers));
 
     for (uint32_t i = 0, max = VA_Get_Size(new_seg_readers); i < max; i++) {
@@ -267,7 +271,7 @@ S_merge_updated_deletions(BackgroundMerger *self) {
         CharBuf   *seg_name   = SegReader_Get_Seg_Name(seg_reader);
 
         // If this segment was merged away...
-        if (Hash_Fetch(self->doc_maps, (Obj*)seg_name)) {
+        if (Hash_Fetch(ivars->doc_maps, (Obj*)seg_name)) {
             SegReader *new_seg_reader
                 = (SegReader*)CERTIFY(
                       Hash_Fetch(new_segs, (Obj*)seg_name),
@@ -297,18 +301,18 @@ S_merge_updated_deletions(BackgroundMerger *self) {
     }
     else {
         PolyReader *merge_polyreader
-            = PolyReader_open((Obj*)self->folder, self->snapshot, NULL);
+            = PolyReader_open((Obj*)ivars->folder, ivars->snapshot, NULL);
         VArray *merge_seg_readers
             = PolyReader_Get_Seg_Readers(merge_polyreader);
         Snapshot *latest_snapshot
-            = Snapshot_Read_File(Snapshot_new(), self->folder, NULL);
+            = Snapshot_Read_File(Snapshot_new(), ivars->folder, NULL);
         int64_t new_seg_num
-            = IxManager_Highest_Seg_Num(self->manager, latest_snapshot) + 1;
+            = IxManager_Highest_Seg_Num(ivars->manager, latest_snapshot) + 1;
         Segment   *new_segment = Seg_new(new_seg_num);
-        SegWriter *seg_writer  = SegWriter_new(self->schema, self->snapshot,
+        SegWriter *seg_writer  = SegWriter_new(ivars->schema, ivars->snapshot,
                                                new_segment, merge_polyreader);
         DeletionsWriter *del_writer = SegWriter_Get_Del_Writer(seg_writer);
-        int64_t  merge_seg_num = Seg_Get_Number(self->segment);
+        int64_t  merge_seg_num = Seg_Get_Number(ivars->segment);
         uint32_t seg_tick      = INT32_MAX;
         int32_t  offset        = INT32_MAX;
         CharBuf *seg_name      = NULL;
@@ -334,7 +338,7 @@ S_merge_updated_deletions(BackgroundMerger *self) {
               ) {
             I32Array *doc_map
                 = (I32Array*)CERTIFY(
-                      Hash_Fetch(self->doc_maps, (Obj*)seg_name),
+                      Hash_Fetch(ivars->doc_maps, (Obj*)seg_name),
                       I32ARRAY);
             int32_t del;
             while (0 != (del = Matcher_Next(deletions))) {
@@ -365,11 +369,12 @@ S_merge_updated_deletions(BackgroundMerger *self) {
 
 void
 BGMerger_prepare_commit(BackgroundMerger *self) {
-    VArray   *seg_readers     = PolyReader_Get_Seg_Readers(self->polyreader);
+    BackgroundMergerIVARS *const ivars = BGMerger_IVARS(self);
+    VArray   *seg_readers     = PolyReader_Get_Seg_Readers(ivars->polyreader);
     uint32_t  num_seg_readers = VA_Get_Size(seg_readers);
     uint32_t  segs_merged     = 0;
 
-    if (self->prepared) {
+    if (ivars->prepared) {
         THROW(ERR, "Can't call Prepare_Commit() more than once");
     }
 
@@ -379,46 +384,46 @@ BGMerger_prepare_commit(BackgroundMerger *self) {
     }
 
     if (!segs_merged) {
-        // Nothing merged.  Leave self->needs_commit false and bail out.
-        self->prepared = true;
+        // Nothing merged.  Leave `needs_commit` false and bail out.
+        ivars->prepared = true;
         return;
     }
     // Finish the segment and write a new snapshot file.
     else {
-        Folder   *folder   = self->folder;
-        Snapshot *snapshot = self->snapshot;
+        Folder   *folder   = ivars->folder;
+        Snapshot *snapshot = ivars->snapshot;
 
         // Write out new deletions.
-        if (DelWriter_Updated(self->del_writer)) {
+        if (DelWriter_Updated(ivars->del_writer)) {
             // Only write out if they haven't all been applied.
             if (segs_merged != num_seg_readers) {
-                DelWriter_Finish(self->del_writer);
+                DelWriter_Finish(ivars->del_writer);
             }
         }
 
         // Finish the segment.
-        SegWriter_Finish(self->seg_writer);
+        SegWriter_Finish(ivars->seg_writer);
 
         // Grab the write lock.
         S_obtain_write_lock(self);
-        if (!self->write_lock) {
+        if (!ivars->write_lock) {
             RETHROW(INCREF(Err_get_error()));
         }
 
         // Write temporary snapshot file.
-        DECREF(self->snapfile);
-        self->snapfile = IxManager_Make_Snapshot_Filename(self->manager);
-        CB_Cat_Trusted_Str(self->snapfile, ".temp", 5);
-        Folder_Delete(folder, self->snapfile);
-        Snapshot_Write_File(snapshot, folder, self->snapfile);
+        DECREF(ivars->snapfile);
+        ivars->snapfile = IxManager_Make_Snapshot_Filename(ivars->manager);
+        CB_Cat_Trusted_Str(ivars->snapfile, ".temp", 5);
+        Folder_Delete(folder, ivars->snapfile);
+        Snapshot_Write_File(snapshot, folder, ivars->snapfile);
 
         // Determine whether the index has been updated while this background
         // merge process was running.
 
         CharBuf *start_snapfile
-            = Snapshot_Get_Path(PolyReader_Get_Snapshot(self->polyreader));
+            = Snapshot_Get_Path(PolyReader_Get_Snapshot(ivars->polyreader));
         Snapshot *latest_snapshot
-            = Snapshot_Read_File(Snapshot_new(), self->folder, NULL);
+            = Snapshot_Read_File(Snapshot_new(), ivars->folder, NULL);
         CharBuf *latest_snapfile = Snapshot_Get_Path(latest_snapshot);
         bool index_updated
             = !CB_Equals(start_snapfile, (Obj*)latest_snapfile);
@@ -440,56 +445,58 @@ BGMerger_prepare_commit(BackgroundMerger *self) {
                 CharBuf *file = (CharBuf*)VA_Fetch(files, i);
                 if (CB_Starts_With_Str(file, "seg_", 4)) {
                     int64_t gen = (int64_t)IxFileNames_extract_gen(file);
-                    if (gen > self->cutoff) {
-                        Snapshot_Add_Entry(self->snapshot, file);
+                    if (gen > ivars->cutoff) {
+                        Snapshot_Add_Entry(ivars->snapshot, file);
                     }
                 }
             }
             DECREF(files);
 
             // Since the snapshot content has changed, we need to rewrite it.
-            Folder_Delete(folder, self->snapfile);
-            Snapshot_Write_File(snapshot, folder, self->snapfile);
+            Folder_Delete(folder, ivars->snapfile);
+            Snapshot_Write_File(snapshot, folder, ivars->snapfile);
         }
 
         DECREF(latest_snapshot);
 
-        self->needs_commit = true;
+        ivars->needs_commit = true;
     }
 
     // Close reader, so that we can delete its files if appropriate.
-    PolyReader_Close(self->polyreader);
+    PolyReader_Close(ivars->polyreader);
 
-    self->prepared = true;
+    ivars->prepared = true;
 }
 
 void
 BGMerger_commit(BackgroundMerger *self) {
+    BackgroundMergerIVARS *const ivars = BGMerger_IVARS(self);
+
     // Safety check.
-    if (!self->merge_lock) {
+    if (!ivars->merge_lock) {
         THROW(ERR, "Can't call commit() more than once");
     }
 
-    if (!self->prepared) {
+    if (!ivars->prepared) {
         BGMerger_Prepare_Commit(self);
     }
 
-    if (self->needs_commit) {
+    if (ivars->needs_commit) {
         bool success = false;
-        CharBuf *temp_snapfile = CB_Clone(self->snapfile);
+        CharBuf *temp_snapfile = CB_Clone(ivars->snapfile);
 
         // Rename temp snapshot file.
-        CB_Chop(self->snapfile, sizeof(".temp") - 1);
-        success = Folder_Hard_Link(self->folder, temp_snapfile,
-                                   self->snapfile);
-        Snapshot_Set_Path(self->snapshot, self->snapfile);
+        CB_Chop(ivars->snapfile, sizeof(".temp") - 1);
+        success = Folder_Hard_Link(ivars->folder, temp_snapfile,
+                                   ivars->snapfile);
+        Snapshot_Set_Path(ivars->snapshot, ivars->snapfile);
         if (!success) {
             CharBuf *mess = CB_newf("Can't create hard link from %o to %o",
-                                    temp_snapfile, self->snapfile);
+                                    temp_snapfile, ivars->snapfile);
             DECREF(temp_snapfile);
             Err_throw_mess(ERR, mess);
         }
-        if (!Folder_Delete(self->folder, temp_snapfile)) {
+        if (!Folder_Delete(ivars->folder, temp_snapfile)) {
             CharBuf *mess = CB_newf("Can't delete %o", temp_snapfile);
             DECREF(temp_snapfile);
             Err_throw_mess(ERR, mess);
@@ -499,11 +506,11 @@ BGMerger_commit(BackgroundMerger *self) {
 
     // Release the merge lock and remove the merge data file.
     S_release_merge_lock(self);
-    IxManager_Remove_Merge_Data(self->manager);
+    IxManager_Remove_Merge_Data(ivars->manager);
 
-    if (self->needs_commit) {
+    if (ivars->needs_commit) {
         // Purge obsolete files.
-        FilePurger_Purge(self->file_purger);
+        FilePurger_Purge(ivars->file_purger);
     }
 
     // Release the write lock.
@@ -512,11 +519,12 @@ BGMerger_commit(BackgroundMerger *self) {
 
 static void
 S_obtain_write_lock(BackgroundMerger *self) {
-    Lock *write_lock = IxManager_Make_Write_Lock(self->manager);
+    BackgroundMergerIVARS *const ivars = BGMerger_IVARS(self);
+    Lock *write_lock = IxManager_Make_Write_Lock(ivars->manager);
     Lock_Clear_Stale(write_lock);
     if (Lock_Obtain(write_lock)) {
         // Only assign if successful, otherwise DESTROY unlocks -- bad!
-        self->write_lock = write_lock;
+        ivars->write_lock = write_lock;
     }
     else {
         DECREF(write_lock);
@@ -525,11 +533,12 @@ S_obtain_write_lock(BackgroundMerger *self) {
 
 static void
 S_obtain_merge_lock(BackgroundMerger *self) {
-    Lock *merge_lock = IxManager_Make_Merge_Lock(self->manager);
+    BackgroundMergerIVARS *const ivars = BGMerger_IVARS(self);
+    Lock *merge_lock = IxManager_Make_Merge_Lock(ivars->manager);
     Lock_Clear_Stale(merge_lock);
     if (Lock_Obtain(merge_lock)) {
         // Only assign if successful, same rationale as above.
-        self->merge_lock = merge_lock;
+        ivars->merge_lock = merge_lock;
     }
     else {
         // We can't get the merge lock, so it seems there must be another
@@ -540,19 +549,21 @@ S_obtain_merge_lock(BackgroundMerger *self) {
 
 static void
 S_release_write_lock(BackgroundMerger *self) {
-    if (self->write_lock) {
-        Lock_Release(self->write_lock);
-        DECREF(self->write_lock);
-        self->write_lock = NULL;
+    BackgroundMergerIVARS *const ivars = BGMerger_IVARS(self);
+    if (ivars->write_lock) {
+        Lock_Release(ivars->write_lock);
+        DECREF(ivars->write_lock);
+        ivars->write_lock = NULL;
     }
 }
 
 static void
 S_release_merge_lock(BackgroundMerger *self) {
-    if (self->merge_lock) {
-        Lock_Release(self->merge_lock);
-        DECREF(self->merge_lock);
-        self->merge_lock = NULL;
+    BackgroundMergerIVARS *const ivars = BGMerger_IVARS(self);
+    if (ivars->merge_lock) {
+        Lock_Release(ivars->merge_lock);
+        DECREF(ivars->merge_lock);
+        ivars->merge_lock = NULL;
     }
 }
 

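Every hunk in this patch has the same shape: direct member access through
self->foo becomes a pointer fetched once per function from the class's
*_IVARS() accessor, which is then used for the rest of the function.  The
toy below is not the Clownfish implementation (Toy, ToyIVARS and Toy_IVARS
are invented names); it only sketches the idea of hiding the
instance-variable layout behind an accessor so callers stop depending on
the struct definition itself.

    /* Illustrative sketch only: `Toy`, `ToyIVARS`, and `Toy_IVARS` are
     * made-up names, not the Clownfish-generated API. */
    #include <stdio.h>

    typedef struct ToyIVARS {
        int prepared;
    } ToyIVARS;

    /* The public struct exposes only an opaque slot, not the members. */
    typedef struct Toy {
        void *ivars;
    } Toy;

    static ToyIVARS*
    Toy_IVARS(Toy *self) {
        return (ToyIVARS*)self->ivars;
    }

    static void
    Toy_Prepare_Commit(Toy *self) {
        /* Fetch the ivars pointer once, then work through it, mirroring
         * the `ivars->...` rewrites in the hunks above. */
        ToyIVARS *const ivars = Toy_IVARS(self);
        if (!ivars->prepared) { ivars->prepared = 1; }
    }

    int
    main(void) {
        ToyIVARS ivars = { 0 };
        Toy      toy   = { &ivars };
        Toy_Prepare_Commit(&toy);
        printf("prepared: %d\n", Toy_IVARS(&toy)->prepared);
        return 0;
    }
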
http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/BitVecDelDocs.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/BitVecDelDocs.c b/core/Lucy/Index/BitVecDelDocs.c
index 690e6d5..e0dac0e 100644
--- a/core/Lucy/Index/BitVecDelDocs.c
+++ b/core/Lucy/Index/BitVecDelDocs.c
@@ -30,30 +30,30 @@ BitVecDelDocs_new(Folder *folder, const CharBuf *filename) {
 BitVecDelDocs*
 BitVecDelDocs_init(BitVecDelDocs *self, Folder *folder,
                    const CharBuf *filename) {
-    int32_t len;
-
     BitVec_init((BitVector*)self, 0);
-    self->filename = CB_Clone(filename);
-    self->instream = Folder_Open_In(folder, filename);
-    if (!self->instream) {
+    BitVecDelDocsIVARS *const ivars = BitVecDelDocs_IVARS(self);
+    ivars->filename = CB_Clone(filename);
+    ivars->instream = Folder_Open_In(folder, filename);
+    if (!ivars->instream) {
         Err *error = (Err*)INCREF(Err_get_error());
         DECREF(self);
         RETHROW(error);
     }
-    len            = (int32_t)InStream_Length(self->instream);
-    self->bits     = (uint8_t*)InStream_Buf(self->instream, len);
-    self->cap      = (uint32_t)(len * 8);
+    int32_t len    = (int32_t)InStream_Length(ivars->instream);
+    ivars->bits    = (uint8_t*)InStream_Buf(ivars->instream, len);
+    ivars->cap     = (uint32_t)(len * 8);
     return self;
 }
 
 void
 BitVecDelDocs_destroy(BitVecDelDocs *self) {
-    DECREF(self->filename);
-    if (self->instream) {
-        InStream_Close(self->instream);
-        DECREF(self->instream);
+    BitVecDelDocsIVARS *const ivars = BitVecDelDocs_IVARS(self);
+    DECREF(ivars->filename);
+    if (ivars->instream) {
+        InStream_Close(ivars->instream);
+        DECREF(ivars->instream);
     }
-    self->bits = NULL;
+    ivars->bits = NULL;
     SUPER_DESTROY(self, BITVECDELDOCS);
 }
 

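BitVecDelDocs maps a deletions file straight into a bit vector: the
InStream's buffer becomes the bit array and cap is the file length times
eight.  The sketch below only illustrates that arithmetic; the bit order
within a byte is an assumption made for the example, not a statement about
Lucy's on-disk format.

    /* Toy bit lookup over a byte buffer; bit order here is assumed. */
    #include <stdint.h>
    #include <stdio.h>

    static int
    bit_is_set(const uint8_t *bits, uint32_t cap, uint32_t i) {
        if (i >= cap) { return 0; }            /* out of range: not deleted */
        return (bits[i >> 3] >> (i & 7)) & 1;  /* low bit first, by assumption */
    }

    int
    main(void) {
        uint8_t  deletions[] = { 0x05, 0x00 };       /* pretend 2-byte file */
        uint32_t cap         = sizeof(deletions) * 8; /* len * 8, as in the diff */
        printf("doc 0 deleted? %d\n", bit_is_set(deletions, cap, 0));
        printf("doc 1 deleted? %d\n", bit_is_set(deletions, cap, 1));
        printf("doc 2 deleted? %d\n", bit_is_set(deletions, cap, 2));
        return 0;
    }
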
http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/DataReader.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/DataReader.c b/core/Lucy/Index/DataReader.c
index f8df36f..3404dcf 100644
--- a/core/Lucy/Index/DataReader.c
+++ b/core/Lucy/Index/DataReader.c
@@ -26,11 +26,12 @@
 DataReader*
 DataReader_init(DataReader *self, Schema *schema, Folder *folder,
                 Snapshot *snapshot, VArray *segments, int32_t seg_tick) {
-    self->schema   = (Schema*)INCREF(schema);
-    self->folder   = (Folder*)INCREF(folder);
-    self->snapshot = (Snapshot*)INCREF(snapshot);
-    self->segments = (VArray*)INCREF(segments);
-    self->seg_tick = seg_tick;
+    DataReaderIVARS *const ivars = DataReader_IVARS(self);
+    ivars->schema   = (Schema*)INCREF(schema);
+    ivars->folder   = (Folder*)INCREF(folder);
+    ivars->snapshot = (Snapshot*)INCREF(snapshot);
+    ivars->segments = (VArray*)INCREF(segments);
+    ivars->seg_tick = seg_tick;
     if (seg_tick != -1) {
         if (!segments) {
             THROW(ERR, "No segments array provided, but seg_tick is %i32",
@@ -41,11 +42,11 @@ DataReader_init(DataReader *self, Schema *schema, Folder *folder,
             if (!segment) {
                 THROW(ERR, "No segment at seg_tick %i32", seg_tick);
             }
-            self->segment = (Segment*)INCREF(segment);
+            ivars->segment = (Segment*)INCREF(segment);
         }
     }
     else {
-        self->segment = NULL;
+        ivars->segment = NULL;
     }
 
     ABSTRACT_CLASS_CHECK(self, DATAREADER);
@@ -54,42 +55,43 @@ DataReader_init(DataReader *self, Schema *schema, Folder *folder,
 
 void
 DataReader_destroy(DataReader *self) {
-    DECREF(self->schema);
-    DECREF(self->folder);
-    DECREF(self->snapshot);
-    DECREF(self->segments);
-    DECREF(self->segment);
+    DataReaderIVARS *const ivars = DataReader_IVARS(self);
+    DECREF(ivars->schema);
+    DECREF(ivars->folder);
+    DECREF(ivars->snapshot);
+    DECREF(ivars->segments);
+    DECREF(ivars->segment);
     SUPER_DESTROY(self, DATAREADER);
 }
 
 Schema*
 DataReader_get_schema(DataReader *self) {
-    return self->schema;
+    return DataReader_IVARS(self)->schema;
 }
 
 Folder*
 DataReader_get_folder(DataReader *self) {
-    return self->folder;
+    return DataReader_IVARS(self)->folder;
 }
 
 Snapshot*
 DataReader_get_snapshot(DataReader *self) {
-    return self->snapshot;
+    return DataReader_IVARS(self)->snapshot;
 }
 
 VArray*
 DataReader_get_segments(DataReader *self) {
-    return self->segments;
+    return DataReader_IVARS(self)->segments;
 }
 
 int32_t
 DataReader_get_seg_tick(DataReader *self) {
-    return self->seg_tick;
+    return DataReader_IVARS(self)->seg_tick;
 }
 
 Segment*
 DataReader_get_segment(DataReader *self) {
-    return self->segment;
+    return DataReader_IVARS(self)->segment;
 }
 
 

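Beyond the mechanical IVARS change, the DataReader hunks show the ownership
convention these classes follow: init takes a new reference on everything
it stores, destroy releases those references, and the getters hand back
borrowed pointers without touching the refcount.  A compressed stand-in
for that convention follows; Blob, Holder, and these INCREF/DECREF macros
are toys, not the Clownfish versions.

    /* Toy refcounting sketch; all names and macros are illustrative. */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Blob { int refcount; } Blob;

    #define INCREF(b) ((b)->refcount++, (b))
    #define DECREF(b) do { if ((b) && --(b)->refcount == 0) free(b); } while (0)

    typedef struct HolderIVARS { Blob *blob; } HolderIVARS;
    typedef struct Holder      { HolderIVARS ivars; } Holder;

    static HolderIVARS* Holder_IVARS(Holder *self) { return &self->ivars; }

    /* init: take a new reference on everything that gets stored. */
    static void
    Holder_init(Holder *self, Blob *blob) {
        Holder_IVARS(self)->blob = INCREF(blob);
    }

    /* getter: hand back a borrowed pointer, no refcount change. */
    static Blob*
    Holder_get_blob(Holder *self) {
        return Holder_IVARS(self)->blob;
    }

    /* destroy: release what init acquired. */
    static void
    Holder_destroy(Holder *self) {
        DECREF(Holder_IVARS(self)->blob);
    }

    int
    main(void) {
        Blob *blob = calloc(1, sizeof(Blob));
        blob->refcount = 1;
        Holder holder;
        Holder_init(&holder, blob);
        printf("refcount after init: %d\n", Holder_get_blob(&holder)->refcount);
        Holder_destroy(&holder);
        DECREF(blob);   /* drop the caller's own reference */
        return 0;
    }
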
http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/DataWriter.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/DataWriter.c b/core/Lucy/Index/DataWriter.c
index bddc287..3177ed6 100644
--- a/core/Lucy/Index/DataWriter.c
+++ b/core/Lucy/Index/DataWriter.c
@@ -29,48 +29,50 @@
 DataWriter*
 DataWriter_init(DataWriter *self, Schema *schema, Snapshot *snapshot,
                 Segment *segment, PolyReader *polyreader) {
-    self->snapshot   = (Snapshot*)INCREF(snapshot);
-    self->segment    = (Segment*)INCREF(segment);
-    self->polyreader = (PolyReader*)INCREF(polyreader);
-    self->schema     = (Schema*)INCREF(schema);
-    self->folder     = (Folder*)INCREF(PolyReader_Get_Folder(polyreader));
+    DataWriterIVARS *const ivars = DataWriter_IVARS(self);
+    ivars->snapshot   = (Snapshot*)INCREF(snapshot);
+    ivars->segment    = (Segment*)INCREF(segment);
+    ivars->polyreader = (PolyReader*)INCREF(polyreader);
+    ivars->schema     = (Schema*)INCREF(schema);
+    ivars->folder     = (Folder*)INCREF(PolyReader_Get_Folder(polyreader));
     ABSTRACT_CLASS_CHECK(self, DATAWRITER);
     return self;
 }
 
 void
 DataWriter_destroy(DataWriter *self) {
-    DECREF(self->snapshot);
-    DECREF(self->segment);
-    DECREF(self->polyreader);
-    DECREF(self->schema);
-    DECREF(self->folder);
+    DataWriterIVARS *const ivars = DataWriter_IVARS(self);
+    DECREF(ivars->snapshot);
+    DECREF(ivars->segment);
+    DECREF(ivars->polyreader);
+    DECREF(ivars->schema);
+    DECREF(ivars->folder);
     SUPER_DESTROY(self, DATAWRITER);
 }
 
 Snapshot*
 DataWriter_get_snapshot(DataWriter *self) {
-    return self->snapshot;
+    return DataWriter_IVARS(self)->snapshot;
 }
 
 Segment*
 DataWriter_get_segment(DataWriter *self) {
-    return self->segment;
+    return DataWriter_IVARS(self)->segment;
 }
 
 PolyReader*
 DataWriter_get_polyreader(DataWriter *self) {
-    return self->polyreader;
+    return DataWriter_IVARS(self)->polyreader;
 }
 
 Schema*
 DataWriter_get_schema(DataWriter *self) {
-    return self->schema;
+    return DataWriter_IVARS(self)->schema;
 }
 
 Folder*
 DataWriter_get_folder(DataWriter *self) {
-    return self->folder;
+    return DataWriter_IVARS(self)->folder;
 }
 
 void

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/DeletionsReader.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/DeletionsReader.c b/core/Lucy/Index/DeletionsReader.c
index 391bf63..ef45ebd 100644
--- a/core/Lucy/Index/DeletionsReader.c
+++ b/core/Lucy/Index/DeletionsReader.c
@@ -58,54 +58,58 @@ PolyDeletionsReader*
 PolyDelReader_init(PolyDeletionsReader *self, VArray *readers,
                    I32Array *offsets) {
     DelReader_init((DeletionsReader*)self, NULL, NULL, NULL, NULL, -1);
-    self->del_count = 0;
+    PolyDeletionsReaderIVARS *const ivars = PolyDelReader_IVARS(self);
+    ivars->del_count = 0;
     for (uint32_t i = 0, max = VA_Get_Size(readers); i < max; i++) {
         DeletionsReader *reader = (DeletionsReader*)CERTIFY(
                                       VA_Fetch(readers, i), DELETIONSREADER);
-        self->del_count += DelReader_Del_Count(reader);
+        ivars->del_count += DelReader_Del_Count(reader);
     }
-    self->readers = (VArray*)INCREF(readers);
-    self->offsets = (I32Array*)INCREF(offsets);
+    ivars->readers = (VArray*)INCREF(readers);
+    ivars->offsets = (I32Array*)INCREF(offsets);
     return self;
 }
 
 void
 PolyDelReader_close(PolyDeletionsReader *self) {
-    if (self->readers) {
-        for (uint32_t i = 0, max = VA_Get_Size(self->readers); i < max; i++) {
+    PolyDeletionsReaderIVARS *const ivars = PolyDelReader_IVARS(self);
+    if (ivars->readers) {
+        for (uint32_t i = 0, max = VA_Get_Size(ivars->readers); i < max; i++) {
             DeletionsReader *reader
-                = (DeletionsReader*)VA_Fetch(self->readers, i);
+                = (DeletionsReader*)VA_Fetch(ivars->readers, i);
             if (reader) { DelReader_Close(reader); }
         }
-        VA_Clear(self->readers);
+        VA_Clear(ivars->readers);
     }
 }
 
 void
 PolyDelReader_destroy(PolyDeletionsReader *self) {
-    DECREF(self->readers);
-    DECREF(self->offsets);
+    PolyDeletionsReaderIVARS *const ivars = PolyDelReader_IVARS(self);
+    DECREF(ivars->readers);
+    DECREF(ivars->offsets);
     SUPER_DESTROY(self, POLYDELETIONSREADER);
 }
 
 int32_t
 PolyDelReader_del_count(PolyDeletionsReader *self) {
-    return self->del_count;
+    return PolyDelReader_IVARS(self)->del_count;
 }
 
 Matcher*
 PolyDelReader_iterator(PolyDeletionsReader *self) {
+    PolyDeletionsReaderIVARS *const ivars = PolyDelReader_IVARS(self);
     SeriesMatcher *deletions = NULL;
-    if (self->del_count) {
-        uint32_t num_readers = VA_Get_Size(self->readers);
+    if (ivars->del_count) {
+        uint32_t num_readers = VA_Get_Size(ivars->readers);
         VArray *matchers = VA_new(num_readers);
         for (uint32_t i = 0; i < num_readers; i++) {
             DeletionsReader *reader
-                = (DeletionsReader*)VA_Fetch(self->readers, i);
+                = (DeletionsReader*)VA_Fetch(ivars->readers, i);
             Matcher *matcher = DelReader_Iterator(reader);
             if (matcher) { VA_Store(matchers, i, (Obj*)matcher); }
         }
-        deletions = SeriesMatcher_new(matchers, self->offsets);
+        deletions = SeriesMatcher_new(matchers, ivars->offsets);
         DECREF(matchers);
     }
     return (Matcher*)deletions;
@@ -126,28 +130,32 @@ DefDelReader_init(DefaultDeletionsReader *self, Schema *schema,
                   int32_t seg_tick) {
     DelReader_init((DeletionsReader*)self, schema, folder, snapshot, segments,
                    seg_tick);
+    DefaultDeletionsReaderIVARS *const ivars = DefDelReader_IVARS(self);
     DefDelReader_Read_Deletions(self);
-    if (!self->deldocs) {
-        self->del_count = 0;
-        self->deldocs   = BitVec_new(0);
+    if (!ivars->deldocs) {
+        ivars->del_count = 0;
+        ivars->deldocs   = BitVec_new(0);
     }
     return self;
 }
 
 void
 DefDelReader_close(DefaultDeletionsReader *self) {
-    DECREF(self->deldocs);
-    self->deldocs = NULL;
+    DefaultDeletionsReaderIVARS *const ivars = DefDelReader_IVARS(self);
+    DECREF(ivars->deldocs);
+    ivars->deldocs = NULL;
 }
 
 void
 DefDelReader_destroy(DefaultDeletionsReader *self) {
-    DECREF(self->deldocs);
+    DefaultDeletionsReaderIVARS *const ivars = DefDelReader_IVARS(self);
+    DECREF(ivars->deldocs);
     SUPER_DESTROY(self, DEFAULTDELETIONSREADER);
 }
 
 BitVector*
 DefDelReader_read_deletions(DefaultDeletionsReader *self) {
+    DefaultDeletionsReaderIVARS *const ivars = DefDelReader_IVARS(self);
     VArray  *segments    = DefDelReader_Get_Segments(self);
     Segment *segment     = DefDelReader_Get_Segment(self);
     CharBuf *my_seg_name = Seg_Get_Name(segment);
@@ -179,27 +187,28 @@ DefDelReader_read_deletions(DefaultDeletionsReader *self) {
         }
     }
 
-    DECREF(self->deldocs);
+    DECREF(ivars->deldocs);
     if (del_file) {
-        self->deldocs = (BitVector*)BitVecDelDocs_new(self->folder, del_file);
-        self->del_count = del_count;
+        ivars->deldocs = (BitVector*)BitVecDelDocs_new(ivars->folder, del_file);
+        ivars->del_count = del_count;
     }
     else {
-        self->deldocs = NULL;
-        self->del_count = 0;
+        ivars->deldocs = NULL;
+        ivars->del_count = 0;
     }
 
-    return self->deldocs;
+    return ivars->deldocs;
 }
 
 Matcher*
 DefDelReader_iterator(DefaultDeletionsReader *self) {
-    return (Matcher*)BitVecMatcher_new(self->deldocs);
+    DefaultDeletionsReaderIVARS *const ivars = DefDelReader_IVARS(self);
+    return (Matcher*)BitVecMatcher_new(ivars->deldocs);
 }
 
 int32_t
 DefDelReader_del_count(DefaultDeletionsReader *self) {
-    return self->del_count;
+    return DefDelReader_IVARS(self)->del_count;
 }
 
 

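The Poly* readers in this file and below all lean on an offsets array that
records where each segment begins in the composite doc-id space; a global
doc id is resolved to a segment tick and then the segment's offset is
subtracted to get a segment-local id.  The toy below shows only that
arithmetic; the linear scan and the names are for illustration, and Lucy's
actual PolyReader_sub_tick may be implemented differently.

    /* Toy mapping from a global doc id to (segment tick, local doc id). */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t
    sub_tick(const int32_t *offsets, uint32_t num_segs, int32_t doc_id) {
        uint32_t tick = 0;
        /* Pick the last segment whose starting offset is below doc_id. */
        for (uint32_t i = 1; i < num_segs; i++) {
            if (offsets[i] < doc_id) { tick = i; }
        }
        return tick;
    }

    int
    main(void) {
        /* Three segments whose docs start at global ids 1, 101, and 251. */
        const int32_t offsets[] = { 0, 100, 250 };
        int32_t  doc_id  = 137;
        uint32_t tick    = sub_tick(offsets, 3, doc_id);
        int32_t  seg_doc = doc_id - offsets[tick];
        printf("global %d -> segment %u, local %d\n",
               (int)doc_id, (unsigned)tick, (int)seg_doc);
        return 0;
    }
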
http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/DeletionsWriter.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/DeletionsWriter.c b/core/Lucy/Index/DeletionsWriter.c
index 9dc34d2..759c6de 100644
--- a/core/Lucy/Index/DeletionsWriter.c
+++ b/core/Lucy/Index/DeletionsWriter.c
@@ -82,17 +82,18 @@ DefDelWriter_init(DefaultDeletionsWriter *self, Schema *schema,
                   PolyReader *polyreader) {
 
     DataWriter_init((DataWriter*)self, schema, snapshot, segment, polyreader);
-    self->seg_readers           = PolyReader_Seg_Readers(polyreader);
-    uint32_t num_seg_readers    = VA_Get_Size(self->seg_readers);
-    self->seg_starts            = PolyReader_Offsets(polyreader);
-    self->bit_vecs              = VA_new(num_seg_readers);
-    self->updated               = (bool*)CALLOCATE(num_seg_readers, sizeof(bool));
-    self->searcher              = IxSearcher_new((Obj*)polyreader);
-    self->name_to_tick          = Hash_new(num_seg_readers);
+    DefaultDeletionsWriterIVARS *const ivars = DefDelWriter_IVARS(self);
+    ivars->seg_readers          = PolyReader_Seg_Readers(polyreader);
+    uint32_t num_seg_readers    = VA_Get_Size(ivars->seg_readers);
+    ivars->seg_starts           = PolyReader_Offsets(polyreader);
+    ivars->bit_vecs             = VA_new(num_seg_readers);
+    ivars->updated              = (bool*)CALLOCATE(num_seg_readers, sizeof(bool));
+    ivars->searcher             = IxSearcher_new((Obj*)polyreader);
+    ivars->name_to_tick         = Hash_new(num_seg_readers);
 
     // Materialize a BitVector of deletions for each segment.
     for (uint32_t i = 0; i < num_seg_readers; i++) {
-        SegReader *seg_reader = (SegReader*)VA_Fetch(self->seg_readers, i);
+        SegReader *seg_reader = (SegReader*)VA_Fetch(ivars->seg_readers, i);
         BitVector *bit_vec    = BitVec_new(SegReader_Doc_Max(seg_reader));
         DeletionsReader *del_reader
             = (DeletionsReader*)SegReader_Fetch(
@@ -108,8 +109,8 @@ DefDelWriter_init(DefaultDeletionsWriter *self, Schema *schema,
             }
             DECREF(seg_dels);
         }
-        VA_Store(self->bit_vecs, i, (Obj*)bit_vec);
-        Hash_Store(self->name_to_tick,
+        VA_Store(ivars->bit_vecs, i, (Obj*)bit_vec);
+        Hash_Store(ivars->name_to_tick,
                    (Obj*)SegReader_Get_Seg_Name(seg_reader),
                    (Obj*)Int32_new(i));
     }
@@ -119,30 +120,33 @@ DefDelWriter_init(DefaultDeletionsWriter *self, Schema *schema,
 
 void
 DefDelWriter_destroy(DefaultDeletionsWriter *self) {
-    DECREF(self->seg_readers);
-    DECREF(self->seg_starts);
-    DECREF(self->bit_vecs);
-    DECREF(self->searcher);
-    DECREF(self->name_to_tick);
-    FREEMEM(self->updated);
+    DefaultDeletionsWriterIVARS *const ivars = DefDelWriter_IVARS(self);
+    DECREF(ivars->seg_readers);
+    DECREF(ivars->seg_starts);
+    DECREF(ivars->bit_vecs);
+    DECREF(ivars->searcher);
+    DECREF(ivars->name_to_tick);
+    FREEMEM(ivars->updated);
     SUPER_DESTROY(self, DEFAULTDELETIONSWRITER);
 }
 
 static CharBuf*
 S_del_filename(DefaultDeletionsWriter *self, SegReader *target_reader) {
+    DefaultDeletionsWriterIVARS *const ivars = DefDelWriter_IVARS(self);
     Segment *target_seg = SegReader_Get_Segment(target_reader);
-    return CB_newf("%o/deletions-%o.bv", Seg_Get_Name(self->segment),
+    return CB_newf("%o/deletions-%o.bv", Seg_Get_Name(ivars->segment),
                    Seg_Get_Name(target_seg));
 }
 
 void
 DefDelWriter_finish(DefaultDeletionsWriter *self) {
-    Folder *const folder = self->folder;
+    DefaultDeletionsWriterIVARS *const ivars = DefDelWriter_IVARS(self);
+    Folder *const folder = ivars->folder;
 
-    for (uint32_t i = 0, max = VA_Get_Size(self->seg_readers); i < max; i++) {
-        SegReader *seg_reader = (SegReader*)VA_Fetch(self->seg_readers, i);
-        if (self->updated[i]) {
-            BitVector *deldocs   = (BitVector*)VA_Fetch(self->bit_vecs, i);
+    for (uint32_t i = 0, max = VA_Get_Size(ivars->seg_readers); i < max; i++) {
+        SegReader *seg_reader = (SegReader*)VA_Fetch(ivars->seg_readers, i);
+        if (ivars->updated[i]) {
+            BitVector *deldocs   = (BitVector*)VA_Fetch(ivars->bit_vecs, i);
             int32_t    doc_max   = SegReader_Doc_Max(seg_reader);
             double     used      = (doc_max + 1) / 8.0;
             uint32_t   byte_size = (uint32_t)ceil(used);
@@ -164,19 +168,20 @@ DefDelWriter_finish(DefaultDeletionsWriter *self) {
         }
     }
 
-    Seg_Store_Metadata_Str(self->segment, "deletions", 9,
+    Seg_Store_Metadata_Str(ivars->segment, "deletions", 9,
                            (Obj*)DefDelWriter_Metadata(self));
 }
 
 Hash*
 DefDelWriter_metadata(DefaultDeletionsWriter *self) {
+    DefaultDeletionsWriterIVARS *const ivars = DefDelWriter_IVARS(self);
     Hash    *const metadata = DataWriter_metadata((DataWriter*)self);
     Hash    *const files    = Hash_new(0);
 
-    for (uint32_t i = 0, max = VA_Get_Size(self->seg_readers); i < max; i++) {
-        SegReader *seg_reader = (SegReader*)VA_Fetch(self->seg_readers, i);
-        if (self->updated[i]) {
-            BitVector *deldocs   = (BitVector*)VA_Fetch(self->bit_vecs, i);
+    for (uint32_t i = 0, max = VA_Get_Size(ivars->seg_readers); i < max; i++) {
+        SegReader *seg_reader = (SegReader*)VA_Fetch(ivars->seg_readers, i);
+        if (ivars->updated[i]) {
+            BitVector *deldocs   = (BitVector*)VA_Fetch(ivars->bit_vecs, i);
             Segment   *segment   = SegReader_Get_Segment(seg_reader);
             Hash      *mini_meta = Hash_new(2);
             Hash_Store_Str(mini_meta, "count", 5,
@@ -200,22 +205,23 @@ DefDelWriter_format(DefaultDeletionsWriter *self) {
 Matcher*
 DefDelWriter_seg_deletions(DefaultDeletionsWriter *self,
                            SegReader *seg_reader) {
+    DefaultDeletionsWriterIVARS *const ivars = DefDelWriter_IVARS(self);
     Matcher *deletions    = NULL;
     Segment *segment      = SegReader_Get_Segment(seg_reader);
     CharBuf *seg_name     = Seg_Get_Name(segment);
-    Integer32 *tick_obj   = (Integer32*)Hash_Fetch(self->name_to_tick,
+    Integer32 *tick_obj   = (Integer32*)Hash_Fetch(ivars->name_to_tick,
                                                    (Obj*)seg_name);
     int32_t tick          = tick_obj ? Int32_Get_Value(tick_obj) : 0;
     SegReader *candidate  = tick_obj
-                            ? (SegReader*)VA_Fetch(self->seg_readers, tick)
+                            ? (SegReader*)VA_Fetch(ivars->seg_readers, tick)
                             : NULL;
 
     if (tick_obj) {
         DeletionsReader *del_reader
             = (DeletionsReader*)SegReader_Obtain(
                   candidate, VTable_Get_Name(DELETIONSREADER));
-        if (self->updated[tick] || DelReader_Del_Count(del_reader)) {
-            BitVector *deldocs = (BitVector*)VA_Fetch(self->bit_vecs, tick);
+        if (ivars->updated[tick] || DelReader_Del_Count(del_reader)) {
+            BitVector *deldocs = (BitVector*)VA_Fetch(ivars->bit_vecs, tick);
             deletions = (Matcher*)BitVecMatcher_new(deldocs);
         }
     }
@@ -229,10 +235,11 @@ DefDelWriter_seg_deletions(DefaultDeletionsWriter *self,
 int32_t
 DefDelWriter_seg_del_count(DefaultDeletionsWriter *self,
                            const CharBuf *seg_name) {
+    DefaultDeletionsWriterIVARS *const ivars = DefDelWriter_IVARS(self);
     Integer32 *tick
-        = (Integer32*)Hash_Fetch(self->name_to_tick, (Obj*)seg_name);
+        = (Integer32*)Hash_Fetch(ivars->name_to_tick, (Obj*)seg_name);
     BitVector *deldocs = tick
-                         ? (BitVector*)VA_Fetch(self->bit_vecs, Int32_Get_Value(tick))
+                         ? (BitVector*)VA_Fetch(ivars->bit_vecs, Int32_Get_Value(tick))
                          : NULL;
     return deldocs ? BitVec_Count(deldocs) : 0;
 }
@@ -240,12 +247,13 @@ DefDelWriter_seg_del_count(DefaultDeletionsWriter *self,
 void
 DefDelWriter_delete_by_term(DefaultDeletionsWriter *self,
                             const CharBuf *field, Obj *term) {
-    for (uint32_t i = 0, max = VA_Get_Size(self->seg_readers); i < max; i++) {
-        SegReader *seg_reader = (SegReader*)VA_Fetch(self->seg_readers, i);
+    DefaultDeletionsWriterIVARS *const ivars = DefDelWriter_IVARS(self);
+    for (uint32_t i = 0, max = VA_Get_Size(ivars->seg_readers); i < max; i++) {
+        SegReader *seg_reader = (SegReader*)VA_Fetch(ivars->seg_readers, i);
         PostingListReader *plist_reader
             = (PostingListReader*)SegReader_Fetch(
                   seg_reader, VTable_Get_Name(POSTINGLISTREADER));
-        BitVector *bit_vec = (BitVector*)VA_Fetch(self->bit_vecs, i);
+        BitVector *bit_vec = (BitVector*)VA_Fetch(ivars->bit_vecs, i);
         PostingList *plist = plist_reader
                              ? PListReader_Posting_List(plist_reader, field, term)
                              : NULL;
@@ -258,7 +266,7 @@ DefDelWriter_delete_by_term(DefaultDeletionsWriter *self,
                 num_zapped += !BitVec_Get(bit_vec, doc_id);
                 BitVec_Set(bit_vec, doc_id);
             }
-            if (num_zapped) { self->updated[i] = true; }
+            if (num_zapped) { ivars->updated[i] = true; }
             DECREF(plist);
         }
     }
@@ -266,12 +274,13 @@ DefDelWriter_delete_by_term(DefaultDeletionsWriter *self,
 
 void
 DefDelWriter_delete_by_query(DefaultDeletionsWriter *self, Query *query) {
-    Compiler *compiler = Query_Make_Compiler(query, (Searcher*)self->searcher,
+    DefaultDeletionsWriterIVARS *const ivars = DefDelWriter_IVARS(self);
+    Compiler *compiler = Query_Make_Compiler(query, (Searcher*)ivars->searcher,
                                              Query_Get_Boost(query), false);
 
-    for (uint32_t i = 0, max = VA_Get_Size(self->seg_readers); i < max; i++) {
-        SegReader *seg_reader = (SegReader*)VA_Fetch(self->seg_readers, i);
-        BitVector *bit_vec = (BitVector*)VA_Fetch(self->bit_vecs, i);
+    for (uint32_t i = 0, max = VA_Get_Size(ivars->seg_readers); i < max; i++) {
+        SegReader *seg_reader = (SegReader*)VA_Fetch(ivars->seg_readers, i);
+        BitVector *bit_vec = (BitVector*)VA_Fetch(ivars->bit_vecs, i);
         Matcher *matcher = Compiler_Make_Matcher(compiler, seg_reader, false);
 
         if (matcher) {
@@ -283,7 +292,7 @@ DefDelWriter_delete_by_query(DefaultDeletionsWriter *self, Query *query) {
                 num_zapped += !BitVec_Get(bit_vec, doc_id);
                 BitVec_Set(bit_vec, doc_id);
             }
-            if (num_zapped) { self->updated[i] = true; }
+            if (num_zapped) { ivars->updated[i] = true; }
 
             DECREF(matcher);
         }
@@ -294,21 +303,23 @@ DefDelWriter_delete_by_query(DefaultDeletionsWriter *self, Query *query) {
 
 void
 DefDelWriter_delete_by_doc_id(DefaultDeletionsWriter *self, int32_t doc_id) {
-    uint32_t   sub_tick   = PolyReader_sub_tick(self->seg_starts, doc_id);
-    BitVector *bit_vec    = (BitVector*)VA_Fetch(self->bit_vecs, sub_tick);
-    uint32_t   offset     = I32Arr_Get(self->seg_starts, sub_tick);
+    DefaultDeletionsWriterIVARS *const ivars = DefDelWriter_IVARS(self);
+    uint32_t   sub_tick   = PolyReader_sub_tick(ivars->seg_starts, doc_id);
+    BitVector *bit_vec    = (BitVector*)VA_Fetch(ivars->bit_vecs, sub_tick);
+    uint32_t   offset     = I32Arr_Get(ivars->seg_starts, sub_tick);
     int32_t    seg_doc_id = doc_id - offset;
 
     if (!BitVec_Get(bit_vec, seg_doc_id)) {
-        self->updated[sub_tick] = true;
+        ivars->updated[sub_tick] = true;
         BitVec_Set(bit_vec, seg_doc_id);
     }
 }
 
 bool
 DefDelWriter_updated(DefaultDeletionsWriter *self) {
-    for (uint32_t i = 0, max = VA_Get_Size(self->seg_readers); i < max; i++) {
-        if (self->updated[i]) { return true; }
+    DefaultDeletionsWriterIVARS *const ivars = DefDelWriter_IVARS(self);
+    for (uint32_t i = 0, max = VA_Get_Size(ivars->seg_readers); i < max; i++) {
+        if (ivars->updated[i]) { return true; }
     }
     return false;
 }
@@ -327,12 +338,13 @@ DefDelWriter_add_segment(DefaultDeletionsWriter *self, SegReader *reader,
 void
 DefDelWriter_merge_segment(DefaultDeletionsWriter *self, SegReader *reader,
                            I32Array *doc_map) {
+    DefaultDeletionsWriterIVARS *const ivars = DefDelWriter_IVARS(self);
     UNUSED_VAR(doc_map);
     Segment *segment = SegReader_Get_Segment(reader);
     Hash *del_meta = (Hash*)Seg_Fetch_Metadata_Str(segment, "deletions", 9);
 
     if (del_meta) {
-        VArray *seg_readers = self->seg_readers;
+        VArray *seg_readers = ivars->seg_readers;
         Hash   *files = (Hash*)Hash_Fetch_Str(del_meta, "files", 5);
         if (files) {
             CharBuf *seg;
@@ -360,7 +372,7 @@ DefDelWriter_merge_segment(DefaultDeletionsWriter *self, SegReader *reader,
                             = (DeletionsReader*)SegReader_Obtain(
                                   candidate, VTable_Get_Name(DELETIONSREADER));
                         if (count == DelReader_Del_Count(del_reader)) {
-                            self->updated[i] = true;
+                            ivars->updated[i] = true;
                         }
                         break;
                     }

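Delete_By_Term and Delete_By_Query above share one idiom worth calling out:
num_zapped counts only bits that flip from live to deleted, so a segment is
marked updated only when a match produced a fresh deletion.  A standalone
sketch of that bookkeeping follows; the helpers and fixed sizes are
illustrative, not Lucy's BitVector API.

    /* Toy version of the "count only newly flipped bits" idiom. */
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_DOCS 16

    static int
    get_bit(const uint8_t *bits, int doc) {
        return (bits[doc >> 3] >> (doc & 7)) & 1;
    }

    static void
    set_bit(uint8_t *bits, int doc) {
        bits[doc >> 3] |= (uint8_t)(1 << (doc & 7));
    }

    int
    main(void) {
        uint8_t deletions[NUM_DOCS / 8] = { 0 };
        int     hits[]     = { 3, 7, 3 };   /* doc 3 matches twice */
        int     num_zapped = 0;
        int     updated    = 0;

        for (int i = 0; i < 3; i++) {
            int doc = hits[i];
            num_zapped += !get_bit(deletions, doc);  /* fresh deletions only */
            set_bit(deletions, doc);
        }
        if (num_zapped) { updated = 1; }             /* segment needs rewriting */

        printf("zapped %d docs, updated flag = %d\n", num_zapped, updated);
        return 0;
    }
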
http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/DocReader.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/DocReader.c b/core/Lucy/Index/DocReader.c
index 934969b..24c8052 100644
--- a/core/Lucy/Index/DocReader.c
+++ b/core/Lucy/Index/DocReader.c
@@ -51,37 +51,41 @@ PolyDocReader_new(VArray *readers, I32Array *offsets) {
 PolyDocReader*
 PolyDocReader_init(PolyDocReader *self, VArray *readers, I32Array *offsets) {
     DocReader_init((DocReader*)self, NULL, NULL, NULL, NULL, -1);
+    PolyDocReaderIVARS *const ivars = PolyDocReader_IVARS(self);
     for (uint32_t i = 0, max = VA_Get_Size(readers); i < max; i++) {
         CERTIFY(VA_Fetch(readers, i), DOCREADER);
     }
-    self->readers = (VArray*)INCREF(readers);
-    self->offsets = (I32Array*)INCREF(offsets);
+    ivars->readers = (VArray*)INCREF(readers);
+    ivars->offsets = (I32Array*)INCREF(offsets);
     return self;
 }
 
 void
 PolyDocReader_close(PolyDocReader *self) {
-    if (self->readers) {
-        for (uint32_t i = 0, max = VA_Get_Size(self->readers); i < max; i++) {
-            DocReader *reader = (DocReader*)VA_Fetch(self->readers, i);
+    PolyDocReaderIVARS *const ivars = PolyDocReader_IVARS(self);
+    if (ivars->readers) {
+        for (uint32_t i = 0, max = VA_Get_Size(ivars->readers); i < max; i++) {
+            DocReader *reader = (DocReader*)VA_Fetch(ivars->readers, i);
             if (reader) { DocReader_Close(reader); }
         }
-        VA_Clear(self->readers);
+        VA_Clear(ivars->readers);
     }
 }
 
 void
 PolyDocReader_destroy(PolyDocReader *self) {
-    DECREF(self->readers);
-    DECREF(self->offsets);
+    PolyDocReaderIVARS *const ivars = PolyDocReader_IVARS(self);
+    DECREF(ivars->readers);
+    DECREF(ivars->offsets);
     SUPER_DESTROY(self, POLYDOCREADER);
 }
 
 HitDoc*
 PolyDocReader_fetch_doc(PolyDocReader *self, int32_t doc_id) {
-    uint32_t seg_tick = PolyReader_sub_tick(self->offsets, doc_id);
-    int32_t  offset   = I32Arr_Get(self->offsets, seg_tick);
-    DocReader *doc_reader = (DocReader*)VA_Fetch(self->readers, seg_tick);
+    PolyDocReaderIVARS *const ivars = PolyDocReader_IVARS(self);
+    uint32_t seg_tick = PolyReader_sub_tick(ivars->offsets, doc_id);
+    int32_t  offset   = I32Arr_Get(ivars->offsets, seg_tick);
+    DocReader *doc_reader = (DocReader*)VA_Fetch(ivars->readers, seg_tick);
     HitDoc *hit_doc = NULL;
     if (!doc_reader) {
         THROW(ERR, "Invalid doc_id: %i32", doc_id);
@@ -104,22 +108,24 @@ DefDocReader_new(Schema *schema, Folder *folder, Snapshot *snapshot,
 
 void
 DefDocReader_close(DefaultDocReader *self) {
-    if (self->dat_in != NULL) {
-        InStream_Close(self->dat_in);
-        DECREF(self->dat_in);
-        self->dat_in = NULL;
+    DefaultDocReaderIVARS *const ivars = DefDocReader_IVARS(self);
+    if (ivars->dat_in != NULL) {
+        InStream_Close(ivars->dat_in);
+        DECREF(ivars->dat_in);
+        ivars->dat_in = NULL;
     }
-    if (self->ix_in != NULL) {
-        InStream_Close(self->ix_in);
-        DECREF(self->ix_in);
-        self->ix_in = NULL;
+    if (ivars->ix_in != NULL) {
+        InStream_Close(ivars->ix_in);
+        DECREF(ivars->ix_in);
+        ivars->ix_in = NULL;
     }
 }
 
 void
 DefDocReader_destroy(DefaultDocReader *self) {
-    DECREF(self->ix_in);
-    DECREF(self->dat_in);
+    DefaultDocReaderIVARS *const ivars = DefDocReader_IVARS(self);
+    DECREF(ivars->ix_in);
+    DECREF(ivars->dat_in);
     SUPER_DESTROY(self, DEFAULTDOCREADER);
 }
 
@@ -130,6 +136,7 @@ DefDocReader_init(DefaultDocReader *self, Schema *schema, Folder *folder,
     Segment *segment;
     DocReader_init((DocReader*)self, schema, folder, snapshot, segments,
                    seg_tick);
+    DefaultDocReaderIVARS *const ivars = DefDocReader_IVARS(self);
     segment = DefDocReader_Get_Segment(self);
     metadata = (Hash*)Seg_Fetch_Metadata_Str(segment, "documents", 9);
 
@@ -154,16 +161,16 @@ DefDocReader_init(DefaultDocReader *self, Schema *schema, Folder *folder,
 
         // Get streams.
         if (Folder_Exists(folder, ix_file)) {
-            self->ix_in = Folder_Open_In(folder, ix_file);
-            if (!self->ix_in) {
+            ivars->ix_in = Folder_Open_In(folder, ix_file);
+            if (!ivars->ix_in) {
                 Err *error = (Err*)INCREF(Err_get_error());
                 DECREF(ix_file);
                 DECREF(dat_file);
                 DECREF(self);
                 RETHROW(error);
             }
-            self->dat_in = Folder_Open_In(folder, dat_file);
-            if (!self->dat_in) {
+            ivars->dat_in = Folder_Open_In(folder, dat_file);
+            if (!ivars->dat_in) {
                 Err *error = (Err*)INCREF(Err_get_error());
                 DECREF(ix_file);
                 DECREF(dat_file);
@@ -181,16 +188,18 @@ DefDocReader_init(DefaultDocReader *self, Schema *schema, Folder *folder,
 void
 DefDocReader_read_record(DefaultDocReader *self, ByteBuf *buffer,
                          int32_t doc_id) {
+    DefaultDocReaderIVARS *const ivars = DefDocReader_IVARS(self);
+
     // Find start and length of variable length record.
-    InStream_Seek(self->ix_in, (int64_t)doc_id * 8);
-    int64_t start = InStream_Read_I64(self->ix_in);
-    int64_t end   = InStream_Read_I64(self->ix_in);
+    InStream_Seek(ivars->ix_in, (int64_t)doc_id * 8);
+    int64_t start = InStream_Read_I64(ivars->ix_in);
+    int64_t end   = InStream_Read_I64(ivars->ix_in);
     size_t size  = (size_t)(end - start);
 
     // Read in the record.
     char *buf = BB_Grow(buffer, size);
-    InStream_Seek(self->dat_in, start);
-    InStream_Read_Bytes(self->dat_in, buf, size);
+    InStream_Seek(ivars->dat_in, start);
+    InStream_Read_Bytes(ivars->dat_in, buf, size);
     BB_Set_Size(buffer, size);
 }
 

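DefDocReader_Read_Record above shows how documents.ix and documents.dat fit
together: a fixed-width table of 8-byte offsets makes each variable-length
record addressable, with the record length derived as end minus start (and,
per the DocWriter code further down, a placeholder slot for non-doc #0 and
one final offset written at finish time).  The toy below keeps only that
start/end arithmetic, with in-memory arrays standing in for the InStreams.

    /* Toy of a fixed-width offset table over variable-length records. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(void) {
        /* Three concatenated "documents" plus an offsets table with one
         * extra entry so the last record's length can be derived. */
        const char    dat[]  = "alphabetagamma";
        const int64_t ix[]   = { 0, 5, 9, 14 };   /* starts, then final end */
        int32_t       doc_id = 1;                 /* fetch "beta" */

        int64_t start = ix[doc_id];
        int64_t end   = ix[doc_id + 1];
        size_t  size  = (size_t)(end - start);

        char buf[16];
        memcpy(buf, dat + start, size);
        buf[size] = '\0';
        printf("doc %d = \"%s\" (%zu bytes)\n", (int)doc_id, buf, size);
        return 0;
    }
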
http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/DocVector.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/DocVector.c b/core/Lucy/Index/DocVector.c
index 609c43b..131aaf4 100644
--- a/core/Lucy/Index/DocVector.c
+++ b/core/Lucy/Index/DocVector.c
@@ -41,62 +41,70 @@ DocVec_new() {
 
 DocVector*
 DocVec_init(DocVector *self) {
-    self->field_bufs    = Hash_new(0);
-    self->field_vectors = Hash_new(0);
+    DocVectorIVARS *const ivars = DocVec_IVARS(self);
+    ivars->field_bufs    = Hash_new(0);
+    ivars->field_vectors = Hash_new(0);
     return self;
 }
 
 void
 DocVec_serialize(DocVector *self, OutStream *outstream) {
-    Freezer_serialize_hash(self->field_bufs, outstream);
-    Freezer_serialize_hash(self->field_vectors, outstream);
+    DocVectorIVARS *const ivars = DocVec_IVARS(self);
+    Freezer_serialize_hash(ivars->field_bufs, outstream);
+    Freezer_serialize_hash(ivars->field_vectors, outstream);
 }
 
 DocVector*
 DocVec_deserialize(DocVector *self, InStream *instream) {
-    self->field_bufs    = Freezer_read_hash(instream);
-    self->field_vectors = Freezer_read_hash(instream);
+    DocVectorIVARS *const ivars = DocVec_IVARS(self);
+    ivars->field_bufs    = Freezer_read_hash(instream);
+    ivars->field_vectors = Freezer_read_hash(instream);
     return self;
 }
 
 void
 DocVec_destroy(DocVector *self) {
-    DECREF(self->field_bufs);
-    DECREF(self->field_vectors);
+    DocVectorIVARS *const ivars = DocVec_IVARS(self);
+    DECREF(ivars->field_bufs);
+    DECREF(ivars->field_vectors);
     SUPER_DESTROY(self, DOCVECTOR);
 }
 
 void
 DocVec_add_field_buf(DocVector *self, const CharBuf *field,
                      ByteBuf *field_buf) {
-    Hash_Store(self->field_bufs, (Obj*)field, INCREF(field_buf));
+    DocVectorIVARS *const ivars = DocVec_IVARS(self);
+    Hash_Store(ivars->field_bufs, (Obj*)field, INCREF(field_buf));
 }
 
 ByteBuf*
 DocVec_field_buf(DocVector *self, const CharBuf *field) {
-    return (ByteBuf*)Hash_Fetch(self->field_bufs, (Obj*)field);
+    DocVectorIVARS *const ivars = DocVec_IVARS(self);
+    return (ByteBuf*)Hash_Fetch(ivars->field_bufs, (Obj*)field);
 }
 
 VArray*
 DocVec_field_names(DocVector *self) {
-    return Hash_Keys(self->field_bufs);
+    DocVectorIVARS *const ivars = DocVec_IVARS(self);
+    return Hash_Keys(ivars->field_bufs);
 }
 
 TermVector*
 DocVec_term_vector(DocVector *self, const CharBuf *field,
                    const CharBuf *term_text) {
-    Hash *field_vector = (Hash*)Hash_Fetch(self->field_vectors, (Obj*)field);
+    DocVectorIVARS *const ivars = DocVec_IVARS(self);
+    Hash *field_vector = (Hash*)Hash_Fetch(ivars->field_vectors, (Obj*)field);
 
     // If no cache hit, try to fill cache.
     if (field_vector == NULL) {
         ByteBuf *field_buf
-            = (ByteBuf*)Hash_Fetch(self->field_bufs, (Obj*)field);
+            = (ByteBuf*)Hash_Fetch(ivars->field_bufs, (Obj*)field);
 
         // Bail if there's no content or the field isn't highlightable.
         if (field_buf == NULL) { return NULL; }
 
         field_vector = S_extract_tv_cache(field_buf);
-        Hash_Store(self->field_vectors, (Obj*)field, (Obj*)field_vector);
+        Hash_Store(ivars->field_vectors, (Obj*)field, (Obj*)field_vector);
     }
 
     // Get a buf for the term text or bail.

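DocVec_Term_Vector above is a fill-on-miss cache: look in field_vectors,
build the parsed form from field_bufs only when it is absent, store it, and
reuse it on later calls.  The sketch below reduces that pattern to a tiny
memo table; the fixed-size arrays and names are stand-ins, not Lucy's Hash
API.

    /* Toy fill-on-miss cache; names and sizes are illustrative only. */
    #include <stdio.h>

    #define NUM_FIELDS 4

    static int cache[NUM_FIELDS];
    static int cached[NUM_FIELDS];
    static int builds;

    static int
    expensive_build(int field) {
        builds++;                 /* pretend this parses a serialized blob */
        return field * 100;
    }

    static int
    fetch(int field) {
        if (!cached[field]) {     /* no cache hit: try to fill cache */
            cache[field]  = expensive_build(field);
            cached[field] = 1;
        }
        return cache[field];
    }

    int
    main(void) {
        printf("%d %d %d\n", fetch(2), fetch(2), fetch(3));
        printf("expensive builds: %d\n", builds);   /* 2, not 3 */
        return 0;
    }
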
http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/DocWriter.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/DocWriter.c b/core/Lucy/Index/DocWriter.c
index 06355f0..0299de2 100644
--- a/core/Lucy/Index/DocWriter.c
+++ b/core/Lucy/Index/DocWriter.c
@@ -52,39 +52,42 @@ DocWriter_init(DocWriter *self, Schema *schema, Snapshot *snapshot,
 
 void
 DocWriter_destroy(DocWriter *self) {
-    DECREF(self->dat_out);
-    DECREF(self->ix_out);
+    DocWriterIVARS *const ivars = DocWriter_IVARS(self);
+    DECREF(ivars->dat_out);
+    DECREF(ivars->ix_out);
     SUPER_DESTROY(self, DOCWRITER);
 }
 
 static OutStream*
 S_lazy_init(DocWriter *self) {
-    if (!self->dat_out) {
-        Folder  *folder   = self->folder;
-        CharBuf *seg_name = Seg_Get_Name(self->segment);
+    DocWriterIVARS *const ivars = DocWriter_IVARS(self);
+    if (!ivars->dat_out) {
+        Folder  *folder   = ivars->folder;
+        CharBuf *seg_name = Seg_Get_Name(ivars->segment);
 
         // Get streams.
         CharBuf *ix_file = CB_newf("%o/documents.ix", seg_name);
-        self->ix_out = Folder_Open_Out(folder, ix_file);
+        ivars->ix_out = Folder_Open_Out(folder, ix_file);
         DECREF(ix_file);
-        if (!self->ix_out) { RETHROW(INCREF(Err_get_error())); }
+        if (!ivars->ix_out) { RETHROW(INCREF(Err_get_error())); }
         CharBuf *dat_file = CB_newf("%o/documents.dat", seg_name);
-        self->dat_out = Folder_Open_Out(folder, dat_file);
+        ivars->dat_out = Folder_Open_Out(folder, dat_file);
         DECREF(dat_file);
-        if (!self->dat_out) { RETHROW(INCREF(Err_get_error())); }
+        if (!ivars->dat_out) { RETHROW(INCREF(Err_get_error())); }
 
         // Go past non-doc #0.
-        OutStream_Write_I64(self->ix_out, 0);
+        OutStream_Write_I64(ivars->ix_out, 0);
     }
 
-    return self->dat_out;
+    return ivars->dat_out;
 }
 
 void
 DocWriter_add_inverted_doc(DocWriter *self, Inverter *inverter,
                            int32_t doc_id) {
+    DocWriterIVARS *const ivars = DocWriter_IVARS(self);
     OutStream *dat_out    = S_lazy_init(self);
-    OutStream *ix_out     = self->ix_out;
+    OutStream *ix_out     = ivars->ix_out;
     uint32_t   num_stored = 0;
     int64_t    start      = OutStream_Tell(dat_out);
     int64_t    expected   = OutStream_Tell(ix_out) / 8;
@@ -158,6 +161,7 @@ DocWriter_add_inverted_doc(DocWriter *self, Inverter *inverter,
 void
 DocWriter_add_segment(DocWriter *self, SegReader *reader,
                       I32Array *doc_map) {
+    DocWriterIVARS *const ivars = DocWriter_IVARS(self);
     int32_t doc_max = SegReader_Doc_Max(reader);
 
     if (doc_max == 0) {
@@ -166,7 +170,7 @@ DocWriter_add_segment(DocWriter *self, SegReader *reader,
     }
     else {
         OutStream *const dat_out = S_lazy_init(self);
-        OutStream *const ix_out  = self->ix_out;
+        OutStream *const ix_out  = ivars->ix_out;
         ByteBuf   *const buffer  = BB_new(0);
         DefaultDocReader *const doc_reader
             = (DefaultDocReader*)CERTIFY(
@@ -194,16 +198,17 @@ DocWriter_add_segment(DocWriter *self, SegReader *reader,
 
 void
 DocWriter_finish(DocWriter *self) {
-    if (self->dat_out) {
+    DocWriterIVARS *const ivars = DocWriter_IVARS(self);
+    if (ivars->dat_out) {
         // Write one final file pointer, so that we can derive the length of
         // the last record.
-        int64_t end = OutStream_Tell(self->dat_out);
-        OutStream_Write_I64(self->ix_out, end);
+        int64_t end = OutStream_Tell(ivars->dat_out);
+        OutStream_Write_I64(ivars->ix_out, end);
 
         // Close down output streams.
-        OutStream_Close(self->dat_out);
-        OutStream_Close(self->ix_out);
-        Seg_Store_Metadata_Str(self->segment, "documents", 9,
+        OutStream_Close(ivars->dat_out);
+        OutStream_Close(ivars->ix_out);
+        Seg_Store_Metadata_Str(ivars->segment, "documents", 9,
                                (Obj*)DocWriter_Metadata(self));
     }
 }

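S_lazy_init above defers creating documents.ix and documents.dat until the
first document actually arrives, and DocWriter_Finish only closes and
records metadata if that init ever happened, so an empty segment writes no
document files.  The sketch below mimics that shape with plain stdio; the
tmpfile handle and function names are stand-ins for Folder/OutStream, not
Lucy's API.

    /* Toy of lazy output-stream creation plus a guarded finish. */
    #include <stdio.h>

    static FILE *dat_out = NULL;

    static FILE*
    lazy_init(void) {
        if (!dat_out) {
            dat_out = tmpfile();              /* real code opens documents.dat */
            if (!dat_out) { perror("tmpfile"); }
        }
        return dat_out;
    }

    static void
    add_record(const char *rec) {
        FILE *out = lazy_init();
        if (out) { fprintf(out, "%s\n", rec); }
    }

    static void
    finish(void) {
        if (dat_out) {                        /* only finalize if work was done */
            printf("closing output at offset %ld\n", ftell(dat_out));
            fclose(dat_out);
            dat_out = NULL;
        }
    }

    int
    main(void) {
        finish();                             /* no records yet: nothing to close */
        add_record("doc one");
        add_record("doc two");
        finish();
        return 0;
    }
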
http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/FilePurger.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/FilePurger.c b/core/Lucy/Index/FilePurger.c
index 70e2ef8..35283ee 100644
--- a/core/Lucy/Index/FilePurger.c
+++ b/core/Lucy/Index/FilePurger.c
@@ -50,37 +50,40 @@ FilePurger_new(Folder *folder, Snapshot *snapshot, IndexManager *manager) {
 FilePurger*
 FilePurger_init(FilePurger *self, Folder *folder, Snapshot *snapshot,
                 IndexManager *manager) {
-    self->folder       = (Folder*)INCREF(folder);
-    self->snapshot     = (Snapshot*)INCREF(snapshot);
-    self->manager      = manager
+    FilePurgerIVARS *const ivars = FilePurger_IVARS(self);
+    ivars->folder       = (Folder*)INCREF(folder);
+    ivars->snapshot     = (Snapshot*)INCREF(snapshot);
+    ivars->manager      = manager
                          ? (IndexManager*)INCREF(manager)
                          : IxManager_new(NULL, NULL);
-    IxManager_Set_Folder(self->manager, folder);
+    IxManager_Set_Folder(ivars->manager, folder);
 
     // Don't allow the locks directory to be zapped.
-    self->disallowed = Hash_new(0);
-    Hash_Store_Str(self->disallowed, "locks", 5, (Obj*)CFISH_TRUE);
+    ivars->disallowed = Hash_new(0);
+    Hash_Store_Str(ivars->disallowed, "locks", 5, (Obj*)CFISH_TRUE);
 
     return self;
 }
 
 void
 FilePurger_destroy(FilePurger *self) {
-    DECREF(self->folder);
-    DECREF(self->snapshot);
-    DECREF(self->manager);
-    DECREF(self->disallowed);
+    FilePurgerIVARS *const ivars = FilePurger_IVARS(self);
+    DECREF(ivars->folder);
+    DECREF(ivars->snapshot);
+    DECREF(ivars->manager);
+    DECREF(ivars->disallowed);
     SUPER_DESTROY(self, FILEPURGER);
 }
 
 void
 FilePurger_purge(FilePurger *self) {
-    Lock *deletion_lock = IxManager_Make_Deletion_Lock(self->manager);
+    FilePurgerIVARS *const ivars = FilePurger_IVARS(self);
+    Lock *deletion_lock = IxManager_Make_Deletion_Lock(ivars->manager);
 
     // Obtain deletion lock, purge files, release deletion lock.
     Lock_Clear_Stale(deletion_lock);
     if (Lock_Obtain(deletion_lock)) {
-        Folder *folder   = self->folder;
+        Folder *folder   = ivars->folder;
         Hash   *failures = Hash_new(0);
         VArray *purgables;
         VArray *snapshots;
@@ -93,7 +96,7 @@ FilePurger_purge(FilePurger *self) {
         VA_Sort(purgables, NULL, NULL);
         for (uint32_t i = VA_Get_Size(purgables); i--;) {
             CharBuf *entry = (CharBuf*)VA_Fetch(purgables, i);
-            if (Hash_Fetch(self->disallowed, (Obj*)entry)) { continue; }
+            if (Hash_Fetch(ivars->disallowed, (Obj*)entry)) { continue; }
             if (!Folder_Delete(folder, entry)) {
                 if (Folder_Exists(folder, entry)) {
                     Hash_Store(failures, (Obj*)entry, (Obj*)CFISH_TRUE);
@@ -138,7 +141,8 @@ FilePurger_purge(FilePurger *self) {
 
 static void
 S_zap_dead_merge(FilePurger *self, Hash *candidates) {
-    IndexManager *manager    = self->manager;
+    FilePurgerIVARS *const ivars = FilePurger_IVARS(self);
+    IndexManager *manager    = ivars->manager;
     Lock         *merge_lock = IxManager_Make_Merge_Lock(manager);
 
     Lock_Clear_Stale(merge_lock);
@@ -150,9 +154,9 @@ S_zap_dead_merge(FilePurger *self, Hash *candidates) {
 
         if (cutoff) {
             CharBuf *cutoff_seg = Seg_num_to_name(Obj_To_I64(cutoff));
-            if (Folder_Exists(self->folder, cutoff_seg)) {
+            if (Folder_Exists(ivars->folder, cutoff_seg)) {
                 ZombieCharBuf *merge_json = ZCB_WRAP_STR("merge.json", 10);
-                DirHandle *dh = Folder_Open_Dir(self->folder, cutoff_seg);
+                DirHandle *dh = Folder_Open_Dir(ivars->folder, cutoff_seg);
                 CharBuf *entry = dh ? DH_Get_Entry(dh) : NULL;
                 CharBuf *filepath = CB_new(32);
 
@@ -183,7 +187,8 @@ S_zap_dead_merge(FilePurger *self, Hash *candidates) {
 static void
 S_discover_unused(FilePurger *self, VArray **purgables_ptr,
                   VArray **snapshots_ptr) {
-    Folder      *folder       = self->folder;
+    FilePurgerIVARS *const ivars = FilePurger_IVARS(self);
+    Folder      *folder       = ivars->folder;
     DirHandle   *dh           = Folder_Open_Dir(folder, NULL);
     if (!dh) { RETHROW(INCREF(Err_get_error())); }
     VArray      *spared       = VA_new(1);
@@ -191,13 +196,13 @@ S_discover_unused(FilePurger *self, VArray **purgables_ptr,
     CharBuf     *snapfile     = NULL;
 
     // Start off with the list of files in the current snapshot.
-    if (self->snapshot) {
-        VArray *entries    = Snapshot_List(self->snapshot);
+    if (ivars->snapshot) {
+        VArray *entries    = Snapshot_List(ivars->snapshot);
         VArray *referenced = S_find_all_referenced(folder, entries);
         VA_Push_VArray(spared, referenced);
         DECREF(entries);
         DECREF(referenced);
-        snapfile = Snapshot_Get_Path(self->snapshot);
+        snapfile = Snapshot_Get_Path(ivars->snapshot);
         if (snapfile) { VA_Push(spared, INCREF(snapfile)); }
     }
 
@@ -211,7 +216,7 @@ S_discover_unused(FilePurger *self, VArray **purgables_ptr,
             Snapshot *snapshot
                 = Snapshot_Read_File(Snapshot_new(), folder, entry);
             Lock *lock
-                = IxManager_Make_Snapshot_Read_Lock(self->manager, entry);
+                = IxManager_Make_Snapshot_Read_Lock(ivars->manager, entry);
             VArray *snap_list  = Snapshot_List(snapshot);
             VArray *referenced = S_find_all_referenced(folder, snap_list);
 

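FilePurger_Purge above is a lock-guarded sweep: clear a stale deletion
lock, obtain it, delete candidates while skipping the disallowed "locks"
entry, then release.  The toy below keeps only that control flow; the
in-memory flag and the hard-coded file list are illustrative and have
nothing to do with Lucy's Lock or Folder APIs.

    /* Toy of a guarded purge that skips a disallowed entry. */
    #include <stdio.h>
    #include <string.h>

    static int deletion_lock_held = 0;

    static int
    obtain_lock(void) {
        if (deletion_lock_held) { return 0; }
        deletion_lock_held = 1;
        return 1;
    }

    static void
    release_lock(void) {
        deletion_lock_held = 0;
    }

    int
    main(void) {
        const char *purgables[] = { "seg_1", "locks", "snapshot_1.json" };
        const char *disallowed  = "locks";      /* never zap the locks dir */

        if (obtain_lock()) {
            for (int i = 0; i < 3; i++) {
                if (strcmp(purgables[i], disallowed) == 0) { continue; }
                printf("purging %s\n", purgables[i]);
            }
            release_lock();
        }
        else {
            printf("another process holds the deletion lock; skipping purge\n");
        }
        return 0;
    }
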
http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/HighlightReader.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/HighlightReader.c b/core/Lucy/Index/HighlightReader.c
index 8f4b1d6..655a5e4 100644
--- a/core/Lucy/Index/HighlightReader.c
+++ b/core/Lucy/Index/HighlightReader.c
@@ -58,42 +58,46 @@ PolyHighlightReader*
 PolyHLReader_init(PolyHighlightReader *self, VArray *readers,
                   I32Array *offsets) {
     HLReader_init((HighlightReader*)self, NULL, NULL, NULL, NULL, -1);
+    PolyHighlightReaderIVARS *const ivars = PolyHLReader_IVARS(self);
     for (uint32_t i = 0, max = VA_Get_Size(readers); i < max; i++) {
         CERTIFY(VA_Fetch(readers, i), HIGHLIGHTREADER);
     }
-    self->readers = (VArray*)INCREF(readers);
-    self->offsets = (I32Array*)INCREF(offsets);
+    ivars->readers = (VArray*)INCREF(readers);
+    ivars->offsets = (I32Array*)INCREF(offsets);
     return self;
 }
 
 void
 PolyHLReader_close(PolyHighlightReader *self) {
-    if (self->readers) {
-        for (uint32_t i = 0, max = VA_Get_Size(self->readers); i < max; i++) {
+    PolyHighlightReaderIVARS *const ivars = PolyHLReader_IVARS(self);
+    if (ivars->readers) {
+        for (uint32_t i = 0, max = VA_Get_Size(ivars->readers); i < max; i++) {
             HighlightReader *sub_reader
-                = (HighlightReader*)VA_Fetch(self->readers, i);
+                = (HighlightReader*)VA_Fetch(ivars->readers, i);
             if (sub_reader) { HLReader_Close(sub_reader); }
         }
-        DECREF(self->readers);
-        DECREF(self->offsets);
-        self->readers = NULL;
-        self->offsets = NULL;
+        DECREF(ivars->readers);
+        DECREF(ivars->offsets);
+        ivars->readers = NULL;
+        ivars->offsets = NULL;
     }
 }
 
 void
 PolyHLReader_destroy(PolyHighlightReader *self) {
-    DECREF(self->readers);
-    DECREF(self->offsets);
+    PolyHighlightReaderIVARS *const ivars = PolyHLReader_IVARS(self);
+    DECREF(ivars->readers);
+    DECREF(ivars->offsets);
     SUPER_DESTROY(self, POLYHIGHLIGHTREADER);
 }
 
 DocVector*
 PolyHLReader_fetch_doc_vec(PolyHighlightReader *self, int32_t doc_id) {
-    uint32_t seg_tick = PolyReader_sub_tick(self->offsets, doc_id);
-    int32_t  offset   = I32Arr_Get(self->offsets, seg_tick);
+    PolyHighlightReaderIVARS *const ivars = PolyHLReader_IVARS(self);
+    uint32_t seg_tick = PolyReader_sub_tick(ivars->offsets, doc_id);
+    int32_t  offset   = I32Arr_Get(ivars->offsets, seg_tick);
     HighlightReader *sub_reader
-        = (HighlightReader*)VA_Fetch(self->readers, seg_tick);
+        = (HighlightReader*)VA_Fetch(ivars->readers, seg_tick);
     if (!sub_reader) { THROW(ERR, "Invalid doc_id: %i32", doc_id); }
     return HLReader_Fetch_Doc_Vec(sub_reader, doc_id - offset);
 }
@@ -113,6 +117,7 @@ DefHLReader_init(DefaultHighlightReader *self, Schema *schema,
                  int32_t seg_tick) {
     HLReader_init((HighlightReader*)self, schema, folder, snapshot,
                   segments, seg_tick);
+    DefaultHighlightReaderIVARS *const ivars = DefHLReader_IVARS(self);
     Segment *segment    = DefHLReader_Get_Segment(self);
     Hash *metadata      = (Hash*)Seg_Fetch_Metadata_Str(segment, "highlight", 9);
     if (!metadata) {
@@ -136,16 +141,16 @@ DefHLReader_init(DefaultHighlightReader *self, Schema *schema,
     CharBuf *ix_file  = CB_newf("%o/highlight.ix", seg_name);
     CharBuf *dat_file = CB_newf("%o/highlight.dat", seg_name);
     if (Folder_Exists(folder, ix_file)) {
-        self->ix_in = Folder_Open_In(folder, ix_file);
-        if (!self->ix_in) {
+        ivars->ix_in = Folder_Open_In(folder, ix_file);
+        if (!ivars->ix_in) {
             Err *error = (Err*)INCREF(Err_get_error());
             DECREF(ix_file);
             DECREF(dat_file);
             DECREF(self);
             RETHROW(error);
         }
-        self->dat_in = Folder_Open_In(folder, dat_file);
-        if (!self->dat_in) {
+        ivars->dat_in = Folder_Open_In(folder, dat_file);
+        if (!ivars->dat_in) {
             Err *error = (Err*)INCREF(Err_get_error());
             DECREF(ix_file);
             DECREF(dat_file);
@@ -161,29 +166,32 @@ DefHLReader_init(DefaultHighlightReader *self, Schema *schema,
 
 void
 DefHLReader_close(DefaultHighlightReader *self) {
-    if (self->dat_in != NULL) {
-        InStream_Close(self->dat_in);
-        DECREF(self->dat_in);
-        self->dat_in = NULL;
+    DefaultHighlightReaderIVARS *const ivars = DefHLReader_IVARS(self);
+    if (ivars->dat_in != NULL) {
+        InStream_Close(ivars->dat_in);
+        DECREF(ivars->dat_in);
+        ivars->dat_in = NULL;
     }
-    if (self->ix_in != NULL) {
-        InStream_Close(self->ix_in);
-        DECREF(self->ix_in);
-        self->ix_in = NULL;
+    if (ivars->ix_in != NULL) {
+        InStream_Close(ivars->ix_in);
+        DECREF(ivars->ix_in);
+        ivars->ix_in = NULL;
     }
 }
 
 void
 DefHLReader_destroy(DefaultHighlightReader *self) {
-    DECREF(self->ix_in);
-    DECREF(self->dat_in);
+    DefaultHighlightReaderIVARS *const ivars = DefHLReader_IVARS(self);
+    DECREF(ivars->ix_in);
+    DECREF(ivars->dat_in);
     SUPER_DESTROY(self, DEFAULTHIGHLIGHTREADER);
 }
 
 DocVector*
 DefHLReader_fetch_doc_vec(DefaultHighlightReader *self, int32_t doc_id) {
-    InStream *const ix_in  = self->ix_in;
-    InStream *const dat_in = self->dat_in;
+    DefaultHighlightReaderIVARS *const ivars = DefHLReader_IVARS(self);
+    InStream *const ix_in  = ivars->ix_in;
+    InStream *const dat_in = ivars->dat_in;
     DocVector *doc_vec = DocVec_new();
 
     InStream_Seek(ix_in, doc_id * 8);
@@ -205,8 +213,9 @@ DefHLReader_fetch_doc_vec(DefaultHighlightReader *self, int32_t doc_id) {
 void
 DefHLReader_read_record(DefaultHighlightReader *self, int32_t doc_id,
                         ByteBuf *target) {
-    InStream *dat_in = self->dat_in;
-    InStream *ix_in  = self->ix_in;
+    DefaultHighlightReaderIVARS *const ivars = DefHLReader_IVARS(self);
+    InStream *dat_in = ivars->dat_in;
+    InStream *ix_in  = ivars->ix_in;
 
     InStream_Seek(ix_in, doc_id * 8);
 

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/HighlightWriter.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/HighlightWriter.c b/core/Lucy/Index/HighlightWriter.c
index 175e165..b398552 100644
--- a/core/Lucy/Index/HighlightWriter.c
+++ b/core/Lucy/Index/HighlightWriter.c
@@ -60,41 +60,44 @@ HLWriter_init(HighlightWriter *self, Schema *schema, Snapshot *snapshot,
 
 void
 HLWriter_destroy(HighlightWriter *self) {
-    DECREF(self->dat_out);
-    DECREF(self->ix_out);
+    HighlightWriterIVARS *const ivars = HLWriter_IVARS(self);
+    DECREF(ivars->dat_out);
+    DECREF(ivars->ix_out);
     SUPER_DESTROY(self, HIGHLIGHTWRITER);
 }
 
 static OutStream*
 S_lazy_init(HighlightWriter *self) {
-    if (!self->dat_out) {
-        Segment  *segment  = self->segment;
-        Folder   *folder   = self->folder;
+    HighlightWriterIVARS *const ivars = HLWriter_IVARS(self);
+    if (!ivars->dat_out) {
+        Segment  *segment  = ivars->segment;
+        Folder   *folder   = ivars->folder;
         CharBuf  *seg_name = Seg_Get_Name(segment);
 
         // Open outstreams.
         CharBuf *ix_file = CB_newf("%o/highlight.ix", seg_name);
-        self->ix_out = Folder_Open_Out(folder, ix_file);
+        ivars->ix_out = Folder_Open_Out(folder, ix_file);
         DECREF(ix_file);
-        if (!self->ix_out) { RETHROW(INCREF(Err_get_error())); }
+        if (!ivars->ix_out) { RETHROW(INCREF(Err_get_error())); }
 
         CharBuf *dat_file = CB_newf("%o/highlight.dat", seg_name);
-        self->dat_out = Folder_Open_Out(folder, dat_file);
+        ivars->dat_out = Folder_Open_Out(folder, dat_file);
         DECREF(dat_file);
-        if (!self->dat_out) { RETHROW(INCREF(Err_get_error())); }
+        if (!ivars->dat_out) { RETHROW(INCREF(Err_get_error())); }
 
         // Go past invalid doc 0.
-        OutStream_Write_I64(self->ix_out, 0);
+        OutStream_Write_I64(ivars->ix_out, 0);
     }
 
-    return self->dat_out;
+    return ivars->dat_out;
 }
 
 void
 HLWriter_add_inverted_doc(HighlightWriter *self, Inverter *inverter,
                           int32_t doc_id) {
+    HighlightWriterIVARS *const ivars = HLWriter_IVARS(self);
     OutStream *dat_out = S_lazy_init(self);
-    OutStream *ix_out  = self->ix_out;
+    OutStream *ix_out  = ivars->ix_out;
     int64_t    filepos = OutStream_Tell(dat_out);
     uint32_t num_highlightable = 0;
     int32_t expected = (int32_t)(OutStream_Tell(ix_out) / 8);
@@ -207,6 +210,7 @@ HLWriter_tv_buf(HighlightWriter *self, Inversion *inversion) {
 void
 HLWriter_add_segment(HighlightWriter *self, SegReader *reader,
                      I32Array *doc_map) {
+    HighlightWriterIVARS *const ivars = HLWriter_IVARS(self);
     int32_t doc_max = SegReader_Doc_Max(reader);
 
     if (doc_max == 0) {
@@ -219,7 +223,7 @@ HLWriter_add_segment(HighlightWriter *self, SegReader *reader,
                   SegReader_Obtain(reader, VTable_Get_Name(HIGHLIGHTREADER)),
                   DEFAULTHIGHLIGHTREADER);
         OutStream *dat_out = S_lazy_init(self);
-        OutStream *ix_out  = self->ix_out;
+        OutStream *ix_out  = ivars->ix_out;
         int32_t    orig;
         ByteBuf   *bb = BB_new(0);
 
@@ -244,16 +248,17 @@ HLWriter_add_segment(HighlightWriter *self, SegReader *reader,
 
 void
 HLWriter_finish(HighlightWriter *self) {
-    if (self->dat_out) {
+    HighlightWriterIVARS *const ivars = HLWriter_IVARS(self);
+    if (ivars->dat_out) {
         // Write one final file pointer, so that we can derive the length of
         // the last record.
-        int64_t end = OutStream_Tell(self->dat_out);
-        OutStream_Write_I64(self->ix_out, end);
+        int64_t end = OutStream_Tell(ivars->dat_out);
+        OutStream_Write_I64(ivars->ix_out, end);
 
         // Close down the output streams.
-        OutStream_Close(self->dat_out);
-        OutStream_Close(self->ix_out);
-        Seg_Store_Metadata_Str(self->segment, "highlight", 9,
+        OutStream_Close(ivars->dat_out);
+        OutStream_Close(ivars->ix_out);
+        Seg_Store_Metadata_Str(ivars->segment, "highlight", 9,
                                (Obj*)HLWriter_Metadata(self));
     }
 }

http://git-wip-us.apache.org/repos/asf/lucy/blob/7c23ce21/core/Lucy/Index/IndexManager.c
----------------------------------------------------------------------
diff --git a/core/Lucy/Index/IndexManager.c b/core/Lucy/Index/IndexManager.c
index 61bb761..684e476 100644
--- a/core/Lucy/Index/IndexManager.c
+++ b/core/Lucy/Index/IndexManager.c
@@ -40,26 +40,28 @@ IxManager_new(const CharBuf *host, LockFactory *lock_factory) {
 IndexManager*
 IxManager_init(IndexManager *self, const CharBuf *host,
                LockFactory *lock_factory) {
-    self->host                = host
+    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
+    ivars->host                = host
                                 ? CB_Clone(host)
                                 : CB_new_from_trusted_utf8("", 0);
-    self->lock_factory        = (LockFactory*)INCREF(lock_factory);
-    self->folder              = NULL;
-    self->write_lock_timeout  = 1000;
-    self->write_lock_interval = 100;
-    self->merge_lock_timeout  = 0;
-    self->merge_lock_interval = 1000;
-    self->deletion_lock_timeout  = 1000;
-    self->deletion_lock_interval = 100;
+    ivars->lock_factory        = (LockFactory*)INCREF(lock_factory);
+    ivars->folder              = NULL;
+    ivars->write_lock_timeout  = 1000;
+    ivars->write_lock_interval = 100;
+    ivars->merge_lock_timeout  = 0;
+    ivars->merge_lock_interval = 1000;
+    ivars->deletion_lock_timeout  = 1000;
+    ivars->deletion_lock_interval = 100;
 
     return self;
 }
 
 void
 IxManager_destroy(IndexManager *self) {
-    DECREF(self->host);
-    DECREF(self->folder);
-    DECREF(self->lock_factory);
+    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
+    DECREF(ivars->host);
+    DECREF(ivars->folder);
+    DECREF(ivars->lock_factory);
     SUPER_DESTROY(self, INDEXMANAGER);
 }
 
@@ -81,7 +83,8 @@ IxManager_highest_seg_num(IndexManager *self, Snapshot *snapshot) {
 
 CharBuf*
 IxManager_make_snapshot_filename(IndexManager *self) {
-    Folder *folder = (Folder*)CERTIFY(self->folder, FOLDER);
+    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
+    Folder *folder = (Folder*)CERTIFY(ivars->folder, FOLDER);
     DirHandle *dh = Folder_Open_Dir(folder, NULL);
     uint64_t max_gen = 0;
 
@@ -216,49 +219,54 @@ IxManager_choose_sparse(IndexManager *self, I32Array *doc_counts) {
 
 static LockFactory*
 S_obtain_lock_factory(IndexManager *self) {
-    if (!self->lock_factory) {
-        if (!self->folder) {
+    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
+    if (!ivars->lock_factory) {
+        if (!ivars->folder) {
             THROW(ERR, "Can't create a LockFactory without a Folder");
         }
-        self->lock_factory = LockFact_new(self->folder, self->host);
+        ivars->lock_factory = LockFact_new(ivars->folder, ivars->host);
     }
-    return self->lock_factory;
+    return ivars->lock_factory;
 }
 
 Lock*
 IxManager_make_write_lock(IndexManager *self) {
+    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
     ZombieCharBuf *write_lock_name = ZCB_WRAP_STR("write", 5);
     LockFactory *lock_factory = S_obtain_lock_factory(self);
     return LockFact_Make_Lock(lock_factory, (CharBuf*)write_lock_name,
-                              self->write_lock_timeout,
-                              self->write_lock_interval);
+                              ivars->write_lock_timeout,
+                              ivars->write_lock_interval);
 }
 
 Lock*
 IxManager_make_deletion_lock(IndexManager *self) {
+    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
     ZombieCharBuf *lock_name = ZCB_WRAP_STR("deletion", 8);
     LockFactory *lock_factory = S_obtain_lock_factory(self);
     return LockFact_Make_Lock(lock_factory, (CharBuf*)lock_name,
-                              self->deletion_lock_timeout,
-                              self->deletion_lock_interval);
+                              ivars->deletion_lock_timeout,
+                              ivars->deletion_lock_interval);
 }
 
 Lock*
 IxManager_make_merge_lock(IndexManager *self) {
+    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
     ZombieCharBuf *merge_lock_name = ZCB_WRAP_STR("merge", 5);
     LockFactory *lock_factory = S_obtain_lock_factory(self);
     return LockFact_Make_Lock(lock_factory, (CharBuf*)merge_lock_name,
-                              self->merge_lock_timeout,
-                              self->merge_lock_interval);
+                              ivars->merge_lock_timeout,
+                              ivars->merge_lock_interval);
 }
 
 void
 IxManager_write_merge_data(IndexManager *self, int64_t cutoff) {
+    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
     ZombieCharBuf *merge_json = ZCB_WRAP_STR("merge.json", 10);
     Hash *data = Hash_new(1);
     bool success;
     Hash_Store_Str(data, "cutoff", 6, (Obj*)CB_newf("%i64", cutoff));
-    success = Json_spew_json((Obj*)data, self->folder, (CharBuf*)merge_json);
+    success = Json_spew_json((Obj*)data, ivars->folder, (CharBuf*)merge_json);
     DECREF(data);
     if (!success) {
         THROW(ERR, "Failed to write to %o", merge_json);
@@ -267,10 +275,11 @@ IxManager_write_merge_data(IndexManager *self, int64_t cutoff) {
 
 Hash*
 IxManager_read_merge_data(IndexManager *self) {
+    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
     ZombieCharBuf *merge_json = ZCB_WRAP_STR("merge.json", 10);
-    if (Folder_Exists(self->folder, (CharBuf*)merge_json)) {
+    if (Folder_Exists(ivars->folder, (CharBuf*)merge_json)) {
         Hash *stuff
-            = (Hash*)Json_slurp_json(self->folder, (CharBuf*)merge_json);
+            = (Hash*)Json_slurp_json(ivars->folder, (CharBuf*)merge_json);
         if (stuff) {
             CERTIFY(stuff, HASH);
             return stuff;
@@ -286,8 +295,9 @@ IxManager_read_merge_data(IndexManager *self) {
 
 bool
 IxManager_remove_merge_data(IndexManager *self) {
+    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
     ZombieCharBuf *merge_json = ZCB_WRAP_STR("merge.json", 10);
-    return Folder_Delete(self->folder, (CharBuf*)merge_json) != 0;
+    return Folder_Delete(ivars->folder, (CharBuf*)merge_json) != 0;
 }
 
 Lock*
@@ -310,78 +320,79 @@ IxManager_make_snapshot_read_lock(IndexManager *self,
 
 void
 IxManager_set_folder(IndexManager *self, Folder *folder) {
-    DECREF(self->folder);
-    self->folder = (Folder*)INCREF(folder);
+    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
+    DECREF(ivars->folder);
+    ivars->folder = (Folder*)INCREF(folder);
 }
 
 Folder*
 IxManager_get_folder(IndexManager *self) {
-    return self->folder;
+    return IxManager_IVARS(self)->folder;
 }
 
 CharBuf*
 IxManager_get_host(IndexManager *self) {
-    return self->host;
+    return IxManager_IVARS(self)->host;
 }
 
 uint32_t
 IxManager_get_write_lock_timeout(IndexManager *self) {
-    return self->write_lock_timeout;
+    return IxManager_IVARS(self)->write_lock_timeout;
 }
 
 uint32_t
 IxManager_get_write_lock_interval(IndexManager *self) {
-    return self->write_lock_interval;
+    return IxManager_IVARS(self)->write_lock_interval;
 }
 
 uint32_t
 IxManager_get_merge_lock_timeout(IndexManager *self) {
-    return self->merge_lock_timeout;
+    return IxManager_IVARS(self)->merge_lock_timeout;
 }
 
 uint32_t
 IxManager_get_merge_lock_interval(IndexManager *self) {
-    return self->merge_lock_interval;
+    return IxManager_IVARS(self)->merge_lock_interval;
 }
 
 uint32_t
 IxManager_get_deletion_lock_timeout(IndexManager *self) {
-    return self->deletion_lock_timeout;
+    return IxManager_IVARS(self)->deletion_lock_timeout;
 }
 
 uint32_t
 IxManager_get_deletion_lock_interval(IndexManager *self) {
-    return self->deletion_lock_interval;
+    return IxManager_IVARS(self)->deletion_lock_interval;
 }
 
 void
 IxManager_set_write_lock_timeout(IndexManager *self, uint32_t timeout) {
-    self->write_lock_timeout = timeout;
+    IxManager_IVARS(self)->write_lock_timeout = timeout;
 }
 
 void
 IxManager_set_write_lock_interval(IndexManager *self, uint32_t interval) {
-    self->write_lock_interval = interval;
+    IxManager_IVARS(self)->write_lock_interval = interval;
 }
 
 void
 IxManager_set_merge_lock_timeout(IndexManager *self, uint32_t timeout) {
-    self->merge_lock_timeout = timeout;
+    IxManager_IVARS(self)->merge_lock_timeout = timeout;
 }
 
 void
 IxManager_set_merge_lock_interval(IndexManager *self, uint32_t interval) {
-    self->merge_lock_interval = interval;
+    IxManager_IVARS(self)->merge_lock_interval = interval;
 }
 
 void
 IxManager_set_deletion_lock_timeout(IndexManager *self, uint32_t timeout) {
-    self->deletion_lock_timeout = timeout;
+    IxManager_IVARS(self)->deletion_lock_timeout = timeout;
 }
 
 void
 IxManager_set_deletion_lock_interval(IndexManager *self, uint32_t interval) {
-    self->deletion_lock_interval = interval;
+    IxManager_IVARS(self)->deletion_lock_interval = interval;
 }
 
 


[lucy-commits] [9/9] git commit: refs/heads/ivars-wip1 - Migrate Perl host code to IVARS.

Posted by ma...@apache.org.
Migrate Perl host code to IVARS.

Migrate host-specific code for Perl to use IVARS rather than accessing
struct members directly through `self`.
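
The mechanical change is the same throughout the diffs below: fetch the
class's ivars struct once via the generated *_IVARS() accessor and read or
write members through it instead of dereferencing `self` directly. What
follows is a minimal, self-contained C sketch of that pattern for
illustration only; `Foo`, `FooIVARS`, and the hand-written `Foo_IVARS()`
here are stand-ins, not the real Clownfish-generated class code.

#include <stdio.h>

/* Instance variables live in their own struct ... */
typedef struct FooIVARS {
    int         doc_id;
    const char *name;
} FooIVARS;

/* ... which the object carries (the generated layout is opaque in practice). */
typedef struct Foo {
    FooIVARS ivars;
} Foo;

/* Stand-in for the generated Foo_IVARS() accessor. */
static FooIVARS*
Foo_IVARS(Foo *self) {
    return &self->ivars;
}

static void
Foo_Set_Name(Foo *self, const char *name) {
    /* Post-migration style: grab the ivars struct once, then go through it. */
    FooIVARS *const ivars = Foo_IVARS(self);
    ivars->name = name;            /* was: self->name = name; */
}

int
main(void) {
    Foo foo = { { 42, NULL } };
    Foo_Set_Name(&foo, "content");
    printf("doc %d: %s\n", Foo_IVARS(&foo)->doc_id, Foo_IVARS(&foo)->name);
    return 0;
}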


Project: http://git-wip-us.apache.org/repos/asf/lucy/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucy/commit/6164cdde
Tree: http://git-wip-us.apache.org/repos/asf/lucy/tree/6164cdde
Diff: http://git-wip-us.apache.org/repos/asf/lucy/diff/6164cdde

Branch: refs/heads/ivars-wip1
Commit: 6164cddecf759f330d585ff600a6f4ffa63d908a
Parents: edf18e4
Author: Marvin Humphrey <ma...@rectangular.com>
Authored: Mon Jul 1 08:45:46 2013 -0700
Committer: Marvin Humphrey <ma...@rectangular.com>
Committed: Mon Jul 1 09:06:53 2013 -0700

----------------------------------------------------------------------
 perl/xs/Lucy/Analysis/RegexTokenizer.c | 27 +++++++------
 perl/xs/Lucy/Document/Doc.c            | 60 ++++++++++++++++-------------
 2 files changed, 50 insertions(+), 37 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucy/blob/6164cdde/perl/xs/Lucy/Analysis/RegexTokenizer.c
----------------------------------------------------------------------
diff --git a/perl/xs/Lucy/Analysis/RegexTokenizer.c b/perl/xs/Lucy/Analysis/RegexTokenizer.c
index d175b59..86738fc 100644
--- a/perl/xs/Lucy/Analysis/RegexTokenizer.c
+++ b/perl/xs/Lucy/Analysis/RegexTokenizer.c
@@ -42,6 +42,7 @@ lucy_RegexTokenizer*
 lucy_RegexTokenizer_init(lucy_RegexTokenizer *self,
                          const cfish_CharBuf *pattern) {
     lucy_Analyzer_init((lucy_Analyzer*)self);
+    lucy_RegexTokenizerIVARS *const ivars = lucy_RegexTokenizer_IVARS(self);
     #define DEFAULT_PATTERN "\\w+(?:['\\x{2019}]\\w+)*"
     if (pattern) {
         if (Cfish_CB_Find_Str(pattern, "\\p", 2) != -1
@@ -50,15 +51,15 @@ lucy_RegexTokenizer_init(lucy_RegexTokenizer *self,
             CFISH_DECREF(self);
             THROW(CFISH_ERR, "\\p and \\P constructs forbidden");
         }
-        self->pattern = Cfish_CB_Clone(pattern);
+        ivars->pattern = Cfish_CB_Clone(pattern);
     }
     else {
-        self->pattern = cfish_CB_new_from_trusted_utf8(
+        ivars->pattern = cfish_CB_new_from_trusted_utf8(
                             DEFAULT_PATTERN, sizeof(DEFAULT_PATTERN) - 1);
     }
 
     // Acquire a compiled regex engine for matching one token.
-    SV *token_re_sv = S_compile_token_re(self->pattern);
+    SV *token_re_sv = S_compile_token_re(ivars->pattern);
     S_set_token_re_but_not_pattern(self, SvRV(token_re_sv));
     SvREFCNT_dec(token_re_sv);
 
@@ -86,6 +87,7 @@ S_compile_token_re(const cfish_CharBuf *pattern) {
 
 static void
 S_set_token_re_but_not_pattern(lucy_RegexTokenizer *self, void *token_re) {
+    lucy_RegexTokenizerIVARS *const ivars = lucy_RegexTokenizer_IVARS(self);
 #if (PERL_VERSION > 10)
     REGEXP *rx = SvRX((SV*)token_re);
 #else
@@ -102,17 +104,18 @@ S_set_token_re_but_not_pattern(lucy_RegexTokenizer *self, void *token_re) {
         THROW(CFISH_ERR, "Failed to extract REGEXP from token_re '%s'",
               SvPV_nolen((SV*)token_re));
     }
-    if (self->token_re) { ReREFCNT_dec(((REGEXP*)self->token_re)); }
-    self->token_re = rx;
-    (void)ReREFCNT_inc(((REGEXP*)self->token_re));
+    if (ivars->token_re) { ReREFCNT_dec(((REGEXP*)ivars->token_re)); }
+    ivars->token_re = rx;
+    (void)ReREFCNT_inc(((REGEXP*)ivars->token_re));
 }
 
 static void
 S_set_pattern_from_token_re(lucy_RegexTokenizer *self, void *token_re) {
+    lucy_RegexTokenizerIVARS *const ivars = lucy_RegexTokenizer_IVARS(self);
     SV *rv = newRV((SV*)token_re);
     STRLEN len = 0;
     char *ptr = SvPVutf8((SV*)rv, len);
-    Cfish_CB_Mimic_Str(self->pattern, ptr, len);
+    Cfish_CB_Mimic_Str(ivars->pattern, ptr, len);
     SvREFCNT_dec(rv);
 }
 
@@ -125,8 +128,9 @@ lucy_RegexTokenizer_set_token_re(lucy_RegexTokenizer *self, void *token_re) {
 
 void
 lucy_RegexTokenizer_destroy(lucy_RegexTokenizer *self) {
-    CFISH_DECREF(self->pattern);
-    ReREFCNT_dec(((REGEXP*)self->token_re));
+    lucy_RegexTokenizerIVARS *const ivars = lucy_RegexTokenizer_IVARS(self);
+    CFISH_DECREF(ivars->pattern);
+    ReREFCNT_dec(((REGEXP*)ivars->token_re));
     CFISH_SUPER_DESTROY(self, LUCY_REGEXTOKENIZER);
 }
 
@@ -134,13 +138,14 @@ void
 lucy_RegexTokenizer_tokenize_str(lucy_RegexTokenizer *self,
                                  const char *string, size_t string_len,
                                  lucy_Inversion *inversion) {
+    lucy_RegexTokenizerIVARS *const ivars = lucy_RegexTokenizer_IVARS(self);
     uint32_t   num_code_points = 0;
     SV        *wrapper    = sv_newmortal();
 #if (PERL_VERSION > 10)
-    REGEXP    *rx         = (REGEXP*)self->token_re;
+    REGEXP    *rx         = (REGEXP*)ivars->token_re;
     regexp    *rx_struct  = (regexp*)SvANY(rx);
 #else
-    REGEXP    *rx         = (REGEXP*)self->token_re;
+    REGEXP    *rx         = (REGEXP*)ivars->token_re;
     regexp    *rx_struct  = rx;
 #endif
     char      *string_beg = (char*)string;

http://git-wip-us.apache.org/repos/asf/lucy/blob/6164cdde/perl/xs/Lucy/Document/Doc.c
----------------------------------------------------------------------
diff --git a/perl/xs/Lucy/Document/Doc.c b/perl/xs/Lucy/Document/Doc.c
index c20c2b0..220a7bf 100644
--- a/perl/xs/Lucy/Document/Doc.c
+++ b/perl/xs/Lucy/Document/Doc.c
@@ -23,32 +23,36 @@
 
 lucy_Doc*
 lucy_Doc_init(lucy_Doc *self, void *fields, int32_t doc_id) {
+    lucy_DocIVARS *const ivars = lucy_Doc_IVARS(self);
     // Assign.
     if (fields) {
         if (SvTYPE((SV*)fields) != SVt_PVHV) { THROW(CFISH_ERR, "Not a hash"); }
-        self->fields = SvREFCNT_inc((SV*)fields);
+        ivars->fields = SvREFCNT_inc((SV*)fields);
     }
     else {
-        self->fields = newHV();
+        ivars->fields = newHV();
     }
-    self->doc_id = doc_id;
+    ivars->doc_id = doc_id;
 
     return self;
 }
 
 void
 lucy_Doc_set_fields(lucy_Doc *self, void *fields) {
-    if (self->fields) { SvREFCNT_dec((SV*)self->fields); }
-    self->fields = SvREFCNT_inc((SV*)fields);
+    lucy_DocIVARS *const ivars = lucy_Doc_IVARS(self);
+    if (ivars->fields) { SvREFCNT_dec((SV*)ivars->fields); }
+    ivars->fields = SvREFCNT_inc((SV*)fields);
 }
 
 uint32_t
 lucy_Doc_get_size(lucy_Doc *self) {
-    return self->fields ? HvKEYS((HV*)self->fields) : 0;
+    lucy_DocIVARS *const ivars = lucy_Doc_IVARS(self);
+    return ivars->fields ? HvKEYS((HV*)ivars->fields) : 0;
 }
 
 void
 lucy_Doc_store(lucy_Doc *self, const cfish_CharBuf *field, cfish_Obj *value) {
+    lucy_DocIVARS *const ivars = lucy_Doc_IVARS(self);
     char   *key      = (char*)Cfish_CB_Get_Ptr8(field);
     size_t  key_size = Cfish_CB_Get_Size(field);
     SV *key_sv = newSVpvn(key, key_size);
@@ -58,19 +62,20 @@ lucy_Doc_store(lucy_Doc *self, const cfish_CharBuf *field, cfish_Obj *value) {
                  ? XSBind_cb_to_sv((cfish_CharBuf*)value)
                  : (SV*)Cfish_Obj_To_Host(value);
     SvUTF8_on(key_sv);
-    (void)hv_store_ent((HV*)self->fields, key_sv, val_sv, 0);
+    (void)hv_store_ent((HV*)ivars->fields, key_sv, val_sv, 0);
     // TODO: make this a thread-local instead of creating it every time?
     SvREFCNT_dec(key_sv);
 }
 
 static SV*
 S_nfreeze_fields(lucy_Doc *self) {
+    lucy_DocIVARS *const ivars = lucy_Doc_IVARS(self);
     dSP;
     ENTER;
     SAVETMPS;
     EXTEND(SP, 1);
     PUSHMARK(SP);
-    mPUSHs((SV*)newRV_inc((SV*)self->fields));
+    mPUSHs((SV*)newRV_inc((SV*)ivars->fields));
     PUTBACK;
     call_pv("Storable::nfreeze", G_SCALAR);
     SPAGAIN;
@@ -84,7 +89,8 @@ S_nfreeze_fields(lucy_Doc *self) {
 
 void
 lucy_Doc_serialize(lucy_Doc *self, lucy_OutStream *outstream) {
-    Lucy_OutStream_Write_C32(outstream, self->doc_id);
+    lucy_DocIVARS *const ivars = lucy_Doc_IVARS(self);
+    Lucy_OutStream_Write_C32(outstream, ivars->doc_id);
     SV *frozen = S_nfreeze_fields(self);
     STRLEN len;
     char *buf = SvPV(frozen, len);
@@ -138,8 +144,9 @@ lucy_Doc_deserialize(lucy_Doc *self, lucy_InStream *instream) {
 cfish_Obj*
 lucy_Doc_extract(lucy_Doc *self, cfish_CharBuf *field,
                  cfish_ViewCharBuf *target) {
+    lucy_DocIVARS *const ivars = lucy_Doc_IVARS(self);
     cfish_Obj *retval = NULL;
-    SV **sv_ptr = hv_fetch((HV*)self->fields, (char*)Cfish_CB_Get_Ptr8(field),
+    SV **sv_ptr = hv_fetch((HV*)ivars->fields, (char*)Cfish_CB_Get_Ptr8(field),
                            Cfish_CB_Get_Size(field), 0);
 
     if (sv_ptr && XSBind_sv_defined(*sv_ptr)) {
@@ -170,13 +177,14 @@ lucy_Doc_to_host(lucy_Doc *self) {
 
 cfish_Hash*
 lucy_Doc_dump(lucy_Doc *self) {
+    lucy_DocIVARS *const ivars = lucy_Doc_IVARS(self);
     cfish_Hash *dump = cfish_Hash_new(0);
     Cfish_Hash_Store_Str(dump, "_class", 6,
                         (cfish_Obj*)Cfish_CB_Clone(Lucy_Doc_Get_Class_Name(self)));
     Cfish_Hash_Store_Str(dump, "doc_id", 7,
-                        (cfish_Obj*)cfish_CB_newf("%i32", self->doc_id));
+                        (cfish_Obj*)cfish_CB_newf("%i32", ivars->doc_id));
     Cfish_Hash_Store_Str(dump, "fields", 6,
-                        XSBind_perl_to_cfish((SV*)self->fields));
+                        XSBind_perl_to_cfish((SV*)ivars->fields));
     return dump;
 }
 
@@ -197,8 +205,9 @@ lucy_Doc_load(lucy_Doc *self, cfish_Obj *dump) {
     SV *fields_sv = XSBind_cfish_to_perl((cfish_Obj*)fields);
     CHY_UNUSED_VAR(self);
 
-    loaded->doc_id = (int32_t)Cfish_Obj_To_I64(doc_id);
-    loaded->fields  = SvREFCNT_inc(SvRV(fields_sv));
+    lucy_DocIVARS *const loaded_ivars = lucy_Doc_IVARS(loaded);
+    loaded_ivars->doc_id = (int32_t)Cfish_Obj_To_I64(doc_id);
+    loaded_ivars->fields  = SvREFCNT_inc(SvRV(fields_sv));
     SvREFCNT_dec(fields_sv);
 
     return loaded;
@@ -206,21 +215,19 @@ lucy_Doc_load(lucy_Doc *self, cfish_Obj *dump) {
 
 bool
 lucy_Doc_equals(lucy_Doc *self, cfish_Obj *other) {
-    lucy_Doc *twin = (lucy_Doc*)other;
-    HV *my_fields;
-    HV *other_fields;
-    I32 num_fields;
-
-    if (twin == self)                    { return true;  }
+    if ((lucy_Doc*)other  == self)        { return true;  }
     if (!Cfish_Obj_Is_A(other, LUCY_DOC)) { return false; }
-    if (!self->doc_id == twin->doc_id)   { return false; }
-    if (!!self->fields ^ !!twin->fields) { return false; }
+    lucy_DocIVARS *const ivars = lucy_Doc_IVARS(self);
+    lucy_DocIVARS *const ovars = lucy_Doc_IVARS((lucy_Doc*)other);
+
+    if (!ivars->doc_id == ovars->doc_id)   { return false; }
+    if (!!ivars->fields ^ !!ovars->fields) { return false; }
 
     // Verify fields.  Don't allow any deep data structures.
-    my_fields    = (HV*)self->fields;
-    other_fields = (HV*)twin->fields;
+    HV *my_fields    = (HV*)ivars->fields;
+    HV *other_fields = (HV*)ovars->fields;
     if (HvKEYS(my_fields) != HvKEYS(other_fields)) { return false; }
-    num_fields = hv_iterinit(my_fields);
+    I32 num_fields = hv_iterinit(my_fields);
     while (num_fields--) {
         HE *my_entry = hv_iternext(my_fields);
         SV *my_val_sv = HeVAL(my_entry);
@@ -236,7 +243,8 @@ lucy_Doc_equals(lucy_Doc *self, cfish_Obj *other) {
 
 void
 lucy_Doc_destroy(lucy_Doc *self) {
-    if (self->fields) { SvREFCNT_dec((SV*)self->fields); }
+    lucy_DocIVARS *const ivars = lucy_Doc_IVARS(self);
+    if (ivars->fields) { SvREFCNT_dec((SV*)ivars->fields); }
     CFISH_SUPER_DESTROY(self, LUCY_DOC);
 }