Posted to commits@hbase.apache.org by st...@apache.org on 2008/08/12 20:43:39 UTC

svn commit: r685258 [3/3] - in /hadoop/hbase/trunk: ./ src/examples/ src/examples/uploaders/ src/examples/uploaders/hbrep/ src/examples/uploaders/hbrep/Hbase/ src/java/org/apache/hadoop/hbase/io/

Added: hadoop/hbase/trunk/src/examples/uploaders/hbrep/Hbase/__init__.py
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/examples/uploaders/hbrep/Hbase/__init__.py?rev=685258&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/examples/uploaders/hbrep/Hbase/__init__.py (added)
+++ hadoop/hbase/trunk/src/examples/uploaders/hbrep/Hbase/__init__.py Tue Aug 12 11:43:38 2008
@@ -0,0 +1 @@
+__all__ = ['ttypes', 'constants', 'Hbase']

Added: hadoop/hbase/trunk/src/examples/uploaders/hbrep/Hbase/constants.py
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/examples/uploaders/hbrep/Hbase/constants.py?rev=685258&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/examples/uploaders/hbrep/Hbase/constants.py (added)
+++ hadoop/hbase/trunk/src/examples/uploaders/hbrep/Hbase/constants.py Tue Aug 12 11:43:38 2008
@@ -0,0 +1,9 @@
+#
+# Autogenerated by Thrift
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+
+from thrift.Thrift import *
+from ttypes import *
+

Added: hadoop/hbase/trunk/src/examples/uploaders/hbrep/Hbase/ttypes.py
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/examples/uploaders/hbrep/Hbase/ttypes.py?rev=685258&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/examples/uploaders/hbrep/Hbase/ttypes.py (added)
+++ hadoop/hbase/trunk/src/examples/uploaders/hbrep/Hbase/ttypes.py Tue Aug 12 11:43:38 2008
@@ -0,0 +1,708 @@
+#
+# Autogenerated by Thrift
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+
+from thrift.Thrift import *
+
+from thrift.transport import TTransport
+from thrift.protocol import TBinaryProtocol
+try:
+  from thrift.protocol import fastbinary
+except:
+  fastbinary = None
+
+
+class ColumnDescriptor:
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'name', None, None, ), # 1
+    (2, TType.I32, 'maxVersions', None, None, ), # 2
+    (3, TType.STRING, 'compression', None, None, ), # 3
+    (4, TType.BOOL, 'inMemory', None, None, ), # 4
+    (5, TType.I32, 'maxValueLength', None, None, ), # 5
+    (6, TType.STRING, 'bloomFilterType', None, None, ), # 6
+    (7, TType.I32, 'bloomFilterVectorSize', None, None, ), # 7
+    (8, TType.I32, 'bloomFilterNbHashes', None, None, ), # 8
+    (9, TType.BOOL, 'blockCacheEnabled', None, None, ), # 9
+    (10, TType.I32, 'timeToLive', None, None, ), # 10
+  )
+
+  def __init__(self, d=None):
+    self.name = None
+    self.maxVersions = 3
+    self.compression = 'NONE'
+    self.inMemory = False
+    self.maxValueLength = 2147483647
+    self.bloomFilterType = 'NONE'
+    self.bloomFilterVectorSize = 0
+    self.bloomFilterNbHashes = 0
+    self.blockCacheEnabled = False
+    self.timeToLive = -1
+    if isinstance(d, dict):
+      if 'name' in d:
+        self.name = d['name']
+      if 'maxVersions' in d:
+        self.maxVersions = d['maxVersions']
+      if 'compression' in d:
+        self.compression = d['compression']
+      if 'inMemory' in d:
+        self.inMemory = d['inMemory']
+      if 'maxValueLength' in d:
+        self.maxValueLength = d['maxValueLength']
+      if 'bloomFilterType' in d:
+        self.bloomFilterType = d['bloomFilterType']
+      if 'bloomFilterVectorSize' in d:
+        self.bloomFilterVectorSize = d['bloomFilterVectorSize']
+      if 'bloomFilterNbHashes' in d:
+        self.bloomFilterNbHashes = d['bloomFilterNbHashes']
+      if 'blockCacheEnabled' in d:
+        self.blockCacheEnabled = d['blockCacheEnabled']
+      if 'timeToLive' in d:
+        self.timeToLive = d['timeToLive']
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.name = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.I32:
+          self.maxVersions = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.STRING:
+          self.compression = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.BOOL:
+          self.inMemory = iprot.readBool();
+        else:
+          iprot.skip(ftype)
+      elif fid == 5:
+        if ftype == TType.I32:
+          self.maxValueLength = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 6:
+        if ftype == TType.STRING:
+          self.bloomFilterType = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 7:
+        if ftype == TType.I32:
+          self.bloomFilterVectorSize = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 8:
+        if ftype == TType.I32:
+          self.bloomFilterNbHashes = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 9:
+        if ftype == TType.BOOL:
+          self.blockCacheEnabled = iprot.readBool();
+        else:
+          iprot.skip(ftype)
+      elif fid == 10:
+        if ftype == TType.I32:
+          self.timeToLive = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('ColumnDescriptor')
+    if self.name != None:
+      oprot.writeFieldBegin('name', TType.STRING, 1)
+      oprot.writeString(self.name)
+      oprot.writeFieldEnd()
+    if self.maxVersions != None:
+      oprot.writeFieldBegin('maxVersions', TType.I32, 2)
+      oprot.writeI32(self.maxVersions)
+      oprot.writeFieldEnd()
+    if self.compression != None:
+      oprot.writeFieldBegin('compression', TType.STRING, 3)
+      oprot.writeString(self.compression)
+      oprot.writeFieldEnd()
+    if self.inMemory != None:
+      oprot.writeFieldBegin('inMemory', TType.BOOL, 4)
+      oprot.writeBool(self.inMemory)
+      oprot.writeFieldEnd()
+    if self.maxValueLength != None:
+      oprot.writeFieldBegin('maxValueLength', TType.I32, 5)
+      oprot.writeI32(self.maxValueLength)
+      oprot.writeFieldEnd()
+    if self.bloomFilterType != None:
+      oprot.writeFieldBegin('bloomFilterType', TType.STRING, 6)
+      oprot.writeString(self.bloomFilterType)
+      oprot.writeFieldEnd()
+    if self.bloomFilterVectorSize != None:
+      oprot.writeFieldBegin('bloomFilterVectorSize', TType.I32, 7)
+      oprot.writeI32(self.bloomFilterVectorSize)
+      oprot.writeFieldEnd()
+    if self.bloomFilterNbHashes != None:
+      oprot.writeFieldBegin('bloomFilterNbHashes', TType.I32, 8)
+      oprot.writeI32(self.bloomFilterNbHashes)
+      oprot.writeFieldEnd()
+    if self.blockCacheEnabled != None:
+      oprot.writeFieldBegin('blockCacheEnabled', TType.BOOL, 9)
+      oprot.writeBool(self.blockCacheEnabled)
+      oprot.writeFieldEnd()
+    if self.timeToLive != None:
+      oprot.writeFieldBegin('timeToLive', TType.I32, 10)
+      oprot.writeI32(self.timeToLive)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def __str__(self): 
+    return str(self.__dict__)
+
+  def __repr__(self): 
+    return repr(self.__dict__)
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class RegionDescriptor:
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'startKey', None, None, ), # 1
+  )
+
+  def __init__(self, d=None):
+    self.startKey = None
+    if isinstance(d, dict):
+      if 'startKey' in d:
+        self.startKey = d['startKey']
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.startKey = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('RegionDescriptor')
+    if self.startKey != None:
+      oprot.writeFieldBegin('startKey', TType.STRING, 1)
+      oprot.writeString(self.startKey)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def __str__(self): 
+    return str(self.__dict__)
+
+  def __repr__(self): 
+    return repr(self.__dict__)
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class Mutation:
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.BOOL, 'isDelete', None, None, ), # 1
+    (2, TType.STRING, 'column', None, None, ), # 2
+    (3, TType.STRING, 'value', None, None, ), # 3
+  )
+
+  def __init__(self, d=None):
+    self.isDelete = False
+    self.column = None
+    self.value = None
+    if isinstance(d, dict):
+      if 'isDelete' in d:
+        self.isDelete = d['isDelete']
+      if 'column' in d:
+        self.column = d['column']
+      if 'value' in d:
+        self.value = d['value']
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.BOOL:
+          self.isDelete = iprot.readBool();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.column = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.STRING:
+          self.value = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('Mutation')
+    if self.isDelete != None:
+      oprot.writeFieldBegin('isDelete', TType.BOOL, 1)
+      oprot.writeBool(self.isDelete)
+      oprot.writeFieldEnd()
+    if self.column != None:
+      oprot.writeFieldBegin('column', TType.STRING, 2)
+      oprot.writeString(self.column)
+      oprot.writeFieldEnd()
+    if self.value != None:
+      oprot.writeFieldBegin('value', TType.STRING, 3)
+      oprot.writeString(self.value)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def __str__(self): 
+    return str(self.__dict__)
+
+  def __repr__(self): 
+    return repr(self.__dict__)
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class BatchMutation:
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'row', None, None, ), # 1
+    (2, TType.LIST, 'mutations', (TType.STRUCT,(Mutation, Mutation.thrift_spec)), None, ), # 2
+  )
+
+  def __init__(self, d=None):
+    self.row = None
+    self.mutations = None
+    if isinstance(d, dict):
+      if 'row' in d:
+        self.row = d['row']
+      if 'mutations' in d:
+        self.mutations = d['mutations']
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.row = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.LIST:
+          self.mutations = []
+          (_etype3, _size0) = iprot.readListBegin()
+          for _i4 in xrange(_size0):
+            _elem5 = Mutation()
+            _elem5.read(iprot)
+            self.mutations.append(_elem5)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('BatchMutation')
+    if self.row != None:
+      oprot.writeFieldBegin('row', TType.STRING, 1)
+      oprot.writeString(self.row)
+      oprot.writeFieldEnd()
+    if self.mutations != None:
+      oprot.writeFieldBegin('mutations', TType.LIST, 2)
+      oprot.writeListBegin(TType.STRUCT, len(self.mutations))
+      for iter6 in self.mutations:
+        iter6.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def __str__(self): 
+    return str(self.__dict__)
+
+  def __repr__(self): 
+    return repr(self.__dict__)
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class ScanEntry:
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'row', None, None, ), # 1
+    (2, TType.MAP, 'columns', (TType.STRING,None,TType.STRING,None), None, ), # 2
+  )
+
+  def __init__(self, d=None):
+    self.row = None
+    self.columns = None
+    if isinstance(d, dict):
+      if 'row' in d:
+        self.row = d['row']
+      if 'columns' in d:
+        self.columns = d['columns']
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.row = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.MAP:
+          self.columns = {}
+          (_ktype8, _vtype9, _size7 ) = iprot.readMapBegin() 
+          for _i11 in xrange(_size7):
+            _key12 = iprot.readString();
+            _val13 = iprot.readString();
+            self.columns[_key12] = _val13
+          iprot.readMapEnd()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('ScanEntry')
+    if self.row != None:
+      oprot.writeFieldBegin('row', TType.STRING, 1)
+      oprot.writeString(self.row)
+      oprot.writeFieldEnd()
+    if self.columns != None:
+      oprot.writeFieldBegin('columns', TType.MAP, 2)
+      oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.columns))
+      for kiter14,viter15 in self.columns.items():
+        oprot.writeString(kiter14)
+        oprot.writeString(viter15)
+      oprot.writeMapEnd()
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def __str__(self): 
+    return str(self.__dict__)
+
+  def __repr__(self): 
+    return repr(self.__dict__)
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class IOError(Exception):
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'message', None, None, ), # 1
+  )
+
+  def __init__(self, d=None):
+    self.message = None
+    if isinstance(d, dict):
+      if 'message' in d:
+        self.message = d['message']
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.message = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('IOError')
+    if self.message != None:
+      oprot.writeFieldBegin('message', TType.STRING, 1)
+      oprot.writeString(self.message)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def __str__(self): 
+    return str(self.__dict__)
+
+  def __repr__(self): 
+    return repr(self.__dict__)
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class IllegalArgument(Exception):
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'message', None, None, ), # 1
+  )
+
+  def __init__(self, d=None):
+    self.message = None
+    if isinstance(d, dict):
+      if 'message' in d:
+        self.message = d['message']
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.message = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('IllegalArgument')
+    if self.message != None:
+      oprot.writeFieldBegin('message', TType.STRING, 1)
+      oprot.writeString(self.message)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def __str__(self): 
+    return str(self.__dict__)
+
+  def __repr__(self): 
+    return repr(self.__dict__)
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class NotFound(Exception):
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'message', None, None, ), # 1
+  )
+
+  def __init__(self, d=None):
+    self.message = None
+    if isinstance(d, dict):
+      if 'message' in d:
+        self.message = d['message']
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.message = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('NotFound')
+    if self.message != None:
+      oprot.writeFieldBegin('message', TType.STRING, 1)
+      oprot.writeString(self.message)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def __str__(self): 
+    return str(self.__dict__)
+
+  def __repr__(self): 
+    return repr(self.__dict__)
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class AlreadyExists(Exception):
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'message', None, None, ), # 1
+  )
+
+  def __init__(self, d=None):
+    self.message = None
+    if isinstance(d, dict):
+      if 'message' in d:
+        self.message = d['message']
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.message = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('AlreadyExists')
+    if self.message != None:
+      oprot.writeFieldBegin('message', TType.STRING, 1)
+      oprot.writeString(self.message)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def __str__(self): 
+    return str(self.__dict__)
+
+  def __repr__(self): 
+    return repr(self.__dict__)
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
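
The generated ttypes above are plain Python structs; a minimal sketch of how they are typically driven from client code follows, assuming the Thrift-generated Hbase service client from the rest of this commit and a Thrift server on localhost:9090 (the defaults used in hbrep.ini):

  # Sketch only: open a Thrift connection and apply one BatchMutation.
  # Hbase.Client and the standard transport/protocol setup are assumptions
  # (the generated service module is added elsewhere in this commit);
  # mutateRows() matches the call made by bootstrap.py, and the row/column/
  # table names are the hbrep.ini samples.
  from thrift.transport import TSocket, TTransport
  from thrift.protocol import TBinaryProtocol
  from Hbase import Hbase
  from Hbase.ttypes import Mutation, BatchMutation

  transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9090))
  protocol = TBinaryProtocol.TBinaryProtocol(transport)
  client = Hbase.Client(protocol)
  transport.open()

  batch = BatchMutation()
  batch.row = 'user_id:42'
  batch.mutations = [Mutation({'column': 'users:dob', 'value': '1970-01-01'})]
  client.mutateRows('stuff', [batch])

  transport.close()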

Added: hadoop/hbase/trunk/src/examples/uploaders/hbrep/README
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/examples/uploaders/hbrep/README?rev=685258&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/examples/uploaders/hbrep/README (added)
+++ hadoop/hbase/trunk/src/examples/uploaders/hbrep/README Tue Aug 12 11:43:38 2008
@@ -0,0 +1,107 @@
+hbrep is a tool for replicating data from postgresql tables to hbase tables.
+
+Dependencies:
+ - python 2.4
+ - hbase 0.2.0
+ - skytools 2.1.7
+ - postgresql
+ 
+It has two main functions:
+ - bootstrap, which copies all existing data from the specified columns of a table into hbase
+ - play, which processes incoming insert, update and delete events and applies them to hbase.
+
+Example usage:
+Install the triggers:
+  ./hbrep.py hbrep.ini install schema1.table1 schema2.table2
+Now that future updates are queuing, bootstrap the tables:
+  ./hbrep.py hbrep.ini bootstrap schema1.table1 schema2.table2
+Start the pgq ticker:
+  pgqadm.py pgq.ini ticker
+Run the queue consumer:
+  ./hbrep.py hbrep.ini play schema1.table1 schema2.table2
+
+
+More details follow.
+
+
+All functions require an ini file (say hbrep.ini) with an HBaseReplic section and, for each postgresql table you wish to replicate, a section containing the table mapping. Note that each table mapping section name should match the name of the postgresql table.
+
+Example ini file:
+####################
+[HBaseReplic]
+job_name = hbase_replic_job
+logfile = %(job_name)s.log
+pidfile = %(job_name)s.pid
+postgresql_db = dbname=source_database user=dbuser
+pgq_queue_name = hbase_replic_queue
+hbase_hostname = localhost
+hbase_port = 9090
+# If omitted, default is 10000
+max_batch_size = 10000
+# File to use when copying a table; if omitted, a select of the columns will be done instead.
+bootstrap_tmpfile = tabledump.dat
+
+# For each table mapping, there must be the same number of psql_columns as hbase_column_descriptors
+[public.users]
+psql_schema = public
+psql_table_name = users
+psql_key_column = user_id
+psql_columns = dob
+hbase_table_name = stuff
+hbase_column_descriptors = users:dob
+hbase_row_prefix = user_id:
+####################
+
+Bootstrapping:
+To bootstrap the public.users table from postgresql to hbase, run:
+
+  ./hbrep.py hbrep.ini bootstrap public.users
+  
+You can specify multiple tables as arguments.
+ 
+ 
+Play:
+This mode uses pgq from the skytools package to create and manage event queues on postgresql.
+You need to have pgq installed on the database you are replicating from.
+
+With a pgq.ini file like this:
+####################
+[pgqadm]
+job_name = sourcedb_ticker
+db = dbname=source_database user=dbuser
+# how often to run maintenance [minutes]
+maint_delay_min = 1
+# how often to check for activity [secs]
+loop_delay = 0.2
+logfile = %(job_name)s.log
+pidfile = %(job_name)s.pid
+use_skylog = 0
+####################
+
+You install pgq on the database with:
+
+  pgqadm.py pgq.ini install
+
+Next, install hbrep:
+
+  hbrep.py hbrep.ini install public.users
+  
+This creates a queue using pgq (in this case called hbase_replic_queue) and registers the hbrep consumer (called HBaseReplic) with that queue. Finally, it creates a trigger on each specified table that adds an event to the queue for each insert, update or delete.
+
+Start the pgq event ticker:
+
+  pgqadm.py pgq.ini ticker
+
+Finally, run the hbrep consumer:
+  ./hbrep.py hbrep.ini play public.users
+  
+Now any inserts, updates or deletes on the postgresql users table will be processed and sent to the 
+hbase table.
+
+
+Uninstall:
+You can remove the triggers from a table with:
+  ./hbrep.py hbrep.ini uninstall public.users
+  
+  
+

Added: hadoop/hbase/trunk/src/examples/uploaders/hbrep/__init__.py
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/examples/uploaders/hbrep/__init__.py?rev=685258&view=auto
==============================================================================
    (empty)

Added: hadoop/hbase/trunk/src/examples/uploaders/hbrep/bootstrap.py
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/examples/uploaders/hbrep/bootstrap.py?rev=685258&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/examples/uploaders/hbrep/bootstrap.py (added)
+++ hadoop/hbase/trunk/src/examples/uploaders/hbrep/bootstrap.py Tue Aug 12 11:43:38 2008
@@ -0,0 +1,190 @@
+import sys, os
+
+import pgq, pgq.producer
+import skytools
+
+from HBaseConnection import *
+import tablemapping
+
+class HBaseBootstrap(skytools.DBScript):
+  """Bootstrapping script for loading columns from a table in postgresql to hbase."""
+  
+  def __init__(self, service_name, args):
+    # This will process any options eg -k -v -d
+    skytools.DBScript.__init__(self, service_name, args)
+    
+    config_file = self.args[0]
+    if len(self.args) < 2:
+      print "need table names"
+      sys.exit(1)
+    else:
+      self.table_names = self.args[1:]
+    
+    #just to check this option exists
+    self.cf.get("postgresql_db")
+    
+    self.max_batch_size = int(self.cf.get("max_batch_size", "10000"))
+    self.hbase_hostname = self.cf.get("hbase_hostname", "localhost")
+    self.hbase_port = int(self.cf.get("hbase_port", "9090"))
+    self.table_mappings = tablemapping.load_table_mappings(config_file, self.table_names)
+  
+  def startup(self):
+    # make sure the script loops only once.
+    self.set_single_loop(1)
+    self.log.info("Starting " + self.job_name)
+    
+  def work(self):
+    for t in self.table_names:
+      self.bootstrap_table(t)
+      
+  def bootstrap_table(self, table_name):
+    try:
+      self.log.info("Bootstrapping table %s" % table_name)
+      hbase = HBaseConnection(self.hbase_hostname, self.hbase_port)
+      try:
+        table_mapping = self.table_mappings[table_name]
+      
+        self.log.debug("Connecting to HBase")
+        hbase.connect()
+        
+        # Fetch postgresql cursor
+        self.log.debug("Getting postgresql cursor")
+        db = self.get_database("postgresql_db")
+        curs = db.cursor()
+        
+        hbase.validate_table_name(table_mapping.hbase_table_name)
+        hbase.validate_column_descriptors(table_mapping.hbase_table_name, table_mapping.hbase_column_descriptors)
+        
+        try:
+          dump_file = self.cf.get("bootstrap_tmpfile")
+        except:
+          dump_file = None
+        
+        if dump_file != None:
+          row_source = CopiedRows(self.log, curs, dump_file)
+        else:
+          row_source = SelectedRows(self.log, curs)
+        
+        table_name = table_mapping.psql_schema+"."+table_mapping.psql_table_name
+        # we are careful to make sure that the first column will be the key.
+        column_list = [table_mapping.psql_key_column] + table_mapping.psql_columns
+        
+        # Load the rows either via a select or via a table copy to file. 
+        # Either way, it does not load it all into memory. 
+        # copy is faster, but may incorrectly handle data with tabs in it.
+        row_source.load_rows(table_name, column_list)
+        
+        # max number of rows to fetch at once
+        batch_size = self.max_batch_size
+        total_rows = 0L
+          
+        self.log.debug("Starting puts to hbase")
+        rows = row_source.get_rows(batch_size)
+        while rows != []:
+          batches = []
+          for row in rows:
+            batches.append(self.createRowBatch(table_mapping, row))
+          
+          hbase.client.mutateRows(table_mapping.hbase_table_name, batches)
+          total_rows = total_rows + len(batches)
+          self.log.debug("total rows put = %d" % (total_rows))
+          # get next batch of rows
+          rows = row_source.get_rows(batch_size)
+          
+        self.log.info("total rows put = %d" % (total_rows))
+        self.log.info("Bootstrapping table %s complete" % table_name)
+        
+        
+      except Exception, e:
+        #self.log.info(e)
+        sys.exit(e)
+      
+    finally:
+      hbase.disconnect()
+  
+  def createRowBatch(self, table_mapping, row):
+    batch = BatchMutation()
+    batch.row = table_mapping.hbase_row_prefix + str(row[0])
+    batch.mutations = []
+    for column, value in zip(table_mapping.hbase_column_descriptors, row[1:]):
+      if value != 'NULL' and  value != None:
+        m = Mutation()
+        m.column = column
+        m.value = str(value)
+        batch.mutations.append(m)
+    return batch
+  
+  
+## Helper classes to fetch rows from a select, or from a table dumped by copy
+  
+class RowSource:
+  """ Base class for fetching rows from somewhere. """
+  
+  def __init__(self, log):
+    self.log = log
+    
+  def make_column_str(self, column_list):
+    i = 0
+    while i < len(column_list):
+      column_list[i] = '"%s"' % column_list[i]
+      i += 1
+    return ",".join(column_list)
+  
+   
+class CopiedRows(RowSource):
+  """ 
+  Class for fetching rows from a postgresql database,
+  rows are first dumped to a file using COPY.
+  """
+  def __init__(self, log, curs, dump_file):
+    RowSource.__init__(self, log)
+    self.dump_file = dump_file
+    # Set DBAPI-2.0 cursor
+    self.curs = curs
+    
+  def load_rows(self, table_name, column_list):
+    columns = self.make_column_str(column_list)
+    self.log.debug("starting dump to file:%s. table:%s. columns:%s" % (self.dump_file, table_name, columns))
+    dump_out = open(self.dump_file, 'w')
+    self.curs.copy_to(dump_out, table_name + "(%s)" % columns, '\t', 'NULL')
+    dump_out.close()
+    self.log.debug("table %s dump complete" % table_name)
+    
+    self.dump_in = open(self.dump_file, 'r')
+    
+  def get_rows(self, no_of_rows):
+    rows = []
+    if not self.dump_in.closed:
+      for line in self.dump_in:
+        rows.append(line.split())      
+        if len(rows) >= no_of_rows:
+          break
+      if rows == []:
+        self.dump_in.close()
+    return rows
+
+
+class SelectedRows(RowSource):
+  """ 
+  Class for fetching rows from a postgresql database,
+  rows are fetched via a select on the entire table.
+  """
+  def __init__(self, log, curs):
+    RowSource.__init__(self, log)
+    # Set DBAPI-2.0 cursor
+    self.curs = curs
+    
+  def load_rows(self, table_name, column_list):
+    columns = self.make_column_str(column_list)
+    q = "SELECT %s FROM %s" % (columns,table_name)
+    self.log.debug("Executing query %s" % q)
+    self.curs.execute(q)
+    self.log.debug("query finished")
+    
+  def get_rows(self, no_of_rows):
+    return self.curs.fetchmany(no_of_rows)
+    
+
+if __name__ == '__main__':
+  bootstrap = HBaseBootstrap("HBaseReplic",sys.argv[1:])
+  bootstrap.start()
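
To make the table mapping concrete: with the sample public.users section (psql_key_column = user_id, psql_columns = dob, hbase_row_prefix = user_id:, hbase_column_descriptors = users:dob), createRowBatch turns each fetched row into one BatchMutation. A rough, self-contained sketch of that transformation, using made-up row values:

  # Sketch of what createRowBatch produces for one (user_id, dob) row.
  from Hbase.ttypes import Mutation, BatchMutation

  row = (42, '1970-01-01')                 # first element is always the key column
  batch = BatchMutation()
  batch.row = 'user_id:' + str(row[0])     # hbase_row_prefix + key -> 'user_id:42'
  batch.mutations = []
  for column, value in zip(['users:dob'], row[1:]):
    if value is not None and value != 'NULL':   # skip SQL NULLs (as the copy dump writes them)
      batch.mutations.append(Mutation({'column': column, 'value': str(value)}))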

Added: hadoop/hbase/trunk/src/examples/uploaders/hbrep/hbrep.ini
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/examples/uploaders/hbrep/hbrep.ini?rev=685258&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/examples/uploaders/hbrep/hbrep.ini (added)
+++ hadoop/hbase/trunk/src/examples/uploaders/hbrep/hbrep.ini Tue Aug 12 11:43:38 2008
@@ -0,0 +1,22 @@
+[HBaseReplic]
+job_name = hbase_replic_job
+logfile = %(job_name)s.log
+pidfile = %(job_name)s.pid
+postgresql_db = dbname=source_database user=dbuser
+pgq_queue_name = hbase_replic_queue
+hbase_hostname = localhost
+hbase_port = 9090
+# If omitted, default is 10000
+max_batch_size = 10000
+# File to use when copying a table; if omitted, a select of the columns will be done instead.
+bootstrap_tmpfile = tabledump.dat
+
+# For each table mapping, there must be the same number of psql_columns as hbase_column_descriptors
+[public.users]
+psql_schema = public
+psql_table_name = users
+psql_key_column = user_id
+psql_columns = dob
+hbase_table_name = stuff
+hbase_column_descriptors = users:dob
+hbase_row_prefix = user_id:

Added: hadoop/hbase/trunk/src/examples/uploaders/hbrep/hbrep.py
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/examples/uploaders/hbrep/hbrep.py?rev=685258&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/examples/uploaders/hbrep/hbrep.py (added)
+++ hadoop/hbase/trunk/src/examples/uploaders/hbrep/hbrep.py Tue Aug 12 11:43:38 2008
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+import sys, os
+
+import pgq, pgq.producer
+import skytools, skytools._pyquoting
+
+from bootstrap import HBaseBootstrap
+from HBaseConsumer import HBaseConsumer
+
+command_usage = """
+%prog [options] inifile command [tablenames]
+
+commands:
+  play          Run event consumer to update specified tables with hbase.
+  bootstrap     Bootstrap the specified tables into hbase.
+  install       Set up the pgq queue and install a trigger on each table.
+  uninstall     Remove the triggers from each specified table.
+"""
+
+class HBaseReplic(skytools.DBScript):
+  def __init__(self, service_name, args):
+    try:
+      self.run_script = 0
+      
+      # This will process any options eg -k -v -d
+      skytools.DBScript.__init__(self, service_name, args)
+      
+      self.config_file = self.args[0]
+      
+      if len(self.args) < 2:
+        self.print_usage()
+        print "need command"
+        sys.exit(0)
+      cmd = self.args[1]
+        
+      if not cmd in ["play","bootstrap","install", "uninstall"]:
+        self.print_usage()
+        print "unknown command"
+        sys.exit(0)
+        
+      if len(self.args) < 3:
+        self.print_usage()
+        print "need table names"
+        sys.exit(0)
+      else:
+        self.table_names = self.args[2:]
+      
+      if cmd == "play":
+        self.run_script = HBaseConsumer(service_name, [self.config_file] + self.table_names)
+      elif cmd == "bootstrap":
+        self.run_script = HBaseBootstrap(service_name, [self.config_file] + self.table_names)
+      elif cmd == "install":
+        self.work = self.do_install
+      elif cmd == "uninstall":
+        self.work = self.do_uninstall
+        
+    except Exception, e:
+      sys.exit(e)
+  
+  def print_usage(self):
+    print "Usage: " + command_usage
+    
+  def init_optparse(self, parser=None):
+    p = skytools.DBScript.init_optparse(self, parser)
+    p.set_usage(command_usage.strip())
+    return p
+
+  def start(self):
+    if self.run_script:
+      self.run_script.start()
+    else:
+      skytools.DBScript.start(self)
+      
+  def startup(self):
+    # make sure the script loops only once.
+    self.set_single_loop(1)
+    
+  def do_install(self):
+    try:
+      queue_name = self.cf.get("pgq_queue_name")
+      consumer = self.job_name
+      
+      self.log.info('Creating queue: %s' % queue_name)
+      self.exec_sql("select pgq.create_queue(%s)", [queue_name])
+  
+      self.log.info('Registering consumer %s on queue %s' % (consumer, queue_name))
+      self.exec_sql("select pgq.register_consumer(%s, %s)", [queue_name, consumer])
+  
+      for table_name in self.table_names:
+        self.log.info('Creating trigger hbase_replic on table %s' % (table_name))
+        q = """
+        CREATE TRIGGER hbase_replic
+          AFTER INSERT OR UPDATE OR DELETE
+          ON %s
+          FOR EACH ROW
+          EXECUTE PROCEDURE pgq.logutriga('%s')"""
+        self.exec_sql(q % (table_name, queue_name), [])
+    except Exception, e:
+      sys.exit(e)
+      
+  def do_uninstall(self):
+    try:
+      queue_name = self.cf.get("pgq_queue_name")
+      consumer = "HBaseReplic"
+      
+      #self.log.info('Unregistering consumer %s on queue %s' % (consumer, queue_name))
+      #self.exec_sql("select pgq.unregister_consumer(%s, %s)", [queue_name, consumer])
+  
+      for table_name in self.table_names:
+        self.log.info('Dropping trigger hbase_replic on table %s' % (table_name))
+        q = "DROP TRIGGER hbase_replic ON %s" % table_name
+        self.exec_sql(q, [])
+        
+    except Exception, e:
+      sys.exit(e)
+    
+  def exec_sql(self, q, args):
+    self.log.debug(q)
+    db = self.get_database('postgresql_db')
+    curs = db.cursor()
+    curs.execute(q, args)
+    db.commit()
+    
+if __name__ == '__main__':
+  script = HBaseReplic("HBaseReplic",sys.argv[1:])
+  script.start()

Propchange: hadoop/hbase/trunk/src/examples/uploaders/hbrep/hbrep.py
------------------------------------------------------------------------------
    svn:executable = *
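
The same commands can also be driven from Python rather than the shell; a small sketch, assuming hbrep.py is importable and the sample hbrep.ini exists (the argument list is [inifile, command, tablenames...], exactly as on the command line):

  # Roughly equivalent to "./hbrep.py hbrep.ini install public.users" followed by
  # "./hbrep.py hbrep.ini bootstrap public.users"; each start() runs one command.
  from hbrep import HBaseReplic

  HBaseReplic("HBaseReplic", ["hbrep.ini", "install", "public.users"]).start()
  HBaseReplic("HBaseReplic", ["hbrep.ini", "bootstrap", "public.users"]).start()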

Added: hadoop/hbase/trunk/src/examples/uploaders/hbrep/pgq.ini
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/examples/uploaders/hbrep/pgq.ini?rev=685258&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/examples/uploaders/hbrep/pgq.ini (added)
+++ hadoop/hbase/trunk/src/examples/uploaders/hbrep/pgq.ini Tue Aug 12 11:43:38 2008
@@ -0,0 +1,10 @@
+[pgqadm]
+job_name = sourcedb_ticker
+db = dbname=source_database user=dbuser
+# how often to run maintenance [minutes]
+maint_delay_min = 1
+# how often to check for activity [secs]
+loop_delay = 0.2
+logfile = %(job_name)s.log
+pidfile = %(job_name)s.pid
+use_skylog = 0

Added: hadoop/hbase/trunk/src/examples/uploaders/hbrep/tablemapping.py
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/examples/uploaders/hbrep/tablemapping.py?rev=685258&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/examples/uploaders/hbrep/tablemapping.py (added)
+++ hadoop/hbase/trunk/src/examples/uploaders/hbrep/tablemapping.py Tue Aug 12 11:43:38 2008
@@ -0,0 +1,33 @@
+import sys, os
+from skytools.config import *
+
+PSQL_SCHEMA = "psql_schema"
+PSQL_TABLENAME = "psql_table_name"
+PSQL_KEYCOL = "psql_key_column"
+PSQL_COLUMNS = "psql_columns"
+HBASE_TABLENAME = "hbase_table_name"
+HBASE_COLUMNDESCS = "hbase_column_descriptors"
+HBASE_ROWPREFIX = "hbase_row_prefix"
+
+def load_table_mappings(config_file, table_names):
+  table_mappings = {}
+  for table_name in table_names:
+    conf = Config(table_name, config_file)
+    table_mappings[table_name] = PSqlHBaseTableMapping(conf)
+  return table_mappings
+  
+class PSqlHBaseTableMapping:
+  # conf can be anything with a get function eg, a dictionary
+  def __init__(self, conf):
+    self.psql_schema = conf.get(PSQL_SCHEMA)
+    self.psql_table_name = conf.get(PSQL_TABLENAME)
+    self.psql_key_column = conf.get(PSQL_KEYCOL)
+    self.psql_columns = conf.get(PSQL_COLUMNS).split()
+    self.hbase_table_name = conf.get(HBASE_TABLENAME)
+    self.hbase_column_descriptors = conf.get(HBASE_COLUMNDESCS).split()
+    self.hbase_row_prefix = conf.get(HBASE_ROWPREFIX)
+    
+    if len(self.psql_columns) != len(self.hbase_column_descriptors):
+      raise Exception("psql_columns and hbase_column_descriptors must have same length")
+    
+  
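
A quick sketch of the mapping loader in use (this mirrors what bootstrap.py does with the config file and table names, here against the sample hbrep.ini):

  # Load the [public.users] section from hbrep.ini and inspect the mapping.
  import tablemapping

  mappings = tablemapping.load_table_mappings("hbrep.ini", ["public.users"])
  m = mappings["public.users"]
  print m.psql_key_column             # user_id
  print m.psql_columns                # ['dob']
  print m.hbase_table_name            # stuff
  print m.hbase_column_descriptors    # ['users:dob']
  print m.hbase_row_prefix            # user_id: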

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java?rev=685258&r1=685257&r2=685258&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java Tue Aug 12 11:43:38 2008
@@ -23,9 +23,11 @@
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.Collection;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
+import java.util.SortedMap;
 import java.util.TreeMap;
 import java.util.concurrent.atomic.AtomicReference;
 
@@ -47,7 +49,7 @@
  * only.
  */
 public class HbaseMapWritable <K, V>
-implements Map<byte [], V>, Writable, Configurable {
+implements SortedMap<byte [], V>, Writable, Configurable {
   private AtomicReference<Configuration> conf =
     new AtomicReference<Configuration>();
   
@@ -74,7 +76,7 @@
     CODE_TO_CLASS.put(code, clazz);
   }
   
-  private Map<byte [], V> instance =
+  private SortedMap<byte [], V> instance =
     new TreeMap<byte [], V>(Bytes.BYTES_COMPARATOR);
 
   /** @return the conf */
@@ -131,6 +133,42 @@
   public Collection<V> values() {
     return instance.values();
   }
+
+  public void putAll(Map<? extends byte [], ? extends V> m) {
+    this.instance.putAll(m);
+  }
+
+  public V remove(Object key) {
+    return this.instance.remove(key);
+  }
+
+  public V put(byte [] key, V value) {
+    return this.instance.put(key, value);
+  }
+
+  public Comparator<? super byte[]> comparator() {
+    return this.instance.comparator();
+  }
+
+  public byte[] firstKey() {
+    return this.instance.firstKey();
+  }
+
+  public SortedMap<byte[], V> headMap(byte[] toKey) {
+    return this.instance.headMap(toKey);
+  }
+
+  public byte[] lastKey() {
+    return this.instance.lastKey();
+  }
+
+  public SortedMap<byte[], V> subMap(byte[] fromKey, byte[] toKey) {
+    return this.instance.subMap(fromKey, toKey);
+  }
+
+  public SortedMap<byte[], V> tailMap(byte[] fromKey) {
+    return this.instance.tailMap(fromKey);
+  }
   
   // Writable
 
@@ -187,16 +225,4 @@
       this.instance.put(key, v);
     }
   }
-
-  public void putAll(Map<? extends byte [], ? extends V> m) {
-    this.instance.putAll(m);
-  }
-
-  public V remove(Object key) {
-    return this.instance.remove(key);
-  }
-
-  public V put(byte [] key, V value) {
-    return this.instance.put(key, value);
-  }
 }
\ No newline at end of file

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/RowResult.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/RowResult.java?rev=685258&r1=685257&r2=685258&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/RowResult.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/RowResult.java Tue Aug 12 11:43:38 2008
@@ -25,8 +25,10 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.Map;
 import java.util.Set;
+import java.util.SortedMap;
 import java.util.TreeSet;
 
 import org.apache.hadoop.hbase.HConstants;
@@ -37,7 +39,7 @@
 /**
  * Holds row name and then a map of columns to cells.
  */
-public class RowResult implements Writable, Map<byte [], Cell> {
+public class RowResult implements Writable, SortedMap<byte [], Cell> {
   private byte [] row = null;
   private final HbaseMapWritable<byte [], Cell> cells;
 
@@ -136,6 +138,31 @@
   public Cell get(String key) {
     return get(Bytes.toBytes(key));
   }
+  
+
+  public Comparator<? super byte[]> comparator() {
+    return this.cells.comparator();
+  }
+
+  public byte[] firstKey() {
+    return this.cells.firstKey();
+  }
+
+  public SortedMap<byte[], Cell> headMap(byte[] toKey) {
+    return this.cells.headMap(toKey);
+  }
+
+  public byte[] lastKey() {
+    return this.cells.lastKey();
+  }
+
+  public SortedMap<byte[], Cell> subMap(byte[] fromKey, byte[] toKey) {
+    return this.cells.subMap(fromKey, toKey);
+  }
+
+  public SortedMap<byte[], Cell> tailMap(byte[] fromKey) {
+    return this.cells.tailMap(fromKey);
+  }
 
   /**
    * Row entry.