Posted to commits@cassandra.apache.org by "Timu Eren (JIRA)" <ji...@apache.org> on 2011/04/22 14:02:05 UTC

[jira] [Created] (CASSANDRA-2542) InvalidRequestException

InvalidRequestException
-----------------------

                 Key: CASSANDRA-2542
                 URL: https://issues.apache.org/jira/browse/CASSANDRA-2542
             Project: Cassandra
          Issue Type: Bug
          Components: Core
    Affects Versions: 0.7.4
            Reporter: Timu Eren
            Priority: Minor


I get an InvalidRequestException when executing map reduce jobs on a super column family if I set the comparator to LongType; other types (AsciiType, UTF8Type, IntegerType) work just fine.




[jira] [Commented] (CASSANDRA-2542) InvalidRequestException

Posted by "Jonathan Ellis (JIRA)" <ji...@apache.org>.
    [ https://issues.apache.org/jira/browse/CASSANDRA-2542?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13023212#comment-13023212 ] 

Jonathan Ellis commented on CASSANDRA-2542:
-------------------------------------------

And what is IRE.why (the why field of the InvalidRequestException)?

[jira] [Commented] (CASSANDRA-2542) InvalidRequestException

Posted by "Timu Eren (JIRA)" <ji...@apache.org>.
    [ https://issues.apache.org/jira/browse/CASSANDRA-2542?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13023227#comment-13023227 ] 

Timu Eren commented on CASSANDRA-2542:
--------------------------------------

Yes, I see this message, but I don't pass any row name in the map reduce job. I use column names (subcomparator=UTF8Type) instead of a SliceRange for the SlicePredicate and I still get the IRE, so who is passing invalid row names if I don't? And the same code with the same data works if I use IntegerType for the comparator instead of LongType.

More readable code is here: http://pastebin.com/dVYGvy8A
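
For context, with a super column family the names in a top-level SlicePredicate are validated against the comparator (LongType here), not the subcomparator. A minimal sketch of a by-name predicate that would satisfy LongType, assuming the 0.7-era Thrift classes and org.apache.cassandra.utils.ByteBufferUtil are on the classpath:

SlicePredicate predicate = new SlicePredicate();
// Under comparator=LongType each top-level name must be an 8-byte long;
// ByteBuffer.wrap("10".getBytes()) would be a 2-byte value and be rejected.
predicate.addToColumn_names(ByteBufferUtil.bytes(10L)); // super column 10
predicate.addToColumn_names(ByteBufferUtil.bytes(11L)); // super column 11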

[jira] [Resolved] (CASSANDRA-2542) InvalidRequestException

Posted by "Jonathan Ellis (JIRA)" <ji...@apache.org>.
     [ https://issues.apache.org/jira/browse/CASSANDRA-2542?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Jonathan Ellis resolved CASSANDRA-2542.
---------------------------------------

    Resolution: Invalid

"Expected 8 or 0 byte long"

You are passing a non-Long value to something that expects a Long.
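
Put differently, a value validated by LongType must be either empty (0 bytes, a legal slice bound) or exactly 8 bytes, and the (2) in the reported error is the length of the rejected value. A quick illustration, assuming Cassandra 0.7's org.apache.cassandra.utils.ByteBufferUtil:

ByteBuffer ok = ByteBufferUtil.bytes(10L);            // 8 bytes: a valid LongType value
ByteBuffer alsoOk = ByteBufferUtil.EMPTY_BYTE_BUFFER; // 0 bytes: valid as a slice bound
ByteBuffer bad = ByteBuffer.wrap("10".getBytes());    // 2 bytes: "Expected 8 or 0 byte long (2)"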

[jira] [Commented] (CASSANDRA-2542) InvalidRequestException

Posted by "Timu Eren (JIRA)" <ji...@apache.org>.
    [ https://issues.apache.org/jira/browse/CASSANDRA-2542?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13023217#comment-13023217 ] 

Timu Eren commented on CASSANDRA-2542:
--------------------------------------

I have absolutely no idea why I get this exception; using column names instead of a SliceRange for the SlicePredicate does not change it at all.

[jira] [Updated] (CASSANDRA-2542) InvalidRequestException

Posted by "Timu Eren (JIRA)" <ji...@apache.org>.
     [ https://issues.apache.org/jira/browse/CASSANDRA-2542?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Timu Eren updated CASSANDRA-2542:
---------------------------------

    Description: 
I get an InvalidRequestException when executing map reduce jobs on a super column family if I set the comparator to LongType; other types (AsciiType, UTF8Type, IntegerType) work just fine.

Here are the reproduction details.


create column family
     SuperTest
with
     column_type=Super and
     comparator=LongType and
     subcomparator=UTF8Type and
     default_validation_class=UTF8Type;


list SuperTest;

Using default limit of 100
-------------------
RowKey: 2
=> (super_column=10,
     (column=1, value=1, timestamp=1303459440853000))
=> (super_column=11,
     (column=1, value=1, timestamp=1303459437632000))
=> (super_column=12,
     (column=1, value=1, timestamp=1303459432652000))
=> (super_column=13,
     (column=1, value=1, timestamp=1303459435102000))
-------------------
RowKey: 1
=> (super_column=10,
     (column=1, value=1, timestamp=1303459423202000))
=> (super_column=11,
     (column=1, value=1, timestamp=1303459419112000))
=> (super_column=12,
     (column=deneme, value=1, timestamp=1303459362702000))
=> (super_column=13,
     (column=deneme, value=1, timestamp=1303459382023000))
=> (super_column=15,
     (column=1, value=1, timestamp=1303459426402000))

2 Rows Returned.


== Test class ==
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.SortedMap;

import org.apache.cassandra.hadoop.ColumnFamilyInputFormat;
import org.apache.cassandra.hadoop.ColumnFamilyOutputFormat;
import org.apache.cassandra.hadoop.ConfigHelper;
import org.apache.cassandra.avro.Mutation;
import org.apache.cassandra.thrift.SliceRange;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.db.IColumn;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;


class SuperColumnTest extends Configured implements Tool {

    private static final Logger logger = LoggerFactory.getLogger(SuperColumnTest.class);

    public static void main(String[] args) throws Exception {
        ToolRunner.run(new Configuration(), new SuperColumnTest(), args);
        System.exit(0);
    }

    public static class SuperColumnTestMapper extends Mapper<ByteBuffer, SortedMap<ByteBuffer, IColumn>, Text, IntWritable> {
        private static final Logger logger = LoggerFactory.getLogger(SuperColumnTestMapper.class);

        public void map(ByteBuffer key, SortedMap<ByteBuffer, IColumn> columns, Context context) throws IOException, InterruptedException {
            // Just log each row key and how many (super) columns it carries.
            logger.info(String.format("Key: %s, size: %s", ByteBufferUtil.string(key), columns.size()));
        }
    }

    public static class SuperColumnTestReducer extends Reducer<Text, IntWritable, ByteBuffer, List<Mutation>> {
        public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            // Intentionally empty: the job only needs to read the input column family.
        }
    }

    public int run(String[] args) throws Exception {
        Job job = new Job(this.getConf(), "SuperColumnMapReduceTest");
        job.setJarByClass(SuperColumnTest.class);
        job.setMapperClass(SuperColumnTestMapper.class);
        job.setReducerClass(SuperColumnTestReducer.class);

        job.setOutputFormatClass(ColumnFamilyOutputFormat.class);
        ConfigHelper.setOutputColumnFamily(job.getConfiguration(), "xxxx", "SuperTest");

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        job.setOutputKeyClass(ByteBuffer.class);
        job.setOutputValueClass(List.class);
        job.setInputFormatClass(ColumnFamilyInputFormat.class);

        ConfigHelper.setInputColumnFamily(job.getConfiguration(), "xxxx", "SuperTest");
        ConfigHelper.setRpcPort(job.getConfiguration(), "9160");
        ConfigHelper.setInitialAddress(job.getConfiguration(), "xxxx");
        ConfigHelper.setPartitioner(job.getConfiguration(), "org.apache.cassandra.dht.RandomPartitioner");

        // Select every super column. Despite the names, startKey/endKey are slice
        // bounds over super column names (LongType here), not row keys; empty
        // buffers are valid zero-byte bounds for any comparator.
        SlicePredicate sp = new SlicePredicate();

        ByteBuffer startKey = ByteBuffer.wrap("".getBytes());
        ByteBuffer endKey = ByteBuffer.wrap("".getBytes());
        SliceRange range = new SliceRange();
        range.setStart(startKey);
        range.setFinish(endKey);
        range.setCount(Integer.MAX_VALUE);

        sp.setSlice_range(range);

        ConfigHelper.setInputSlicePredicate(job.getConfiguration(), sp);

        long start = System.currentTimeMillis();
        job.waitForCompletion(true);
        logger.info("Job Finished in " + (System.currentTimeMillis() - start) / 1000.0 + " seconds");

        return 0;
    }
}

java.lang.RuntimeException: InvalidRequestException(why:Expected 8 or 0 byte long (2))
	at org.apache.cassandra.hadoop.ColumnFamilyRecordReader$RowIterator.maybeInit(ColumnFamilyRecordReader.java:260)
	at org.apache.cassandra.hadoop.ColumnFamilyRecordReader$RowIterator.computeNext(ColumnFamilyRecordReader.java:275)
	at org.apache.cassandra.hadoop.ColumnFamilyRecordReader$RowIterator.computeNext(ColumnFamilyRecordReader.java:172)
	at com.google.common.collect.AbstractIterator.tryToComputeNext(AbstractIterator.java:136)
	at com.google.common.collect.AbstractIterator.hasNext(AbstractIterator.java:131)
	at org.apache.cassandra.hadoop.ColumnFamilyRecordReader.nextKeyValue(ColumnFamilyRecordReader.java:131)
	at org.apache.hadoop.mapred.MapTask$NewTrackingRecordReader.nextKeyValue(MapTask.java:423)
	at org.apache.hadoop.mapreduce.MapContext.nextKeyValue(MapContext.java:67)
	at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:143)
	at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:621)
	at org.apache.hadoop.mapred.MapTask.run(MapTask.java:305)
	at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:177)
Caused by: InvalidRequestException(why:Expected 8 or 0 byte long (2))
	at org.apache.cassandra.thrift.Cassandra$get_range_slices_result.read(Cassandra.java:12088)
	at org.apache.cassandra.thrift.Cassandra$Client.recv_get_range_slices(Cassandra.java:732)
	at org.apache.cassandra.thrift.Cassandra$Client.get_range_slices(Cassandra.java:704)
	at org.apache.cassandra.hadoop.ColumnFamilyRecordReader$RowIterator.maybeInit(ColumnFamilyRecordReader.java:238)
	... 11 more

  was:
I get an InvalidRequestException when executing map reduce jobs on a super column family if I set the comparator to LongType; other types (AsciiType, UTF8Type, IntegerType) work just fine.





[jira] [Commented] (CASSANDRA-2542) InvalidRequestException

Posted by "Timu Eren (JIRA)" <ji...@apache.org>.
    [ https://issues.apache.org/jira/browse/CASSANDRA-2542?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13023231#comment-13023231 ] 

Timu Eren commented on CASSANDRA-2542:
--------------------------------------

OK, that's my folly. I confused the row key with the column name; I am sorry for that.
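
For anyone landing here later: under this schema the row key is an arbitrary byte string, while the top-level (super) column name is what LongType validates. A hedged sketch of the distinction, assuming the 0.7-era Thrift types and ByteBufferUtil:

ColumnParent parent = new ColumnParent("SuperTest");
ByteBuffer rowKey = ByteBufferUtil.bytes("1");     // row keys are not checked against the comparator
parent.setSuper_column(ByteBufferUtil.bytes(10L)); // super column names must be 8-byte longs here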
