Posted to user@phoenix.apache.org by "sunfl@certusnet.com.cn" <su...@certusnet.com.cn> on 2015/01/04 06:31:05 UTC

Mapreduce job exception when using Apache Spark to query phoenix tables

Hi all,
When trying to run a mapreduce job over a Phoenix table using Apache Spark, we got the following error. We guess it is caused by an HBase client scanner timeout exception?
Do we need to configure something in hbase-site.xml? Thanks for any available advice.

By the way, could anyone explain the current mapreduce framework introduced by PHOENIX-1454? We note that the design transforms the query plan into HBase scans instead of
operating on HFiles directly. We ask because we are researching the use of HBase Scan and HBase snapshot scan with Apache Spark to read HBase tables more efficiently, and the snapshot approach actually reads from HFiles.

Is anyone interested in designing and implementing snapshot scans for Phoenix tables? Or are there any previous discussions along these lines?

Thanks,
Sun.

14/12/31 16:59:46 ERROR Executor: Exception in task 17.0 in stage 137.0 (TID 3356) 
java.lang.RuntimeException: org.apache.phoenix.exception.PhoenixIOException: 61234ms passed since the last invocation, timeout is currently set to 60000 
at com.google.common.base.Throwables.propagate(Throwables.java:160) 
at org.apache.phoenix.pig.hadoop.PhoenixRecordReader.nextKeyValue(PhoenixRecordReader.java:138) 
at org.apache.spark.rdd.NewHadoopRDD$$anon$1.hasNext(NewHadoopRDD.scala:145) 
at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:39) 
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327) 
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327) 
at org.apache.spark.sql.execution.Aggregate$$anonfun$execute$1$$anonfun$7.apply(Aggregate.scala:156) 
at org.apache.spark.sql.execution.Aggregate$$anonfun$execute$1$$anonfun$7.apply(Aggregate.scala:151) 
at org.apache.spark.rdd.RDD$$anonfun$13.apply(RDD.scala:601) 
at org.apache.spark.rdd.RDD$$anonfun$13.apply(RDD.scala:601) 
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35) 
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:263) 
at org.apache.spark.rdd.RDD.iterator(RDD.scala:230) 
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35) 
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:263) 
at org.apache.spark.rdd.RDD.iterator(RDD.scala:230) 
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:68) 
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41) 
at org.apache.spark.scheduler.Task.run(Task.scala:56) 
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:196) 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) 
at java.lang.Thread.run(Thread.java:744) 
Caused by: org.apache.phoenix.exception.PhoenixIOException: 61234ms passed since the last invocation, timeout is currently set to 60000 
at org.apache.phoenix.util.ServerUtil.parseServerException(ServerUtil.java:108)
at org.apache.phoenix.iterate.ScanningResultIterator.next(ScanningResultIterator.java:52) 
at org.apache.phoenix.iterate.TableResultIterator.next(TableResultIterator.java:104) 
at org.apache.phoenix.iterate.LookAheadResultIterator$1.advance(LookAheadResultIterator.java:47) 
at org.apache.phoenix.iterate.LookAheadResultIterator.init(LookAheadResultIterator.java:59) 
at org.apache.phoenix.iterate.LookAheadResultIterator.peek(LookAheadResultIterator.java:73) 
at org.apache.phoenix.iterate.ConcatResultIterator.currentIterator(ConcatResultIterator.java:100) 
at org.apache.phoenix.iterate.ConcatResultIterator.next(ConcatResultIterator.java:117) 
at org.apache.phoenix.jdbc.PhoenixResultSet.next(PhoenixResultSet.java:734) 
at org.apache.phoenix.pig.hadoop.PhoenixRecordReader.nextKeyValue(PhoenixRecordReader.java:131) 
... 21 more 
Caused by: org.apache.hadoop.hbase.client.ScannerTimeoutException: 61234ms passed since the last invocation, timeout is currently set to 60000 
at org.apache.hadoop.hbase.client.ClientScanner.next(ClientScanner.java:371) 
at org.apache.phoenix.iterate.ScanningResultIterator.next(ScanningResultIterator.java:47) 
... 29 more 
Caused by: org.apache.hadoop.hbase.UnknownScannerException: org.apache.hadoop.hbase.UnknownScannerException: Name: 7571, already closed? 
at org.apache.hadoop.hbase.regionserver.HRegionServer.scan(HRegionServer.java:3145) 
at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:29587) 
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2031) 
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:108) 
at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:114) 
at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:94) 
at java.lang.Thread.run(Thread.java:722) 

at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) 
at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57) 
at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) 
at java.lang.reflect.Constructor.newInstance(Constructor.java:526) 
at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:106) 
at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:95) 
at org.apache.hadoop.hbase.protobuf.ProtobufUtil.getRemoteException(ProtobufUtil.java:304) 
at org.apache.hadoop.hbase.client.ScannerCallable.call(ScannerCallable.java:204) 
at org.apache.hadoop.hbase.client.ScannerCallable.call(ScannerCallable.java:59) 
at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithRetries(RpcRetryingCaller.java:114) 
at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithRetries(RpcRetryingCaller.java:90)




CertusNet 


Re: Mapreduce job exception when using Apache Spark to query phoenix tables

Posted by Ravi Kiran <ma...@gmail.com>.
Hi,

   You can read more about the MapReduce integration here:
http://phoenix.apache.org/phoenix_mr.html.
   A quick, simple Spark program can be found at
https://gist.github.com/mravi/444afe7f49821819c987.
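
   Roughly, the Spark side looks like the sketch below (an untested sketch
against the new MapReduce classes from PHOENIX-1454; StockWritable is a
stand-in for your own DBWritable implementation, following the
phoenix_mr.html example):

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.io.NullWritable
import org.apache.hadoop.mapreduce.Job
import org.apache.phoenix.mapreduce.PhoenixInputFormat
import org.apache.phoenix.mapreduce.util.PhoenixMapReduceUtil
import org.apache.spark.{SparkConf, SparkContext}

object PhoenixSparkRead {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("phoenix-read"))
    // hbase-site.xml must be on the classpath so the ZooKeeper quorum
    // and any scanner timeout overrides are picked up.
    val job = Job.getInstance(new Configuration())
    // Table name and query follow the phoenix_mr.html example; the
    // hypothetical StockWritable maps the selected columns onto fields.
    PhoenixMapReduceUtil.setInput(job, classOf[StockWritable], "STOCK",
      "SELECT * FROM STOCK")
    val stocks = sc.newAPIHadoopRDD(job.getConfiguration,
      classOf[PhoenixInputFormat[StockWritable]],
      classOf[NullWritable],
      classOf[StockWritable])
    println("rows: " + stocks.count())
    sc.stop()
  }
}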

   Regarding snapshot support, there is a JIRA:
https://issues.apache.org/jira/browse/PHOENIX-1550
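
   Until that lands, the HFile-reading route you mention can already be
driven from Spark through HBase's TableSnapshotInputFormat. A minimal
sketch (assuming HBase 0.98+; the snapshot name and restore dir are
placeholders):

import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Result
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.{SparkConf, SparkContext}

object SnapshotScanSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("snapshot-scan"))
    val job = Job.getInstance(HBaseConfiguration.create())
    // Reads the snapshot's HFiles directly, bypassing the region servers
    // (and with them the scanner leases that time out in your trace).
    TableSnapshotInputFormat.setInput(job, "my_snapshot",
      new Path("/tmp/snapshot-restore"))
    val rows = sc.newAPIHadoopRDD(job.getConfiguration,
      classOf[TableSnapshotInputFormat],
      classOf[ImmutableBytesWritable],
      classOf[Result])
    println("rows: " + rows.count())
    sc.stop()
  }
}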

Regards
Ravi

On Sun, Jan 4, 2015 at 11:02 AM, Ted Yu <yu...@gmail.com> wrote:

> See http://hbase.apache.org/book.html#trouble.client.scantimeout
>
> FYI
>
> On Sat, Jan 3, 2015 at 9:31 PM, sunfl@certusnet.com.cn <
> sunfl@certusnet.com.cn> wrote:
>
> > [original message and stack trace quoted above; snipped]


Re: Mapreduce job exception when using Apache Spark to query phoenix tables

Posted by Ted Yu <yu...@gmail.com>.
See http://hbase.apache.org/book.html#trouble.client.scantimeout

FYI
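
A common fix, per that page, is raising the scanner timeout; as a rough
sketch, something like this in hbase-site.xml on both the client and the
region servers (the 10-minute value is only an example; older releases name
the server-side setting hbase.regionserver.lease.period):

<property>
  <name>hbase.client.scanner.timeout.period</name>
  <value>600000</value>
</property>

Lowering hbase.client.scanner.caching can also help: smaller batches mean
the client calls next() again before the lease expires.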

On Sat, Jan 3, 2015 at 9:31 PM, sunfl@certusnet.com.cn <
sunfl@certusnet.com.cn> wrote:

> [original message and stack trace quoted above; snipped]