Posted to user@spark.apache.org by "Lau, Kawing (GE Global Research)" <ka...@ge.com> on 2015/03/13 00:45:41 UTC

Error running rdd.first on Hadoop

Hi 

I was running spark-1.3.0-snapshot:
rdd = sc.textFile("hdfs://X.X.X.X/data")
rdd.first() 


Then I got this error:
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/pyspark/rdd.py", line 1243, in first
    rs = self.take(1)
  File "/pyspark/rdd.py", line 1195, in take
    totalParts = self._jrdd.partitions().size()
  File "/spark/python/lib/py4j-0.8.2.1-src.zip/py4j/java_gateway.py", line
538, in __call__
  File "/spark/python/lib/py4j-0.8.2.1-src.zip/py4j/protocol.py", line
300, in get_return_value
py4j.protocol.Py4JJavaError: An error occurred while calling
o28.partitions.
: java.net.ConnectException: Call From etl1.gphd.local/X.X.X.X to
etl1.gphd.local:8020 failed on connection exception:
java.net.ConnectException: Connection refused; For more details see:
http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at 
sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAcce
ssorImpl.java:57)
	at 
sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstru
ctorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:783)
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:730)
	at org.apache.hadoop.ipc.Client.call(Client.java:1351)
	at org.apache.hadoop.ipc.Client.call(Client.java:1300)
	at 
org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.ja
va:206)
	at com.sun.proxy.$Proxy13.getDelegationToken(Unknown Source)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:5
7)
	at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImp
l.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at 
org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocat
ionHandler.java:186)
	at 
org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHan
dler.java:102)
	at com.sun.proxy.$Proxy13.getDelegationToken(Unknown Source)
	at 
org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getDel
egationToken(ClientNamenodeProtocolTranslatorPB.java:805)
	at org.apache.hadoop.hdfs.DFSClient.getDelegationToken(DFSClient.java:847)
	at 
org.apache.hadoop.hdfs.DistributedFileSystem.getDelegationToken(Distributed
FileSystem.java:1318)
	at 
org.apache.hadoop.fs.FileSystem.collectDelegationTokens(FileSystem.java:526
)
	at 
org.apache.hadoop.fs.FileSystem.addDelegationTokens(FileSystem.java:504)
	at 
org.apache.hadoop.mapreduce.security.TokenCache.obtainTokensForNamenodesInt
ernal(TokenCache.java:121)
	at 
org.apache.hadoop.mapreduce.security.TokenCache.obtainTokensForNamenodesInt
ernal(TokenCache.java:100)
	at 
org.apache.hadoop.mapreduce.security.TokenCache.obtainTokensForNamenodes(To
kenCache.java:80)
	at 
org.apache.hadoop.mapred.FileInputFormat.listStatus(FileInputFormat.java:20
2)
	at 
org.apache.hadoop.mapred.FileInputFormat.getSplits(FileInputFormat.java:270
)
	at org.apache.spark.rdd.HadoopRDD.getPartitions(HadoopRDD.scala:203)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
	at scala.Option.getOrElse(Option.scala:120)
	at org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
	at 
org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:
32)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
	at scala.Option.getOrElse(Option.scala:120)
	at org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
	at 
org.apache.spark.api.java.JavaRDDLike$class.partitions(JavaRDDLike.scala:56
)
	at org.apache.spark.api.java.JavaRDD.partitions(JavaRDD.scala:32)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:5
7)
	at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImp
l.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:231)
	at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:379)
	at py4j.Gateway.invoke(Gateway.java:259)
	at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:133)
	at py4j.commands.CallCommand.execute(CallCommand.java:79)
	at py4j.GatewayConnection.run(GatewayConnection.java:207)
	at java.lang.Thread.run(Thread.java:744)
Caused by: java.net.ConnectException: Connection refused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:735)
	at 
org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:
206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:529)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:493)
	at 
org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:547)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:642)
	at org.apache.hadoop.ipc.Client$Connection.access$2600(Client.java:314)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1399)
	at org.apache.hadoop.ipc.Client.call(Client.java:1318)
	... 43 more

Does anyone have an idea how to solve this?

Sherry


---------------------------------------------------------------------
To unsubscribe, e-mail: user-unsubscribe@spark.apache.org
For additional commands, e-mail: user-help@spark.apache.org


Re: Error running rdd.first on Hadoop

Posted by Akhil Das <ak...@sigmoidanalytics.com>.
Make sure your HDFS NameNode is actually running and listening on port 8020. You can check the configured filesystem address in your core-site.xml (the fs.defaultFS property, or fs.default.name on older Hadoop versions) and use that exact host and port in the URI, like:

sc.textFile("hdfs://myhost:myport/data")
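
For example, the relevant core-site.xml entry usually looks something like the following. The hostname below is only a placeholder taken from your stack trace; use whatever your cluster is actually configured with:

<property>
  <name>fs.defaultFS</name>
  <value>hdfs://etl1.gphd.local:8020</value>
</property>

You can also take Spark out of the picture with a plain socket test from the machine running the driver. This is just a quick sketch, assuming the host and port shown in the exception message:

import socket

# Attempt a raw TCP connection to the NameNode RPC port.
# A "Connection refused" socket error here means nothing is
# listening on that host/port, which is exactly what the
# Hadoop stack trace above is reporting.
sock = socket.create_connection(("etl1.gphd.local", 8020), timeout=5)
sock.close()
print("NameNode RPC port is reachable")

If the socket test fails, the NameNode is down or bound to a different port or interface, and no change on the Spark side will fix it.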


Thanks
Best Regards
