Posted to user@drill.apache.org by mufy <mu...@gmail.com> on 2014/11/05 06:44:55 UTC

Difficulty Querying Directories

I did the following to set up the directory structure and then issued the
queries.

[root@n67 logs]# hadoop fs -ls /bla
Found 1 items
drwxr-xr-x - root root 3 2014-11-04 15:12 /bla/blo

[root@n67 logs]# hadoop fs -ls /bla/blo
Found 3 items
drwxr-xr-x - root root 1 2014-11-04 15:12 /bla/blo/inhalt1
drwxr-xr-x - root root 1 2014-11-04 15:12 /bla/blo/inhalt2
-rwxr-xr-x 3 root root 2629 2014-11-04 15:12 /bla/blo/lighthouse_sample.json

[root@n67 logs]# hadoop fs -ls /bla/blo/inhalt1
Found 1 items
-rwxr-xr-x 3 root root 2629 2014-11-04 15:12 /bla/blo/inhalt1/lighthouse_sample.json

[root@n67 logs]# hadoop fs -ls /bla/blo/inhalt2
Found 1 items
-rwxr-xr-x 3 root root 2629 2014-11-04 15:12 /bla/blo/inhalt2/lighthouse_sample.json
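
(For completeness, the layout above was created with plain hadoop fs
commands along these lines; the sample file sits in my local working
directory:)

[root@n67 logs]# hadoop fs -mkdir -p /bla/blo/inhalt1 /bla/blo/inhalt2
[root@n67 logs]# hadoop fs -put lighthouse_sample.json /bla/blo/
[root@n67 logs]# hadoop fs -put lighthouse_sample.json /bla/blo/inhalt1/
[root@n67 logs]# hadoop fs -put lighthouse_sample.json /bla/blo/inhalt2/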

And the storage plugin is configured as follows:

{
  "type": "file",
  "enabled": true,
  "connection": "maprfs:///",
  "workspaces": {
    "root": {
      "location": "/bla",
      "writable": false,
      "defaultInputFormat": null
    },
    "tmp": {
      "location": "/tmp",
      "writable": true,
      "defaultInputFormat": null
    }
  },
  "formats": {
    "psv": {
      "type": "text",
      "extensions": ["tbl"],
      "delimiter": "|"
    },
    "csv": {
      "type": "text",
      "extensions": ["csv"],
      "delimiter": ","
    },
    "tsv": {
      "type": "text",
      "extensions": ["tsv"],
      "delimiter": "\t"
    },
    "parquet": {
      "type": "parquet"
    },
    "json": {
      "type": "json"
    },
    "maprdb": {
      "type": "maprdb"
    }
  }
}
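
As I understand it, the .json extension is already mapped by the "json"
format above, so leaving the workspace's defaultInputFormat at null should
be fine here; it would only matter for files lacking a recognized
extension, in which case one could presumably set it instead, e.g.:

  "root": {
    "location": "/bla",
    "writable": false,
    "defaultInputFormat": "json"
  }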

0: jdbc:drill:zk=n67:5181> use dfs.root;
+------------+------------+
| ok | summary |
+------------+------------+
| true | Default schema changed to 'dfs.root' |
+------------+------------+
1 row selected (0.047 seconds)
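
My understanding of name resolution is that a table name is taken as a
path under the workspace root (/bla here), and naming a directory scans
everything beneath it, with dir0 exposing the first-level subdirectory of
each file. So I would expect the following to be two views of the same
data, the second narrowed to one subdirectory:

select * from blo;                      -- scans /bla/blo and both inhalt subdirectories
select * from dfs.root.`blo/inhalt1`;   -- scans only /bla/blo/inhalt1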


0: jdbc:drill:zk=n67:5181> select * from blo;
+------------+------------+------------+
| dir0 | source | data |
+------------+------------+------------+
| null | {"name":"Lighthouse","version":"1.0","timestamp":"2014-09-11T16:09:17 UTC"} | [{"class":"warranties","fields":["id","ends_at","started_at","device_id","company_id","created_at","updated_at","country_code"],"records":[["15","2016-10-09","2013-09-05","60" |
java.lang.IllegalStateException
        at org.apache.drill.exec.vector.complex.RepeatedMapVector.put(RepeatedMapVector.java:378)
        at org.apache.drill.exec.vector.complex.RepeatedMapVector.load(RepeatedMapVector.java:359)
        at org.apache.drill.exec.record.RecordBatchLoader.load(RecordBatchLoader.java:91)
        at org.apache.drill.jdbc.DrillCursor.next(DrillCursor.java:102)
        at net.hydromatic.avatica.AvaticaResultSet.next(AvaticaResultSet.java:187)
        at sqlline.SqlLine$IncrementalRows.hasNext(SqlLine.java:2503)
        at sqlline.SqlLine$TableOutputFormat.print(SqlLine.java:2148)
        at sqlline.SqlLine.print(SqlLine.java:1809)
        at sqlline.SqlLine$Commands.execute(SqlLine.java:3766)
        at sqlline.SqlLine$Commands.sql(SqlLine.java:3663)
        at sqlline.SqlLine.dispatch(SqlLine.java:889)
        at sqlline.SqlLine.begin(SqlLine.java:763)
        at sqlline.SqlLine.start(SqlLine.java:498)
        at sqlline.SqlLine.main(SqlLine.java:460)
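
To rule out the file contents themselves, my next step would be to query a
single file directly, bypassing the directory scan, with something like:

select * from dfs.root.`blo/lighthouse_sample.json`;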


0: jdbc:drill:zk=n67:5181> select dir0 from blo;
Query failed: Failure while running fragment.

Error: exception while executing query: Failure while executing query.
(state=,code=0)


0: jdbc:drill:zk=n67:5181> select dir0 from dfs.root.`blo`;
Query failed: Failure while running fragment.

Error: exception while executing query: Failure while executing query.
(state=,code=0)
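
Narrowing further, if the per-subdirectory scans below succeed while the
query on blo itself fails, the mixture of a file and directories at the
same level under /bla/blo would seem to be the trigger:

select count(*) from dfs.root.`blo/inhalt1`;
select count(*) from dfs.root.`blo/inhalt2`;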

I need to understand why this is failing in my case (what it is that I'm
doing wrong). Please advise.

---
Mufeed Usman
My LinkedIn <http://www.linkedin.com/pub/mufeed-usman/28/254/400> | My
Social Cause <http://www.vision2016.org.in/> | My Blogs : LiveJournal
<http://mufeed.livejournal.com>

Re: Difficulty Querying Directories

Posted by mufy <mu...@gmail.com>.
Nobody :-)?


---
Mufeed Usman
My LinkedIn <http://www.linkedin.com/pub/mufeed-usman/28/254/400> | My
Social Cause <http://www.vision2016.org.in/> | My Blogs : LiveJournal
<http://mufeed.livejournal.com>



