Posted to user@phoenix.apache.org by "Cox,Lisa" <co...@oclc.org> on 2014/07/09 15:40:13 UTC

Unable to insert after creating secondary index

Hi,

I am new to Phoenix and was just trying some simple things to familiarize myself. I was able to create a table and insert into it. I then tried to create a secondary index on the table, and from that point on I was unable to insert.

I am running:
hbase-0.94.15-cdh4.6.0
hadoop-2.0.0-cdh4.6.0

I have a very small setup with only one region server.
I ended up downloading the source and recompiling with the hadoop-2 profile.
I edited the examples/STOCK_SYMBOL.sql script to add a statement that creates a secondary index; the stack trace from running it is below. The client just hangs and never returns to a command prompt, so I have to kill it. I have tried this over and over, and also with the example table us_population, adding an index on the population field.
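
For reference, the relevant part of the edited script looks roughly like this (a sketch reconstructed from the statements echoed in the sqlline output below; only the CREATE INDEX line is new relative to the stock example, and the rest of the script is unchanged):

-- creates stock table with single row (unchanged from the example)
CREATE TABLE IF NOT EXISTS STOCK_SYMBOL (SYMBOL VARCHAR NOT NULL PRIMARY KEY, COMPANY VARCHAR);
-- added statement: secondary index on COMPANY
CREATE INDEX IF NOT EXISTS STOCK_CMPY ON STOCK_SYMBOL(COMPANY);
-- first upsert, which is where the failure appears
UPSERT INTO STOCK_SYMBOL VALUES ('CRM','SalesForce.com');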

Any advice?
(hbase-site.xml at the bottom)

Thanks much,
Lisa Cox
>sqlline.py finddev07.dev.oclc.org:18661:/hbase ../examples/STOCK_SYMBOL.sql
Setting property: [isolation, TRANSACTION_READ_COMMITTED]
Setting property: [run, ../examples/STOCK_SYMBOL.sql]
issuing: !connect jdbc:phoenix:finddev07.dev.oclc.org:18661:/hbase none none org.apache.phoenix.jdbc.PhoenixDriver
Connecting to jdbc:phoenix:finddev07.dev.oclc.org:18661:/hbase
14/07/09 09:08:29 WARN conf.Configuration: dfs.df.interval is deprecated. Instead, use fs.df.interval
14/07/09 09:08:29 WARN conf.Configuration: hadoop.native.lib is deprecated. Instead, use io.native.lib.available
14/07/09 09:08:29 WARN conf.Configuration: fs.default.name is deprecated. Instead, use fs.defaultFS
14/07/09 09:08:29 WARN conf.Configuration: topology.script.number.args is deprecated. Instead, use net.topology.script.number.args
14/07/09 09:08:29 WARN conf.Configuration: dfs.umaskmode is deprecated. Instead, use fs.permissions.umask-mode
14/07/09 09:08:29 WARN conf.Configuration: topology.node.switch.mapping.impl is deprecated. Instead, use net.topology.node.switch.mapping.impl
14/07/09 09:08:30 WARN conf.Configuration: fs.default.name is deprecated. Instead, use fs.defaultFS
14/07/09 09:08:31 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
14/07/09 09:08:31 WARN conf.Configuration: fs.default.name is deprecated. Instead, use fs.defaultFS
Connected to: Phoenix (version 3.0)
Driver: org.apache.phoenix.jdbc.PhoenixDriver (version 3.0)
Autocommit status: true
Transaction isolation: TRANSACTION_READ_COMMITTED
Building list of tables and columns for tab-completion (set fastconnect to true to skip)...
53/53 (100%) Done

-- creates stock table with single row
CREATE TABLE IF NOT EXISTS STOCK_SYMBOL (SYMBOL VARCHAR NOT NULL PRIMARY KEY, COMPANY VARCHAR);
14/07/09 09:08:32 WARN conf.Configuration: fs.default.name is deprecated. Instead, use fs.defaultFS
No rows affected (1.414 seconds)
2/6          CREATE INDEX IF NOT EXISTS STOCK_CMPY ON STOCK_SYMBOL(COMPANY);
14/07/09 09:08:33 WARN conf.Configuration: fs.default.name is deprecated. Instead, use fs.defaultFS
No rows affected (1.535 seconds)
3/6          UPSERT INTO STOCK_SYMBOL VALUES ('CRM','SalesForce.com');
14/07/09 09:08:35 WARN client.HConnectionManager$HConnectionImplementation: Failed all from region=STOCK_SYMBOL,,1404911312453.2c277d79150fb136786d5cc88819d22f., hostname=finddev07.dev.oclc.org, port=18659
java.util.concurrent.ExecutionException: java.io.IOException: java.io.IOException: java.lang.NullPointerException
        at java.util.concurrent.FutureTask.report(FutureTask.java:122)
        at java.util.concurrent.FutureTask.get(FutureTask.java:188)
        at org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation.processBatchCallback(HConnectionManager.java:1708)
        at org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation.processBatch(HConnectionManager.java:1560)
        at org.apache.hadoop.hbase.client.HTable.batch(HTable.java:816)
        at org.apache.phoenix.execute.MutationState.commit(MutationState.java:393)
        at org.apache.phoenix.jdbc.PhoenixConnection.commit(PhoenixConnection.java:351)
        at org.apache.phoenix.jdbc.PhoenixStatement.executeMutation(PhoenixStatement.java:229)
        at org.apache.phoenix.jdbc.PhoenixStatement.execute(PhoenixStatement.java:919)
        at sqlline.SqlLine$Commands.execute(SqlLine.java:3673)
        at sqlline.SqlLine$Commands.sql(SqlLine.java:3584)
        at sqlline.SqlLine.dispatch(SqlLine.java:821)

Caused by: java.io.IOException: java.io.IOException: java.lang.NullPointerException

        at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
        at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
        at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
        at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
        at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:90)
        at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:79)
        at org.apache.hadoop.hbase.client.ServerCallable.translateException(ServerCallable.java:256)
        at org.apache.hadoop.hbase.client.ServerCallable.withoutRetries(ServerCallable.java:231)
        at org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation$3.call(HConnectionManager.java:1544)
        at org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation$3.call(HConnectionManager.java:1532)
        at java.util.concurrent.FutureTask.run(FutureTask.java:262)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:744)
Caused by: org.apache.hadoop.ipc.RemoteException(java.io.IOException): java.io.IOException: java.lang.NullPointerException
      ... 6 more
14/07/09 09:08:36 WARN client.HConnectionManager$HConnectionImplementation: Failed all from region=STOCK_SYMBOL,,1404911312453.2c277d79150fb136786d5cc88819d22f., hostname=finddev07.dev.oclc.org, port=18659
java.util.concurrent.ExecutionException: java.io.IOException: java.io.IOException: java.lang.NullPointerException

        at java.util.concurrent.FutureTask.report(FutureTask.java:122)
        at java.util.concu

hbase-site.xml
<!--
/**
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements.  See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership.  The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License.  You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-->
<configuration>
<property>
<name>hbase.rootdir</name>
<value>hdfs://finddev07.dev.oclc.org:18658/hbase</value>
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<name>hbase.master.startup.retainassign</name>
<value>false</value>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>finddev07.dev.oclc.org</value>
</property>
<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>/data/lcox/higgins/RB_4.6.0.0/zookeeper</value>
</property>
<property>
<name>hbase.regionserver.port</name>
<value>18659</value>
</property>
<property>
<name>hbase.master.port</name>
<value>18660</value>
</property>
<property>
<name>hbase.master.info.port</name>
<value>18652</value>
</property>
<property>
<name>hbase.regionserver.info.port</name>
<value>18654</value>
</property>
<property>
<name>hbase.zookeeper.property.clientPort</name>
<value>18661</value>
</property>
<property>
<name>zookeeper.znode.parent</name>
<value>/hbase</value>
</property>
<property>
<name>hbase.rpc.timeout</name>
<value>10000</value>
</property>
<property>
<name>hbase.client.operation.timeout</name>
<value>20000</value>
</property>
<property>
<name>hbase.replication</name>
<value>true</value>
</property>
<!--See OCLC Jira HADP-415 -->
<property>
<name>replication.source.nb.capacity</name>
<value>20</value>
</property>
<!--See OCLC Jira HADP-415 -->
<property>
<name>replication.source.size.capacity</name>
<value>16777216</value>
</property>
<!--See OCLC Jira HADP-415 -->
<property>
<name>replication.source.sleepforretries</name>
<value>3000</value>
</property>
<property>
<name>hbase.master.ui.readonly</name>
<value>true</value>
</property>
<property>
<name>ipc.socket.timeout</name>
<value>1000</value>
</property>
<property>
<name>hbase.client.retries.number</name>
<value>5</value>
</property>
<property>
<name>hbase.client.pause</name>
<value>50</value>
</property>
<property>
<name>hbase.regionserver.wal.enablecompression</name>
<value>true</value>
</property>
<property>
<name>hbase.snapshot.enabled</name>
<value>true</value>
</property>
<property>
<name>hbase.snapshot.master.timeoutMillis</name>
<value>1200000</value>
</property>
<property>
<name>hbase.snapshot.region.timeout</name>
<value>1200000</value>
</property>
<property>
<name>hbase.snapshot.master.timeout.millis</name>
<value>1200000</value>
</property>
<property>
<name>hbase.regionserver.wal.codec</name>
<value>org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec</value>
</property>
</configuration>
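
For context, the Phoenix 3.0 documentation for mutable secondary indexes on HBase 0.94.x calls for a region-server property along these lines, which is presumably why the last entry appears in the file above (a minimal sketch of just that setting; the Phoenix server jar also has to be on every region server's classpath, and the region servers restarted, before the codec takes effect):

<!-- WAL codec used by Phoenix mutable secondary indexing on HBase 0.94.x -->
<property>
<name>hbase.regionserver.wal.codec</name>
<value>org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec</value>
</property>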