You are viewing a plain text version of this content. The canonical link for it is here.
Posted to dev@thrift.apache.org by "huangxuefei (JIRA)" <ji...@apache.org> on 2017/03/13 14:41:41 UTC

[jira] [Created] (THRIFT-4117) A deadlock in TBase class static initialization

huangxuefei created THRIFT-4117:
-----------------------------------

             Summary: A deadlock in TBase class static initialization
                 Key: THRIFT-4117
                 URL: https://issues.apache.org/jira/browse/THRIFT-4117
             Project: Thrift
          Issue Type: Bug
          Components: Java - Compiler
    Affects Versions: 0.9.3
            Reporter: huangxuefei


"THRIFT-1618: synchronize access to hashtable in FieldMetaData" may cause another deadlock problem:

The case arises when saving a Thrift object in Parquet format:

One thread, saving to Parquet, holds the FieldMetaData class lock (see the stack below, lock <0x00000004c7a99350>) but waits for the JVM native newInstance0 lock (see sun.reflect.NativeConstructorAccessorImpl.newInstance0 in the stack).

Another thread initializes a TBase class via reflection (e.g. StaticInfo.class.newInstance(), where StaticInfo is a subclass of TBase); it holds the JVM native newInstance0 lock but waits for the FieldMetaData class lock.

"Executor task launch worker-1" #75 daemon prio=5 os_prio=0 tid=0x0000000000b73000 nid=0x16cf4 in Object.wait() [0x00007fe890822000]
   java.lang.Thread.State: RUNNABLE
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
	at java.lang.Class.newInstance(Class.java:442)
	at org.apache.thrift.meta_data.FieldMetaData.getStructMetaDataMap(FieldMetaData.java:61)
	- locked <0x00000004c7a99350> (a java.lang.Class for org.apache.thrift.meta_data.FieldMetaData)
	at com.twitter.elephantbird.thrift.TStructDescriptor.build(TStructDescriptor.java:116)
	at com.twitter.elephantbird.thrift.TStructDescriptor.getInstance(TStructDescriptor.java:105)
	- locked <0x00000004c7a4a768> (a java.util.HashMap)
	at com.twitter.elephantbird.thrift.TStructDescriptor$Field.<init>(TStructDescriptor.java:219)
	at com.twitter.elephantbird.thrift.TStructDescriptor$Field.<init>(TStructDescriptor.java:181)
	at com.twitter.elephantbird.thrift.TStructDescriptor$Field.<init>(TStructDescriptor.java:148)
	at com.twitter.elephantbird.thrift.TStructDescriptor.build(TStructDescriptor.java:123)
	at com.twitter.elephantbird.thrift.TStructDescriptor.getInstance(TStructDescriptor.java:105)
	- locked <0x00000004c7a4a768> (a java.util.HashMap)
	at parquet.thrift.ThriftSchemaConverter$ThriftStructConverter.toStructType(ThriftSchemaConverter.java:68)
	at parquet.thrift.ThriftSchemaConverter.toStructType(ThriftSchemaConverter.java:62)
	at parquet.hadoop.thrift.ThriftWriteSupport.init(ThriftWriteSupport.java:83)
	at parquet.hadoop.thrift.ThriftWriteSupport.init(ThriftWriteSupport.java:106)
	at parquet.hadoop.LazyRecordWriter.lazyInitialize(LazyRecordWriter.java:57)
	at parquet.hadoop.LazyRecordWriter.write(LazyRecordWriter.java:93)
	at parquet.hadoop.LazyRecordWriter.write(LazyRecordWriter.java:19)
	at org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsNewAPIHadoopDataset$1$$anonfun$12$$anonfun$apply$4.apply$mcV$sp(PairRDDFunctions.scala:1056)
	at org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsNewAPIHadoopDataset$1$$anonfun$12$$anonfun$apply$4.apply(PairRDDFunctions.scala:1054)
	at org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsNewAPIHadoopDataset$1$$anonfun$12$$anonfun$apply$4.apply(PairRDDFunctions.scala:1054)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1230)
	at org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsNewAPIHadoopDataset$1$$anonfun$12.apply(PairRDDFunctions.scala:1062)
	at org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsNewAPIHadoopDataset$1$$anonfun$12.apply(PairRDDFunctions.scala:1034)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:68)
	at org.apache.spark.scheduler.Task.run(Task.scala:90)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:253)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
	at java.lang.Thread.run(Thread.java:745)


You can simulate the deadlock with the following code.

// StaticTest.java
package test.staticinit;
import java.util.ArrayList;
import java.util.List;
public class StaticTest
{
    // Deadlock-reproduction driver. Two worker threads call
    // StaticInfoHolder.get(StaticInfo.class), which enters the holder's
    // synchronized method first and then triggers StaticInfo's static
    // initialization via clazz.newInstance(). Meanwhile the main thread
    // triggers StaticInfo's static initialization directly, and that
    // initializer calls back into the synchronized StaticInfoHolder.add --
    // the opposite acquisition order, so the program is expected to hang
    // ("finish" is never printed).
    public static void main(String[] args)
        throws IllegalAccessException, InstantiationException
    {
        List<Thread> list = new ArrayList<Thread>(10);
        for(int i=0;i<2;i++){
            list.add(new Thread(){
                @Override
                public void run()
                {
                    // Acquires StaticInfoHolder's monitor first, then blocks
                    // on StaticInfo's class-initialization lock inside
                    // StaticInfoHolder.get -> clazz.newInstance().
                    StaticInfoHolder.get(StaticInfo.class).size();
                }
            });
        }
        for (Thread t:list){
            t.start();
        }
        // Acquires StaticInfo's class-initialization lock first; the static
        // initializer then blocks on StaticInfoHolder's monitor (via add) --
        // the reverse order of the worker threads above.
        StaticInfo.class.newInstance();
        System.out.println("finish");
    }
}

//StaticInfo.java
package test.staticinit;
import java.util.HashMap;
import java.util.Map;
public class StaticInfo
{
    // Assigned by the static initializer below; stands in for the generated
    // metaDataMap of a TBase subclass in the real Thrift scenario.
    public static final Map<String,String> map;
    static {
        // The sleep widens the race window so another thread can enter
        // StaticInfoHolder.get() while this class is mid-initialization
        // (i.e. while this thread holds StaticInfo's class-init lock).
        try
        {
            Thread.sleep(5000);
        }
        catch (InterruptedException e)
        {
            e.printStackTrace();
        }
        map=new HashMap<String,String>();
        // Calls into StaticInfoHolder's synchronized method while still
        // holding StaticInfo's class-initialization lock -- one half of the
        // lock-ordering deadlock.
        StaticInfoHolder.add(StaticInfo.class,map);
    }
}

//StaticInfoHolder.java
package test.staticinit;
import java.util.HashMap;
import java.util.Map;
public class StaticInfoHolder
{
    // Per-class registry guarded by this class's monitor (both accessors
    // below are static synchronized). Mirrors the structMap in
    // org.apache.thrift.meta_data.FieldMetaData after THRIFT-1618.
    private static Map<Class<?>,Map<String,String>> map;
    static{
        map=new HashMap<Class<?>,Map<String,String>>();
    }
    // Called from StaticInfo's static initializer; blocks if another thread
    // holds this class's monitor (e.g. is inside get() below).
    public static synchronized void add(Class<?> clazz,Map<String,String> pm){
        map.put(clazz,pm);
    }
    public static synchronized Map<String,String> get(Class<?> clazz){
        if(!map.containsKey(clazz)){
            try{
                // While holding this class's monitor, trigger clazz's static
                // initialization. If another thread is already initializing
                // clazz and that initializer needs this monitor (via add),
                // both threads block forever -- the other half of the
                // deadlock.
                clazz.newInstance();
            } catch (InstantiationException e){
                throw new RuntimeException("InstantiationException for TBase class: " + clazz.getName() + ", message: " + e.getMessage());
            } catch (IllegalAccessException e){
                throw new RuntimeException("IllegalAccessException for TBase class: " + clazz.getName() + ", message: " + e.getMessage());
            }
        }
        return map.get(clazz);
    }
}



--
This message was sent by Atlassian JIRA
(v6.3.15#6346)