You are viewing a plain text version of this content. The canonical link for it is here.
Posted to issues@carbondata.apache.org by GitBox <gi...@apache.org> on 2019/01/26 00:52:46 UTC

[GitHub] qiuchenjian commented on a change in pull request #3102: [CARBONDATA-3272]fix ArrayIndexOutOfBoundsException of horizontal compaction during update, when cardinality changes within a segment

qiuchenjian commented on a change in pull request #3102: [CARBONDATA-3272]fix ArrayIndexOutOfBoundsException of horizontal compaction during update, when cardinality changes within a segment
URL: https://github.com/apache/carbondata/pull/3102#discussion_r251178424
 
 

 ##########
 File path: processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionExecutor.java
 ##########
 @@ -140,25 +139,72 @@ public CarbonCompactionExecutor(Map<String, TaskBlockInfo> segmentMapping,
               || !CarbonCompactionUtil.isSorted(listMetadata.get(0));
       for (String task : taskBlockListMapping) {
         list = taskBlockInfo.getTableBlockInfoList(task);
-        Collections.sort(list);
-        LOGGER.info(
-            "for task -" + task + "- in segment id -" + segmentId + "- block size is -" + list
-                .size());
-        queryModel.setTableBlockInfos(list);
-        if (sortingRequired) {
-          resultList.get(CarbonCompactionUtil.UNSORTED_IDX).add(
-              new RawResultIterator(executeBlockList(list, segmentId, task, configuration),
-                  sourceSegProperties, destinationSegProperties, false));
-        } else {
-          resultList.get(CarbonCompactionUtil.SORTED_IDX).add(
-              new RawResultIterator(executeBlockList(list, segmentId, task, configuration),
-                  sourceSegProperties, destinationSegProperties, false));
+        // during update there may be a chance that the cardinality may change within the segment
+        // which may lead to failure while converting the row, so get all the blocks present in a
+        // task and then split into multiple lists of same key length and create separate
+        // RawResultIterator for each list of same key length. If all the blocks have same keylength
+        // , then make a single RawResultIterator for all the blocks
+        List<List<TableBlockInfo>> listOfTableBlocksBasedOnKeyLength =
+            getListOfTableBlocksBasedOnKeyLength(list);
+        for (List<TableBlockInfo> tableBlockInfoList : listOfTableBlocksBasedOnKeyLength) {
+          Collections.sort(tableBlockInfoList);
+          LOGGER.info(
+              "for task -" + task + "- in segment id -" + segmentId + "- block size is -" + list
+                  .size());
+          queryModel.setTableBlockInfos(tableBlockInfoList);
+          if (sortingRequired) {
+            resultList.get(CarbonCompactionUtil.UNSORTED_IDX).add(new RawResultIterator(
+                executeBlockList(tableBlockInfoList, segmentId, task, configuration),
+                getSourceSegmentProperties(
+                    Collections.singletonList(tableBlockInfoList.get(0).getDataFileFooter())),
+                destinationSegProperties, false));
+          } else {
+            resultList.get(CarbonCompactionUtil.SORTED_IDX).add(new RawResultIterator(
+                executeBlockList(tableBlockInfoList, segmentId, task, configuration),
+                getSourceSegmentProperties(
+                    Collections.singletonList(tableBlockInfoList.get(0).getDataFileFooter())),
+                destinationSegProperties, false));
+          }
         }
       }
     }
     return resultList;
   }
 
+  /**
+   * This method returns the List of TableBlockInfoList, where each listOfTableBlockInfos will have
+   * same keySize
+   * @param tableBlockInfos List of tableBlock Infos
+   * @return
+   */
+  private List<List<TableBlockInfo>> getListOfTableBlocksBasedOnKeyLength(
+      List<TableBlockInfo> tableBlockInfos) {
+    List<List<TableBlockInfo>> listOfTableBlockInfoListOnKeySize = new ArrayList<>();
+    // create a map of keySizeInBytes got from dataFileFooter
+    Map<Integer, List<TableBlockInfo>> keySizeToTableBlockInfoMap = new HashMap<>();
+    // iterate through tableBlockInfos
+    for (TableBlockInfo tableBlock : tableBlockInfos) {
+      // get the keySizeInBytes for the dataFileFooter
+      int keySizeInBytes =
+          getSourceSegmentProperties(Collections.singletonList(tableBlock.getDataFileFooter()))
+              .getDimensionKeyGenerator().getKeySizeInBytes();
+      // get the value for the key keySizeInBytes
+      List<TableBlockInfo> tempBlockInfoList = keySizeToTableBlockInfoMap.get(keySizeInBytes);
 
 Review comment:
   better to use keySizeToTableBlockInfoMap.containsKey

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services