You are viewing a plain text version of this content. The canonical link for it is here.
Posted to issues@ignite.apache.org by "Manuel Núñez (Jira)" <ji...@apache.org> on 2020/07/02 08:04:00 UTC

[jira] [Created] (IGNITE-13203) Improve OptimizedObjectOutputStream serialisation performance

Manuel Núñez created IGNITE-13203:
-------------------------------------

             Summary: Improve OptimizedObjectOutputStream serialisation performance
                 Key: IGNITE-13203
                 URL: https://issues.apache.org/jira/browse/IGNITE-13203
             Project: Ignite
          Issue Type: Improvement
          Components: binary
    Affects Versions: 2.8.1, 2.7.6, 2.7.5, 2.8, 2.7, 2.6, 2.5, 2.4
            Reporter: Manuel Núñez
             Fix For: 2.9


1. OptimizedObjectOutputStream -> writeObject0: Strings bigger than 4K reduce serialisation performance by roughly 2x compared to the JDK marshaller; proposal: use the JDK marshaller for Strings bigger than 4K.
{code:java}
 private void writeObject0(Object obj) throws IOException {
        curObj = null;
        curFields = null;
        curPut = null;

        if (obj == null)
            writeByte(NULL);
        else {
        	boolean jdkStringWrite = false;

        	// For strings bigger than 4K, use the JDK marshaller to improve performance
        	if (obj instanceof String){
        		jdkStringWrite = (((String)obj).length() > 4096);
        	}

            if ((jdkStringWrite || obj instanceof Throwable) && !(obj instanceof Externalizable) || U.isEnum(obj.getClass())) {
                // Avoid problems with differing Enum objects or Enum implementation class deadlocks.
                writeByte(JDK);

...
{code}


2. OptimizedObjectOutputStream -> GridHandleTable: lookup performance can be improved by caching identity hashes, especially for objects with high complexity and cyclic references.

{code:java}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.ignite.internal.util;

import java.util.Arrays;

/**
 * Lightweight identity hash table which maps objects to integer handles,
 * assigned in ascending order.
 *
 */
public class GridHandleTable {
    /** Number of mappings in table/next available handle. */
    private int size;

    /** Size threshold determining when to expand hash spine. */
    private int threshold;

    /** Factor for computing size threshold. */
    private final float loadFactor;

    /** Maps hash value -> candidate handle value. */
    private int[] spine;

    /** Maps handle value -> next candidate handle value. */
    private int[] next;

    /** Maps handle value -> associated object. */
    private Object[] objs;

    /** Maps handle object hash -> associated object hash */
    private int[] objHashes;

    /** */
    private int[] spineEmpty;

    /** */
    private int[] nextEmpty;


    /**
     * Creates new HandleTable with given capacity and load factor.
     *
     * @param initCap Initial capacity.
     * @param loadFactor Load factor.
     */
    public GridHandleTable(int initCap, float loadFactor) {
        this.loadFactor = loadFactor;

        spine = new int[initCap];
        next = new int[initCap];

        objs = new Object[initCap];
        objHashes = new int[initCap];

        spineEmpty = new int[initCap];
        nextEmpty = new int[initCap];

        Arrays.fill(spineEmpty, -1);
        Arrays.fill(nextEmpty, -1);

        threshold = (int)(initCap * loadFactor);

        clear();
    }

    /**
     * Looks up and returns handle associated with given object, or -1 if
     * no mapping found.
     *
     * @param obj Object.
     * @return Handle.
     */
    public int lookup(Object obj) {

    	int objHash = hash(obj);

        int idx = objHash % spine.length;

        if (size > 0) {
            for (int i = spine[idx]; i >= 0; i = next[i])
                if (objs[i] == obj)
                    return i;
        }

        if (size >= next.length)
            growEntries();

        if (size >= threshold) {
            growSpine();

            idx = objHash % spine.length;
        }

        insert(obj, size, idx, objHash);

        size++;

        return -1;
    }

    /**
     * Resets table to its initial (empty) state.
     */
    public void clear() {
        System.arraycopy(spineEmpty, 0, spine, 0, spineEmpty.length);
        System.arraycopy(nextEmpty, 0, next, 0, nextEmpty.length);

        Arrays.fill(objs, null);

        Arrays.fill(objHashes, 0);

        size = 0;
    }

    /**
     * @return Returns objects that were added to handles table.
     */
    public Object[] objects() {
        return objs;
    }

    /**
     * Inserts mapping object -> handle mapping into table. Assumes table
     * is large enough to accommodate new mapping.
     *
     * @param obj Object.
     * @param handle Handle.
     * @param idx Index.
     */
    private void insert(Object obj, int handle, int idx, int hash) {
        objs[handle] = obj;
        next[handle] = spine[idx];
        spine[idx] = handle;
        objHashes[handle] = hash;
    }

    /**
     * Inserts mapping object -> handle mapping into table. Assumes table
     * is large enough to accommodate new mapping.
     *
     * @param handle Handle.
     * @param idx Index.
     */
    private void updateSpine(int handle, int idx) {
        next[handle] = spine[idx];
        spine[idx] = handle;
    }

    /**
     * Expands the hash "spine" - equivalent to increasing the number of
     * buckets in a conventional hash table.
     */
    private void growSpine() {
        int size = (spine.length << 1) + 1;

        spine = new int[size];
        spineEmpty = new int[size];
        threshold = (int)(spine.length * loadFactor);

        Arrays.fill(spineEmpty, -1);

        System.arraycopy(spineEmpty, 0, spine, 0, spineEmpty.length);

        for (int i = 0; i < this.size; i++) {
            updateSpine(i, objHashes[i] % spine.length);
        }
    }

    /**
     * Increases hash table capacity by lengthening entry arrays.
     */
    private void growEntries() {
        int newLen = (next.length << 1) + 1;
        int[] newNext = new int[newLen];

        System.arraycopy(next, 0, newNext, 0, size);

        next = newNext;
        nextEmpty = new int[newLen];

        Arrays.fill(nextEmpty, -1);

        Object[] newObjs = new Object[newLen];

        System.arraycopy(objs, 0, newObjs, 0, size);

        int[] newObjHashes = new int[newLen];

        System.arraycopy(objHashes, 0, newObjHashes, 0, size);

        objs = newObjs;

        objHashes = newObjHashes;
    }

    /**
     * Returns hash value for given object.
     *
     * @param obj Object.
     * @return Hash value.
     */
    private int hash(Object obj) {
        return System.identityHashCode(obj) & 0x7FFFFFFF;
    }
}

{code}






--
This message was sent by Atlassian Jira
(v8.3.4#803005)