Posted to dev@unomi.apache.org by sh...@apache.org on 2017/01/09 16:22:20 UTC

[46/50] [abbrv] incubator-unomi git commit: - DMF-1133 Error when displaying an empty list in MF : fixed issue for real this time :) - Some configuration file cleanup - Changed inactive user purging from 30 days to 180 days

http://git-wip-us.apache.org/repos/asf/incubator-unomi/blob/a5b7b156/services/src/main/java/org/apache/unomi/services/services/ClusterServiceImpl.java
----------------------------------------------------------------------
diff --git a/services/src/main/java/org/apache/unomi/services/services/ClusterServiceImpl.java b/services/src/main/java/org/apache/unomi/services/services/ClusterServiceImpl.java
new file mode 100644
index 0000000..8fb67d3
--- /dev/null
+++ b/services/src/main/java/org/apache/unomi/services/services/ClusterServiceImpl.java
@@ -0,0 +1,304 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.unomi.services.services;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.karaf.cellar.config.ClusterConfigurationEvent;
+import org.apache.karaf.cellar.config.Constants;
+import org.apache.karaf.cellar.core.*;
+import org.apache.karaf.cellar.core.control.SwitchStatus;
+import org.apache.karaf.cellar.core.event.EventProducer;
+import org.apache.karaf.cellar.core.event.EventType;
+import org.apache.unomi.api.ClusterNode;
+import org.apache.unomi.api.services.ClusterService;
+import org.apache.unomi.persistence.spi.PersistenceService;
+import org.osgi.service.cm.ConfigurationAdmin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.management.*;
+import javax.management.remote.JMXConnector;
+import javax.management.remote.JMXConnectorFactory;
+import javax.management.remote.JMXServiceURL;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.OperatingSystemMXBean;
+import java.lang.management.RuntimeMXBean;
+import java.net.MalformedURLException;
+import java.util.*;
+
+/**
+ * Implementation of the cluster service interface
+ */
+public class ClusterServiceImpl implements ClusterService {
+
+    private static final Logger logger = LoggerFactory.getLogger(ClusterServiceImpl.class);
+
+    public static final String CONTEXTSERVER_ADDRESS = "contextserver.address";
+    public static final String CONTEXTSERVER_PORT = "contextserver.port";
+    public static final String CONTEXTSERVER_SECURE_ADDRESS = "contextserver.secureAddress";
+    public static final String CONTEXTSERVER_SECURE_PORT = "contextserver.securePort";
+    public static final String KARAF_CELLAR_CLUSTER_NODE_CONFIGURATION = "org.apache.unomi.nodes";
+    public static final String KARAF_CLUSTER_CONFIGURATION_PUBLIC_ENDPOINTS = "publicEndpoints";
+    public static final String KARAF_CLUSTER_CONFIGURATION_SECURE_ENDPOINTS = "secureEndpoints";
+
+    private ClusterManager karafCellarClusterManager;
+    private EventProducer karafCellarEventProducer;
+    private GroupManager karafCellarGroupManager;
+    private String karafCellarGroupName = Configurations.DEFAULT_GROUP_NAME;
+    private ConfigurationAdmin osgiConfigurationAdmin;
+    private String karafJMXUsername = "karaf";
+    private String karafJMXPassword = "karaf";
+    private int karafJMXPort = 1099;
+    private String address;
+    private String port;
+    private String secureAddress;
+    private String securePort;
+
+    PersistenceService persistenceService;
+
+    public void setPersistenceService(PersistenceService persistenceService) {
+        this.persistenceService = persistenceService;
+    }
+
+    public void setKarafCellarClusterManager(ClusterManager karafCellarClusterManager) {
+        this.karafCellarClusterManager = karafCellarClusterManager;
+    }
+
+    public void setKarafCellarEventProducer(EventProducer karafCellarEventProducer) {
+        this.karafCellarEventProducer = karafCellarEventProducer;
+    }
+
+    public void setKarafCellarGroupManager(GroupManager karafCellarGroupManager) {
+        this.karafCellarGroupManager = karafCellarGroupManager;
+    }
+
+    public void setKarafCellarGroupName(String karafCellarGroupName) {
+        this.karafCellarGroupName = karafCellarGroupName;
+    }
+
+    public void setOsgiConfigurationAdmin(ConfigurationAdmin osgiConfigurationAdmin) {
+        this.osgiConfigurationAdmin = osgiConfigurationAdmin;
+    }
+
+    public void setKarafJMXUsername(String karafJMXUsername) {
+        this.karafJMXUsername = karafJMXUsername;
+    }
+
+    public void setKarafJMXPassword(String karafJMXPassword) {
+        this.karafJMXPassword = karafJMXPassword;
+    }
+
+    public void setKarafJMXPort(int karafJMXPort) {
+        this.karafJMXPort = karafJMXPort;
+    }
+
+    public void setAddress(String address) {
+        this.address = address;
+    }
+
+    public void setPort(String port) {
+        this.port = port;
+    }
+
+    public void setSecureAddress(String secureAddress) {
+        this.secureAddress = secureAddress;
+    }
+
+    public void setSecurePort(String securePort) {
+        this.securePort = securePort;
+    }
+
+    public void init() {
+        logger.debug("init cluster service");
+        if (karafCellarEventProducer != null && karafCellarClusterManager != null) {
+
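+            // system properties (e.g. -Dcontextserver.address=...) take precedence over the
+            // values injected from the org.apache.unomi.web blueprint configuration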
+            address = System.getProperty(CONTEXTSERVER_ADDRESS, address);
+            port = System.getProperty(CONTEXTSERVER_PORT, port);
+            secureAddress = System.getProperty(CONTEXTSERVER_SECURE_ADDRESS, secureAddress);
+            securePort = System.getProperty(CONTEXTSERVER_SECURE_PORT, securePort);
+
+            boolean setupConfigOk = true;
+            Group group = karafCellarGroupManager.findGroupByName(karafCellarGroupName);
+            if (setupConfigOk && group == null) {
+                logger.error("Cluster group " + karafCellarGroupName + " doesn't exist");
+                setupConfigOk = false;
+            }
+
+            // check if the producer is ON
+            if (setupConfigOk && karafCellarEventProducer.getSwitch().getStatus().equals(SwitchStatus.OFF)) {
+                logger.error("Cluster event producer is OFF");
+                setupConfigOk = false;
+            }
+
+            // check if the config pid is allowed
+            if (setupConfigOk && !isClusterConfigPIDAllowed(group, Constants.CATEGORY, KARAF_CELLAR_CLUSTER_NODE_CONFIGURATION, EventType.OUTBOUND)) {
+                logger.error("Configuration PID " + KARAF_CELLAR_CLUSTER_NODE_CONFIGURATION + " is blocked outbound for cluster group " + karafCellarGroupName);
+                setupConfigOk = false;
+            }
+
+            if (setupConfigOk) {
+                Map<String, Properties> configurations = karafCellarClusterManager.getMap(Constants.CONFIGURATION_MAP + Configurations.SEPARATOR + karafCellarGroupName);
+                org.apache.karaf.cellar.core.Node thisKarafNode = karafCellarClusterManager.getNode();
+                Properties karafCellarClusterNodeConfiguration = configurations.get(KARAF_CELLAR_CLUSTER_NODE_CONFIGURATION);
+                if (karafCellarClusterNodeConfiguration == null) {
+                    karafCellarClusterNodeConfiguration = new Properties();
+                }
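+                // merge this node's "nodeId=host:port" entries into the cluster-wide endpoint
+                // lists, defaulting to this node's own endpoints when none are registered yet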
+                String publicEndpointsPropValue = karafCellarClusterNodeConfiguration.getProperty(KARAF_CLUSTER_CONFIGURATION_PUBLIC_ENDPOINTS, thisKarafNode.getId() + "=" + address + ":" + port);
+                String secureEndpointsPropValue = karafCellarClusterNodeConfiguration.getProperty(KARAF_CLUSTER_CONFIGURATION_SECURE_ENDPOINTS, thisKarafNode.getId() + "=" + secureAddress + ":" + securePort);
+                String[] publicEndpointsArray = publicEndpointsPropValue.split(",");
+                Set<String> publicEndpoints = new TreeSet<String>(Arrays.asList(publicEndpointsArray));
+                String[] secureEndpointsArray = secureEndpointsPropValue.split(",");
+                Set<String> secureEndpoints = new TreeSet<String>(Arrays.asList(secureEndpointsArray));
+                publicEndpoints.add(thisKarafNode.getId() + "=" + address + ":" + port);
+                secureEndpoints.add(thisKarafNode.getId() + "=" + secureAddress + ":" + securePort);
+                karafCellarClusterNodeConfiguration.setProperty(KARAF_CLUSTER_CONFIGURATION_PUBLIC_ENDPOINTS, StringUtils.join(publicEndpoints, ","));
+                karafCellarClusterNodeConfiguration.setProperty(KARAF_CLUSTER_CONFIGURATION_SECURE_ENDPOINTS, StringUtils.join(secureEndpoints, ","));
+                configurations.put(KARAF_CELLAR_CLUSTER_NODE_CONFIGURATION, karafCellarClusterNodeConfiguration);
+                ClusterConfigurationEvent clusterConfigurationEvent = new ClusterConfigurationEvent(KARAF_CELLAR_CLUSTER_NODE_CONFIGURATION);
+                clusterConfigurationEvent.setSourceGroup(group);
+                karafCellarEventProducer.produce(clusterConfigurationEvent);
+            }
+        }
+    }
+
+    public void destroy() {
+    }
+
+    @Override
+    public List<ClusterNode> getClusterNodes() {
+        Map<String, ClusterNode> clusterNodes = new LinkedHashMap<String, ClusterNode>();
+
+        Set<org.apache.karaf.cellar.core.Node> karafCellarNodes = karafCellarClusterManager.listNodes();
+        org.apache.karaf.cellar.core.Node thisKarafNode = karafCellarClusterManager.getNode();
+        Map<String, Properties> clusterConfigurations = karafCellarClusterManager.getMap(Constants.CONFIGURATION_MAP + Configurations.SEPARATOR + karafCellarGroupName);
+        Properties karafCellarClusterNodeConfiguration = clusterConfigurations.get(KARAF_CELLAR_CLUSTER_NODE_CONFIGURATION);
+        Map<String, String> publicNodeEndpoints = new TreeMap<>();
+        Map<String, String> secureNodeEndpoints = new TreeMap<>();
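+        // endpoint properties are comma-separated "nodeId=host:port" entries; parse them into
+        // per-node lookup maps keyed by Cellar node id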
+        if (karafCellarClusterNodeConfiguration != null) {
+            String publicEndpointsPropValue = karafCellarClusterNodeConfiguration.getProperty(KARAF_CLUSTER_CONFIGURATION_PUBLIC_ENDPOINTS, thisKarafNode.getId() + "=" + address + ":" + port);
+            String secureEndpointsPropValue = karafCellarClusterNodeConfiguration.getProperty(KARAF_CLUSTER_CONFIGURATION_SECURE_ENDPOINTS, thisKarafNode.getId() + "=" + secureAddress + ":" + securePort);
+            String[] publicEndpointsArray = publicEndpointsPropValue.split(",");
+            Set<String> publicEndpoints = new TreeSet<String>(Arrays.asList(publicEndpointsArray));
+            for (String endpoint : publicEndpoints) {
+                String[] endpointParts = endpoint.split("=");
+                publicNodeEndpoints.put(endpointParts[0], endpointParts[1]);
+            }
+            String[] secureEndpointsArray = secureEndpointsPropValue.split(",");
+            Set<String> secureEndpoints = new TreeSet<String>(Arrays.asList(secureEndpointsArray));
+            for (String endpoint : secureEndpoints) {
+                String[] endpointParts = endpoint.split("=");
+                secureNodeEndpoints.put(endpointParts[0], endpointParts[1]);
+            }
+        }
+        for (org.apache.karaf.cellar.core.Node karafCellarNode : karafCellarNodes) {
+            ClusterNode clusterNode = new ClusterNode();
+            clusterNode.setHostName(karafCellarNode.getHost());
+            String publicEndpoint = publicNodeEndpoints.get(karafCellarNode.getId());
+            if (publicEndpoint != null) {
+                String[] publicEndpointParts = publicEndpoint.split(":");
+                clusterNode.setHostAddress(publicEndpointParts[0]);
+                clusterNode.setPublicPort(Integer.parseInt(publicEndpointParts[1]));
+            }
+            String secureEndpoint = secureNodeEndpoints.get(karafCellarNode.getId());
+            if (secureEndpoint != null) {
+                String[] secureEndpointParts = secureEndpoint.split(":");
+                clusterNode.setSecureHostAddress(secureEndpointParts[0]);
+                clusterNode.setSecurePort(Integer.parseInt(secureEndpointParts[1]));
+                clusterNode.setMaster(false);
+                clusterNode.setData(false);
+            }
+            try {
+                // now let's connect to remote JMX service to retrieve information from the runtime and operating system MX beans
+                JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://" + karafCellarNode.getHost() + ":" + karafJMXPort + "/karaf-root");
+                Map<String, Object> environment = new HashMap<String, Object>();
+                if (karafJMXUsername != null && karafJMXPassword != null) {
+                    environment.put(JMXConnector.CREDENTIALS, new String[] { karafJMXUsername, karafJMXPassword });
+                }
+                JMXConnector jmxc = JMXConnectorFactory.connect(url, environment);
+                MBeanServerConnection mbsc = jmxc.getMBeanServerConnection();
+                final RuntimeMXBean remoteRuntime = ManagementFactory.newPlatformMXBeanProxy(mbsc, ManagementFactory.RUNTIME_MXBEAN_NAME, RuntimeMXBean.class);
+                clusterNode.setUptime(remoteRuntime.getUptime());
+                ObjectName operatingSystemMXBeanName = new ObjectName(ManagementFactory.OPERATING_SYSTEM_MXBEAN_NAME);
+                Double processCpuLoad = null;
+                Double systemCpuLoad = null;
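+                // ProcessCpuLoad and SystemCpuLoad are com.sun.management extensions that may be
+                // missing on some JVMs, hence the lenient per-attribute lookups below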
+                try {
+                    processCpuLoad = (Double) mbsc.getAttribute(operatingSystemMXBeanName, "ProcessCpuLoad");
+                } catch (MBeanException | AttributeNotFoundException e) {
+                    logger.warn("Couldn't retrieve ProcessCpuLoad attribute from remote JMX server", e);
+                }
+                try {
+                    systemCpuLoad = (Double) mbsc.getAttribute(operatingSystemMXBeanName, "SystemCpuLoad");
+                } catch (MBeanException | AttributeNotFoundException e) {
+                    logger.warn("Couldn't retrieve SystemCpuLoad attribute from remote JMX server", e);
+                }
+                final OperatingSystemMXBean remoteOperatingSystemMXBean = ManagementFactory.newPlatformMXBeanProxy(mbsc, ManagementFactory.OPERATING_SYSTEM_MXBEAN_NAME, OperatingSystemMXBean.class);
+                clusterNode.setLoadAverage(new double[] { remoteOperatingSystemMXBean.getSystemLoadAverage() });
+                if (systemCpuLoad != null) {
+                    clusterNode.setCpuLoad(systemCpuLoad);
+                }
+
+            } catch (MalformedURLException e) {
+                logger.error("Error connecting to remote JMX server", e);
+            } catch (IOException e) {
+                logger.error("Error retrieving remote JMX data", e);
+            } catch (MalformedObjectNameException e) {
+                logger.error("Error retrieving remote JMX data", e);
+            } catch (InstanceNotFoundException e) {
+                logger.error("Error retrieving remote JMX data", e);
+            } catch (ReflectionException e) {
+                logger.error("Error retrieving remote JMX data", e);
+            }
+            clusterNodes.put(karafCellarNode.getId(), clusterNode);
+        }
+
+        return new ArrayList<ClusterNode>(clusterNodes.values());
+    }
+
+    @Override
+    public void purge(Date date) {
+        persistenceService.purge(date);
+    }
+
+    @Override
+    public void purge(String scope) {
+        persistenceService.purge(scope);
+    }
+
+    /**
+     * Check if a configuration is allowed.
+     *
+     * @param group the cluster group.
+     * @param category the configuration category constant.
+     * @param pid the configuration PID.
+     * @param type the cluster event type.
+     * @return true if the cluster event type is allowed, false otherwise.
+     */
+    public boolean isClusterConfigPIDAllowed(Group group, String category, String pid, EventType type) {
+        CellarSupport support = new CellarSupport();
+        support.setClusterManager(this.karafCellarClusterManager);
+        support.setGroupManager(this.karafCellarGroupManager);
+        support.setConfigurationAdmin(this.osgiConfigurationAdmin);
+        return support.isAllowed(group, category, pid, type);
+    }
+
+}
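
For reference, a minimal sketch of how another bundle might consume the new service, assuming standard OSGi service lookup and bean-style getters on ClusterNode; the ClusterNodeLister class is hypothetical and not part of this commit:

    import org.apache.unomi.api.ClusterNode;
    import org.apache.unomi.api.services.ClusterService;
    import org.osgi.framework.BundleContext;
    import org.osgi.framework.ServiceReference;

    public class ClusterNodeLister {

        // Resolves the ClusterService from the OSGi registry and prints basic node information.
        public static void printNodes(BundleContext bundleContext) {
            ServiceReference<ClusterService> reference = bundleContext.getServiceReference(ClusterService.class);
            if (reference == null) {
                return; // cluster service not registered (yet)
            }
            ClusterService clusterService = bundleContext.getService(reference);
            try {
                for (ClusterNode node : clusterService.getClusterNodes()) {
                    System.out.println(node.getHostName() + " -> " + node.getHostAddress() + ":"
                            + node.getPublicPort() + ", uptime " + node.getUptime() + " ms");
                }
            } finally {
                bundleContext.ungetService(reference);
            }
        }
    }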

http://git-wip-us.apache.org/repos/asf/incubator-unomi/blob/a5b7b156/services/src/main/resources/OSGI-INF/blueprint/blueprint.xml
----------------------------------------------------------------------
diff --git a/services/src/main/resources/OSGI-INF/blueprint/blueprint.xml b/services/src/main/resources/OSGI-INF/blueprint/blueprint.xml
index 8c0d371..cb334ab 100644
--- a/services/src/main/resources/OSGI-INF/blueprint/blueprint.xml
+++ b/services/src/main/resources/OSGI-INF/blueprint/blueprint.xml
@@ -26,16 +26,41 @@
                              update-strategy="reload" placeholder-prefix="${services.">
         <cm:default-properties>
             <cm:property name="profile.purge.interval" value="1"/>
-            <cm:property name="profile.purge.inactiveTime" value="30"/>
+            <cm:property name="profile.purge.inactiveTime" value="180"/>
             <cm:property name="profile.purge.existTime" value="-1"/>
             <cm:property name="event.purge.existTime" value="12"/>
             <cm:property name="segment.update.batchSize" value="1000"/>
         </cm:default-properties>
     </cm:property-placeholder>
 
+    <cm:property-placeholder persistent-id="org.apache.unomi.cluster"
+                             update-strategy="reload" placeholder-prefix="${cluster.">
+        <cm:default-properties>
+            <cm:property name="group" value="default" />
+            <cm:property name="jmxUsername" value="karaf" />
+            <cm:property name="jmxPassword" value="karaf" />
+            <cm:property name="jmxPort" value="1099" />
+        </cm:default-properties>
+    </cm:property-placeholder>
+
+    <cm:property-placeholder persistent-id="org.apache.unomi.web"
+                             update-strategy="reload" placeholder-prefix="${web.">
+        <cm:default-properties>
+            <cm:property name="contextserver.address" value="localhost"/>
+            <cm:property name="contextserver.port" value="8181"/>
+            <cm:property name="contextserver.secureAddress" value="localhost"/>
+            <cm:property name="contextserver.securePort" value="9443"/>
+        </cm:default-properties>
+    </cm:property-placeholder>
+
+
     <reference id="persistenceService"
                interface="org.apache.unomi.persistence.spi.PersistenceService"/>
     <reference id="httpService" interface="org.osgi.service.http.HttpService"/>
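+    <!-- Karaf Cellar and ConfigurationAdmin services used by ClusterServiceImpl for node discovery and cluster-wide configuration -->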
+    <reference id="karafCellarClusterManager" interface="org.apache.karaf.cellar.core.ClusterManager" />
+    <reference id="karafCellarEventProducer" interface="org.apache.karaf.cellar.core.event.EventProducer" />
+    <reference id="karafCellarGroupManager" interface="org.apache.karaf.cellar.core.GroupManager" />
+    <reference id="osgiConfigurationAdmin" interface="org.osgi.service.cm.ConfigurationAdmin"/>
 
     <!-- Service definitions -->
 
@@ -129,6 +154,24 @@
     </bean>
     <service id="queryService" ref="queryServiceImpl" auto-export="interfaces"/>
 
+    <bean id="clusterServiceImpl" class="org.apache.unomi.services.services.ClusterServiceImpl"
+          init-method="init" destroy-method="destroy">
+        <property name="address" value="${web.contextserver.address}"/>
+        <property name="port" value="${web.contextserver.port}"/>
+        <property name="secureAddress" value="${web.contextserver.secureAddress}"/>
+        <property name="securePort" value="${web.contextserver.securePort}"/>
+        <property name="persistenceService" ref="persistenceService"/>
+        <property name="karafCellarClusterManager" ref="karafCellarClusterManager" />
+        <property name="karafCellarEventProducer" ref="karafCellarEventProducer" />
+        <property name="karafCellarGroupManager" ref="karafCellarGroupManager" />
+        <property name="karafCellarGroupName" value="${cluster.group}" />
+        <property name="osgiConfigurationAdmin" ref="osgiConfigurationAdmin" />
+        <property name="karafJMXUsername" value="${cluster.jmxUsername}" />
+        <property name="karafJMXPassword" value="${cluster.jmxPassword}" />
+        <property name="karafJMXPort" value="${cluster.jmxPort}" />
+    </bean>
+    <service id="clusterService" ref="clusterServiceImpl" auto-export="interfaces"/>
+
     <!-- We use a listener here because using the list directly for listening to proxies coming from the same bundle didn't seem to work -->
     <reference-list id="eventListenerServices"
                     interface="org.apache.unomi.api.services.EventListenerService"

http://git-wip-us.apache.org/repos/asf/incubator-unomi/blob/a5b7b156/services/src/main/resources/hazelcast.xml
----------------------------------------------------------------------
diff --git a/services/src/main/resources/hazelcast.xml b/services/src/main/resources/hazelcast.xml
new file mode 100644
index 0000000..0fc6f5d
--- /dev/null
+++ b/services/src/main/resources/hazelcast.xml
@@ -0,0 +1,219 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one or more
+  ~ contributor license agreements.  See the NOTICE file distributed with
+  ~ this work for additional information regarding copyright ownership.
+  ~ The ASF licenses this file to You under the Apache License, Version 2.0
+  ~ (the "License"); you may not use this file except in compliance with
+  ~ the License.  You may obtain a copy of the License at
+  ~
+  ~      http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+<hazelcast xsi:schemaLocation="http://www.hazelcast.com/schema/config hazelcast-config-3.2.xsd"
+           xmlns="http://www.hazelcast.com/schema/config"
+           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+    <group>
+        <name>cellar</name>
+        <password>pass</password>
+    </group>
+    <management-center enabled="false">http://localhost:8080/mancenter</management-center>
+    <network>
+        <port auto-increment="true" port-count="100">5701</port>
+        <outbound-ports>
+            <!--
+                Allowed port range when connecting to other nodes.
+                0 or * means use system provided port.
+            -->
+            <ports>0</ports>
+        </outbound-ports>
+        <join>
+            <multicast enabled="false">
+                <multicast-group>224.2.2.3</multicast-group>
+                <multicast-port>54327</multicast-port>
+            </multicast>
+            <tcp-ip enabled="true">
+                <interface>127.0.0.1</interface>
+            </tcp-ip>
+            <aws enabled="false">
+                <access-key>my-access-key</access-key>
+                <secret-key>my-secret-key</secret-key>
+                <!--optional, default is us-east-1 -->
+                <region>us-west-1</region>
+                <!--optional, default is ec2.amazonaws.com. If set, region shouldn't be set as it will override this property -->
+                <host-header>ec2.amazonaws.com</host-header>
+                <!-- optional, only instances belonging to this group will be discovered, default will try all running instances -->
+                <security-group-name>hazelcast-sg</security-group-name>
+                <tag-key>type</tag-key>
+                <tag-value>hz-nodes</tag-value>
+            </aws>
+        </join>
+        <interfaces enabled="false">
+            <interface>10.10.1.*</interface>
+        </interfaces>
+        <ssl enabled="false"/>
+        <socket-interceptor enabled="false"/>
+        <symmetric-encryption enabled="false">
+            <!--
+               encryption algorithm such as
+               DES/ECB/PKCS5Padding,
+               PBEWithMD5AndDES,
+               AES/CBC/PKCS5Padding,
+               Blowfish,
+               DESede
+            -->
+            <algorithm>PBEWithMD5AndDES</algorithm>
+            <!-- salt value to use when generating the secret key -->
+            <salt>thesalt</salt>
+            <!-- pass phrase to use when generating the secret key -->
+            <password>thepass</password>
+            <!-- iteration count to use when generating the secret key -->
+            <iteration-count>19</iteration-count>
+        </symmetric-encryption>
+    </network>
+    <partition-group enabled="false"/>
+    <executor-service>
+        <pool-size>16</pool-size>
+        <!-- Queue capacity. 0 means Integer.MAX_VALUE -->
+        <queue-capacity>0</queue-capacity>
+    </executor-service>
+    <queue name="default">
+        <!--
+            Maximum size of the queue. When a JVM's local queue size reaches the maximum,
+            all put/offer operations will get blocked until the queue size
+            of the JVM goes down below the maximum.
+            Any integer between 0 and Integer.MAX_VALUE. 0 means
+            Integer.MAX_VALUE. Default is 0.
+        -->
+        <max-size>0</max-size>
+        <!--
+            Number of backups. If 1 is set as the backup-count for example,
+            then all entries of the map will be copied to another JVM for
+            fail-safety. 0 means no backup.
+        -->
+        <backup-count>1</backup-count>
+        <!--
+            Number of async backups. 0 means no backup.
+        -->
+        <async-backup-count>0</async-backup-count>
+        <empty-queue-ttl>-1</empty-queue-ttl>
+    </queue>
+
+    <map name="default">
+        <!--
+            Data type that will be used for storing recordMap.
+            Possible values:
+                BINARY (default): keys and values will be stored as binary data
+                OBJECT : values will be stored in their object forms
+                OFFHEAP : values will be stored in non-heap region of JVM
+        -->
+        <in-memory-format>BINARY</in-memory-format>
+        <!--
+            Number of backups. If 1 is set as the backup-count for example,
+            then all entries of the map will be copied to another JVM for
+            fail-safety. 0 means no backup.
+        -->
+        <backup-count>1</backup-count>
+        <!--
+            Number of async backups. 0 means no backup.
+        -->
+        <async-backup-count>0</async-backup-count>
+        <!--
+            Maximum number of seconds for each entry to stay in the map. Entries that are
+            older than <time-to-live-seconds> and not updated for <time-to-live-seconds>
+            will get automatically evicted from the map.
+            Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0.
+        -->
+        <time-to-live-seconds>0</time-to-live-seconds>
+        <!--
+            Maximum number of seconds for each entry to stay idle in the map. Entries that are
+            idle(not touched) for more than <max-idle-seconds> will get
+            automatically evicted from the map. Entry is touched if get, put or containsKey is called.
+            Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0.
+        -->
+        <max-idle-seconds>0</max-idle-seconds>
+        <!--
+            Valid values are:
+            NONE (no eviction),
+            LRU (Least Recently Used),
+            LFU (Least Frequently Used).
+            NONE is the default.
+        -->
+        <eviction-policy>NONE</eviction-policy>
+        <!--
+            Maximum size of the map. When max size is reached,
+            map is evicted based on the policy defined.
+            Any integer between 0 and Integer.MAX_VALUE. 0 means
+            Integer.MAX_VALUE. Default is 0.
+        -->
+        <max-size policy="PER_NODE">0</max-size>
+        <!--
+            When max. size is reached, specified percentage of
+            the map will be evicted. Any integer between 0 and 100.
+            If 25 is set for example, 25% of the entries will
+            get evicted.
+        -->
+        <eviction-percentage>25</eviction-percentage>
+        <!--
+            While recovering from split-brain (network partitioning),
+            map entries in the small cluster will merge into the bigger cluster
+            based on the policy set here. When an entry merge into the
+            cluster, there might an existing entry with the same key already.
+            Values of these entries might be different for that same key.
+            Which value should be set for the key? Conflict is resolved by
+            the policy set here. Default policy is PutIfAbsentMapMergePolicy
+
+            There are built-in merge policies such as
+            com.hazelcast.map.merge.PassThroughMergePolicy; entry will be added if there is no existing entry for the key.
+            com.hazelcast.map.merge.PutIfAbsentMapMergePolicy ; entry will be added if the merging entry doesn't exist in the cluster.
+            com.hazelcast.map.merge.HigherHitsMapMergePolicy ; entry with the higher hits wins.
+            com.hazelcast.map.merge.LatestUpdateMapMergePolicy ; entry with the latest update wins.
+        -->
+        <merge-policy>com.hazelcast.map.merge.PassThroughMergePolicy</merge-policy>
+    </map>
+
+    <multimap name="default">
+        <backup-count>1</backup-count>
+        <value-collection-type>SET</value-collection-type>
+    </multimap>
+
+    <list name="default">
+        <backup-count>1</backup-count>
+    </list>
+
+    <set name="default">
+        <backup-count>1</backup-count>
+    </set>
+
+    <jobtracker name="default">
+        <max-thread-size>0</max-thread-size>
+        <!-- Queue size 0 means number of partitions * 2 -->
+        <queue-size>0</queue-size>
+        <retry-count>0</retry-count>
+        <chunk-size>1000</chunk-size>
+        <communicate-stats>true</communicate-stats>
+        <topology-changed-strategy>CANCEL_RUNNING_OPERATION</topology-changed-strategy>
+    </jobtracker>
+
+    <semaphore name="default">
+        <initial-permits>0</initial-permits>
+        <backup-count>1</backup-count>
+        <async-backup-count>0</async-backup-count>
+    </semaphore>
+
+    <serialization>
+        <portable-version>0</portable-version>
+    </serialization>
+
+    <services enable-defaults="true" />
+</hazelcast>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-unomi/blob/a5b7b156/services/src/main/resources/org.apache.unomi.cluster.cfg
----------------------------------------------------------------------
diff --git a/services/src/main/resources/org.apache.unomi.cluster.cfg b/services/src/main/resources/org.apache.unomi.cluster.cfg
new file mode 100644
index 0000000..a9ecf2e
--- /dev/null
+++ b/services/src/main/resources/org.apache.unomi.cluster.cfg
@@ -0,0 +1,20 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+cluster.group=default
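+# JMX credentials and port used by the cluster service to query uptime and CPU load on each node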
+cluster.jmxUsername=karaf
+cluster.jmxPassword=karaf
+cluster.jmxPort=1099
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-unomi/blob/a5b7b156/services/src/main/resources/org.apache.unomi.services.cfg
----------------------------------------------------------------------
diff --git a/services/src/main/resources/org.apache.unomi.services.cfg b/services/src/main/resources/org.apache.unomi.services.cfg
index d69da60..9e59210 100644
--- a/services/src/main/resources/org.apache.unomi.services.cfg
+++ b/services/src/main/resources/org.apache.unomi.services.cfg
@@ -19,7 +19,7 @@
 profile.purge.interval=1
 
 # Purge profiles that have been inactive for a specific number of days
-profile.purge.inactiveTime=30
+profile.purge.inactiveTime=180
 
 # Purge profiles that have been created for a specific number of days
 profile.purge.existTime=-1

http://git-wip-us.apache.org/repos/asf/incubator-unomi/blob/a5b7b156/src/site/markdown/clustering.md
----------------------------------------------------------------------
diff --git a/src/site/markdown/clustering.md b/src/site/markdown/clustering.md
index 7c51889..9e1a548 100644
--- a/src/site/markdown/clustering.md
+++ b/src/site/markdown/clustering.md
@@ -18,30 +18,21 @@
 Cluster setup
 =============
 
-Context server relies on Elasticsearch to discover and configure its cluster. You just need to install multiple context
-servers on the same network, and enable the discovery protocol in $MY_KARAF_HOME/etc/org.apache.unomi.persistence.elasticsearch.cfg file :
-
-    discovery.zen.ping.multicast.enabled=true
+Context server relies on Apache Karaf Cellar to discover and configure its cluster. You just need to install multiple context
+servers on the same network, and enable the discovery protocol in $MY_KARAF_HOME/etc/hazelcast.xml file.
 
 All nodes on the same network, sharing the same cluster name will be part of the same cluster.
 
-###Recommended configurations
-
-It is recommended to have one node dedicated to the context server, where the other nodes take care of the
-Elasticsearch persistence. The node dedicated to the context server will have node.data set to false.
-
 #### 2 nodes configuration
 One node dedicated to the context server, one node for Elasticsearch storage.
 
 Node A :
 
-    node.data=true
     numberOfReplicas=0
     monthlyIndex.numberOfReplicas=0
 
 Node B :
 
-    node.data=false
     numberOfReplicas=0
     monthlyIndex.numberOfReplicas=0
 
@@ -50,34 +41,15 @@ One node dedicated to context server, 2 nodes for elasticsearch storage with fault tolerance
 
 Node A :
 
-    node.data=false
     numberOfReplicas=1
     monthlyIndex.numberOfReplicas=1
 
 Node B :
 
-    node.data=true
     numberOfReplicas=1
     monthlyIndex.numberOfReplicas=1
 
 Node C :
 
-    node.data=true
     numberOfReplicas=1
     monthlyIndex.numberOfReplicas=1
-
-### Specific configuration
-If multicast is not allowed on your network, you'll need to switch to unicast protocol and manually configure the server IPs. This can be
-done by disabling the elasticsearch automatic discovery in $MY_KARAF_HOME/etc/org.apache.unomi.persistence.elasticsearch.cfg :
-
-    discovery.zen.ping.multicast.enabled=false
-
-
-And then set the property discovery.zen.ping.unicast.hosts in $MY_KARAF_HOME/etc/elasticsearch.yml files :
-
-
-    discovery.zen.ping.unicast.hosts: ['192.168.0.1:9300', '192.168.0.2:9300']
-
-
-More information and configuration options can be found at :
-[https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery.html](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery.html)
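
Note: with the move to Apache Karaf Cellar, the unicast equivalent of the removed Elasticsearch instructions lives in $MY_KARAF_HOME/etc/hazelcast.xml. A minimal sketch, assuming Hazelcast 3.x TCP/IP discovery as shipped in this commit's hazelcast.xml (the member addresses are placeholders):

    <join>
        <multicast enabled="false">
            <multicast-group>224.2.2.3</multicast-group>
            <multicast-port>54327</multicast-port>
        </multicast>
        <tcp-ip enabled="true">
            <member>192.168.0.1:5701</member>
            <member>192.168.0.2:5701</member>
        </tcp-ip>
    </join>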

http://git-wip-us.apache.org/repos/asf/incubator-unomi/blob/a5b7b156/src/site/markdown/configuration.md
----------------------------------------------------------------------
diff --git a/src/site/markdown/configuration.md b/src/site/markdown/configuration.md
index b925d48..0bf595c 100644
--- a/src/site/markdown/configuration.md
+++ b/src/site/markdown/configuration.md
@@ -43,15 +43,6 @@ with the following contents:
 
     cluster.name=contextElasticSearch
     index.name=context
-    elasticSearchConfig=file:${karaf.etc}/elasticsearch.yml
-
-And replace the cluster.name parameter here by your cluster name.
-
-You can also put an elasticsearch configuration file in $MY_KARAF_HOME/etc/elasticsearch.yml ,
-and put any standard Elasticsearch configuration options in this last file.
-
-If you want your context server to be a client only on a cluster of elasticsearch nodes, just set the node.data property
-to false.
 
 Installing the MaxMind GeoIPLite2 IP lookup database
 ----------------------------------------------------
@@ -160,23 +151,7 @@ node-to-node communication : 9200 (Elasticsearch REST API), 9300 (Elasticsearch TCP transport)
 
 Of course any ports listed here are the default ports configured in each server, you may adjust them if needed.
 
-Step 2 : Adjust the Context Server IP filtering
-
-By default the Context Server limits to connections to port 9200 and 9300 to the following IP ranges
-
-    - localhost
-    - 127.0.0.1
-    - ::1
-    - the current subnet (i.e., 192.168.1.0-192.168.1.255)
-    
-(this is done using a custom plugin for Elasticsearch, that you may find here : 
-https://git-wip-us.apache.org/repos/asf/incubator-unomi/context-server/persistence-elasticsearch/plugins/security)
-
-You can adjust this setting by using the following setting in the $MY_KARAF_HOME/etc/elasticsearch.yml file : 
-
-    security.ipranges: localhost,127.0.0.1,::1,10.0.1.0-10.0.1.255
-
-Step 3 : Follow industry recommended best practices for securing Elasticsearch
+Step 2 : Follow industry recommended best practices for securing Elasticsearch
 
 You may find more valuable recommendations here :