Posted to dev@cloudstack.apache.org by Rohit Yadav <ro...@gmail.com> on 2014/12/01 18:34:19 UTC

Re: git commit: updated refs/heads/reporter to 026efd3

On Mon, Dec 1, 2014 at 6:37 PM, <wi...@apache.org> wrote:

> Repository: cloudstack
> Updated Branches:
>   refs/heads/reporter [created] 026efd344
>
>
> Proposal of a Usage Reporter / call-home functionality for CloudStack
>
> With this commit the Management Server will by default generate an
> anonymous Usage report every 7 (seven) days and submit this information
> back to the Apache CloudStack project.
>
> These anonymous reports do NOT contain any information about Instance
> names, subnets, etc. They only contain numbers about how CloudStack is
> being used.
>
> This information is vital for the project to gain more insight into how
> CloudStack is being used.
>
> Users can turn the reporting off by setting usage.report.interval to 0
> (zero)
>

Great initiative, Wido! Do we have a data collection service already
running with the ASF, or are you building one?

Regards.


>
>
> Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
> Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/026efd34
> Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/026efd34
> Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/026efd34
>
> Branch: refs/heads/reporter
> Commit: 026efd344452cef806f2475009bf690f5c2cbf9a
> Parents: 449d123
> Author: Wido den Hollander <wi...@widodh.nl>
> Authored: Thu Nov 20 14:43:33 2014 +0100
> Committer: Wido den Hollander <wi...@widodh.nl>
> Committed: Mon Dec 1 14:00:10 2014 +0100
>
> ----------------------------------------------------------------------
>  reporter/README.md                              |  18 +
>  reporter/usage-report-collector.py              |  64 +++
>  server/pom.xml                                  |  10 +
>  .../spring-server-core-managers-context.xml     |   2 +
>  server/src/com/cloud/configuration/Config.java  |   5 +-
>  .../apache/cloudstack/report/UsageReporter.java | 403 +++++++++++++++++++
>  setup/db/db/schema-450to460.sql                 |   4 +-
>  7 files changed, 504 insertions(+), 2 deletions(-)
> ----------------------------------------------------------------------
>
>
>
> http://git-wip-us.apache.org/repos/asf/cloudstack/blob/026efd34/reporter/README.md
> ----------------------------------------------------------------------
> diff --git a/reporter/README.md b/reporter/README.md
> new file mode 100644
> index 0000000..6453fa4
> --- /dev/null
> +++ b/reporter/README.md
> @@ -0,0 +1,18 @@
> +# CloudStack Usage Report
> +
> +This directory contains the CloudStack reporter webservice used by the
> +Apache CloudStack project to gather anonymous statistical information
> +about CloudStack deployments.
> +
> +Since version <FIX ME!!> the management server sends an anonymized
> +Usage Report to the project every 7 days.
> +
> +This information is used to gain insight into how CloudStack is being
> +used.
> +
> +Turning this Usage Reporting functionality off can be done in the
> +Global Settings by setting 'usage.report.interval' to 0.
> +
> +# The webservice
> +The Python Flask application in this directory is the webservice
> +running on https://reports.cloudstack.apache.org/ (FIX ME?) and stores
> +all the incoming information in an ElasticSearch database.
> +
> +Since Apache CloudStack is Open Source, we show not only how we
> +generate the report, but also how we process it.
>
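Since the README above mentions the ElasticSearch backend, here is a
minimal sketch of reading stored reports back, assuming a local
ElasticSearch node and the same elasticsearch-py client the collector
below uses (the daily cloudstack-YYYY.MM.DD index names come from the
collector code):

    from elasticsearch import Elasticsearch

    es = Elasticsearch()  # assumes a node on localhost, as in the collector

    # The collector writes one index per day: cloudstack-YYYY.MM.DD
    res = es.search(index="cloudstack-*",
                    body={"query": {"match_all": {}}, "size": 5})

    for hit in res["hits"]["hits"]:
        doc = hit["_source"]
        print("%s %s" % (doc.get("unique_id"), doc.get("timestamp")))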
>
> http://git-wip-us.apache.org/repos/asf/cloudstack/blob/026efd34/reporter/usage-report-collector.py
> ----------------------------------------------------------------------
> diff --git a/reporter/usage-report-collector.py b/reporter/usage-report-collector.py
> new file mode 100755
> index 0000000..500a4d2
> --- /dev/null
> +++ b/reporter/usage-report-collector.py
> @@ -0,0 +1,64 @@
> +#!/usr/bin/env python
> +# Licensed to the Apache Software Foundation (ASF) under one
> +# or more contributor license agreements.  See the NOTICE file
> +# distributed with this work for additional information
> +# regarding copyright ownership.  The ASF licenses this file
> +# to you under the Apache License, Version 2.0 (the
> +# "License"); you may not use this file except in compliance
> +# with the License.  You may obtain a copy of the License at
> +#
> +#   http://www.apache.org/licenses/LICENSE-2.0
> +#
> +# Unless required by applicable law or agreed to in writing,
> +# software distributed under the License is distributed on an
> +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
> +# KIND, either express or implied.  See the License for the
> +# specific language governing permissions and limitations
> +# under the License.
> +
> +from flask import abort, Flask, request, Response
> +from elasticsearch import Elasticsearch
> +import json
> +import time
> +
> +def json_response(response):
> +    return json.dumps(response, indent=2) + "\n", 200, {'Content-Type': 'application/json; charset=utf-8'}
> +
> +def generate_app(config=None):
> +    app = Flask(__name__)
> +
> +    @app.route('/report/<unique_id>', methods=['POST'])
> +    def report(unique_id):
> +        # We expect JSON data; reject requests that do not declare a JSON Content-Type
> +        if request.headers.get('Content-Type') != 'application/json':
> +            abort(417, "No or incorrect Content-Type header was supplied")
> +
> +        index = "cloudstack-%s" % time.strftime("%Y.%m.%d", time.gmtime())
> +        timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
> +
> +        es = Elasticsearch()
> +        es.indices.create(index=index, ignore=400)
> +
> +        report = json.loads(request.data)
> +        report["unique_id"] = unique_id
> +        report["timestamp"] = timestamp
> +
> +        es.index(index=index, doc_type="usage-report", body=json.dumps(report), timestamp=timestamp, refresh=True)
> +
> +        response = {}
> +        return json_response(response)
> +
> +    return app
> +
> +
> +app = generate_app()
> +
> +# Only run the App if this script is invoked from a Shell
> +if __name__ == '__main__':
> +    app.debug = True
> +    app.run(host='0.0.0.0', port=8088)
> +
> +# Otherwise provide a variable called 'application' for mod_wsgi
> +else:
> +    application = app
>
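To exercise the endpoint above by hand, a minimal sketch of a test
submission, assuming the Flask app is running locally on port 8088 and
the requests library is installed (the payload keys mirror what
UsageReporter.java builds further down; the numbers are invented):

    import json
    import requests  # assumption: not a dependency of the collector itself

    # Fabricated example payload; the real one is built by UsageReporter.java
    report = {
        "hosts": {"type": {"Routing": 4}, "hypervisor_type": {"KVM": 4}},
        "instances": {"state": {"Running": 25, "Stopped": 3}},
    }

    # The route is /report/<unique_id> and the collector insists on a
    # JSON Content-Type
    resp = requests.post("http://localhost:8088/report/test-id-123",
                         data=json.dumps(report),
                         headers={"Content-Type": "application/json"})
    print(resp.status_code)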
>
> http://git-wip-us.apache.org/repos/asf/cloudstack/blob/026efd34/server/pom.xml
> ----------------------------------------------------------------------
> diff --git a/server/pom.xml b/server/pom.xml
> index c293aa1..f2a2a0e 100644
> --- a/server/pom.xml
> +++ b/server/pom.xml
> @@ -138,6 +138,16 @@
>        <artifactId>opensaml</artifactId>
>        <version>${cs.opensaml.version}</version>
>      </dependency>
> +    <dependency>
> +      <groupId>com.google.code.gson</groupId>
> +      <artifactId>gson</artifactId>
> +      <version>${cs.gson.version}</version>
> +    </dependency>
> +    <dependency>
> +      <groupId>com.google.guava</groupId>
> +      <artifactId>guava</artifactId>
> +      <version>${cs.guava.version}</version>
> +    </dependency>
>    </dependencies>
>    <build>
>      <testResources>
>
>
> http://git-wip-us.apache.org/repos/asf/cloudstack/blob/026efd34/server/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml
> ----------------------------------------------------------------------
> diff --git a/server/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml b/server/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml
> index 6d8f32e..faa5286 100644
> --- a/server/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml
> +++ b/server/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml
> @@ -222,6 +222,8 @@
>
>      <bean id="statsCollector" class="com.cloud.server.StatsCollector" />
>
> +    <bean id="usageReporter" class="org.apache.cloudstack.report.UsageReporter" />
> +
>      <bean id="storagePoolAutomationImpl" class="com.cloud.storage.StoragePoolAutomationImpl" />
>
>      <bean id="domainManagerImpl" class="com.cloud.user.DomainManagerImpl" />
>
>
> http://git-wip-us.apache.org/repos/asf/cloudstack/blob/026efd34/server/src/com/cloud/configuration/Config.java
> ----------------------------------------------------------------------
> diff --git a/server/src/com/cloud/configuration/Config.java b/server/src/com/cloud/configuration/Config.java
> index 281d99c..d395108 100644
> --- a/server/src/com/cloud/configuration/Config.java
> +++ b/server/src/com/cloud/configuration/Config.java
> @@ -2052,7 +2052,10 @@ public enum Config {
>      PublishAsynJobEvent("Advanced", ManagementServer.class, Boolean.class, "publish.async.job.events", "true", "enable or disable publishing of usage events on the event bus", null),
>
>      // StatsCollector
> -    StatsOutPutGraphiteHost("Advanced", ManagementServer.class, String.class, "stats.output.uri", "", "URI to additionally send StatsCollector statistics to", null);
> +    StatsOutPutGraphiteHost("Advanced", ManagementServer.class, String.class, "stats.output.uri", "", "URI to additionally send StatsCollector statistics to", null),
> +
> +    // Usage Reporting service
> +    UsageReportInterval("Advanced", ManagementServer.class, Integer.class, "usage.report.interval", "7", "Interval (days) between sending anonymous Usage Reports back to the CloudStack project", null);
>
>      private final String _category;
>      private final Class<?> _componentClass;
>
>
> http://git-wip-us.apache.org/repos/asf/cloudstack/blob/026efd34/server/src/org/apache/cloudstack/report/UsageReporter.java
> ----------------------------------------------------------------------
> diff --git a/server/src/org/apache/cloudstack/report/UsageReporter.java b/server/src/org/apache/cloudstack/report/UsageReporter.java
> new file mode 100644
> index 0000000..e7891e9
> --- /dev/null
> +++ b/server/src/org/apache/cloudstack/report/UsageReporter.java
> @@ -0,0 +1,403 @@
> +// Licensed to the Apache Software Foundation (ASF) under one
> +// or more contributor license agreements.  See the NOTICE file
> +// distributed with this work for additional information
> +// regarding copyright ownership.  The ASF licenses this file
> +// to you under the Apache License, Version 2.0 (the
> +// "License"); you may not use this file except in compliance
> +// with the License.  You may obtain a copy of the License at
> +//
> +//   http://www.apache.org/licenses/LICENSE-2.0
> +//
> +// Unless required by applicable law or agreed to in writing,
> +// software distributed under the License is distributed on an
> +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
> +// KIND, either express or implied.  See the License for the
> +// specific language governing permissions and limitations
> +// under the License.
> +package org.apache.cloudstack.report;
> +
> +import java.util.concurrent.Executors;
> +import java.util.concurrent.ScheduledExecutorService;
> +import java.util.concurrent.TimeUnit;
> +import java.util.List;
> +import java.util.Map;
> +import java.util.HashMap;
> +import java.sql.Connection;
> +import java.sql.PreparedStatement;
> +import java.sql.ResultSet;
> +import java.sql.SQLException;
> +import java.net.URL;
> +import java.net.HttpURLConnection;
> +import java.net.SocketTimeoutException;
> +import java.net.MalformedURLException;
> +import java.net.ProtocolException;
> +import java.io.OutputStreamWriter;
> +import java.io.IOException;
> +
> +import javax.inject.Inject;
> +
> +import org.apache.log4j.Logger;
> +import org.springframework.stereotype.Component;
> +
> +import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
> +import org.apache.cloudstack.managed.context.ManagedContextRunnable;
> +
> +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
> +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
> +
> +import org.apache.commons.codec.digest.DigestUtils;
> +
> +import com.cloud.host.HostVO;
> +import com.cloud.host.dao.HostDao;
> +import com.cloud.dc.ClusterVO;
> +import com.cloud.dc.dao.ClusterDao;
> +import com.cloud.dc.DataCenterVO;
> +import com.cloud.dc.dao.DataCenterDao;
> +import com.cloud.vm.UserVmVO;
> +import com.cloud.vm.dao.UserVmDao;
> +import com.cloud.vm.VMInstanceVO;
> +import com.cloud.vm.dao.VMInstanceDao;
> +import com.cloud.utils.db.SearchCriteria;
> +import com.cloud.utils.NumbersUtil;
> +import com.cloud.utils.component.ManagerBase;
> +import com.cloud.utils.component.ComponentMethodInterceptable;
> +import com.cloud.utils.concurrency.NamedThreadFactory;
> +import com.cloud.utils.db.DB;
> +import com.cloud.utils.db.TransactionLegacy;
> +
> +import com.google.gson.Gson;
> +import com.google.common.util.concurrent.AtomicLongMap;
> +
> +@Component
> +public class UsageReporter extends ManagerBase implements ComponentMethodInterceptable {
> +    public static final Logger s_logger = Logger.getLogger(UsageReporter.class.getName());
> +
> +    /* !FIX ME! This should point to an Apache Infra host with SSL! */
> +    private String reportHost = "http://cs-report.widodh.nl:8088/report";
> +
> +    private String uniqueID = null;
> +
> +    private static UsageReporter s_instance = null;
> +
> +    private ScheduledExecutorService _executor = null;
> +
> +    @Inject
> +    private ConfigurationDao _configDao;
> +    @Inject
> +    private HostDao _hostDao;
> +    @Inject
> +    private ClusterDao _clusterDao;
> +    @Inject
> +    private PrimaryDataStoreDao _storagePoolDao;
> +    @Inject
> +    private DataCenterDao _dataCenterDao;
> +    @Inject
> +    private UserVmDao _userVmDao;
> +    @Inject
> +    private VMInstanceDao _vmInstance;
> +
> +    int usageReportInterval = -1;
> +
> +    public static UsageReporter getInstance() {
> +        return s_instance;
> +    }
> +
> +    public static UsageReporter getInstance(Map<String, String> configs) {
> +        s_instance.init(configs);
> +        return s_instance;
> +    }
> +
> +    public UsageReporter() {
> +        s_instance = this;
> +    }
> +
> +    @Override
> +    public boolean start() {
> +        init(_configDao.getConfiguration());
> +        return true;
> +    }
> +
> +    private void init(Map<String, String> configs) {
> +        _executor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("UsageReporter"));
> +
> +        usageReportInterval = NumbersUtil.parseInt(configs.get("usage.report.interval"), 7);
> +
> +        if (usageReportInterval > 0) {
> +            _executor.scheduleWithFixedDelay(new UsageCollector(), 7, usageReportInterval, TimeUnit.DAYS);
> +        }
> +
> +        uniqueID = getUniqueId();
> +    }
> +
> +    private void sendReport(String reportUri, String uniqueID, Map<String, Map> reportMap) {
> +        Gson gson = new Gson();
> +        String report = gson.toJson(reportMap);
> +
> +        int http_timeout = 15000;
> +
> +        try {
> +            s_logger.info("Usage Report will be sent to: " + reportUri);
> +            s_logger.debug("REPORT: " + report);
> +
> +            URL url = new URL(reportUri + "/" + uniqueID);
> +
> +            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
> +            conn.setConnectTimeout(http_timeout);
> +            conn.setReadTimeout(http_timeout);
> +            conn.setRequestMethod("POST");
> +            conn.setDoOutput(true);
> +            conn.setRequestProperty("Content-Type", "application/json");
> +            conn.setRequestProperty("Accept", "application/json");
> +
> +            OutputStreamWriter osw = new OutputStreamWriter(conn.getOutputStream());
> +            osw.write(report);
> +            osw.flush();
> +            osw.close();
> +
> +            int resp_code = conn.getResponseCode();
> +
> +            if (resp_code == HttpURLConnection.HTTP_OK) {
> +                s_logger.info("Usage Report successfully sent to: " + reportUri);
> +            } else {
> +                s_logger.warn("Failed to send Usage Report: " + conn.getResponseMessage());
> +            }
> +
> +        } catch (SocketTimeoutException e) {
> +            s_logger.warn("Sending Usage Report to " + reportUri + " timed out: " + e.getMessage());
> +        } catch (MalformedURLException e) {
> +            s_logger.warn(reportUri + " is an invalid URL for sending Usage Report to: " + e.getMessage());
> +        } catch (ProtocolException e) {
> +            s_logger.warn("Sending Usage Report failed due to an invalid protocol: " + e.getMessage());
> +        } catch (IOException e) {
> +            s_logger.warn("Failed to write Usage Report due to an IOException: " + e.getMessage());
> +        }
> +    }
> +
> +    @DB
> +    private String getUniqueId() {
> +        String unique = null;
> +        Connection conn = null;
> +
> +        try {
> +            conn = TransactionLegacy.getStandaloneConnection();
> +
> +            PreparedStatement pstmt = conn.prepareStatement("SELECT version,updated FROM version ORDER BY id ASC LIMIT 1");
> +            ResultSet rs = pstmt.executeQuery();
> +            if (rs.next()) {
> +                unique = DigestUtils.sha256Hex(rs.getString(1) + rs.getString(2));
> +            } else {
> +                s_logger.debug("No rows found in the version table. Unable to obtain unique ID for this environment");
> +            }
> +
> +            rs.close();
> +        } catch (SQLException e) {
> +            s_logger.debug("Unable to get the unique ID of this environment: " + e.getMessage());
> +        } finally {
> +            try {
> +                if (conn != null) {
> +                    conn.close();
> +                }
> +            } catch (SQLException e) {
> +            }
> +        }
> +
> +        s_logger.debug("Usage Report Unique ID is: " + unique);
> +
> +        return unique;
> +    }
> +
> +    private Map<String, AtomicLongMap> getHostReport() {
> +        Map<String, AtomicLongMap> hostMap = new HashMap<String, AtomicLongMap>();
> +        AtomicLongMap<Object> host_types = AtomicLongMap.create();
> +        AtomicLongMap<Object> host_hypervisor_type = AtomicLongMap.create();
> +        AtomicLongMap<Object> host_version = AtomicLongMap.create();
> +
> +        SearchCriteria<HostVO> host_sc = _hostDao.createSearchCriteria();
> +        List<HostVO> hosts = _hostDao.search(host_sc, null);
> +        for (HostVO host : hosts) {
> +            host_types.getAndIncrement(host.getType());
> +            if (host.getHypervisorType() != null) {
> +                host_hypervisor_type.getAndIncrement(host.getHypervisorType());
> +            }
> +
> +            host_version.getAndIncrement(host.getVersion());
> +        }
> +
> +        hostMap.put("version", host_version);
> +        hostMap.put("hypervisor_type", host_hypervisor_type);
> +        hostMap.put("type", host_types);
> +
> +        return hostMap;
> +    }
> +
> +    private Map<String, AtomicLongMap> getClusterReport() {
> +        Map<String, AtomicLongMap> clusterMap = new HashMap<String, AtomicLongMap>();
> +        AtomicLongMap<Object> cluster_hypervisor_type = AtomicLongMap.create();
> +        AtomicLongMap<Object> cluster_types = AtomicLongMap.create();
> +
> +        SearchCriteria<ClusterVO> cluster_sc = _clusterDao.createSearchCriteria();
> +        List<ClusterVO> clusters = _clusterDao.search(cluster_sc, null);
> +        for (ClusterVO cluster : clusters) {
> +            if (cluster.getClusterType() != null) {
> +                cluster_types.getAndIncrement(cluster.getClusterType());
> +            }
> +
> +            if (cluster.getHypervisorType() != null) {
> +                cluster_hypervisor_type.getAndIncrement(cluster.getHypervisorType());
> +            }
> +        }
> +
> +        clusterMap.put("hypervisor_type", cluster_hypervisor_type);
> +        clusterMap.put("type", cluster_types);
> +
> +        return clusterMap;
> +    }
> +
> +    private Map<String, AtomicLongMap> getStoragePoolReport() {
> +        Map<String, AtomicLongMap> storagePoolMap = new HashMap<String, AtomicLongMap>();
> +        AtomicLongMap<Object> storage_pool_types = AtomicLongMap.create();
> +        AtomicLongMap<Object> storage_pool_provider = AtomicLongMap.create();
> +        AtomicLongMap<Object> storage_pool_scope = AtomicLongMap.create();
> +
> +        List<StoragePoolVO> storagePools = _storagePoolDao.listAll();
> +        for (StoragePoolVO pool : storagePools) {
> +            if (pool.getPoolType() != null) {
> +                storage_pool_types.getAndIncrement(pool.getPoolType());
> +            }
> +
> +            if (pool.getStorageProviderName() != null) {
> +                storage_pool_provider.getAndIncrement(pool.getStorageProviderName());
> +            }
> +
> +            if (pool.getScope() != null) {
> +                storage_pool_scope.getAndIncrement(pool.getScope());
> +            }
> +        }
> +
> +        storagePoolMap.put("type", storage_pool_types);
> +        storagePoolMap.put("provider", storage_pool_provider);
> +        storagePoolMap.put("scope", storage_pool_scope);
> +
> +        return storagePoolMap;
> +    }
> +
> +    private Map<String, AtomicLongMap> getDataCenterReport() {
> +        Map<String, AtomicLongMap> datacenterMap = new HashMap<String, AtomicLongMap>();
> +        AtomicLongMap<Object> network_type = AtomicLongMap.create();
> +        AtomicLongMap<Object> dns_provider = AtomicLongMap.create();
> +        AtomicLongMap<Object> dhcp_provider = AtomicLongMap.create();
> +        AtomicLongMap<Object> lb_provider = AtomicLongMap.create();
> +        AtomicLongMap<Object> firewall_provider = AtomicLongMap.create();
> +        AtomicLongMap<Object> gateway_provider = AtomicLongMap.create();
> +        AtomicLongMap<Object> userdata_provider = AtomicLongMap.create();
> +        AtomicLongMap<Object> vpn_provider = AtomicLongMap.create();
> +
> +        List<DataCenterVO> datacenters = _dataCenterDao.listAllZones();
> +        for (DataCenterVO datacenter : datacenters) {
> +            if (datacenter.getNetworkType() != null) {
> +                network_type.getAndIncrement(datacenter.getNetworkType());
> +            }
> +
> +            if (datacenter.getDnsProvider() != null) {
> +                dns_provider.getAndIncrement(datacenter.getDnsProvider());
> +            }
> +
> +            if (datacenter.getDhcpProvider() != null) {
> +                dhcp_provider.getAndIncrement(datacenter.getDhcpProvider());
> +            }
> +
> +            if (datacenter.getLoadBalancerProvider() != null) {
> +                lb_provider.getAndIncrement(datacenter.getLoadBalancerProvider());
> +            }
> +
> +            if (datacenter.getFirewallProvider() != null) {
> +                firewall_provider.getAndIncrement(datacenter.getFirewallProvider());
> +            }
> +
> +            if (datacenter.getGatewayProvider() != null) {
> +                gateway_provider.getAndIncrement(datacenter.getGatewayProvider());
> +            }
> +
> +            if (datacenter.getUserDataProvider() != null) {
> +                userdata_provider.getAndIncrement(datacenter.getUserDataProvider());
> +            }
> +
> +            if (datacenter.getVpnProvider() != null) {
> +                vpn_provider.getAndIncrement(datacenter.getVpnProvider());
> +            }
> +        }
> +
> +        datacenterMap.put("network_type", network_type);
> +        datacenterMap.put("dns_provider", dns_provider);
> +        datacenterMap.put("dhcp_provider", dhcp_provider);
> +        datacenterMap.put("lb_provider", lb_provider);
> +        datacenterMap.put("firewall_provider", firewall_provider);
> +        datacenterMap.put("gateway_provider", gateway_provider);
> +        datacenterMap.put("userdata_provider", userdata_provider);
> +        datacenterMap.put("vpn_provider", vpn_provider);
> +
> +        return datacenterMap;
> +    }
> +
> +    private Map<String, AtomicLongMap> getInstanceReport() {
> +
> +        Map<String, AtomicLongMap> instanceMap = new HashMap<String, AtomicLongMap>();
> +        AtomicLongMap<Object> hypervisor_type = AtomicLongMap.create();
> +        AtomicLongMap<Object> instance_state = AtomicLongMap.create();
> +        AtomicLongMap<Object> instance_type = AtomicLongMap.create();
> +        AtomicLongMap<Object> ha_enabled = AtomicLongMap.create();
> +        AtomicLongMap<Object> dynamically_scalable = AtomicLongMap.create();
> +
> +        SearchCriteria<HostVO> host_sc = _hostDao.createSearchCriteria();
> +        List<HostVO> hosts = _hostDao.search(host_sc, null);
> +        for (HostVO host : hosts) {
> +            List<UserVmVO> vms = _userVmDao.listUpByHostId(host.getId());
> +            for (UserVmVO vm : vms) {
> +                VMInstanceVO vmVO = _vmInstance.findById(vm.getId());
> +
> +                if (vmVO.getHypervisorType() != null) {
> +                    hypervisor_type.getAndIncrement(vmVO.getHypervisorType());
> +                }
> +
> +                if (vmVO.getState() != null) {
> +                    instance_state.getAndIncrement(vmVO.getState());
> +                }
> +
> +                if (vmVO.getType() != null) {
> +                    instance_type.getAndIncrement(vmVO.getType());
> +                }
> +
> +                ha_enabled.getAndIncrement(vmVO.isHaEnabled());
> +                dynamically_scalable.getAndIncrement(vmVO.isDynamicallyScalable());
> +            }
> +        }
> +
> +        instanceMap.put("hypervisor_type", hypervisor_type);
> +        instanceMap.put("state", instance_state);
> +        instanceMap.put("type", instance_type);
> +        instanceMap.put("ha_enabled", ha_enabled);
> +        instanceMap.put("dynamically_scalable", dynamically_scalable);
> +
> +        return instanceMap;
> +    }
> +
> +    class UsageCollector extends ManagedContextRunnable {
> +        @Override
> +        protected void runInContext() {
> +            try {
> +                s_logger.warn("UsageReporter is running...");
> +
> +                Map<String, Map> reportMap = new HashMap<String, Map>();
> +
> +                reportMap.put("hosts", getHostReport());
> +                reportMap.put("clusters", getClusterReport());
> +                reportMap.put("primaryStorage", getStoragePoolReport());
> +                reportMap.put("zones", getDataCenterReport());
> +                reportMap.put("instances", getInstanceReport());
> +
> +                sendReport(reportHost, uniqueID, reportMap);
> +
> +            } catch (Exception e) {
> +                s_logger.warn("Failed to compile Usage Report: " + e.getMessage());
> +            }
> +        }
> +    }
> +}
> \ No newline at end of file
>
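For reference, the document that sendReport() serializes with Gson is
simply the five top-level maps assembled in runInContext(), each holding
counters per type/state. A rough sketch of the resulting structure, with
invented numbers:

    # Sketch of the report sendReport() POSTs (values are illustrative only)
    {
        "hosts": {"type": {"Routing": 4}, "hypervisor_type": {"KVM": 4},
                  "version": {"4.6.0": 4}},
        "clusters": {"type": {"CloudManaged": 2}, "hypervisor_type": {"KVM": 2}},
        "primaryStorage": {"type": {"NetworkFilesystem": 3}, "scope": {"CLUSTER": 3}},
        "zones": {"network_type": {"Advanced": 1}, "dhcp_provider": {"VirtualRouter": 1}},
        "instances": {"state": {"Running": 25, "Stopped": 3},
                      "ha_enabled": {"true": 20, "false": 8}},
    }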
>
> http://git-wip-us.apache.org/repos/asf/cloudstack/blob/026efd34/setup/db/db/schema-450to460.sql
> ----------------------------------------------------------------------
> diff --git a/setup/db/db/schema-450to460.sql b/setup/db/db/schema-450to460.sql
> index 8480c85..27a3d83 100644
> --- a/setup/db/db/schema-450to460.sql
> +++ b/setup/db/db/schema-450to460.sql
> @@ -19,4 +19,6 @@
>  -- Schema upgrade from 4.5.0 to 4.6.0
>  --
>
> -INSERT IGNORE INTO `cloud`.`configuration` VALUES ("Advanced", 'DEFAULT', 'management-server', "stats.output.uri", "", "URI to additionally send StatsCollector statistics to", "", NULL, NULL, 0);
> \ No newline at end of file
> +INSERT IGNORE INTO `cloud`.`configuration` VALUES ("Advanced", 'DEFAULT', 'management-server', "stats.output.uri", "", "URI to additionally send StatsCollector statistics to", "", NULL, NULL, 0);
> +
> +INSERT IGNORE INTO `cloud`.`configuration` VALUES ("Advanced", 'DEFAULT', 'management-server', "usage.report.interval", 7, "Interval (days) between sending anonymous Usage Reports back to the CloudStack project", "", NULL, NULL, 0);
> \ No newline at end of file
>
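Since the opt-out is an ordinary Global Setting, it can also be flipped
from a script rather than the UI. A minimal sketch using the
updateConfiguration API call, assuming the unauthenticated integration
API port (integration.api.port, commonly 8096) is enabled on the
management server; in production a signed API call or the Global
Settings UI would be used instead:

    import urllib
    import urllib2

    # Hypothetical management server address; adjust to your environment
    params = urllib.urlencode({
        "command": "updateConfiguration",
        "name": "usage.report.interval",
        "value": "0",  # 0 disables the Usage Report entirely
        "response": "json",
    })
    print(urllib2.urlopen("http://localhost:8096/client/api?" + params).read())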
>