Posted to commits@qpid.apache.org by ea...@apache.org on 2019/11/27 17:10:24 UTC

[qpid-dispatch] 01/04: Adding back /bin that was inadvertently removed

This is an automated email from the ASF dual-hosted git repository.

eallen pushed a commit to branch eallen-DISPATCH-1385
in repository https://gitbox.apache.org/repos/asf/qpid-dispatch.git

commit c03d8227eda88cf979575fc44bcba1a076df8872
Author: Ernest Allen <ea...@redhat.com>
AuthorDate: Wed Nov 27 12:05:34 2019 -0500

    Adding back /bin that was inadvertently removed
---
 bin/export.sh                          | 105 ++++++++++
 bin/find_ports.sh                      |  28 +++
 bin/grinder                            | 370 +++++++++++++++++++++++++++++++++
 bin/make_standalone_console_tarball.sh | 107 ++++++++++
 bin/rebuild.sh                         |  31 +++
 bin/record-coverage.sh                 |  80 +++++++
 bin/test.sh                            |  43 ++++
 7 files changed, 764 insertions(+)

diff --git a/bin/export.sh b/bin/export.sh
new file mode 100755
index 0000000..539af90
--- /dev/null
+++ b/bin/export.sh
@@ -0,0 +1,105 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# export.sh - Create a release archive.
+
+# To run this script:
+#   1. Change to the directory containing this file, e.g. cd /home/jdoe/qpid-dispatch/bin
+#   2. Run it with the output folder (full path) and the tag name:
+#        ./export.sh <output-folder-full-path> <tag-name>
+#      Example: ./export.sh /home/jdoe/ 1.5.1
+#      (/home/jdoe is the full path of the folder where the tar.gz file is placed;
+#       1.5.1 is the tag name)
+# A file named qpid-dispatch-<version>.tar.gz (with <version> taken from
+# VERSION.txt at the tag) will be created in <output-folder-full-path>.
+
+# Running ./export.sh with no arguments puts the tar.gz file in the current folder and uses the most recently created tag.
+
+set -e
+trap "cleanup" 0 1 2 3 9 11 13 15
+
+# ME=export.sh
+ME=$(basename ${0})
+
+SRC=$(dirname $(dirname $(readlink -f $0)))
+echo Source directory=${SRC}
+
+usage()
+{
+    echo
+    echo "Usage: ${ME} [DIR] [TAG]"
+    exit 1
+}
+
+cleanup()
+{
+    trap - 0 1 2 3 9 11 13 15
+    echo
+    [ ${WORKDIR} ] && [ -d ${WORKDIR} ] && rm -rf ${WORKDIR}
+}
+
+
+DIR=$PWD
+
+# Default TAG to the most recent tag reachable from HEAD (or the commit hash if there are no tags)
+TAG=$(git describe --tags --always)
+echo Using tag ${TAG} to create archive
+
+##
+## Allow overrides to be passed on the cmdline
+##
+if [ $# -gt 2 ]; then
+    usage
+elif [ $# -ge 1 ]; then
+    DIR=$1
+    if [ $# -eq 2 ]; then
+        TAG=$2
+    fi
+fi
+
+# verify the tag exists
+git rev-list -1 tags/${TAG} -- >/dev/null || usage
+
+# mktemp creates a temporary working directory, for example /tmp/tmp.k8vDddIzni
+WORKDIR=$(mktemp -d)
+echo Working Directory=${WORKDIR}
+
+
+##
+## Create the archive
+##
+(
+    cd ${SRC}
+    MTIME=$(date -d @`git log -1 --pretty=format:%ct tags/${TAG}` '+%Y-%m-%d %H:%M:%S')
+    VERSION=$(git show tags/${TAG}:VERSION.txt)
+    ARCHIVE=$DIR/qpid-dispatch-${VERSION}.tar.gz
+    PREFIX=qpid-dispatch-${VERSION}
+    [ -d ${WORKDIR} ] || mkdir -p ${WORKDIR}
+    git archive --format=tar --prefix=${PREFIX}/ tags/${TAG} \
+        | tar -x -C ${WORKDIR}
+    cd ${WORKDIR}
+    tar -c -z \
+        --owner=root --group=root --numeric-owner \
+        --mtime="${MTIME}" \
+        -f ${ARCHIVE} ${PREFIX}
+    echo Created "${ARCHIVE}"
+    echo Success!!!
+)
diff --git a/bin/find_ports.sh b/bin/find_ports.sh
new file mode 100755
index 0000000..4c1239e
--- /dev/null
+++ b/bin/find_ports.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License
+#
+
+
+# Usage: $0 [ dir ]
+# Find all the listening ports mentioned in *.log files under dir.
+# With no dir, search under the current directory.
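+#
+# For illustration only (these log lines are hypothetical; the actual router
+# log format may differ), the two patterns below would match lines such as:
+#   "Listening on 0.0.0.0 port 25672"            -> prints 25672
+#   "Configured Listener: 0.0.0.0:5672 (normal)" -> prints 5672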
+
+find "$@" -name '*.log' | xargs gawk 'match($0, /Listening on .* ([0-9]+)/, m) { print m[1] } match($0, /Configured Listener: .*:([0-9]+)/, m) { print m[1] }'
+
diff --git a/bin/grinder b/bin/grinder
new file mode 100755
index 0000000..9a4aa77
--- /dev/null
+++ b/bin/grinder
@@ -0,0 +1,370 @@
+#!/usr/bin/python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+#
+# a tool for post-processing Valgrind output from the unit tests
+# Use:
+#    1) configure the build to use valgrind and output xml
+#       $ cmake .. -DUSE_VALGRIND=Yes -DVALGRIND_XML=Yes
+#    2) build and run the unit tests
+#       $ make && make test
+#    3) run grinder from your build directory.  It will look for valgrind xml
+#       files named "valgrind-*.xml" in the current directory and all
+#       subdirectories and process them. Output is sent to stdout.
+#       $ ../bin/grinder
+#
+# Note: be sure to clean the build directory before running the unit tests
+# to remove old valgrind-*.xml files
+#
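+
+# For orientation, the parser below consumes memcheck XML records shaped
+# roughly like this (a simplified, illustrative fragment; only the tags the
+# classes below actually read are shown):
+#
+#   <error>
+#     <kind>Leak_DefinitelyLost</kind>
+#     <xwhat><leakedbytes>64</leakedbytes><leakedblocks>1</leakedblocks></xwhat>
+#     <stack>
+#       <frame><fn>malloc</fn><dir>/src</dir><file>alloc.c</file><line>42</line></frame>
+#     </stack>
+#   </error>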
+
+import logging
+import os
+import re
+import sys
+import xml.etree.ElementTree as ET
+from xml.etree.ElementTree import ParseError
+
+
+class Frame(object):
+    """
+    Represents info for a single stack frame
+    """
+    FIELDS = ["fn", "dir", "file", "line"]
+    def __init__(self, frame):
+        self._fields = dict()
+        for tag in self.FIELDS:
+            _ = frame.find(tag)
+            self._fields[tag] = _.text if _ is not None else "<none>"
+
+    def __str__(self):
+        return ("(%s) %s/%s:%s" %
+                (self._fields['fn'],
+                 self._fields['dir'],
+                 self._fields['file'],
+                 self._fields['line']))
+
+    def __hash__(self):
+        return hash(self.__str__())
+
+
+class ErrorBase(object):
+    """
+    Base class representing a single valgrind error
+    """
+    def __init__(self, kind):
+        self.kind = kind
+        self.count = 1
+
+    def __hash__(self):
+        return hash(self.kind)
+
+    def __str__(self):
+        return "kind = %s  (count=%d)" % (self.kind, self.count)
+
+    def merge(self, other):
+        self.count += other.count
+
+    def __lt__(self, other):
+        return self.count < other.count
+    def __le__(self, other):
+        return self.count <= other.count
+    def __eq__(self, other):
+        return self.count == other.count
+    def __gt__(self, other):
+        return self.count > other.count
+    def __ge__(self, other):
+        return self.count >= other.count
+
+
+class GeneralError(ErrorBase):
+    """
+    For simple single stack errors
+    """
+    def __init__(self, error_xml):
+        kind = error_xml.find("kind").text
+        super(GeneralError, self).__init__(kind)
+        w = error_xml.find("what")
+        self._what = w.text if w is not None else "<none>"
+
+        # stack
+        self._stack = list()
+        s = error_xml.find("stack")
+        for frame in s.findall("frame"):
+            self._stack.append(Frame(frame))
+
+    def __hash__(self):
+        h = super(GeneralError, self).__hash__()
+        for f in self._stack:
+            h += hash(f)
+        return h
+
+    def __str__(self):
+        s = super(GeneralError, self).__str__() + "\n"
+        if self._what:
+            s += self._what + "\n"
+        s += "Stack:"
+        for frame in self._stack:
+            s += "\n  %s" % str(frame)
+        return s
+
+
+class LeakError(ErrorBase):
+    def __init__(self, error_xml):
+        kind = error_xml.find("kind").text
+        assert(kind.startswith("Leak_"))
+        super(LeakError, self).__init__(kind)
+        self._leaked_bytes = 0
+        self._leaked_blocks = 0
+        self._stack = list()
+
+        # xwhat:
+        #    leakedbytes
+        #    leakedblocks
+        lb = error_xml.find("xwhat/leakedbytes")
+        if lb is not None:
+            self._leaked_bytes = int(lb.text)
+        lb = error_xml.find("xwhat/leakedblocks")
+        if lb is not None:
+            self._leaked_blocks = int(lb.text)
+
+        # stack
+        s = error_xml.find("stack")
+        for frame in s.findall("frame"):
+            self._stack.append(Frame(frame))
+
+    def merge(self, other):
+        super(LeakError, self).merge(other)
+        self._leaked_bytes += other._leaked_bytes
+        self._leaked_blocks += other._leaked_blocks
+
+    def __hash__(self):
+        h = super(LeakError, self).__hash__()
+        for f in self._stack:
+            h += hash(f)
+        return h
+
+    def __str__(self):
+        s = super(LeakError, self).__str__() + "\n"
+        s += "Leaked Bytes = %d Blocks = %d\n" % (self._leaked_bytes,
+                                                  self._leaked_blocks)
+        s += "Stack:"
+        for frame in self._stack:
+            s += "\n  %s" % str(frame)
+        return s
+
+
+class InvalidMemError(ErrorBase):
+    def __init__(self, error_xml):
+        kind = error_xml.find("kind").text
+        super(InvalidMemError, self).__init__(kind)
+        # expect
+        #  what
+        #  stack  (invalid access)
+        #  followed by zero or more:
+        #      aux what  (aux stack description)
+        #      aux stack  (where alloced, freed)
+        self._what = "<none>"
+        self._stack = None
+        self._auxwhat = list()
+        self._aux_stacks = list()
+        for child in error_xml:
+            if child.tag == "what":
+                self._what = child.text
+            if child.tag == "auxwhat":
+                self._auxwhat.append(child.text)
+            if child.tag == "stack":
+                stack = list()
+                for frame in child.findall("frame"):
+                    stack.append(Frame(frame))
+                if self._stack is None:
+                    self._stack = stack
+                else:
+                    self._aux_stacks.append(stack)
+
+    def __hash__(self):
+        # for now don't include what/auxwhat as it may
+        # be different for the same codepath
+        h = super(InvalidMemError, self).__hash__()
+        for f in self._stack:
+            h += hash(f)
+        for s in self._aux_stacks:
+            for f in s:
+                h += hash(f)
+        return h
+
+    def __str__(self):
+        s = super(InvalidMemError, self).__str__() + "\n"
+        s += "%s\n" % self._what
+        s += "Stack:"
+        for frame in self._stack:
+            s += "\n  %s" % str(frame)
+
+        for what, stack in zip(self._auxwhat, self._aux_stacks):
+            s += "\n%s:" % what
+            for frame in stack:
+                s += "\n  %s" % str(frame)
+        return s
+
+
+class SignalError(ErrorBase):
+    def __init__(self, error_xml):
+        super(SignalError, self).__init__("FatalSignal")
+        # expects:
+        #  signo
+        #  signame
+        #  stack
+        self._signo = "<none>"
+        sn = error_xml.find("signo")
+        if sn is not None:
+            self._signo = sn.text
+        self._signame = "<none>"
+        sn = error_xml.find("signame")
+        if sn is not None:
+            self._signame = sn.text
+
+        self._stack = list()
+        s = error_xml.find("stack")
+        for frame in s.findall("frame"):
+            self._stack.append(Frame(frame))
+
+    def __hash__(self):
+        # for now don't include what/auxwhat as it may
+        # be different for the same codepath
+        h = super(SignalError, self).__hash__()
+        h += hash(self._signo)
+        h += hash(self._signame)
+        for f in self._stack:
+            h += hash(f)
+        return h
+
+    def __str__(self):
+        s = super(SignalError, self).__str__() + "\n"
+        s += "Signal %s (%s)\n" % (self._signo, self._signame)
+        s += "Stack:"
+        for frame in self._stack:
+            s += "\n  %s" % str(frame)
+        return s
+
+
+_ERROR_CLASSES = {
+    'InvalidRead':         InvalidMemError,
+    'InvalidWrite':        InvalidMemError,
+    'Leak_DefinitelyLost': LeakError,
+    'Leak_IndirectlyLost': LeakError,
+    'Leak_PossiblyLost':   LeakError,
+    'Leak_StillReachable': LeakError,
+    'UninitCondition':     GeneralError,
+    'SyscallParam':        GeneralError,
+    'InvalidFree':         InvalidMemError,
+    'FishyValue':          InvalidMemError,
+    # TBD:
+    'InvalidJump': None,
+    'UninitValue': None,
+}
+
+
+def parse_error(error_xml):
+    """
+    Factory that returns an Error instance
+    """
+    kind = error_xml.find("kind").text
+    e_cls = _ERROR_CLASSES.get(kind)
+    if e_cls:
+        return e_cls(error_xml)
+    raise Exception("Unsupported error type %s, please update grinder"
+                    " to handle it" % kind)
+
+
+def parse_xml_file(filename, exe_name='qdrouterd'):
+    """
+    Parse out errors from a valgrind output xml file
+    """
+    logging.debug("Parsing %s", filename)
+    error_list = list()
+    try:
+        root = ET.parse(filename).getroot()
+    except ParseError as exc:
+        if "no element found" not in str(exc):
+            logging.warning("Error parsing %s: %s - skipping",
+                            filename, str(exc))
+        else:
+            logging.debug("No errors found in: %s - skipping",
+                          filename)
+        return error_list
+
+    pv = root.find('protocolversion')
+    if pv is None or pv.text != "4":
+        # unsupported xml format version
+        logging.warning("Unsupported format version for %s, skipping...",
+                      filename)
+        return error_list
+
+    pt = root.find('protocoltool')
+    if pt is None or pt.text != "memcheck":
+        logging.warning("Not a memcheck file %s, skipping...",
+                        filename)
+        return error_list
+
+    if exe_name not in root.find('args/argv/exe').text:
+        # not from the target executable, skip
+        logging.debug("file %s is not generated from %s, skipping...",
+                      filename, exe_name)
+        return error_list
+
+    for error in root.findall('error'):
+        error_list.append(parse_error(error))
+
+    # sigabort, etc classified as fatal_signal
+    for signal in root.findall("fatal_signal"):
+        error_list.append(SignalError(signal))
+    return error_list
+
+
+def main():
+    errors_map = dict()
+    file_name = re.compile(r"valgrind-[0-9]+\.xml")
+    for dp, dn, fn in os.walk("."):
+        for name in fn:
+            if file_name.match(name):
+                errors = parse_xml_file(os.path.join(dp, name))
+                for e in errors:
+                    h = hash(e)
+                    if h in errors_map:
+                        # coalesce duplicate errors
+                        errors_map[h].merge(e)
+                    else:
+                        errors_map[h] = e
+
+    # sort by number of occurrences
+    error_list = sorted([e for e in errors_map.values()], reverse=True)
+
+    if error_list:
+        for e in error_list:
+            print("\n-----")
+            print("%s" % str(e))
+        print("\n\n-----")
+        print("----- %s total issues detected" % len(error_list))
+        print("-----")
+    else:
+        print("No Valgrind errors found! Congratulations ;)")
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/bin/make_standalone_console_tarball.sh b/bin/make_standalone_console_tarball.sh
new file mode 100755
index 0000000..e7f68e8
--- /dev/null
+++ b/bin/make_standalone_console_tarball.sh
@@ -0,0 +1,107 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# make_standalone_console_tarball.sh - Create a standalone console release archive.
+set -e
+trap "cleanup" 0 1 2 3 9 11 13 15
+
+# ME=make_standalone_console_tarball.sh
+ME=$(basename ${0})
+
+SRC=$(dirname $(dirname $(readlink -f $0)))
+echo Source directory=${SRC}
+
+usage()
+{
+    echo
+    echo "Usage: ${ME} [DIR] [TAG]"
+    exit 1
+}
+
+cleanup()
+{
+    trap - 0 1 2 3 9 11 13 15
+    echo
+    [ ${WORKDIR} ] && [ -d ${WORKDIR} ] && rm -rf ${WORKDIR}
+}
+
+
+DIR=$PWD
+
+# Default TAG to the most recent tag reachable from HEAD (or the commit hash if there are no tags)
+TAG=$(git describe --tags --always)
+
+##
+## Allow overrides to be passed on the cmdline
+##
+if [ $# -gt 2 ]; then
+    usage
+elif [ $# -ge 1 ]; then
+    DIR=$1
+    if [ $# -eq 2 ]; then
+        TAG=$2
+    fi
+fi
+
+if [ "$DIR" = "." ]; then 
+    DIR=$PWD
+fi
+
+echo Using tag ${TAG} to create archive
+echo File will be output to ${DIR}
+
+# verify the tag exists
+git rev-list -1 tags/${TAG} -- >/dev/null || usage
+
+# mktemp creates a temporary working directory, for example /tmp/tmp.k8vDddIzni
+WORKDIR=$(mktemp -d)
+echo Working Directory=${WORKDIR}
+
+
+##
+## Create the archive
+##
+(
+    cd ${SRC}
+    MTIME=$(date -d @`git log -1 --pretty=format:%ct tags/${TAG}` '+%Y-%m-%d %H:%M:%S')
+    VERSION=$(git show tags/${TAG}:VERSION.txt)
+    ARCHIVE=$DIR/qpid-dispatch-console-${VERSION}.tar.gz
+    PREFIX=qpid-dispatch-${VERSION}
+    [ -d ${WORKDIR} ] || mkdir -p ${WORKDIR}
+    git archive --format=tar --prefix=${PREFIX}/ tags/${TAG} \
+        | tar -x -C ${WORKDIR}
+    cd ${WORKDIR}
+    BUILD_DIR=${WORKDIR}/build
+    INSTALL_DIR=${WORKDIR}/install
+    mkdir $BUILD_DIR
+    pushd $BUILD_DIR
+    cmake -DCMAKE_INSTALL_PREFIX=$INSTALL_DIR -DCMAKE_BUILD_TYPE=Release ../$PREFIX
+    make install
+    pushd $INSTALL_DIR/share/qpid-dispatch/
+    tar -c -h -z \
+        --owner=root --group=root --numeric-owner \
+        --mtime="${MTIME}" \
+        -f ${ARCHIVE} console
+    popd
+    popd
+    echo Created "${ARCHIVE}"
+    echo Success!!!
+)
diff --git a/bin/rebuild.sh b/bin/rebuild.sh
new file mode 100755
index 0000000..465c103
--- /dev/null
+++ b/bin/rebuild.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
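+# Assumes BUILD_DIR, INSTALL_DIR and SOURCE_DIR are already set in the
+# environment, for example by sourcing config.sh from the root of the
+# dispatch source tree (as bin/test.sh does).
+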
+set -ev
+
+rm -rf $BUILD_DIR
+rm -rf $INSTALL_DIR
+
+mkdir $BUILD_DIR
+cd $BUILD_DIR
+
+cmake -DCMAKE_INSTALL_PREFIX=$INSTALL_DIR -DCMAKE_BUILD_TYPE=Debug "$@" $SOURCE_DIR
+make -j4
+make install
diff --git a/bin/record-coverage.sh b/bin/record-coverage.sh
new file mode 100755
index 0000000..33fe278
--- /dev/null
+++ b/bin/record-coverage.sh
@@ -0,0 +1,80 @@
+#! /usr/bin/env bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License
+#
+
+
+# This script collates coverage data already present from running instrumented code.
+#
+# It requires the lcov tool to be installed (this provides the lcov and genhtml commands)
+#
+# It will produce a coverage analysis for gcc or clang compiled builds and currently for
+# C and C++ parts of the build tree.
+#
+# It takes two command line arguments:
+# - The first is the dispatch router source tree: this is mandatory.
+# - The second is the build tree: this is optional and if not specified is assumed to be the
+#   current directory.
+#
+# The output is an HTML report, which will be found in the generated html directory.
+# - There will also be a number of intermediate files left in the current directory.
+#
+# The typical way to use it is to build with the "Coverage" build type to get
+# instrumented code, run the tests, and then extract the coverage information
+# from the test run.
+# Something like:
+#   cmake -DCMAKE_BUILD_TYPE=Coverage ..
+#   make
+#   make test
+#   make coverage
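+#
+# Example manual invocation, run from the build directory (paths illustrative):
+#   ../bin/record-coverage.sh .. .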
+
+# set -x
+
+# get full path
+function getpath {
+  pushd -n $1 > /dev/null
+  echo $(dirs -0 -l)
+  popd -n > /dev/null
+}
+
+SRC=${1?}
+BLD=${2:-.}
+
+BLDPATH=$(getpath $BLD)
+SRCPATH=$(getpath $SRC)
+
+# Get base profile
+# - this initialises 0 counts for every profiled file
+#   without this step any file with no counts at all wouldn't
+#   show up on the final output.
+lcov -c -i -d $BLDPATH -o dispatch-base.info
+
+# Get actual coverage data
+lcov -c -d $BLDPATH -o dispatch-ctest.info
+
+# Total them up
+lcov --add dispatch-base.info --add dispatch-ctest.info > dispatch-total-raw.info
+
+# Snip out stuff in /usr (we don't care about coverage in system code)
+lcov --remove dispatch-total-raw.info "/usr/include*" "/usr/share*" "${SRCPATH}/tests/*" > dispatch-total.info
+
+# Generate report
+rm -rf html
+genhtml -p $SRCPATH -p $BLDPATH dispatch-total.info --title "Dispatch Router Test Coverage" --demangle-cpp -o html
+
diff --git a/bin/test.sh b/bin/test.sh
new file mode 100755
index 0000000..f0fd69b
--- /dev/null
+++ b/bin/test.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+test -z "$SOURCE_DIR" -a -f config.sh && source ./config.sh
+
+if [[ -z "$SOURCE_DIR" ]]; then
+    echo "The devel environment isn't ready.  Run 'source config.sh' from"
+    echo "the base of the dispatch source tree"
+    exit 1
+fi
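+
+# config.sh is expected to export roughly the following variables (a
+# hypothetical sketch; the actual config.sh in the source tree is
+# authoritative):
+#   export SOURCE_DIR=$PWD
+#   export BUILD_DIR=$SOURCE_DIR/build
+#   export INSTALL_DIR=$SOURCE_DIR/install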
+
+set -ev
+
+rm -rf $BUILD_DIR
+rm -rf $INSTALL_DIR
+
+mkdir $BUILD_DIR
+cd $BUILD_DIR
+
+cmake -DCMAKE_INSTALL_PREFIX=$INSTALL_DIR -DCMAKE_BUILD_TYPE=Debug ${DISPATCH_CMAKE_EXTRA_ARGS} $SOURCE_DIR
+make -j4
+# Test the build.
+ctest -VV ${DISPATCH_CTEST_EXTRA_ARGS}
+# Run system tests on the install.
+make install
+python $INSTALL_DIR/lib/qpid-dispatch/tests/run_system_tests.py


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@qpid.apache.org
For additional commands, e-mail: commits-help@qpid.apache.org